author    Matt Morehouse <mascasa@google.com>  2017-12-01 22:20:26 +0000
committer Matt Morehouse <mascasa@google.com>  2017-12-01 22:20:26 +0000
commit    79f1f0032c4d34fa0108b1ad0653b66f1970a322 (patch)
tree      eec040ed374edcbfa049f4542dbd51288b2defe3 /test
parent    1fe1bd128c218a113d2f16f3260122cadd949b91 (diff)
Revert "[X86] Improvement in CodeGen instruction selection for LEAs."
This reverts r319543, due to ASan bot breakage.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319591 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/X86/GlobalISel/callingconv.ll            |  2
-rw-r--r--  test/CodeGen/X86/GlobalISel/gep.ll                     | 34
-rw-r--r--  test/CodeGen/X86/GlobalISel/memop-scalar.ll            |  2
-rw-r--r--  test/CodeGen/X86/lea-opt-cse1.ll                       | 12
-rw-r--r--  test/CodeGen/X86/lea-opt-cse2.ll                       | 40
-rw-r--r--  test/CodeGen/X86/lea-opt-cse3.ll                       | 34
-rw-r--r--  test/CodeGen/X86/lea-opt-cse4.ll                       | 68
-rw-r--r--  test/CodeGen/X86/mul-constant-i16.ll                   | 12
-rw-r--r--  test/CodeGen/X86/mul-constant-i32.ll                   | 15
-rw-r--r--  test/CodeGen/X86/mul-constant-i64.ll                   |  9
-rw-r--r--  test/CodeGen/X86/mul-constant-result.ll                | 14
-rw-r--r--  test/CodeGen/X86/umul-with-overflow.ll                 | 16
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll  |  6
13 files changed, 151 insertions, 113 deletions
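For context on the hunks below: the reverted patch changed which of two equivalent instruction forms the X86 backend emits for an integer addition, so the FileCheck expectations flip between a plain add and an lea that folds the same addition into an address computation. A minimal sketch of the two forms, using the registers that appear in the memop-scalar.ll hunk (illustrative only, not taken from the patch itself):

    # add form: two-operand, the destination must already hold one addend, updates EFLAGS
    addq %rdi, %rax            # rax = rax + rdi
    # lea form: the same sum expressed as an effective-address computation, EFLAGS untouched
    leaq (%rdi,%rax), %rax     # rax = rdi + rax

Because lea has a separate destination and does not clobber flags, the reverted change affected which registers stay live and how common subexpressions are reused, which is why many tests below also shuffle register assignments and spills.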
diff --git a/test/CodeGen/X86/GlobalISel/callingconv.ll b/test/CodeGen/X86/GlobalISel/callingconv.ll
index a55ff862e2b..4100a7217ac 100644
--- a/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -388,7 +388,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-NEXT: movl 4(%ecx), %ecx
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: movl $4, %eax
-; X32-NEXT: addl %esp, %eax
+; X32-NEXT: leal (%esp,%eax), %eax
; X32-NEXT: movl %edx, 4(%esp)
; X32-NEXT: movl %ecx, 4(%eax)
; X32-NEXT: calll variadic_callee
diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll
index 95ad8d4eb30..ee66accc77d 100644
--- a/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/test/CodeGen/X86/GlobalISel/gep.ll
@@ -5,10 +5,10 @@
define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
; X64_GISEL-LABEL: test_gep_i8:
; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: movq $4, %rcx
-; X64_GISEL-NEXT: movsbq %sil, %rax
-; X64_GISEL-NEXT: imulq %rcx, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: movsbq %sil, %rcx
+; X64_GISEL-NEXT: imulq %rax, %rcx
+; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i8:
@@ -25,7 +25,7 @@ define i32* @test_gep_i8_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i8_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $80, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i8_const:
@@ -39,10 +39,10 @@ define i32* @test_gep_i8_const(i32 *%arr) {
define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
; X64_GISEL-LABEL: test_gep_i16:
; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: movq $4, %rcx
-; X64_GISEL-NEXT: movswq %si, %rax
-; X64_GISEL-NEXT: imulq %rcx, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: movswq %si, %rcx
+; X64_GISEL-NEXT: imulq %rax, %rcx
+; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i16:
@@ -59,7 +59,7 @@ define i32* @test_gep_i16_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i16_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $80, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i16_const:
@@ -73,10 +73,10 @@ define i32* @test_gep_i16_const(i32 *%arr) {
define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
; X64_GISEL-LABEL: test_gep_i32:
; X64_GISEL: # BB#0:
-; X64_GISEL-NEXT: movq $4, %rcx
-; X64_GISEL-NEXT: movslq %esi, %rax
-; X64_GISEL-NEXT: imulq %rcx, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: movslq %esi, %rcx
+; X64_GISEL-NEXT: imulq %rax, %rcx
+; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i32:
@@ -92,7 +92,7 @@ define i32* @test_gep_i32_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i32_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $20, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i32_const:
@@ -108,7 +108,7 @@ define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: imulq %rsi, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i64:
@@ -123,7 +123,7 @@ define i32* @test_gep_i64_const(i32 *%arr) {
; X64_GISEL-LABEL: test_gep_i64_const:
; X64_GISEL: # BB#0:
; X64_GISEL-NEXT: movq $20, %rax
-; X64_GISEL-NEXT: addq %rdi, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
; X64_GISEL-NEXT: retq
;
; X64-LABEL: test_gep_i64_const:
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index c73a43177e3..2097a3b0bfc 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -181,7 +181,7 @@ define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding_largeGepIndex:
; ALL: # BB#0:
; ALL-NEXT: movabsq $228719476720, %rax # imm = 0x3540BE3FF0
-; ALL-NEXT: addq %rdi, %rax
+; ALL-NEXT: leaq (%rdi,%rax), %rax
; ALL-NEXT: movl %esi, (%rax)
; ALL-NEXT: movl (%rax), %eax
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/lea-opt-cse1.ll b/test/CodeGen/X86/lea-opt-cse1.ll
index 512740ce075..05b47690e81 100644
--- a/test/CodeGen/X86/lea-opt-cse1.ll
+++ b/test/CodeGen/X86/lea-opt-cse1.ll
@@ -9,21 +9,27 @@ define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr {
; X64: # BB#0: # %entry
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl 16(%rdi), %ecx
+; X64-NEXT: leal (%rax,%rcx), %edx
; X64-NEXT: leal 1(%rax,%rcx), %eax
; X64-NEXT: movl %eax, 12(%rdi)
-; X64-NEXT: addq %ecx, %eax
+; X64-NEXT: leal 1(%rcx,%rdx), %eax
; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: test_func:
; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl (%eax), %ecx
; X86-NEXT: movl 16(%eax), %edx
-; X86-NEXT: leal 1(%ecx,%edx), %ecx
-; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: leal 1(%ecx,%edx), %esi
; X86-NEXT: addl %edx, %ecx
+; X86-NEXT: movl %esi, 12(%eax)
+; X86-NEXT: leal 1(%edx,%ecx), %ecx
; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
diff --git a/test/CodeGen/X86/lea-opt-cse2.ll b/test/CodeGen/X86/lea-opt-cse2.ll
index 4226e3d25c8..865dd49a6e1 100644
--- a/test/CodeGen/X86/lea-opt-cse2.ll
+++ b/test/CodeGen/X86/lea-opt-cse2.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
%struct.SA = type { i32 , i32 , i32 , i32 , i32};
@@ -10,39 +10,43 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB0_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-NEXT: movl 16(%rdi), %eax
-; X64-NEXT: movl (%rdi), %ecx
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: incl %ecx
-; X64-NEXT: movl %ecx, 12(%rdi)
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl 16(%rdi), %ecx
+; X64-NEXT: leal 1(%rax,%rcx), %edx
+; X64-NEXT: movl %edx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB0_1
; X64-NEXT: # BB#2: # %exit
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: movl %ecx, 16(%rdi)
+; X64-NEXT: addl %ecx, %eax
+; X64-NEXT: leal 1(%rcx,%rax), %eax
+; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: .cfi_offset %esi, -12
+; X86-NEXT: .cfi_offset %edi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB0_1: # %loop
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: movl 16(%eax), %edx
-; X86-NEXT: movl (%eax), %esi
-; X86-NEXT: addl %edx, %esi
-; X86-NEXT: incl %esi
-; X86-NEXT: movl %esi, 12(%eax)
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl 16(%eax), %esi
+; X86-NEXT: leal 1(%edx,%esi), %edi
+; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: decl %ecx
; X86-NEXT: jne .LBB0_1
; X86-NEXT: # BB#2: # %exit
-; X86-NEXT: addl %edx, %esi
-; X86-NEXT: movl %esi, 16(%eax)
+; X86-NEXT: addl %esi, %edx
+; X86-NEXT: leal 1(%esi,%edx), %ecx
+; X86-NEXT: movl %ecx, 16(%eax)
; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
; X86-NEXT: retl
entry:
br label %loop
diff --git a/test/CodeGen/X86/lea-opt-cse3.ll b/test/CodeGen/X86/lea-opt-cse3.ll
index 57c20cceed1..48ab3130bf0 100644
--- a/test/CodeGen/X86/lea-opt-cse3.ll
+++ b/test/CodeGen/X86/lea-opt-cse3.ll
@@ -8,7 +8,7 @@ define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
-; X64-NEXT: leal (%ecx,%esi,2), %eax
+; X64-NEXT: leal 4(%rdi,%rsi,4), %eax
; X64-NEXT: imull %ecx, %eax
; X64-NEXT: retq
;
@@ -16,9 +16,9 @@ define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X86: # BB#0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: leal 4(%ecx,%eax,2), %ecx
-; X86-NEXT: leal (%ecx,%eax,2), %eax
-; X86-NEXT: imull %ecx, %eax
+; X86-NEXT: leal 4(%ecx,%eax,2), %edx
+; X86-NEXT: leal 4(%ecx,%eax,4), %eax
+; X86-NEXT: imull %edx, %eax
; X86-NEXT: retl
entry:
%mul = shl i32 %b, 1
@@ -36,7 +36,7 @@ define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
-; X64-NEXT: leal (%ecx,%esi,4), %eax
+; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
; X64-NEXT: imull %ecx, %eax
; X64-NEXT: retq
;
@@ -44,9 +44,9 @@ define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X86: # BB#0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: leal 4(%ecx,%eax,4), %ecx
-; X86-NEXT: leal (%ecx,%eax,4), %eax
-; X86-NEXT: imull %ecx, %eax
+; X86-NEXT: leal 4(%ecx,%eax,4), %edx
+; X86-NEXT: leal 4(%ecx,%eax,8), %eax
+; X86-NEXT: imull %edx, %eax
; X86-NEXT: retl
entry:
%mul = shl i32 %b, 2
@@ -68,23 +68,29 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-NEXT: cmpl $10, %ecx
; X64-NEXT: je .LBB2_2
; X64-NEXT: # BB#1: # %mid
-; X64-NEXT: leal (%ecx,%esi,4), %eax
-; X64-NEXT: imull %ecx, %eax
+; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
+; X64-NEXT: imull %eax, %ecx
+; X64-NEXT: movl %ecx, %eax
; X64-NEXT: .LBB2_2: # %exit
; X64-NEXT: retq
;
; X86-LABEL: foo1_mult_basic_blocks:
; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: leal 4(%eax,%edx,4), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: leal 4(%esi,%edx,4), %ecx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $10, %ecx
; X86-NEXT: je .LBB2_2
; X86-NEXT: # BB#1: # %mid
-; X86-NEXT: leal (%ecx,%edx,4), %eax
-; X86-NEXT: imull %ecx, %eax
+; X86-NEXT: leal 4(%esi,%edx,8), %eax
+; X86-NEXT: imull %eax, %ecx
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: .LBB2_2: # %exit
+; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%mul = shl i32 %b, 2
diff --git a/test/CodeGen/X86/lea-opt-cse4.ll b/test/CodeGen/X86/lea-opt-cse4.ll
index 6f1fe282f92..31f31a73d44 100644
--- a/test/CodeGen/X86/lea-opt-cse4.ll
+++ b/test/CodeGen/X86/lea-opt-cse4.ll
@@ -1,31 +1,41 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
%struct.SA = type { i32 , i32 , i32 , i32 , i32};
define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # BB#0: # %entry
-; X64-NEXT: movl (%rdi), %eax
-; X64-NEXT: movl 16(%rdi), %ecx
-; X64-NEXT: leal (%rax,%rcx,4), %eax
-; X64-NEXT: addl $1, %eax
-; X64-NEXT: movl %eax, 12(%rdi)
-; X64-NEXT: addl %ecx, %eax
+; X64-NEXT: movl 16(%rdi), %eax
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: leal (%rcx,%rax), %edx
+; X64-NEXT: leal 1(%rax,%rcx), %ecx
+; X64-NEXT: movl %ecx, 12(%rdi)
+; X64-NEXT: leal 1(%rax,%rdx), %eax
; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl (%eax), %ecx
-; X86-NEXT: movl 16(%eax), %edx
-; X86-NEXT: leal (%ecx,%edx,4), %ecx
-; X86-NEXT: addl $1, %ecx
-; X86-NEXT: movl %ecx, 12(%eax)
-; X86-NEXT: addl %edx, %ecx
+; X86-NEXT: movl 16(%eax), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: leal 1(%ecx,%edx), %esi
+; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: movl %esi, 12(%eax)
+; X86-NEXT: leal 1(%ecx,%edx), %ecx
; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
@@ -52,15 +62,15 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-NEXT: movl 16(%rdi), %eax
; X64-NEXT: movl (%rdi), %ecx
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: incl %ecx
-; X64-NEXT: movl %ecx, 12(%rdi)
+; X64-NEXT: movl 16(%rdi), %eax
+; X64-NEXT: leal 1(%rcx,%rax), %edx
+; X64-NEXT: movl %edx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB1_1
; X64-NEXT: # BB#2: # %exit
; X64-NEXT: addl %eax, %ecx
+; X64-NEXT: leal 1(%rax,%rcx), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
@@ -72,23 +82,26 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
;
; X86-LABEL: foo_loop:
; X86: # BB#0: # %entry
-; X86-NEXT: pushl %esi
+; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %esi, -8
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: .cfi_offset %esi, -12
+; X86-NEXT: .cfi_offset %edi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB1_1: # %loop
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl (%eax), %esi
; X86-NEXT: movl 16(%eax), %ecx
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: addl %ecx, %edx
-; X86-NEXT: incl %edx
-; X86-NEXT: movl %edx, 12(%eax)
-; X86-NEXT: decl %esi
+; X86-NEXT: leal 1(%esi,%ecx), %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: decl %edx
; X86-NEXT: jne .LBB1_1
; X86-NEXT: # BB#2: # %exit
-; X86-NEXT: addl %ecx, %edx
+; X86-NEXT: addl %ecx, %esi
+; X86-NEXT: leal 1(%ecx,%esi), %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
@@ -97,6 +110,7 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: movl %edx, 16(%eax)
; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
; X86-NEXT: retl
entry:
br label %loop
diff --git a/test/CodeGen/X86/mul-constant-i16.ll b/test/CodeGen/X86/mul-constant-i16.ll
index 72731b3ba67..c3b822ac214 100644
--- a/test/CodeGen/X86/mul-constant-i16.ll
+++ b/test/CodeGen/X86/mul-constant-i16.ll
@@ -558,10 +558,11 @@ define i16 @test_mul_by_28(i16 %x) {
define i16 @test_mul_by_29(i16 %x) {
; X86-LABEL: test_mul_by_29:
; X86: # BB#0:
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: leal (%eax,%eax,8), %ecx
-; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: leal (%ecx,%eax,2), %eax
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal (%ecx,%ecx,8), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: addl %ecx, %eax
; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
@@ -570,7 +571,8 @@ define i16 @test_mul_by_29(i16 %x) {
; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: leal (%rax,%rdi,2), %eax
+; X64-NEXT: addl %edi, %eax
+; X64-NEXT: addl %edi, %eax
; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 29
diff --git a/test/CodeGen/X86/mul-constant-i32.ll b/test/CodeGen/X86/mul-constant-i32.ll
index c8ac6a70927..228dd5e5f37 100644
--- a/test/CodeGen/X86/mul-constant-i32.ll
+++ b/test/CodeGen/X86/mul-constant-i32.ll
@@ -1457,10 +1457,11 @@ define i32 @test_mul_by_28(i32 %x) {
define i32 @test_mul_by_29(i32 %x) {
; X86-LABEL: test_mul_by_29:
; X86: # BB#0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: leal (%eax,%eax,8), %ecx
-; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: leal (%ecx,%eax,2), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal (%ecx,%ecx,8), %eax
+; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: addl %ecx, %eax
; X86-NEXT: retl
;
; X64-HSW-LABEL: test_mul_by_29:
@@ -1468,7 +1469,8 @@ define i32 @test_mul_by_29(i32 %x) {
; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
-; X64-HSW-NEXT: leal (%rax,%rdi,2), %eax # sched: [1:0.50]
+; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
+; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_29:
@@ -1476,7 +1478,8 @@ define i32 @test_mul_by_29(i32 %x) {
; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
-; X64-JAG-NEXT: leal (%rax,%rdi,2), %eax # sched: [1:0.50]
+; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
+; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_29:
diff --git a/test/CodeGen/X86/mul-constant-i64.ll b/test/CodeGen/X86/mul-constant-i64.ll
index 8e171f973e0..98568a6fc8e 100644
--- a/test/CodeGen/X86/mul-constant-i64.ll
+++ b/test/CodeGen/X86/mul-constant-i64.ll
@@ -1523,7 +1523,8 @@ define i64 @test_mul_by_29(i64 %x) {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: leal (%ecx,%eax,2), %ecx
+; X86-NEXT: addl %eax, %ecx
+; X86-NEXT: addl %eax, %ecx
; X86-NEXT: movl $29, %eax
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: addl %ecx, %edx
@@ -1533,14 +1534,16 @@ define i64 @test_mul_by_29(i64 %x) {
; X64-HSW: # BB#0:
; X64-HSW-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-HSW-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
-; X64-HSW-NEXT: leaq (%rax,%rdi,2), %rax # sched: [1:0.50]
+; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
+; X64-HSW-NEXT: addq %rdi, %rax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_29:
; X64-JAG: # BB#0:
; X64-JAG-NEXT: leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
; X64-JAG-NEXT: leaq (%rax,%rax,2), %rax # sched: [1:0.50]
-; X64-JAG-NEXT: leaq (%rax,%rdi,2), %rax # sched: [1:0.50]
+; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
+; X64-JAG-NEXT: addq %rdi, %rax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X86-NOOPT-LABEL: test_mul_by_29:
diff --git a/test/CodeGen/X86/mul-constant-result.ll b/test/CodeGen/X86/mul-constant-result.ll
index f5846ab37ba..6e74c1d4e9e 100644
--- a/test/CodeGen/X86/mul-constant-result.ll
+++ b/test/CodeGen/X86/mul-constant-result.ll
@@ -164,7 +164,8 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X86-NEXT: .LBB0_35:
; X86-NEXT: leal (%eax,%eax,8), %ecx
; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: leal (%ecx,%eax,2), %eax
+; X86-NEXT: addl %eax, %ecx
+; X86-NEXT: addl %ecx, %eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
; X86-NEXT: .LBB0_36:
@@ -322,15 +323,14 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_31:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT: .LBB0_17:
-; X64-HSW-NEXT: addl %eax, %ecx
-; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
-; X64-HSW-NEXT: retq
+; X64-HSW-NEXT: jmp .LBB0_17
; X64-HSW-NEXT: .LBB0_32:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT: leal (%rcx,%rax,2), %eax
+; X64-HSW-NEXT: addl %eax, %ecx
+; X64-HSW-NEXT: .LBB0_17:
+; X64-HSW-NEXT: addl %eax, %ecx
+; X64-HSW-NEXT: movl %ecx, %eax
; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_33:
diff --git a/test/CodeGen/X86/umul-with-overflow.ll b/test/CodeGen/X86/umul-with-overflow.ll
index 70b7daa6c16..2e877a0b6e0 100644
--- a/test/CodeGen/X86/umul-with-overflow.ll
+++ b/test/CodeGen/X86/umul-with-overflow.ll
@@ -40,10 +40,10 @@ define i32 @test2(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: leal (%rdi,%rdi), %eax
; X64-NEXT: retq
entry:
- %tmp0 = add i32 %b, %a
- %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
- %tmp2 = extractvalue { i32, i1 } %tmp1, 0
- ret i32 %tmp2
+ %tmp0 = add i32 %b, %a
+ %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
+ %tmp2 = extractvalue { i32, i1 } %tmp1, 0
+ ret i32 %tmp2
}
define i32 @test3(i32 %a, i32 %b) nounwind readnone {
@@ -64,8 +64,8 @@ define i32 @test3(i32 %a, i32 %b) nounwind readnone {
; X64-NEXT: mull %ecx
; X64-NEXT: retq
entry:
- %tmp0 = add i32 %b, %a
- %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
- %tmp2 = extractvalue { i32, i1 } %tmp1, 0
- ret i32 %tmp2
+ %tmp0 = add i32 %b, %a
+ %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
+ %tmp2 = extractvalue { i32, i1 } %tmp1, 0
+ ret i32 %tmp2
}
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index 3f6d234c929..7c01432914f 100644
--- a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -13,14 +13,14 @@
; X64-NEXT: .p2align
; X64: %loop
; no complex address modes
-; X64-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
+; X64-NOT: (%{{[^)]+}},%{{[^)]+}},
;
; X32: @simple
; no expensive address computation in the preheader
; X32-NOT: imul
; X32: %loop
; no complex address modes
-; X32-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
+; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
br label %loop
@@ -103,7 +103,7 @@ exit:
; X32-NOT: mov{{.*}}(%esp){{$}}
; X32: %for.body{{$}}
; no complex address modes
-; X32-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
+; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
; no reloads
; X32-NOT: (%esp)
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {