diff options
author | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-08-19 12:42:31 +0000 |
---|---|---|
committer | Richard Sandiford <rsandifo@linux.vnet.ibm.com> | 2013-08-19 12:42:31 +0000 |
commit | 80f54784da0bd42fb79176bbf447a31d69287fe3 (patch) | |
tree | b20cb02fd15a201dc7f0d31482e031916ad2ab86 /test/CodeGen/SystemZ/int-conv-10.ll | |
parent | 2063637fa7c9ebc880cf858674eb45727d4ea295 (diff) |
[SystemZ] Add support for sibling calls
This first cut is pretty conservative. The final argument register (R6)
is call-saved, so we would need to make sure that the R6 argument to a
sibling call is the same as the R6 argument to the calling function,
which seems worth keeping as a separate patch.
Saying that integer truncations are free means that we no longer
use the extending instructions LGF and LLGF for spills in int-conv-09.ll
and int-conv-10.ll. Instead we treat the registers as 64 bits wide and
truncate them to 32-bits where necessary. I think it's unlikely we'd
use LGF and LLGF for spills in other situations for the same reason,
so I'm removing the tests rather than replacing them. The associated
code is generic and applies to many more instructions than just
LGF and LLGF, so there is no corresponding code removal.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188669 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/SystemZ/int-conv-10.ll')
-rw-r--r-- | test/CodeGen/SystemZ/int-conv-10.ll | 77 |
1 file changed, 0 insertions, 77 deletions
diff --git a/test/CodeGen/SystemZ/int-conv-10.ll b/test/CodeGen/SystemZ/int-conv-10.ll
index f2f71d90dce..781c74c7fa2 100644
--- a/test/CodeGen/SystemZ/int-conv-10.ll
+++ b/test/CodeGen/SystemZ/int-conv-10.ll
@@ -111,80 +111,3 @@ define i64 @f10(i64 %src, i64 %index) {
   %ext = zext i32 %word to i64
   ret i64 %ext
 }
-
-; Test a case where we spill the source of at least one LLGFR. We want
-; to use LLGF if possible.
-define void @f11(i64 *%ptr1, i32 *%ptr2) {
-; CHECK-LABEL: f11:
-; CHECK: llgf {{%r[0-9]+}}, 16{{[04]}}(%r15)
-; CHECK: br %r14
-  %val0 = load volatile i32 *%ptr2
-  %val1 = load volatile i32 *%ptr2
-  %val2 = load volatile i32 *%ptr2
-  %val3 = load volatile i32 *%ptr2
-  %val4 = load volatile i32 *%ptr2
-  %val5 = load volatile i32 *%ptr2
-  %val6 = load volatile i32 *%ptr2
-  %val7 = load volatile i32 *%ptr2
-  %val8 = load volatile i32 *%ptr2
-  %val9 = load volatile i32 *%ptr2
-  %val10 = load volatile i32 *%ptr2
-  %val11 = load volatile i32 *%ptr2
-  %val12 = load volatile i32 *%ptr2
-  %val13 = load volatile i32 *%ptr2
-  %val14 = load volatile i32 *%ptr2
-  %val15 = load volatile i32 *%ptr2
-
-  %ext0 = zext i32 %val0 to i64
-  %ext1 = zext i32 %val1 to i64
-  %ext2 = zext i32 %val2 to i64
-  %ext3 = zext i32 %val3 to i64
-  %ext4 = zext i32 %val4 to i64
-  %ext5 = zext i32 %val5 to i64
-  %ext6 = zext i32 %val6 to i64
-  %ext7 = zext i32 %val7 to i64
-  %ext8 = zext i32 %val8 to i64
-  %ext9 = zext i32 %val9 to i64
-  %ext10 = zext i32 %val10 to i64
-  %ext11 = zext i32 %val11 to i64
-  %ext12 = zext i32 %val12 to i64
-  %ext13 = zext i32 %val13 to i64
-  %ext14 = zext i32 %val14 to i64
-  %ext15 = zext i32 %val15 to i64
-
-  store volatile i32 %val0, i32 *%ptr2
-  store volatile i32 %val1, i32 *%ptr2
-  store volatile i32 %val2, i32 *%ptr2
-  store volatile i32 %val3, i32 *%ptr2
-  store volatile i32 %val4, i32 *%ptr2
-  store volatile i32 %val5, i32 *%ptr2
-  store volatile i32 %val6, i32 *%ptr2
-  store volatile i32 %val7, i32 *%ptr2
-  store volatile i32 %val8, i32 *%ptr2
-  store volatile i32 %val9, i32 *%ptr2
-  store volatile i32 %val10, i32 *%ptr2
-  store volatile i32 %val11, i32 *%ptr2
-  store volatile i32 %val12, i32 *%ptr2
-  store volatile i32 %val13, i32 *%ptr2
-  store volatile i32 %val14, i32 *%ptr2
-  store volatile i32 %val15, i32 *%ptr2
-
-  store volatile i64 %ext0, i64 *%ptr1
-  store volatile i64 %ext1, i64 *%ptr1
-  store volatile i64 %ext2, i64 *%ptr1
-  store volatile i64 %ext3, i64 *%ptr1
-  store volatile i64 %ext4, i64 *%ptr1
-  store volatile i64 %ext5, i64 *%ptr1
-  store volatile i64 %ext6, i64 *%ptr1
-  store volatile i64 %ext7, i64 *%ptr1
-  store volatile i64 %ext8, i64 *%ptr1
-  store volatile i64 %ext9, i64 *%ptr1
-  store volatile i64 %ext10, i64 *%ptr1
-  store volatile i64 %ext11, i64 *%ptr1
-  store volatile i64 %ext12, i64 *%ptr1
-  store volatile i64 %ext13, i64 *%ptr1
-  store volatile i64 %ext14, i64 *%ptr1
-  store volatile i64 %ext15, i64 *%ptr1
-
-  ret void
-}