author     Alex Bradbury <asb@lowrisc.org>  2017-12-11 12:49:02 +0000
committer  Alex Bradbury <asb@lowrisc.org>  2017-12-11 12:49:02 +0000
commit     181a9c9bcdd43f26e2f48e379b76dcd14c8a253c (patch)
tree       ec10df7aa8baf03f5df764102bd3a389451cc8aa /test/CodeGen/RISCV/calling-conv.ll
parent     d001eea165090d3b24301b6419ad3fdfc06e1edc (diff)
[RISCV] Add custom CC_RISCV calling convention and improved call support
The TableGen-based calling convention definitions are inflexible, while writing a function to implement the calling convention is very straightforward and allows difficult cases to be handled more easily. This patch adds support for:

* Passing large scalars according to the RV32I calling convention
* Byval arguments
* Passing values on the stack when the argument registers are exhausted

The custom CC_RISCV calling convention is also used for returns.

This patch also documents the ABI lowering that a language frontend is expected to perform. I would like to work to simplify these requirements over time, but this will require further discussion within the LLVM community.

We add a PendingArgFlags vector to CCState, as a companion to PendingLocs. The PendingLocs vector is used by a number of backends to handle arguments that are split during legalisation. However, CCValAssign doesn't keep track of the original argument alignment. The PendingArgFlags vector can therefore be used to keep track of the ISD::ArgFlagsTy for every value added to PendingLocs.

Differential Revision: https://reviews.llvm.org/D39898

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320359 91177308-0d34-0410-b5e6-96231b3b80d8
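As a concrete illustration of the PendingLocs/PendingArgFlags interaction described above, here is a minimal C++ sketch of how a hand-written calling convention function can queue the pieces of a split argument. It follows the general shape of CC_RISCV but is simplified: CC_Sketch, the elided assignment step, and the 4-byte stack slot are illustrative assumptions, not the exact code from this patch.

    #include "llvm/CodeGen/CallingConvLower.h"

    using namespace llvm;

    // Sketch only: queue each piece of an argument that was split during
    // legalisation, keeping the original ISD::ArgFlagsTy alongside it.
    // CCValAssign alone cannot express the original argument alignment, but
    // PendingArgFlags[0].getOrigAlign() preserves it until the final piece
    // arrives and the whole argument can be assigned at once.
    static bool CC_Sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
      SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
      SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
          State.getPendingArgFlags();

      if (ArgFlags.isSplit() || !PendingLocs.empty()) {
        // Queue this piece; the two vectors stay in lockstep.
        PendingLocs.push_back(
            CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
        PendingArgFlags.push_back(ArgFlags);
        if (!ArgFlags.isSplitEnd())
          return false; // Wait for the remaining pieces of the argument.
        // Final piece: PendingArgFlags[0] still carries the alignment of
        // the original (pre-split) value, so an aligned register pair or
        // an aligned stack slot can be chosen here. The assignment of each
        // entry in PendingLocs is elided in this sketch.
        PendingLocs.clear();
        PendingArgFlags.clear();
        return false;
      }

      // Ordinary values: use the next argument GPR, else fall back to the
      // stack. (Register names assume compilation inside the RISCV backend.)
      static const MCPhysReg ArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
                                          RISCV::X13, RISCV::X14, RISCV::X15,
                                          RISCV::X16, RISCV::X17};
      if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      } else {
        unsigned Offset = State.AllocateStack(4, 4);
        State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
      }
      return false; // By CCAssignFn convention, false means "handled".
    }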
Diffstat (limited to 'test/CodeGen/RISCV/calling-conv.ll')
-rw-r--r--  test/CodeGen/RISCV/calling-conv.ll  719
1 file changed, 719 insertions(+), 0 deletions(-)
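Before the diff itself, it may help to see the frontend-side ABI lowering that the commit message refers to and that the tests below exercise. The following is a hedged C++-level sketch assuming RV32 (xlen = 32); the *_lowered names are hypothetical and only illustrate the shape of the lowered signatures.

    #include <cstdint>

    // A struct larger than 2*xlen (16 bytes here) is not passed in registers.
    struct Large { int32_t a, b, c, d; };

    // Source level:  int32_t f(Large v);
    // Lowered form:  the caller makes a copy and passes its address (byval).
    int32_t f_lowered(const Large *v) { return v->a + v->d; }

    // Source level:  Large g();
    // Lowered form:  the caller allocates the result and passes a hidden
    //                sret pointer as the first argument.
    void g_lowered(Large *result) { *result = Large{1, 2, 3, 4}; }

    // Scalars wider than 2*xlen (i128, fp128) are likewise passed
    // indirectly: the caller spills the value and a single GPR carries its
    // address, as callee_large_scalars below demonstrates at the IR and
    // assembly level.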
diff --git a/test/CodeGen/RISCV/calling-conv.ll b/test/CodeGen/RISCV/calling-conv.ll
new file mode 100644
index 00000000000..3a55752d2cb
--- /dev/null
+++ b/test/CodeGen/RISCV/calling-conv.ll
@@ -0,0 +1,719 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+
+; In addition to the calling convention details, we check that ra and fp are
+; consistently stored to fp-4 and fp-8.
+
+; Check that on RV32, i64 and double are passed in a pair of registers. Unlike
+; the convention for varargs, this need not be an aligned pair.
+
+define i32 @callee_scalars(i32 %a, i64 %b, i32 %c, i32 %d, double %e) nounwind {
+; RV32I-LABEL: callee_scalars:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw ra, 28(sp)
+; RV32I-NEXT: sw s0, 24(sp)
+; RV32I-NEXT: sw s1, 20(sp)
+; RV32I-NEXT: sw s2, 16(sp)
+; RV32I-NEXT: sw s3, 12(sp)
+; RV32I-NEXT: sw s4, 8(sp)
+; RV32I-NEXT: addi s0, sp, 32
+; RV32I-NEXT: addi s1, a4, 0
+; RV32I-NEXT: addi s2, a3, 0
+; RV32I-NEXT: addi s3, a1, 0
+; RV32I-NEXT: addi s4, a0, 0
+; RV32I-NEXT: lui a0, %hi(__fixdfsi)
+; RV32I-NEXT: addi a2, a0, %lo(__fixdfsi)
+; RV32I-NEXT: addi a0, a5, 0
+; RV32I-NEXT: addi a1, a6, 0
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: add a1, s4, s3
+; RV32I-NEXT: add a1, a1, s2
+; RV32I-NEXT: add a1, a1, s1
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: lw s4, 8(sp)
+; RV32I-NEXT: lw s3, 12(sp)
+; RV32I-NEXT: lw s2, 16(sp)
+; RV32I-NEXT: lw s1, 20(sp)
+; RV32I-NEXT: lw s0, 24(sp)
+; RV32I-NEXT: lw ra, 28(sp)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: jalr zero, ra, 0
+ %b_trunc = trunc i64 %b to i32
+ %e_fptosi = fptosi double %e to i32
+ %1 = add i32 %a, %b_trunc
+ %2 = add i32 %1, %c
+ %3 = add i32 %2, %d
+ %4 = add i32 %3, %e_fptosi
+ ret i32 %4
+}
+
+define i32 @caller_scalars() nounwind {
+; RV32I-LABEL: caller_scalars:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lui a0, 262464
+; RV32I-NEXT: addi a6, a0, 0
+; RV32I-NEXT: lui a0, %hi(callee_scalars)
+; RV32I-NEXT: addi a7, a0, %lo(callee_scalars)
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: addi a1, zero, 2
+; RV32I-NEXT: addi a3, zero, 3
+; RV32I-NEXT: addi a4, zero, 4
+; RV32I-NEXT: addi a2, zero, 0
+; RV32I-NEXT: addi a5, zero, 0
+; RV32I-NEXT: jalr ra, a7, 0
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @callee_scalars(i32 1, i64 2, i32 3, i32 4, double 5.000000e+00)
+ ret i32 %1
+}
+
+; Check that i128 and fp128 are passed indirectly
+
+define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
+; RV32I-LABEL: callee_large_scalars:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lw a2, 12(a1)
+; RV32I-NEXT: lw a3, 12(a0)
+; RV32I-NEXT: xor a2, a3, a2
+; RV32I-NEXT: lw a3, 4(a1)
+; RV32I-NEXT: lw a4, 4(a0)
+; RV32I-NEXT: xor a3, a4, a3
+; RV32I-NEXT: or a2, a3, a2
+; RV32I-NEXT: lw a3, 8(a1)
+; RV32I-NEXT: lw a4, 8(a0)
+; RV32I-NEXT: xor a3, a4, a3
+; RV32I-NEXT: lw a1, 0(a1)
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: or a0, a0, a3
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: xor a0, a0, zero
+; RV32I-NEXT: sltiu a0, a0, 1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %b_bitcast = bitcast fp128 %b to i128
+ %1 = icmp eq i128 %a, %b_bitcast
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @caller_large_scalars() nounwind {
+; RV32I-LABEL: caller_large_scalars:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -48
+; RV32I-NEXT: sw ra, 44(sp)
+; RV32I-NEXT: sw s0, 40(sp)
+; RV32I-NEXT: addi s0, sp, 48
+; RV32I-NEXT: sw zero, -40(s0)
+; RV32I-NEXT: sw zero, -44(s0)
+; RV32I-NEXT: sw zero, -48(s0)
+; RV32I-NEXT: sw zero, -12(s0)
+; RV32I-NEXT: sw zero, -16(s0)
+; RV32I-NEXT: sw zero, -20(s0)
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: sw a0, -24(s0)
+; RV32I-NEXT: lui a0, 524272
+; RV32I-NEXT: addi a0, a0, 0
+; RV32I-NEXT: sw a0, -36(s0)
+; RV32I-NEXT: lui a0, %hi(callee_large_scalars)
+; RV32I-NEXT: addi a2, a0, %lo(callee_large_scalars)
+; RV32I-NEXT: addi a0, s0, -24
+; RV32I-NEXT: addi a1, s0, -48
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw s0, 40(sp)
+; RV32I-NEXT: lw ra, 44(sp)
+; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @callee_large_scalars(i128 1, fp128 0xL00000000000000007FFF000000000000)
+ ret i32 %1
+}
+
+; Must keep define on a single line due to an update_llc_test_checks.py limitation
+define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i128 %h, i32 %i, fp128 %j) nounwind {
+; Check that arguments larger than 2*xlen are handled correctly when their
+; address is passed on the stack rather than in a register
+; RV32I-LABEL: callee_large_scalars_exhausted_regs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lw a0, 4(s0)
+; RV32I-NEXT: lw a1, 12(a0)
+; RV32I-NEXT: lw a2, 12(a7)
+; RV32I-NEXT: xor a1, a2, a1
+; RV32I-NEXT: lw a2, 4(a0)
+; RV32I-NEXT: lw a3, 4(a7)
+; RV32I-NEXT: xor a2, a3, a2
+; RV32I-NEXT: or a1, a2, a1
+; RV32I-NEXT: lw a2, 8(a0)
+; RV32I-NEXT: lw a3, 8(a7)
+; RV32I-NEXT: xor a2, a3, a2
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: lw a3, 0(a7)
+; RV32I-NEXT: xor a0, a3, a0
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: xor a0, a0, zero
+; RV32I-NEXT: sltiu a0, a0, 1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %j_bitcast = bitcast fp128 %j to i128
+ %1 = icmp eq i128 %h, %j_bitcast
+ %2 = zext i1 %1 to i32
+ ret i32 %2
+}
+
+define i32 @caller_large_scalars_exhausted_regs() nounwind {
+; RV32I-LABEL: caller_large_scalars_exhausted_regs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: sw ra, 60(sp)
+; RV32I-NEXT: sw s0, 56(sp)
+; RV32I-NEXT: addi s0, sp, 64
+; RV32I-NEXT: addi a0, s0, -48
+; RV32I-NEXT: sw a0, 4(sp)
+; RV32I-NEXT: addi a0, zero, 9
+; RV32I-NEXT: sw a0, 0(sp)
+; RV32I-NEXT: sw zero, -40(s0)
+; RV32I-NEXT: sw zero, -44(s0)
+; RV32I-NEXT: sw zero, -48(s0)
+; RV32I-NEXT: sw zero, -12(s0)
+; RV32I-NEXT: sw zero, -16(s0)
+; RV32I-NEXT: sw zero, -20(s0)
+; RV32I-NEXT: addi a0, zero, 8
+; RV32I-NEXT: sw a0, -24(s0)
+; RV32I-NEXT: lui a0, 524272
+; RV32I-NEXT: addi a0, a0, 0
+; RV32I-NEXT: sw a0, -36(s0)
+; RV32I-NEXT: lui a0, %hi(callee_large_scalars_exhausted_regs)
+; RV32I-NEXT: addi t0, a0, %lo(callee_large_scalars_exhausted_regs)
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: addi a1, zero, 2
+; RV32I-NEXT: addi a2, zero, 3
+; RV32I-NEXT: addi a3, zero, 4
+; RV32I-NEXT: addi a4, zero, 5
+; RV32I-NEXT: addi a5, zero, 6
+; RV32I-NEXT: addi a6, zero, 7
+; RV32I-NEXT: addi a7, s0, -24
+; RV32I-NEXT: jalr ra, t0, 0
+; RV32I-NEXT: lw s0, 56(sp)
+; RV32I-NEXT: lw ra, 60(sp)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @callee_large_scalars_exhausted_regs(
+ i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 8, i32 9,
+ fp128 0xL00000000000000007FFF000000000000)
+ ret i32 %1
+}
+
+; Ensure that libcalls generated in the middle-end obey the calling convention
+
+define i32 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
+; RV32I-LABEL: caller_mixed_scalar_libcalls:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw ra, 28(sp)
+; RV32I-NEXT: sw s0, 24(sp)
+; RV32I-NEXT: addi s0, sp, 32
+; RV32I-NEXT: addi a2, a1, 0
+; RV32I-NEXT: addi a1, a0, 0
+; RV32I-NEXT: lui a0, %hi(__floatditf)
+; RV32I-NEXT: addi a3, a0, %lo(__floatditf)
+; RV32I-NEXT: addi a0, s0, -24
+; RV32I-NEXT: jalr ra, a3, 0
+; RV32I-NEXT: lw a0, -24(s0)
+; RV32I-NEXT: lw s0, 24(sp)
+; RV32I-NEXT: lw ra, 28(sp)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = sitofp i64 %a to fp128
+ %2 = bitcast fp128 %1 to i128
+ %3 = trunc i128 %2 to i32
+ ret i32 %3
+}
+
+; Check that the stack is used once the GPRs are exhausted
+
+define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i64 %g, i32 %h) nounwind {
+; RV32I-LABEL: callee_many_scalars:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lw t0, 0(s0)
+; RV32I-NEXT: xor a4, a4, t0
+; RV32I-NEXT: xor a3, a3, a7
+; RV32I-NEXT: or a3, a3, a4
+; RV32I-NEXT: xor a3, a3, zero
+; RV32I-NEXT: lui a4, 16
+; RV32I-NEXT: addi a4, a4, -1
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: sltiu a1, a3, 1
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: add a0, a0, a5
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: lw a1, 4(s0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %a_ext = zext i8 %a to i32
+ %b_ext = zext i16 %b to i32
+ %1 = add i32 %a_ext, %b_ext
+ %2 = add i32 %1, %c
+ %3 = icmp eq i64 %d, %g
+ %4 = zext i1 %3 to i32
+ %5 = add i32 %4, %2
+ %6 = add i32 %5, %e
+ %7 = add i32 %6, %f
+ %8 = add i32 %7, %h
+ ret i32 %8
+}
+
+define i32 @caller_many_scalars() nounwind {
+; RV32I-LABEL: caller_many_scalars:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw ra, 28(sp)
+; RV32I-NEXT: sw s0, 24(sp)
+; RV32I-NEXT: addi s0, sp, 32
+; RV32I-NEXT: addi a0, zero, 8
+; RV32I-NEXT: sw a0, 4(sp)
+; RV32I-NEXT: sw zero, 0(sp)
+; RV32I-NEXT: lui a0, %hi(callee_many_scalars)
+; RV32I-NEXT: addi t0, a0, %lo(callee_many_scalars)
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: addi a1, zero, 2
+; RV32I-NEXT: addi a2, zero, 3
+; RV32I-NEXT: addi a3, zero, 4
+; RV32I-NEXT: addi a5, zero, 5
+; RV32I-NEXT: addi a6, zero, 6
+; RV32I-NEXT: addi a7, zero, 7
+; RV32I-NEXT: addi a4, zero, 0
+; RV32I-NEXT: jalr ra, t0, 0
+; RV32I-NEXT: lw s0, 24(sp)
+; RV32I-NEXT: lw ra, 28(sp)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8)
+ ret i32 %1
+}
+
+; Check passing of coerced integer arrays
+
+%struct.small = type { i32, i32* }
+
+define i32 @callee_small_coerced_struct([2 x i32] %a.coerce) nounwind {
+; RV32I-LABEL: callee_small_coerced_struct:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: sltiu a0, a0, 1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = extractvalue [2 x i32] %a.coerce, 0
+ %2 = extractvalue [2 x i32] %a.coerce, 1
+ %3 = icmp eq i32 %1, %2
+ %4 = zext i1 %3 to i32
+ ret i32 %4
+}
+
+define i32 @caller_small_coerced_struct() nounwind {
+; RV32I-LABEL: caller_small_coerced_struct:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lui a0, %hi(callee_small_coerced_struct)
+; RV32I-NEXT: addi a2, a0, %lo(callee_small_coerced_struct)
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: addi a1, zero, 2
+; RV32I-NEXT: jalr ra, a2, 0
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @callee_small_coerced_struct([2 x i32] [i32 1, i32 2])
+ ret i32 %1
+}
+
+; Check large struct arguments, which are passed byval
+
+%struct.large = type { i32, i32, i32, i32 }
+
+define i32 @callee_large_struct(%struct.large* byval align 4 %a) nounwind {
+; RV32I-LABEL: callee_large_struct:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lw a1, 12(a0)
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 0
+ %2 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 3
+ %3 = load i32, i32* %1
+ %4 = load i32, i32* %2
+ %5 = add i32 %3, %4
+ ret i32 %5
+}
+
+define i32 @caller_large_struct() nounwind {
+; RV32I-LABEL: caller_large_struct:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -48
+; RV32I-NEXT: sw ra, 44(sp)
+; RV32I-NEXT: sw s0, 40(sp)
+; RV32I-NEXT: addi s0, sp, 48
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: sw a0, -24(s0)
+; RV32I-NEXT: sw a0, -40(s0)
+; RV32I-NEXT: addi a0, zero, 2
+; RV32I-NEXT: sw a0, -20(s0)
+; RV32I-NEXT: sw a0, -36(s0)
+; RV32I-NEXT: addi a0, zero, 3
+; RV32I-NEXT: sw a0, -16(s0)
+; RV32I-NEXT: sw a0, -32(s0)
+; RV32I-NEXT: addi a0, zero, 4
+; RV32I-NEXT: sw a0, -12(s0)
+; RV32I-NEXT: sw a0, -28(s0)
+; RV32I-NEXT: lui a0, %hi(callee_large_struct)
+; RV32I-NEXT: addi a1, a0, %lo(callee_large_struct)
+; RV32I-NEXT: addi a0, s0, -40
+; RV32I-NEXT: jalr ra, a1, 0
+; RV32I-NEXT: lw s0, 40(sp)
+; RV32I-NEXT: lw ra, 44(sp)
+; RV32I-NEXT: addi sp, sp, 48
+; RV32I-NEXT: jalr zero, ra, 0
+ %ls = alloca %struct.large, align 4
+ %1 = bitcast %struct.large* %ls to i8*
+ %a = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 0
+ store i32 1, i32* %a
+ %b = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 1
+ store i32 2, i32* %b
+ %c = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 2
+ store i32 3, i32* %c
+ %d = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 3
+ store i32 4, i32* %d
+ %2 = call i32 @callee_large_struct(%struct.large* byval align 4 %ls)
+ ret i32 %2
+}
+
+; Check 2x xlen values are aligned appropriately when passed on the stack
+; Must keep define on a single line due to an update_llc_test_checks.py limitation
+define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %f, i32 %g, i32 %h, double %i, i32 %j, [2 x i32] %k) nounwind {
+; The double should be 8-byte aligned on the stack, but the two-element array
+; should only be 4-byte aligned
+; RV32I-LABEL: callee_aligned_stack:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lw a0, 0(a2)
+; RV32I-NEXT: add a0, a0, a7
+; RV32I-NEXT: lw a1, 0(s0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a1, 8(s0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a1, 16(s0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw a1, 20(s0)
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = bitcast fp128 %c to i128
+ %2 = trunc i128 %1 to i32
+ %3 = add i32 %2, %g
+ %4 = add i32 %3, %h
+ %5 = bitcast double %i to i64
+ %6 = trunc i64 %5 to i32
+ %7 = add i32 %4, %6
+ %8 = add i32 %7, %j
+ %9 = extractvalue [2 x i32] %k, 0
+ %10 = add i32 %8, %9
+ ret i32 %10
+}
+
+define void @caller_aligned_stack() nounwind {
+; The double should be 8-byte aligned on the stack, but the two-element array
+; should only be 4-byte aligned
+; RV32I-LABEL: caller_aligned_stack:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -64
+; RV32I-NEXT: sw ra, 60(sp)
+; RV32I-NEXT: sw s0, 56(sp)
+; RV32I-NEXT: addi s0, sp, 64
+; RV32I-NEXT: addi a0, zero, 18
+; RV32I-NEXT: sw a0, 24(sp)
+; RV32I-NEXT: addi a0, zero, 17
+; RV32I-NEXT: sw a0, 20(sp)
+; RV32I-NEXT: addi a0, zero, 16
+; RV32I-NEXT: sw a0, 16(sp)
+; RV32I-NEXT: lui a0, 262236
+; RV32I-NEXT: addi a0, a0, 655
+; RV32I-NEXT: sw a0, 12(sp)
+; RV32I-NEXT: lui a0, 377487
+; RV32I-NEXT: addi a0, a0, 1475
+; RV32I-NEXT: sw a0, 8(sp)
+; RV32I-NEXT: addi a0, zero, 15
+; RV32I-NEXT: sw a0, 0(sp)
+; RV32I-NEXT: lui a0, 262153
+; RV32I-NEXT: addi a0, a0, 491
+; RV32I-NEXT: sw a0, -20(s0)
+; RV32I-NEXT: lui a0, 545260
+; RV32I-NEXT: addi a0, a0, -1967
+; RV32I-NEXT: sw a0, -24(s0)
+; RV32I-NEXT: lui a0, 964690
+; RV32I-NEXT: addi a0, a0, -328
+; RV32I-NEXT: sw a0, -28(s0)
+; RV32I-NEXT: lui a0, 335544
+; RV32I-NEXT: addi a0, a0, 1311
+; RV32I-NEXT: sw a0, -32(s0)
+; RV32I-NEXT: lui a0, 688509
+; RV32I-NEXT: addi a5, a0, -2048
+; RV32I-NEXT: lui a0, %hi(callee_aligned_stack)
+; RV32I-NEXT: addi t0, a0, %lo(callee_aligned_stack)
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: addi a1, zero, 11
+; RV32I-NEXT: addi a2, s0, -32
+; RV32I-NEXT: addi a3, zero, 12
+; RV32I-NEXT: addi a4, zero, 13
+; RV32I-NEXT: addi a6, zero, 4
+; RV32I-NEXT: addi a7, zero, 14
+; RV32I-NEXT: jalr ra, t0, 0
+; RV32I-NEXT: lw s0, 56(sp)
+; RV32I-NEXT: lw ra, 60(sp)
+; RV32I-NEXT: addi sp, sp, 64
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i32 @callee_aligned_stack(i32 1, i32 11,
+ fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13,
+ i64 20000000000, i32 14, i32 15, double 2.720000e+00, i32 16,
+ [2 x i32] [i32 17, i32 18])
+ ret void
+}
+
+; Check return of 2x xlen scalars
+
+define i64 @callee_small_scalar_ret() nounwind {
+; RV32I-LABEL: callee_small_scalar_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lui a0, 466866
+; RV32I-NEXT: addi a0, a0, 1677
+; RV32I-NEXT: addi a1, zero, 287
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ ret i64 1234567898765
+}
+
+define i32 @caller_small_scalar_ret() nounwind {
+; RV32I-LABEL: caller_small_scalar_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lui a0, %hi(callee_small_scalar_ret)
+; RV32I-NEXT: addi a0, a0, %lo(callee_small_scalar_ret)
+; RV32I-NEXT: jalr ra, a0, 0
+; RV32I-NEXT: lui a2, 56
+; RV32I-NEXT: addi a2, a2, 580
+; RV32I-NEXT: xor a1, a1, a2
+; RV32I-NEXT: lui a2, 200614
+; RV32I-NEXT: addi a2, a2, 647
+; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: xor a0, a0, zero
+; RV32I-NEXT: sltiu a0, a0, 1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call i64 @callee_small_scalar_ret()
+ %2 = icmp eq i64 987654321234567, %1
+ %3 = zext i1 %2 to i32
+ ret i32 %3
+}
+
+; Check return of 2x xlen structs
+
+define %struct.small @callee_small_struct_ret() nounwind {
+; RV32I-LABEL: callee_small_struct_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: addi a0, zero, 1
+; RV32I-NEXT: addi a1, zero, 0
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ ret %struct.small { i32 1, i32* null }
+}
+
+define i32 @caller_small_struct_ret() nounwind {
+; RV32I-LABEL: caller_small_struct_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lui a0, %hi(callee_small_struct_ret)
+; RV32I-NEXT: addi a0, a0, %lo(callee_small_struct_ret)
+; RV32I-NEXT: jalr ra, a0, 0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call %struct.small @callee_small_struct_ret()
+ %2 = extractvalue %struct.small %1, 0
+ %3 = extractvalue %struct.small %1, 1
+ %4 = ptrtoint i32* %3 to i32
+ %5 = add i32 %2, %4
+ ret i32 %5
+}
+
+; Check return of >2x xlen scalars
+
+define fp128 @callee_large_scalar_ret() nounwind {
+; RV32I-LABEL: callee_large_scalar_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: lui a1, 524272
+; RV32I-NEXT: addi a1, a1, 0
+; RV32I-NEXT: sw a1, 12(a0)
+; RV32I-NEXT: sw zero, 8(a0)
+; RV32I-NEXT: sw zero, 4(a0)
+; RV32I-NEXT: sw zero, 0(a0)
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ ret fp128 0xL00000000000000007FFF000000000000
+}
+
+define void @caller_large_scalar_ret() nounwind {
+; RV32I-LABEL: caller_large_scalar_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw ra, 28(sp)
+; RV32I-NEXT: sw s0, 24(sp)
+; RV32I-NEXT: addi s0, sp, 32
+; RV32I-NEXT: lui a0, %hi(callee_large_scalar_ret)
+; RV32I-NEXT: addi a1, a0, %lo(callee_large_scalar_ret)
+; RV32I-NEXT: addi a0, s0, -32
+; RV32I-NEXT: jalr ra, a1, 0
+; RV32I-NEXT: lw s0, 24(sp)
+; RV32I-NEXT: lw ra, 28(sp)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = call fp128 @callee_large_scalar_ret()
+ ret void
+}
+
+; Check return of >2x xlen structs
+
+define void @callee_large_struct_ret(%struct.large* noalias sret %agg.result) nounwind {
+; RV32I-LABEL: callee_large_struct_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp)
+; RV32I-NEXT: sw s0, 8(sp)
+; RV32I-NEXT: addi s0, sp, 16
+; RV32I-NEXT: addi a1, zero, 2
+; RV32I-NEXT: sw a1, 4(a0)
+; RV32I-NEXT: addi a1, zero, 1
+; RV32I-NEXT: sw a1, 0(a0)
+; RV32I-NEXT: addi a1, zero, 3
+; RV32I-NEXT: sw a1, 8(a0)
+; RV32I-NEXT: addi a1, zero, 4
+; RV32I-NEXT: sw a1, 12(a0)
+; RV32I-NEXT: lw s0, 8(sp)
+; RV32I-NEXT: lw ra, 12(sp)
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: jalr zero, ra, 0
+ %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 0
+ store i32 1, i32* %a, align 4
+ %b = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 1
+ store i32 2, i32* %b, align 4
+ %c = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 2
+ store i32 3, i32* %c, align 4
+ %d = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 3
+ store i32 4, i32* %d, align 4
+ ret void
+}
+
+define i32 @caller_large_struct_ret() nounwind {
+; RV32I-LABEL: caller_large_struct_ret:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -32
+; RV32I-NEXT: sw ra, 28(sp)
+; RV32I-NEXT: sw s0, 24(sp)
+; RV32I-NEXT: addi s0, sp, 32
+; RV32I-NEXT: lui a0, %hi(callee_large_struct_ret)
+; RV32I-NEXT: addi a1, a0, %lo(callee_large_struct_ret)
+; RV32I-NEXT: addi a0, s0, -24
+; RV32I-NEXT: jalr ra, a1, 0
+; RV32I-NEXT: lw a0, -12(s0)
+; RV32I-NEXT: lw a1, -24(s0)
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: lw s0, 24(sp)
+; RV32I-NEXT: lw ra, 28(sp)
+; RV32I-NEXT: addi sp, sp, 32
+; RV32I-NEXT: jalr zero, ra, 0
+ %1 = alloca %struct.large
+ call void @callee_large_struct_ret(%struct.large* sret %1)
+ %2 = getelementptr inbounds %struct.large, %struct.large* %1, i32 0, i32 0
+ %3 = load i32, i32* %2
+ %4 = getelementptr inbounds %struct.large, %struct.large* %1, i32 0, i32 3
+ %5 = load i32, i32* %4
+ %6 = add i32 %3, %5
+ ret i32 %6
+}