path: root/test/CodeGen/Thumb2
author: Saleem Abdulrasool <compnerd@compnerd.org> 2016-09-07 03:17:19 +0000
committer: Saleem Abdulrasool <compnerd@compnerd.org> 2016-09-07 03:17:19 +0000
commit: 4ccc33a9abf64d68652d49bb5ffdd6dcd1a7096a (patch)
tree: eb10f77298b4f192323321c48b937b5238b73c3e /test/CodeGen/Thumb2
parent: 36a0b39faeca8b7d2e53c3999d1650d59bfbdb92 (diff)
Revert "CodeGen: ensure that libcalls are always AAPCS CC"
This reverts SVN r280683. Revert until I figure out why this is breaking lli tests.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@280778 91177308-0d34-0410-b5e6-96231b3b80d8
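For context, the reverted change had annotated these libcall intrinsic declarations and calls in the tests with an explicit AAPCS calling convention; this revert restores the plain declarations. A minimal illustrative LLVM IR sketch of the two forms (based on the llvm.sqrt.f64 hunk in the diff below, not a quote from the patch):

    ; with the reverted change applied: the libcall carries an explicit AAPCS CC
    declare arm_aapcscc double @llvm.sqrt.f64(double)
      %r = call arm_aapcscc double @llvm.sqrt.f64(double %a)

    ; after this revert: the default calling convention is used again
    declare double @llvm.sqrt.f64(double)
      %r = call double @llvm.sqrt.f64(double %a)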
Diffstat (limited to 'test/CodeGen/Thumb2')
-rw-r--r--  test/CodeGen/Thumb2/float-intrinsics-double.ll  119
-rw-r--r--  test/CodeGen/Thumb2/float-intrinsics-float.ll   115
-rw-r--r--  test/CodeGen/Thumb2/float-ops.ll                  5
3 files changed, 118 insertions, 121 deletions
diff --git a/test/CodeGen/Thumb2/float-intrinsics-double.ll b/test/CodeGen/Thumb2/float-intrinsics-double.ll
index 19d80eb3770..657d1b172da 100644
--- a/test/CodeGen/Thumb2/float-intrinsics-double.ll
+++ b/test/CodeGen/Thumb2/float-intrinsics-double.ll
@@ -5,107 +5,107 @@
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8
-declare arm_aapcscc double @llvm.sqrt.f64(double %Val)
+declare double @llvm.sqrt.f64(double %Val)
define double @sqrt_d(double %a) {
; CHECK-LABEL: sqrt_d:
; SOFT: {{(bl|b)}} sqrt
; HARD: vsqrt.f64 d0, d0
- %1 = call arm_aapcscc double @llvm.sqrt.f64(double %a)
+ %1 = call double @llvm.sqrt.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.powi.f64(double %Val, i32 %power)
+declare double @llvm.powi.f64(double %Val, i32 %power)
define double @powi_d(double %a, i32 %b) {
; CHECK-LABEL: powi_d:
; SOFT: {{(bl|b)}} __powidf2
-; HARD: bl __powidf2
- %1 = call arm_aapcscc double @llvm.powi.f64(double %a, i32 %b)
+; HARD: b __powidf2
+ %1 = call double @llvm.powi.f64(double %a, i32 %b)
ret double %1
}
-declare arm_aapcscc double @llvm.sin.f64(double %Val)
+declare double @llvm.sin.f64(double %Val)
define double @sin_d(double %a) {
; CHECK-LABEL: sin_d:
; SOFT: {{(bl|b)}} sin
-; HARD: bl sin
- %1 = call arm_aapcscc double @llvm.sin.f64(double %a)
+; HARD: b sin
+ %1 = call double @llvm.sin.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.cos.f64(double %Val)
+declare double @llvm.cos.f64(double %Val)
define double @cos_d(double %a) {
; CHECK-LABEL: cos_d:
; SOFT: {{(bl|b)}} cos
-; HARD: bl cos
- %1 = call arm_aapcscc double @llvm.cos.f64(double %a)
+; HARD: b cos
+ %1 = call double @llvm.cos.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.pow.f64(double %Val, double %power)
+declare double @llvm.pow.f64(double %Val, double %power)
define double @pow_d(double %a, double %b) {
; CHECK-LABEL: pow_d:
; SOFT: {{(bl|b)}} pow
-; HARD: bl pow
- %1 = call arm_aapcscc double @llvm.pow.f64(double %a, double %b)
+; HARD: b pow
+ %1 = call double @llvm.pow.f64(double %a, double %b)
ret double %1
}
-declare arm_aapcscc double @llvm.exp.f64(double %Val)
+declare double @llvm.exp.f64(double %Val)
define double @exp_d(double %a) {
; CHECK-LABEL: exp_d:
; SOFT: {{(bl|b)}} exp
-; HARD: bl exp
- %1 = call arm_aapcscc double @llvm.exp.f64(double %a)
+; HARD: b exp
+ %1 = call double @llvm.exp.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.exp2.f64(double %Val)
+declare double @llvm.exp2.f64(double %Val)
define double @exp2_d(double %a) {
; CHECK-LABEL: exp2_d:
; SOFT: {{(bl|b)}} exp2
-; HARD: bl exp2
- %1 = call arm_aapcscc double @llvm.exp2.f64(double %a)
+; HARD: b exp2
+ %1 = call double @llvm.exp2.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.log.f64(double %Val)
+declare double @llvm.log.f64(double %Val)
define double @log_d(double %a) {
; CHECK-LABEL: log_d:
; SOFT: {{(bl|b)}} log
-; HARD: bl log
- %1 = call arm_aapcscc double @llvm.log.f64(double %a)
+; HARD: b log
+ %1 = call double @llvm.log.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.log10.f64(double %Val)
+declare double @llvm.log10.f64(double %Val)
define double @log10_d(double %a) {
; CHECK-LABEL: log10_d:
; SOFT: {{(bl|b)}} log10
-; HARD: bl log10
- %1 = call arm_aapcscc double @llvm.log10.f64(double %a)
+; HARD: b log10
+ %1 = call double @llvm.log10.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.log2.f64(double %Val)
+declare double @llvm.log2.f64(double %Val)
define double @log2_d(double %a) {
; CHECK-LABEL: log2_d:
; SOFT: {{(bl|b)}} log2
-; HARD: bl log2
- %1 = call arm_aapcscc double @llvm.log2.f64(double %a)
+; HARD: b log2
+ %1 = call double @llvm.log2.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.fma.f64(double %a, double %b, double %c)
+declare double @llvm.fma.f64(double %a, double %b, double %c)
define double @fma_d(double %a, double %b, double %c) {
; CHECK-LABEL: fma_d:
; SOFT: {{(bl|b)}} fma
; HARD: vfma.f64
- %1 = call arm_aapcscc double @llvm.fma.f64(double %a, double %b, double %c)
+ %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
ret double %1
}
; FIXME: the FPv4-SP version is less efficient than the no-FPU version
-declare arm_aapcscc double @llvm.fabs.f64(double %Val)
+declare double @llvm.fabs.f64(double %Val)
define double @abs_d(double %a) {
; CHECK-LABEL: abs_d:
; NONE: bic r1, r1, #-2147483648
@@ -116,11 +116,11 @@ define double @abs_d(double %a) {
; SP: bfi r1, r2, #31, #1
; SP: vmov d0, r0, r1
; DP: vabs.f64 d0, d0
- %1 = call arm_aapcscc double @llvm.fabs.f64(double %a)
+ %1 = call double @llvm.fabs.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.copysign.f64(double %Mag, double %Sgn)
+declare double @llvm.copysign.f64(double %Mag, double %Sgn)
define double @copysign_d(double %a, double %b) {
; CHECK-LABEL: copysign_d:
; SOFT: lsrs [[REG:r[0-9]+]], r3, #31
@@ -130,71 +130,71 @@ define double @copysign_d(double %a, double %b) {
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vshl.i64 [[REG]], [[REG]], #32
; NEON: vbsl [[REG]], d
- %1 = call arm_aapcscc double @llvm.copysign.f64(double %a, double %b)
+ %1 = call double @llvm.copysign.f64(double %a, double %b)
ret double %1
}
-declare arm_aapcscc double @llvm.floor.f64(double %Val)
+declare double @llvm.floor.f64(double %Val)
define double @floor_d(double %a) {
; CHECK-LABEL: floor_d:
; SOFT: {{(bl|b)}} floor
-; VFP4: bl floor
+; VFP4: b floor
; FP-ARMv8: vrintm.f64
- %1 = call arm_aapcscc double @llvm.floor.f64(double %a)
+ %1 = call double @llvm.floor.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.ceil.f64(double %Val)
+declare double @llvm.ceil.f64(double %Val)
define double @ceil_d(double %a) {
; CHECK-LABEL: ceil_d:
; SOFT: {{(bl|b)}} ceil
-; VFP4: bl ceil
+; VFP4: b ceil
; FP-ARMv8: vrintp.f64
- %1 = call arm_aapcscc double @llvm.ceil.f64(double %a)
+ %1 = call double @llvm.ceil.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.trunc.f64(double %Val)
+declare double @llvm.trunc.f64(double %Val)
define double @trunc_d(double %a) {
; CHECK-LABEL: trunc_d:
; SOFT: {{(bl|b)}} trunc
-; FFP4: bl trunc
+; FFP4: b trunc
; FP-ARMv8: vrintz.f64
- %1 = call arm_aapcscc double @llvm.trunc.f64(double %a)
+ %1 = call double @llvm.trunc.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.rint.f64(double %Val)
+declare double @llvm.rint.f64(double %Val)
define double @rint_d(double %a) {
; CHECK-LABEL: rint_d:
; SOFT: {{(bl|b)}} rint
-; VFP4: bl rint
+; VFP4: b rint
; FP-ARMv8: vrintx.f64
- %1 = call arm_aapcscc double @llvm.rint.f64(double %a)
+ %1 = call double @llvm.rint.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.nearbyint.f64(double %Val)
+declare double @llvm.nearbyint.f64(double %Val)
define double @nearbyint_d(double %a) {
; CHECK-LABEL: nearbyint_d:
; SOFT: {{(bl|b)}} nearbyint
-; VFP4: bl nearbyint
+; VFP4: b nearbyint
; FP-ARMv8: vrintr.f64
- %1 = call arm_aapcscc double @llvm.nearbyint.f64(double %a)
+ %1 = call double @llvm.nearbyint.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.round.f64(double %Val)
+declare double @llvm.round.f64(double %Val)
define double @round_d(double %a) {
; CHECK-LABEL: round_d:
; SOFT: {{(bl|b)}} round
-; VFP4: bl round
+; VFP4: b round
; FP-ARMv8: vrinta.f64
- %1 = call arm_aapcscc double @llvm.round.f64(double %a)
+ %1 = call double @llvm.round.f64(double %a)
ret double %1
}
-declare arm_aapcscc double @llvm.fmuladd.f64(double %a, double %b, double %c)
+declare double @llvm.fmuladd.f64(double %a, double %b, double %c)
define double @fmuladd_d(double %a, double %b, double %c) {
; CHECK-LABEL: fmuladd_d:
; SOFT: bl __aeabi_dmul
@@ -202,21 +202,21 @@ define double @fmuladd_d(double %a, double %b, double %c) {
; VFP4: vmul.f64
; VFP4: vadd.f64
; FP-ARMv8: vmla.f64
- %1 = call arm_aapcscc double @llvm.fmuladd.f64(double %a, double %b, double %c)
+ %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
ret double %1
}
-declare arm_aapcscc i16 @llvm.convert.to.fp16.f64(double %a)
+declare i16 @llvm.convert.to.fp16.f64(double %a)
define i16 @d_to_h(double %a) {
; CHECK-LABEL: d_to_h:
; SOFT: bl __aeabi_d2h
; VFP4: bl __aeabi_d2h
; FP-ARMv8: vcvt{{[bt]}}.f16.f64
- %1 = call arm_aapcscc i16 @llvm.convert.to.fp16.f64(double %a)
+ %1 = call i16 @llvm.convert.to.fp16.f64(double %a)
ret i16 %1
}
-declare arm_aapcscc double @llvm.convert.from.fp16.f64(i16 %a)
+declare double @llvm.convert.from.fp16.f64(i16 %a)
define double @h_to_d(i16 %a) {
; CHECK-LABEL: h_to_d:
; NONE: bl __aeabi_h2f
@@ -226,7 +226,6 @@ define double @h_to_d(i16 %a) {
; VFPv4: vcvt{{[bt]}}.f32.f16
; VFPv4: vcvt.f64.f32
; FP-ARMv8: vcvt{{[bt]}}.f64.f16
- %1 = call arm_aapcscc double @llvm.convert.from.fp16.f64(i16 %a)
+ %1 = call double @llvm.convert.from.fp16.f64(i16 %a)
ret double %1
}
-
diff --git a/test/CodeGen/Thumb2/float-intrinsics-float.ll b/test/CodeGen/Thumb2/float-intrinsics-float.ll
index 99d662ff169..847aeacd2f9 100644
--- a/test/CodeGen/Thumb2/float-intrinsics-float.ll
+++ b/test/CodeGen/Thumb2/float-intrinsics-float.ll
@@ -14,106 +14,106 @@ define float @sqrt_f(float %a) {
ret float %1
}
-declare arm_aapcscc float @llvm.powi.f32(float %Val, i32 %power)
+declare float @llvm.powi.f32(float %Val, i32 %power)
define float @powi_f(float %a, i32 %b) {
; CHECK-LABEL: powi_f:
; SOFT: bl __powisf2
-; HARD: bl __powisf2
- %1 = call arm_aapcscc float @llvm.powi.f32(float %a, i32 %b)
+; HARD: b __powisf2
+ %1 = call float @llvm.powi.f32(float %a, i32 %b)
ret float %1
}
-declare arm_aapcscc float @llvm.sin.f32(float %Val)
+declare float @llvm.sin.f32(float %Val)
define float @sin_f(float %a) {
; CHECK-LABEL: sin_f:
; SOFT: bl sinf
-; HARD: bl sinf
- %1 = call arm_aapcscc float @llvm.sin.f32(float %a)
+; HARD: b sinf
+ %1 = call float @llvm.sin.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.cos.f32(float %Val)
+declare float @llvm.cos.f32(float %Val)
define float @cos_f(float %a) {
; CHECK-LABEL: cos_f:
; SOFT: bl cosf
-; HARD: bl cosf
- %1 = call arm_aapcscc float @llvm.cos.f32(float %a)
+; HARD: b cosf
+ %1 = call float @llvm.cos.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.pow.f32(float %Val, float %power)
+declare float @llvm.pow.f32(float %Val, float %power)
define float @pow_f(float %a, float %b) {
; CHECK-LABEL: pow_f:
; SOFT: bl powf
-; HARD: bl powf
- %1 = call arm_aapcscc float @llvm.pow.f32(float %a, float %b)
+; HARD: b powf
+ %1 = call float @llvm.pow.f32(float %a, float %b)
ret float %1
}
-declare arm_aapcscc float @llvm.exp.f32(float %Val)
+declare float @llvm.exp.f32(float %Val)
define float @exp_f(float %a) {
; CHECK-LABEL: exp_f:
; SOFT: bl expf
-; HARD: bl expf
- %1 = call arm_aapcscc float @llvm.exp.f32(float %a)
+; HARD: b expf
+ %1 = call float @llvm.exp.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.exp2.f32(float %Val)
+declare float @llvm.exp2.f32(float %Val)
define float @exp2_f(float %a) {
; CHECK-LABEL: exp2_f:
; SOFT: bl exp2f
-; HARD: bl exp2f
- %1 = call arm_aapcscc float @llvm.exp2.f32(float %a)
+; HARD: b exp2f
+ %1 = call float @llvm.exp2.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.log.f32(float %Val)
+declare float @llvm.log.f32(float %Val)
define float @log_f(float %a) {
; CHECK-LABEL: log_f:
; SOFT: bl logf
-; HARD: bl logf
- %1 = call arm_aapcscc float @llvm.log.f32(float %a)
+; HARD: b logf
+ %1 = call float @llvm.log.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.log10.f32(float %Val)
+declare float @llvm.log10.f32(float %Val)
define float @log10_f(float %a) {
; CHECK-LABEL: log10_f:
; SOFT: bl log10f
-; HARD: bl log10f
- %1 = call arm_aapcscc float @llvm.log10.f32(float %a)
+; HARD: b log10f
+ %1 = call float @llvm.log10.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.log2.f32(float %Val)
+declare float @llvm.log2.f32(float %Val)
define float @log2_f(float %a) {
; CHECK-LABEL: log2_f:
; SOFT: bl log2f
-; HARD: bl log2f
- %1 = call arm_aapcscc float @llvm.log2.f32(float %a)
+; HARD: b log2f
+ %1 = call float @llvm.log2.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.fma.f32(float %a, float %b, float %c)
+declare float @llvm.fma.f32(float %a, float %b, float %c)
define float @fma_f(float %a, float %b, float %c) {
; CHECK-LABEL: fma_f:
; SOFT: bl fmaf
; HARD: vfma.f32
- %1 = call arm_aapcscc float @llvm.fma.f32(float %a, float %b, float %c)
+ %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
ret float %1
}
-declare arm_aapcscc float @llvm.fabs.f32(float %Val)
+declare float @llvm.fabs.f32(float %Val)
define float @abs_f(float %a) {
; CHECK-LABEL: abs_f:
; SOFT: bic r0, r0, #-2147483648
; HARD: vabs.f32
- %1 = call arm_aapcscc float @llvm.fabs.f32(float %a)
+ %1 = call float @llvm.fabs.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.copysign.f32(float %Mag, float %Sgn)
+declare float @llvm.copysign.f32(float %Mag, float %Sgn)
define float @copysign_f(float %a, float %b) {
; CHECK-LABEL: copysign_f:
; NONE: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
@@ -124,73 +124,73 @@ define float @copysign_f(float %a, float %b) {
; VFP: bfi r{{[0-9]+}}, [[REG]], #31, #1
; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
; NEON: vbsl [[REG]], d
- %1 = call arm_aapcscc float @llvm.copysign.f32(float %a, float %b)
+ %1 = call float @llvm.copysign.f32(float %a, float %b)
ret float %1
}
-declare arm_aapcscc float @llvm.floor.f32(float %Val)
+declare float @llvm.floor.f32(float %Val)
define float @floor_f(float %a) {
; CHECK-LABEL: floor_f:
; SOFT: bl floorf
-; VFP4: bl floorf
+; VFP4: b floorf
; FP-ARMv8: vrintm.f32
- %1 = call arm_aapcscc float @llvm.floor.f32(float %a)
+ %1 = call float @llvm.floor.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.ceil.f32(float %Val)
+declare float @llvm.ceil.f32(float %Val)
define float @ceil_f(float %a) {
; CHECK-LABEL: ceil_f:
; SOFT: bl ceilf
-; VFP4: bl ceilf
+; VFP4: b ceilf
; FP-ARMv8: vrintp.f32
- %1 = call arm_aapcscc float @llvm.ceil.f32(float %a)
+ %1 = call float @llvm.ceil.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.trunc.f32(float %Val)
+declare float @llvm.trunc.f32(float %Val)
define float @trunc_f(float %a) {
; CHECK-LABEL: trunc_f:
; SOFT: bl truncf
-; VFP4: bl truncf
+; VFP4: b truncf
; FP-ARMv8: vrintz.f32
- %1 = call arm_aapcscc float @llvm.trunc.f32(float %a)
+ %1 = call float @llvm.trunc.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.rint.f32(float %Val)
+declare float @llvm.rint.f32(float %Val)
define float @rint_f(float %a) {
; CHECK-LABEL: rint_f:
; SOFT: bl rintf
-; VFP4: bl rintf
+; VFP4: b rintf
; FP-ARMv8: vrintx.f32
- %1 = call arm_aapcscc float @llvm.rint.f32(float %a)
+ %1 = call float @llvm.rint.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.nearbyint.f32(float %Val)
+declare float @llvm.nearbyint.f32(float %Val)
define float @nearbyint_f(float %a) {
; CHECK-LABEL: nearbyint_f:
; SOFT: bl nearbyintf
-; VFP4: bl nearbyintf
+; VFP4: b nearbyintf
; FP-ARMv8: vrintr.f32
- %1 = call arm_aapcscc float @llvm.nearbyint.f32(float %a)
+ %1 = call float @llvm.nearbyint.f32(float %a)
ret float %1
}
-declare arm_aapcscc float @llvm.round.f32(float %Val)
+declare float @llvm.round.f32(float %Val)
define float @round_f(float %a) {
; CHECK-LABEL: round_f:
; SOFT: bl roundf
-; VFP4: bl roundf
+; VFP4: b roundf
; FP-ARMv8: vrinta.f32
- %1 = call arm_aapcscc float @llvm.round.f32(float %a)
+ %1 = call float @llvm.round.f32(float %a)
ret float %1
}
; FIXME: why does cortex-m4 use vmla, while cortex-a7 uses vmul+vadd?
; (these should be equivalent, even the rounding is the same)
-declare arm_aapcscc float @llvm.fmuladd.f32(float %a, float %b, float %c)
+declare float @llvm.fmuladd.f32(float %a, float %b, float %c)
define float @fmuladd_f(float %a, float %b, float %c) {
; CHECK-LABEL: fmuladd_f:
; SOFT: bl __aeabi_fmul
@@ -198,25 +198,24 @@ define float @fmuladd_f(float %a, float %b, float %c) {
; VMLA: vmla.f32
; NO-VMLA: vmul.f32
; NO-VMLA: vadd.f32
- %1 = call arm_aapcscc float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
ret float %1
}
-declare arm_aapcscc i16 @llvm.convert.to.fp16.f32(float %a)
+declare i16 @llvm.convert.to.fp16.f32(float %a)
define i16 @f_to_h(float %a) {
; CHECK-LABEL: f_to_h:
; SOFT: bl __aeabi_f2h
; HARD: vcvt{{[bt]}}.f16.f32
- %1 = call arm_aapcscc i16 @llvm.convert.to.fp16.f32(float %a)
+ %1 = call i16 @llvm.convert.to.fp16.f32(float %a)
ret i16 %1
}
-declare arm_aapcscc float @llvm.convert.from.fp16.f32(i16 %a)
+declare float @llvm.convert.from.fp16.f32(i16 %a)
define float @h_to_f(i16 %a) {
; CHECK-LABEL: h_to_f:
; SOFT: bl __aeabi_h2f
; HARD: vcvt{{[bt]}}.f32.f16
- %1 = call arm_aapcscc float @llvm.convert.from.fp16.f32(i16 %a)
+ %1 = call float @llvm.convert.from.fp16.f32(i16 %a)
ret float %1
}
-
diff --git a/test/CodeGen/Thumb2/float-ops.ll b/test/CodeGen/Thumb2/float-ops.ll
index 4101984e3f7..c9f93f2d613 100644
--- a/test/CodeGen/Thumb2/float-ops.ll
+++ b/test/CodeGen/Thumb2/float-ops.ll
@@ -4,7 +4,6 @@
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP4-ALL -check-prefix=VFP4-DP
define float @add_f(float %a, float %b) {
-
entry:
; CHECK-LABEL: add_f:
; NONE: bl __aeabi_fadd
@@ -84,7 +83,7 @@ define float @rem_f(float %a, float %b) {
entry:
; CHECK-LABEL: rem_f:
; NONE: bl fmodf
-; HARD: bl fmodf
+; HARD: b fmodf
%0 = frem float %a, %b
ret float %0
}
@@ -93,7 +92,7 @@ define double @rem_d(double %a, double %b) {
entry:
; CHECK-LABEL: rem_d:
; NONE: bl fmod
-; HARD: bl fmod
+; HARD: b fmod
%0 = frem double %a, %b
ret double %0
}