; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
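; (Re-run that script on this file to regenerate the CHECK lines after making changes.)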
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=ALL --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=ALL --check-prefix=AVX
;
; Verify that fast-isel doesn't select legacy (non-VEX) SSE instructions on
; targets that support AVX; mixing the two encoding forms can incur costly
; SSE/AVX transition penalties.
;
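; As an illustration, for the float-to-double conversion the AVX runs must
; select the VEX-encoded form
;   vcvtss2sd %xmm0, %xmm0, %xmm0
; rather than the legacy SSE encoding
;   cvtss2sd %xmm0, %xmm0
; (both forms appear in the CHECK lines below).
;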
; The test cases below were obtained from the following C code snippet:
; ///
; double single_to_double_rr(float x) {
;   return (double)x;
; }
; float double_to_single_rr(double x) {
;   return (float)x;
; }
; double single_to_double_rm(float *x) {
;   return (double)*x;
; }
; float double_to_single_rm(double *x) {
;   return (float)*x;
; }
; ///
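;
; The *_rm_optsize functions below are not part of the snippet above; they
; repeat the memory-operand tests with the optsize attribute to check whether
; fast-isel folds the load into the conversion instruction.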

define double @single_to_double_rr(float %x) {
; SSE-LABEL: single_to_double_rr:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: single_to_double_rr:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %conv = fpext float %x to double
  ret double %conv
}

define float @double_to_single_rr(double %x) {
; SSE-LABEL: double_to_single_rr:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: double_to_single_rr:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %conv = fptrunc double %x to float
  ret float %conv
}

define double @single_to_double_rm(float* %x) {
; SSE-LABEL: single_to_double_rm:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: single_to_double_rm:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load float, float* %x, align 4
  %conv = fpext float %0 to double
  ret double %conv
}

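; Under optsize the SSE run folds the load into cvtss2sd, while the AVX run
; still emits a separate vmovss: the zero-extending load avoids the false
; dependency on the prior contents of %xmm0 that the folded vcvtss2sd form
; would introduce.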
define double @single_to_double_rm_optsize(float* %x) optsize {
; SSE-LABEL: single_to_double_rm_optsize:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtss2sd (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: single_to_double_rm_optsize:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load float, float* %x, align 4
  %conv = fpext float %0 to double
  ret double %conv
}

define float @double_to_single_rm(double* %x) {
; SSE-LABEL: double_to_single_rm:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    cvtsd2ss %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: double_to_single_rm:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load double, double* %x, align 8
  %conv = fptrunc double %0 to float
  ret float %conv
}

define float @double_to_single_rm_optsize(double* %x) optsize {
; SSE-LABEL: double_to_single_rm_optsize:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtsd2ss (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: double_to_single_rm_optsize:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %0 = load double, double* %x, align 8
  %conv = fptrunc double %0 to float
  ret float %conv
}