Commit 644899a

[RISCV][GISel] Port portions of float-intrinsics.ll and double-intrinsics.ll. NFC
Remove the legalizer test for the same intrinsics, as it is no longer interesting with end-to-end tests.
1 parent bde51d9 commit 644899a

3 files changed: +705, -328 lines changed

Lines changed: 264 additions & 0 deletions
@@ -0,0 +1,264 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=ilp32d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=lp64d \
; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s

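; The CHECK lines in this file come from the script named in the NOTE above.
; A typical way to refresh them after a codegen change might look like the
; sketch below; the build directory and test path are placeholders, not taken
; from this commit:
;   llvm/utils/update_llc_test_checks.py --llc-binary <build>/bin/llc <path/to/this/test.ll>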
declare double @llvm.sqrt.f64(double)

define double @sqrt_f64(double %a) nounwind {
; CHECKIFD-LABEL: sqrt_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}

declare double @llvm.fma.f64(double, double, double)

define double @fma_f64(double %a, double %b, double %c) nounwind {
; CHECKIFD-LABEL: fma_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}

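; llvm.fmuladd is allowed to be fused when the target supports it, so with +d
; it is selected to the same fused fmadd.d as llvm.fma above.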
declare double @llvm.fmuladd.f64(double, double, double)

define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; CHECKIFD-LABEL: fmuladd_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}

declare double @llvm.fabs.f64(double)

define double @fabs_f64(double %a) nounwind {
; CHECKIFD-LABEL: fabs_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fabs.d fa0, fa0
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}

declare double @llvm.minnum.f64(double, double)

define double @minnum_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: minnum_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmin.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.minnum.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.maxnum.f64(double, double)

define double @maxnum_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: maxnum_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fmax.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.maxnum.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.copysign.f64(double, double)

define double @copysign_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: copysign_f64:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fsgnj.d fa0, fa0, fa1
; CHECKIFD-NEXT:    ret
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}

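; The rounding intrinsics below (floor, ceil, trunc, rint, nearbyint, round,
; roundeven) have no single-instruction lowering with just the D extension, so
; GlobalISel lowers them to libcalls; the checks therefore expect a call plus
; the surrounding ra spill/reload rather than an inline instruction.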
declare double @llvm.floor.f64(double)

define double @floor_f64(double %a) nounwind {
; RV32IFD-LABEL: floor_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call floor
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call floor
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}

declare double @llvm.ceil.f64(double)

define double @ceil_f64(double %a) nounwind {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call ceil
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call ceil
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}

declare double @llvm.trunc.f64(double)

define double @trunc_f64(double %a) nounwind {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call trunc
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call trunc
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}

declare double @llvm.rint.f64(double)

define double @rint_f64(double %a) nounwind {
; RV32IFD-LABEL: rint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call rint
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}

declare double @llvm.nearbyint.f64(double)

define double @nearbyint_f64(double %a) nounwind {
; RV32IFD-LABEL: nearbyint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call nearbyint
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: nearbyint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call nearbyint
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}

declare double @llvm.round.f64(double)

define double @round_f64(double %a) nounwind {
; RV32IFD-LABEL: round_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: round_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call round
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}

declare double @llvm.roundeven.f64(double)

define double @roundeven_f64(double %a) nounwind {
; RV32IFD-LABEL: roundeven_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: roundeven_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    call roundeven
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.roundeven.f64(double %a)
  ret double %1
}

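; Test mask 3 in llvm.is.fpclass selects the "nan" classes (signaling and
; quiet NaN). fclass.d reports those in result bits 8 and 9, so the selected
; code masks the class word with 768 (0x300) and checks for a nonzero result.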
declare i1 @llvm.is.fpclass.f64(double, i32)
define i1 @isnan_d_fpclass(double %x) {
; CHECKIFD-LABEL: isnan_d_fpclass:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fclass.d a0, fa0
; CHECKIFD-NEXT:    andi a0, a0, 768
; CHECKIFD-NEXT:    snez a0, a0
; CHECKIFD-NEXT:    ret
  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3) ; nan
  ret i1 %1
}
