testcase: Add testcase for PR 117330 [PR117330]
[official-gcc.git] / gcc / testsuite / gcc.target / aarch64 / bitfield-bitint-abi-align16.c
blob c29a230a771324aba6bb4bf649c352a7c95effab
1 /* { dg-do compile { target bitint } } */
2 /* { dg-additional-options "-std=c23 -O2 -fno-stack-protector -save-temps -fno-schedule-insns -fno-schedule-insns2 -fno-late-combine-instructions" } */
3 /* { dg-final { check-function-bodies "**" "" "" } } */
5 #define ALIGN 16
6 #include "bitfield-bitint-abi.h"
8 // f1-f16 are all the same
11 ** f1:
12 ** and x0, x2, 1
13 ** ret
16 ** f8:
17 ** and x0, x2, 1
18 ** ret
21 ** f16:
22 ** and x0, x2, 1
23 ** ret
26 /* For fp, GCC seems unable to optimize away the stack usage; TODO: fix. */
29 ** fp:
30 **...
31 ** and x0, x1, 1
32 **...
33 ** ret
36 // All the others, f1p-f8p, generate the same code; for f16p the value comes from x2
38 ** f1p:
39 ** and x0, x1, 1
40 ** ret
43 ** f8p:
44 ** and x0, x1, 1
45 ** ret
48 ** f16p:
49 ** and x0, x2, 1
50 ** ret
53 // g1-g16 are all the same
55 ** g1:
56 ** mov (x[0-9]+), x0
57 ** mov w0, w1
58 ** and x4, \1, 9223372036854775807
59 ** and x2, \1, 1
60 ** mov x3, 0
61 ** b f1
65 ** g8:
66 ** mov (x[0-9]+), x0
67 ** mov w0, w1
68 ** and x4, \1, 9223372036854775807
69 ** and x2, \1, 1
70 ** mov x3, 0
71 ** b f8
74 ** g16:
75 ** mov (x[0-9]+), x0
76 ** mov w0, w1
77 ** and x4, \1, 9223372036854775807
78 ** and x2, \1, 1
79 ** mov x3, 0
80 ** b f16
83 // Again, gp is different from the rest
86 ** gp:
87 ** sub sp, sp, #16
88 ** mov (x[0-9]+), x0
89 ** mov w0, w1
90 ** sbfx x([0-9]+), \1, 0, 63
91 ** mov (w[0-9]+), 0
92 ** bfi \3, w\2, 0, 1
93 ** and x3, x\2, 9223372036854775807
94 ** mov x2, 0
95 ** str xzr, \[sp\]
96 ** strb \3, \[sp\]
97 ** ldr x1, \[sp\]
98 ** add sp, sp, 16
99 ** b fp
102 // g1p-g8p are all the same; g16p uses x2 to pass its parameter to f16p
105 ** g1p:
106 ** mov (w[0-9]+), w1
107 ** and x3, x0, 9223372036854775807
108 ** and x1, x0, 1
109 ** mov x2, 0
110 ** mov w0, \1
111 ** b f1p
114 ** g8p:
115 ** mov (w[0-9]+), w1
116 ** and x3, x0, 9223372036854775807
117 ** and x1, x0, 1
118 ** mov x2, 0
119 ** mov w0, \1
120 ** b f8p
123 ** g16p:
124 ** mov (x[0-9]+), x0
125 ** mov w0, w1
126 ** and x4, \1, 9223372036854775807
127 ** and x2, \1, 1
128 ** mov x3, 0
129 ** b f16p
132 // f1_stack, f8_stack and f16_stack are all the same
134 ** f1_stack:
135 ** ldr (x[0-9]+), \[sp, 16\]
136 ** and x0, \1, 1
137 ** ret
140 ** f8_stack:
141 ** ldr (x[0-9]+), \[sp, 16\]
142 ** and x0, \1, 1
143 ** ret
146 ** f16_stack:
147 ** ldr (x[0-9]+), \[sp, 16\]
148 ** and x0, \1, 1
149 ** ret
152 // fp_stack, f1p_stack and f8p_stack are all the same, but f16p_stack loads from sp+16
154 ** fp_stack:
155 ** ldr (x[0-9]+), \[sp, 8\]
156 ** and x0, \1, 1
157 ** ret
160 ** f1p_stack:
161 ** ldr (x[0-9]+), \[sp, 8\]
162 ** and x0, \1, 1
163 ** ret
166 ** f8p_stack:
167 ** ldr (x[0-9]+), \[sp, 8\]
168 ** and x0, \1, 1
169 ** ret
173 ** f16p_stack:
174 ** ldr (x[0-9]+), \[sp, 16\]
175 ** and x0, \1, 1
176 ** ret
180 ** gp_stack:
181 **...
182 ** mov x([0-9]+), x0
183 ** sxtw (x[0-9]+), w1
184 ** mov x0, \2
185 ** and x7, \2, 9223372036854775807
186 ** mov (w[0-9]+), 0
187 ** bfi \3, w\1, 0, 1
188 ** strb wzr, \[sp, 16\]
189 ** mov x6, x7
190 ** mov x5, x7
191 ** mov x4, x7
192 ** mov x3, x7
193 ** mov x2, x7
194 ** str xzr, \[sp, 48\]
195 ** strb \3, \[sp, 48\]
196 ** ldr (x[0-9]+), \[sp, 48\]
197 ** stp x7, \4, \[sp\]
198 ** mov x1, x7
199 ** bl fp_stack
200 ** sbfx x0, x0, 0, 63
201 **...
202 ** ret
206 ** g1_stack:
207 **...
208 ** mov (x[0-9]+), x0
209 ** sxtw (x[0-9]+), w1
210 ** mov x0, \2
211 ** and x7, \2, 9223372036854775807
212 ** mov (x[0-9]+), 0
213 ** sbfx (x[0-9]+), \1, 0, 63
214 ** bfi \3, \4, 0, 1
215 ** stp \3, xzr, \[sp, 16\]
216 ** mov x6, x7
217 ** mov x5, x7
218 ** mov x4, x7
219 ** mov x3, x7
220 ** mov x2, x7
221 ** mov x1, x7
222 ** str x7, \[sp\]
223 ** bl f1_stack
224 ** sbfx x0, x0, 0, 63
225 **...
226 ** ret
231 ** g8_stack:
232 **...
233 ** mov (x[0-9]+), x0
234 ** sxtw (x[0-9]+), w1
235 ** mov x0, \2
236 ** and x7, \2, 9223372036854775807
237 ** mov (x[0-9]+), 0
238 ** sbfx (x[0-9]+), \1, 0, 63
239 ** bfi \3, \4, 0, 1
240 ** stp \3, xzr, \[sp, 16\]
241 ** mov x6, x7
242 ** mov x5, x7
243 ** mov x4, x7
244 ** mov x3, x7
245 ** mov x2, x7
246 ** mov x1, x7
247 ** str x7, \[sp\]
248 ** bl f8_stack
249 ** sbfx x0, x0, 0, 63
250 **...
251 ** ret
255 ** g16_stack:
256 **...
257 ** mov (x[0-9]+), x0
258 ** sxtw (x[0-9]+), w1
259 ** mov x0, \2
260 ** and (x[0-9]+), \2, 9223372036854775807
261 ** mov (x[0-9]+), 0
262 ** sbfx (x[0-9]+), \1, 0, 63
263 ** bfi \4, \5, 0, 1
264 ** stp \4, xzr, \[sp, 16\]
265 ** mov x6, \3
266 ** mov x5, \3
267 ** mov x4, \3
268 ** mov x3, \3
269 ** mov x2, \3
270 ** mov x1, \3
271 ** str x7, \[sp\]
272 ** bl f16_stack
273 ** sbfx x0, x0, 0, 63
274 **...
275 ** ret
279 ** f1_stdarg:
280 **...
281 ** and x0, x2, 1
282 **...
283 ** ret
286 ** f16_stdarg:
287 **...
288 ** and x0, x2, 1
289 **...
290 ** ret
294 ** fp_stdarg:
295 **...
296 ** and x0, x1, 1
297 **...
298 ** ret
302 ** f1p_stdarg:
303 **...
304 ** and x0, x1, 1
305 **...
306 ** ret
309 ** f8p_stdarg:
310 **...
311 ** and x0, x1, 1
312 **...
313 ** ret
316 ** f16p_stdarg:
317 **...
318 ** and x0, x2, 1
319 **...
320 ** ret
324 ** g1_stdarg:
325 ** and x2, x0, 1
326 ** mov x3, 0
327 ** mov w0, w1
328 ** b f1_stdarg
332 ** g16_stdarg:
333 ** and x2, x0, 1
334 ** mov x3, 0
335 ** mov w0, w1
336 ** b f16_stdarg
340 ** gp_stdarg:
341 **...
342 ** mov x([0-9]+), x0
343 ** mov w0, w1
344 ** mov (w[0-9]+), 0
345 ** bfi \2, w\1, 0, 1
346 ** mov x2, 0
347 ** str xzr, \[sp\]
348 ** strb \2, \[sp\]
349 ** ldr x1, \[sp\]
350 **...
351 ** b fp_stdarg
355 ** g1p_stdarg:
356 ** mov (x[0-9]+), x0
357 ** mov w0, w1
358 ** and x1, \1, 1
359 ** mov x2, 0
360 ** b f1p_stdarg
364 ** g8p_stdarg:
365 ** mov (x[0-9]+), x0
366 ** mov w0, w1
367 ** and x1, \1, 1
368 ** mov x2, 0
369 ** b f8p_stdarg
373 ** g16p_stdarg:
374 ** and x2, x0, 1
375 ** mov x3, 0
376 ** mov w0, w1
377 ** b f16p_stdarg