/* gcc/config/sparc/sparc.cc  */
/* Subroutines for insn-output.cc for SPARC.
   Copyright (C) 1987-2023 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "output.h"
#include "insn-attr.h"
#include "explow.h"
#include "expr.h"
#include "debug.h"
#include "cfgrtl.h"
#include "common/common-target.h"
#include "gimplify.h"
#include "langhooks.h"
#include "reload.h"
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "opts.h"

/* This file should be included last.  */
#include "target-def.h"
/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;
  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     not variable.  */
  const int int_mul_bit_factor;
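  /* For instance (illustrative numbers): with int_mul_bit_factor == 2
     and a positive rs1 of 0x100, whose highest set bit is 8, the formula
     above yields int_mul + (8 - 3) / 2, i.e. two units above the base
     multiply cost.  */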
  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;

  /* cost of a (predictable) branch.  */
  const int branch_cost;
};
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs leon3_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (14), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (22), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (35), /* idiv */
  COSTS_N_INSNS (35), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs leon5_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (18), /* fdivd */
  COSTS_N_INSNS (25), /* fsqrts */
  COSTS_N_INSNS (26), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (35), /* idiv */
  COSTS_N_INSNS (35), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  3 /* branch cost */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
  2 /* branch cost */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
  2 /* branch cost */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  4 /* branch cost */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  5 /* branch cost */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  5 /* branch cost */
};

static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  2 /* branch cost */
};

static const
struct processor_costs niagara7_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  1 /* branch cost */
};

static const
struct processor_costs m8_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (26), /* fdivs */
  COSTS_N_INSNS (30), /* fdivd */
  COSTS_N_INSNS (33), /* fsqrts */
  COSTS_N_INSNS (41), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (10), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (57), /* udiv/sdiv */
  COSTS_N_INSNS (30), /* udivx/sdivx */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
  1 /* branch cost */
};

static const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
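/* Illustratively, the reserved-slot sequence quoted above corresponds to:

	mov  %o7, %g1		! or %o7, %g0, X
	call target		! candidate for linker relaxation
	 mov %g1, %o7		! or X, %g0, %o7, in the delay slot

   where %g1 stands in for the scratch register X (example added for
   clarity; the actual register choice is up to the compiler).  */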
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
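/* The rows below cover, in order, the 8 global, 8 out, 8 local and 8 in
   integer registers (among the outs only %sp qualifies, and among the ins
   %fp does not), followed by the floating-point registers and the
   condition-code and other special registers.  */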
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size		cfun->machine->frame_size
#define sparc_apparent_frame_size	cfun->machine->apparent_frame_size
#define sparc_frame_base_reg		cfun->machine->frame_base_reg
#define sparc_frame_base_offset		cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs		cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p		cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p	cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p	cfun->machine->prologue_data_valid_p
/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_option_override (void);
static void sparc_init_modes (void);
static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
static int leon5_adjust_cost (rtx_insn *, int, rtx_insn *, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (machine_mode, rtx, bool,
					code_helper = ERROR_MARK);
static bool sparc_legitimate_constant_p (machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx_insn *);
static void sparc_asm_function_prologue (FILE *);
static void sparc_asm_function_epilogue (FILE *);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static int sparc_register_move_cost (machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
						 int *, const_tree, int);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     const function_arg_info &);
static void sparc_function_arg_advance (cumulative_args_t,
					const function_arg_info &);
static rtx sparc_function_arg (cumulative_args_t, const function_arg_info &);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					const function_arg_info &);
static pad_direction sparc_function_arg_padding (machine_mode, const_tree);
static unsigned int sparc_function_arg_boundary (machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    const function_arg_info &);
static bool sparc_return_in_memory (const_tree, const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static unsigned HOST_WIDE_INT sparc_asan_shadow_offset (void);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static void sparc_conditional_register_usage (void);
static bool sparc_use_pseudo_pic_reg (void);
static void sparc_init_pic_reg (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static machine_mode sparc_preferred_simd_mode (scalar_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_lra_p (void);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, machine_mode, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   machine_mode,
					   secondary_reload_info *);
static bool sparc_secondary_memory_needed (machine_mode, reg_class_t,
					   reg_class_t);
static machine_mode sparc_secondary_memory_needed_mode (machine_mode);
static scalar_int_mode sparc_cstore_mode (enum insn_code icode);
static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
static unsigned int sparc_min_arithmetic_precision (void);
static unsigned int sparc_hard_regno_nregs (unsigned int, machine_mode);
static bool sparc_hard_regno_mode_ok (unsigned int, machine_mode);
static bool sparc_modes_tieable_p (machine_mode, machine_mode);
static bool sparc_can_change_mode_class (machine_mode, machine_mode,
					 reg_class_t);
static HOST_WIDE_INT sparc_constant_alignment (const_tree, HOST_WIDE_INT);
static bool sparc_vectorize_vec_perm_const (machine_mode, machine_mode,
					    rtx, rtx, rtx,
					    const vec_perm_indices &);
static bool sparc_can_follow_jump (const rtx_insn *, const rtx_insn *);
static HARD_REG_SET sparc_zero_call_used_regs (HARD_REG_SET);

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       do_diagnostic, handler, exclude } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
#endif
char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sparc_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING sparc_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs

#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET sparc_asan_shadow_offset

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#ifdef TARGET_THREAD_SSP_OFFSET
#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
#endif

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED sparc_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE sparc_secondary_memory_needed_mode

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#undef TARGET_INIT_PIC_REG
#define TARGET_INIT_PIC_REG sparc_init_pic_reg

#undef TARGET_USE_PSEUDO_PIC_REG
#define TARGET_USE_PSEUDO_PIC_REG sparc_use_pseudo_pic_reg

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P sparc_lra_p

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs

#undef TARGET_MIN_ARITHMETIC_PRECISION
#define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS sparc_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK sparc_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P sparc_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS sparc_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT sparc_constant_alignment

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST sparc_vectorize_vec_perm_const

#undef TARGET_CAN_FOLLOW_JUMP
#define TARGET_CAN_FOLLOW_JUMP sparc_can_follow_jump

#undef TARGET_ZERO_CALL_USED_REGS
#define TARGET_ZERO_CALL_USED_REGS sparc_zero_call_used_regs

#ifdef SPARC_GCOV_TYPE_SIZE
static HOST_WIDE_INT
sparc_gcov_type_size (void)
{
  return SPARC_GCOV_TYPE_SIZE;
}

#undef TARGET_GCOV_TYPE_SIZE
#define TARGET_GCOV_TYPE_SIZE sparc_gcov_type_size
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
/* Return the memory reference contained in X if any, zero otherwise.  */

static rtx
mem_ref (rtx x)
{
  if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
    x = XEXP (x, 0);

  if (MEM_P (x))
    return x;

  return NULL_RTX;
}
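/* For example, (zero_extend:SI (mem:QI ...)) yields the inner MEM,
   a bare (mem:SI ...) yields itself, and anything else yields zero.  */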
/* True if any of INSN's source register(s) is REG.  */

static bool
insn_uses_reg_p (rtx_insn *insn, unsigned int reg)
{
  extract_insn (insn);
  return ((REG_P (recog_data.operand[1])
	   && REGNO (recog_data.operand[1]) == reg)
	  || (recog_data.n_operands == 3
	      && REG_P (recog_data.operand[2])
	      && REGNO (recog_data.operand[2]) == reg));
}

/* True if INSN is a floating-point division or square-root.  */

static bool
div_sqrt_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_FPDIVS:
    case TYPE_FPSQRTS:
    case TYPE_FPDIVD:
    case TYPE_FPSQRTD:
      return true;
    default:
      return false;
    }
}

/* True if INSN is a floating-point instruction.  */

static bool
fpop_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_FPMOVE:
    case TYPE_FPCMOVE:
    case TYPE_FP:
    case TYPE_FPCMP:
    case TYPE_FPMUL:
    case TYPE_FPDIVS:
    case TYPE_FPSQRTS:
    case TYPE_FPDIVD:
    case TYPE_FPSQRTD:
      return true;
    default:
      return false;
    }
}

/* True if INSN is an atomic instruction.  */

static bool
atomic_insn_for_leon3_p (rtx_insn *insn)
{
  switch (INSN_CODE (insn))
    {
    case CODE_FOR_swapsi:
    case CODE_FOR_ldstub:
    case CODE_FOR_atomic_compare_and_swap_leon3_1:
      return true;
    default:
      return false;
    }
}

/* True if INSN is a store instruction.  */

static bool
store_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_STORE:
    case TYPE_FPSTORE:
      return true;
    default:
      return false;
    }
}

/* True if INSN is a load instruction.  */

static bool
load_insn_p (rtx_insn *insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  switch (get_attr_type (insn))
    {
    case TYPE_LOAD:
    case TYPE_SLOAD:
    case TYPE_FPLOAD:
      return true;
    default:
      return false;
    }
}
/* We use a machine specific pass to enable workarounds for errata.

   We need to have the (essentially) final form of the insn stream in order
   to properly detect the various hazards.  Therefore, this machine specific
   pass runs as late as possible.  */

/* True if INSN is a md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN) \
  (NONDEBUG_INSN_P (INSN) \
   && GET_CODE (PATTERN (INSN)) != USE \
   && GET_CODE (PATTERN (INSN)) != CLOBBER)
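/* Return the next active insn after INSN, also skipping any insn whose
   pattern is an UNSPEC_VOLATILE or an ASM_INPUT, as well as inline asm
   statements whose template is the empty string.  */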
rtx_insn *
next_active_non_empty_insn (rtx_insn *insn)
{
  insn = next_active_insn (insn);

  while (insn
	 && (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
	     || GET_CODE (PATTERN (insn)) == ASM_INPUT
	     || (USEFUL_INSN_P (insn)
		 && (asm_noperands (PATTERN (insn)) >= 0)
		 && !strcmp (decode_asm_operands (PATTERN (insn),
						  NULL, NULL, NULL,
						  NULL, NULL), ""))))
    insn = next_active_insn (insn);

  return insn;
}
static unsigned int
sparc_do_work_around_errata (void)
{
  rtx_insn *insn, *next;
  bool find_first_useful = true;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Now look for specific patterns in the insn stream.  */
  for (insn = get_insns (); insn; insn = next)
    {
      bool insert_nop = false;
      rtx set;
      rtx_insn *jump;
      rtx_sequence *seq;

      /* Look into the instruction in a delay slot.  */
      if (NONJUMP_INSN_P (insn)
	  && (seq = dyn_cast <rtx_sequence *> (PATTERN (insn))))
	{
	  jump = seq->insn (0);
	  insn = seq->insn (1);
	}
      else if (JUMP_P (insn))
	jump = insn;
      else
	jump = NULL;

      /* Do not begin function with atomic instruction.  */
      if (sparc_fix_ut700
	  && find_first_useful
	  && USEFUL_INSN_P (insn))
	{
	  find_first_useful = false;
	  if (atomic_insn_for_leon3_p (insn))
	    emit_insn_before (gen_nop (), insn);
	}

      /* Place a NOP at the branch target of an integer branch if it is a
	 floating-point operation or a floating-point branch.  */
      if (sparc_fix_gr712rc
	  && jump
	  && jump_to_label_p (jump)
	  && get_attr_branch_type (jump) == BRANCH_TYPE_ICC)
	{
	  rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
	  if (target
	      && (fpop_insn_p (target)
		  || (JUMP_P (target)
		      && get_attr_branch_type (target) == BRANCH_TYPE_FCC)))
	    emit_insn_before (gen_nop (), target);
	}

      /* Insert a NOP between load instruction and atomic instruction.  Insert
	 a NOP at branch target if there is a load in delay slot and an atomic
	 instruction at branch target.  */
      if (sparc_fix_ut700
	  && NONJUMP_INSN_P (insn)
	  && load_insn_p (insn))
	{
	  if (jump && jump_to_label_p (jump))
	    {
	      rtx_insn *target = next_active_insn (JUMP_LABEL_AS_INSN (jump));
	      if (target && atomic_insn_for_leon3_p (target))
		emit_insn_before (gen_nop (), target);
	    }

	  next = next_active_non_empty_insn (insn);
	  if (!next)
	    break;

	  if (atomic_insn_for_leon3_p (next))
	    insert_nop = true;
	}

      /* Look for a sequence that starts with a fdiv or fsqrt instruction and
	 ends with another fdiv or fsqrt instruction with no dependencies on
	 the former, along with an appropriate pattern in between.  */
      if (sparc_fix_lost_divsqrt
	  && NONJUMP_INSN_P (insn)
	  && div_sqrt_insn_p (insn))
	{
	  int i;
	  int fp_found = 0;
	  rtx_insn *after;

	  const unsigned int dest_reg = REGNO (SET_DEST (single_set (insn)));

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 4; i++)
	    {
	      /* Count floating-point operations.  */
	      if (i != 3 && fpop_insn_p (after))
		{
		  /* If the insn uses the destination register of
		     the div/sqrt, then it cannot be problematic.  */
		  if (insn_uses_reg_p (after, dest_reg))
		    break;
		  fp_found++;
		}

	      /* Count floating-point loads.  */
	      if (i != 3
		  && (set = single_set (after)) != NULL_RTX
		  && REG_P (SET_DEST (set))
		  && REGNO (SET_DEST (set)) > 31)
		{
		  /* If the insn uses the destination register of
		     the div/sqrt, then it cannot be problematic.  */
		  if (REGNO (SET_DEST (set)) == dest_reg)
		    break;
		  fp_found++;
		}

	      /* Check if this is a problematic sequence.  */
	      if (i > 1
		  && fp_found >= 2
		  && div_sqrt_insn_p (after))
		{
		  /* If this is the short version of the problematic
		     sequence we add two NOPs in a row to also prevent
		     the long version.  */
		  if (i == 2)
		    emit_insn_before (gen_nop (), next);
		  insert_nop = true;
		  break;
		}

	      /* No need to scan past a second div/sqrt.  */
	      if (div_sqrt_insn_p (after))
		break;

	      /* Insert NOP before branch.  */
	      if (i < 3
		  && (!NONJUMP_INSN_P (after)
		      || GET_CODE (PATTERN (after)) == SEQUENCE))
		{
		  insert_nop = true;
		  break;
		}

	      after = next_active_insn (after);
	      if (!after)
		break;
	    }
	}
      /* Look for either of these two sequences:

	 Sequence A:
	 1. store of word size or less (e.g. st / stb / sth / stf)
	 2. any single instruction that is not a load or store
	 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)

	 Sequence B:
	 1. store of double word size (e.g. std / stdf)
	 2. any store instruction (e.g. st / stb / sth / stf / std / stdf)  */
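      /* For instance (illustrative assembly, not from an actual trace),
	 sequence A would be broken up by the workaround below as:

	     st   %g1, [%sp+64]    ! 1. single-word store
	     nop                   !    inserted NOP
	     add  %g2, %g3, %g4    ! 2. non-memory instruction
	     st   %g5, [%sp+68]    ! 3. second store  */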
      if (sparc_fix_b2bst
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && store_insn_p (insn))
	{
	  /* Sequence B begins with a double-word store.  */
	  bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
	  rtx_insn *after;
	  int i;

	  next = next_active_non_empty_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 2; i++)
	    {
	      /* If the insn is a branch, then it cannot be problematic.  */
	      if (!NONJUMP_INSN_P (after)
		  || GET_CODE (PATTERN (after)) == SEQUENCE)
		break;

	      /* Sequence B is only two instructions long.  */
	      if (seq_b)
		{
		  /* Add NOP if followed by a store.  */
		  if (store_insn_p (after))
		    insert_nop = true;

		  /* Otherwise it is ok.  */
		  break;
		}

	      /* If the second instruction is a load or a store,
		 then the sequence cannot be problematic.  */
	      if (i == 0)
		{
		  if ((set = single_set (after)) != NULL_RTX
		      && (MEM_P (SET_DEST (set)) || mem_ref (SET_SRC (set))))
		    break;

		  after = next_active_non_empty_insn (after);
		  if (!after)
		    break;
		}

	      /* Add NOP if third instruction is a store.  */
	      if (i == 1
		  && store_insn_p (after))
		insert_nop = true;
	    }
	}

      /* Look for a single-word load into an odd-numbered FP register.  */
      else if (sparc_fix_at697f
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && mem_ref (SET_SRC (set))
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31
	       && REGNO (SET_DEST (set)) % 2 != 0)
	{
	  /* The wrong dependency is on the enclosing double register.  */
	  const unsigned int x = REGNO (SET_DEST (set)) - 1;
	  unsigned int src1, src2, dest;
	  int code;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  extract_insn (next);
	  code = INSN_CODE (next);

	  switch (code)
	    {
	    case CODE_FOR_adddf3:
	    case CODE_FOR_subdf3:
	    case CODE_FOR_muldf3:
	    case CODE_FOR_divdf3:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      src2 = REGNO (recog_data.operand[2]);
	      if (src1 != src2)
		{
		  /* Case [1-4]:
			 ld [address], %fx+1
			 FPOPd %f{x,y}, %f{y,x}, %f{x,y}  */
		  if ((src1 == x || src2 == x)
		      && (dest == src1 || dest == src2))
		    insert_nop = true;
		}
	      else
		{
		  /* Case 5:
			 ld [address], %fx+1
			 FPOPd %fx, %fx, %fx  */
		  if (src1 == x
		      && dest == src1
		      && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
		    insert_nop = true;
		}
	      break;

	    case CODE_FOR_sqrtdf2:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      /* Case 6:
		     ld [address], %fx+1
		     fsqrtd %fx, %fx  */
	      if (src1 == x && dest == src1)
		insert_nop = true;
	      break;

	    default:
	      break;
	    }
	}

      /* Look for a single-word load into an integer register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
	       && (mem_ref (SET_SRC (set)) != NULL_RTX
		   || INSN_CODE (insn) == CODE_FOR_movsi_pic_gotdata_op)
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) < 32)
	{
	  /* There is no problem if the second memory access has a data
	     dependency on the first single-cycle load.  */
	  rtx x = SET_DEST (set);

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second memory access to/from an integer register.  */
	  if ((set = single_set (next)) != NULL_RTX)
	    {
	      rtx src = SET_SRC (set);
	      rtx dest = SET_DEST (set);
	      rtx mem;

	      /* LDD is affected.  */
	      if ((mem = mem_ref (src)) != NULL_RTX
		  && REG_P (dest)
		  && REGNO (dest) < 32
		  && !reg_mentioned_p (x, XEXP (mem, 0)))
		insert_nop = true;

	      /* STD is *not* affected.  */
	      else if (MEM_P (dest)
		       && GET_MODE_SIZE (GET_MODE (dest)) <= 4
		       && (src == CONST0_RTX (GET_MODE (dest))
			   || (REG_P (src)
			       && REGNO (src) < 32
			       && REGNO (src) != REGNO (x)))
		       && !reg_mentioned_p (x, XEXP (dest, 0)))
		insert_nop = true;

	      /* GOT accesses use LD.  */
	      else if (INSN_CODE (next) == CODE_FOR_movsi_pic_gotdata_op
		       && !reg_mentioned_p (x, XEXP (XEXP (src, 0), 1)))
		insert_nop = true;
	    }
	}

      /* Look for a single-word load/operation into an FP register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31)
	{
	  /* Number of instructions in the problematic window.  */
	  const int n_insns = 4;
	  /* The problematic combination is with the sibling FP register.  */
	  const unsigned int x = REGNO (SET_DEST (set));
	  const unsigned int y = x ^ 1;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second load/operation into the sibling FP register.  */
	  if (!((set = single_set (next)) != NULL_RTX
		&& GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
		&& REG_P (SET_DEST (set))
		&& REGNO (SET_DEST (set)) == y))
	    continue;

	  /* Look for a (possible) store from the FP register in the next N
	     instructions, but bail out if it is again modified or if there
	     is a store from the sibling FP register before this store.  */
	  for (after = next, i = 0; i < n_insns; i++)
	    {
	      bool branch_p;

	      after = next_active_insn (after);
	      if (!after)
		break;

	      /* This is a branch with an empty delay slot.  */
	      if (!NONJUMP_INSN_P (after))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = NULL;
		}
	      /* This is a branch with a filled delay slot.  */
	      else if (rtx_sequence *seq =
		         dyn_cast <rtx_sequence *> (PATTERN (after)))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = seq->insn (1);
		}
	      /* This is a regular instruction.  */
	      else
		branch_p = false;

	      if (after && (set = single_set (after)) != NULL_RTX)
		{
		  const rtx src = SET_SRC (set);
		  const rtx dest = SET_DEST (set);
		  const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));

		  /* If the FP register is again modified before the store,
		     then the store isn't affected.  */
		  if (REG_P (dest)
		      && (REGNO (dest) == x
			  || (REGNO (dest) == y && size == 8)))
		    break;

		  if (MEM_P (dest) && REG_P (src))
		    {
		      /* If there is a store from the sibling FP register
			 before the store, then the store is not affected.  */
		      if (REGNO (src) == y || (REGNO (src) == x && size == 8))
			break;

		      /* Otherwise, the store is affected.  */
		      if (REGNO (src) == x && size == 4)
			{
			  insert_nop = true;
			  break;
			}
		    }
		}

	      /* If we have a branch in the first M instructions, then we
		 cannot see the (M+2)th instruction so we play safe.  */
	      if (branch_p && i <= (n_insns - 2))
		{
		  insert_nop = true;
		  break;
		}
	    }
	}

      else
	next = NEXT_INSN (insn);

      if (insert_nop)
	emit_insn_before (gen_nop (), next);
    }

  return 0;
}
namespace {

const pass_data pass_data_work_around_errata =
{
  RTL_PASS, /* type */
  "errata", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_work_around_errata : public rtl_opt_pass
{
public:
  pass_work_around_errata(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_work_around_errata, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return sparc_fix_at697f
	  || sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc
	  || sparc_fix_b2bst || sparc_fix_lost_divsqrt;
    }

  virtual unsigned int execute (function *)
    {
      return sparc_do_work_around_errata ();
    }

}; // class pass_work_around_errata

} // anon namespace

rtl_opt_pass *
make_pass_work_around_errata (gcc::context *ctxt)
{
  return new pass_work_around_errata (ctxt);
}
/* Helpers for TARGET_DEBUG_OPTIONS.  */
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FSMULD)
    fprintf (stderr, "FSMULD ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_VIS4)
    fprintf (stderr, "VIS4 ");
  if (flags & MASK_VIS4B)
    fprintf (stderr, "VIS4B ");
  if (flags & MASK_CBCOND)
    fprintf (stderr, "CBCOND ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_LEON)
    fprintf (stderr, "LEON ");
  if (flags & MASK_LEON3)
    fprintf (stderr, "LEON3 ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum sparc_processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_leon3, PROCESSOR_LEON3 },
    { TARGET_CPU_leon5, PROCESSOR_LEON5 },
    { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
    { TARGET_CPU_m8, PROCESSOR_M8 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the enum processor_type in sparc-opts.h.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8 },
    { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
    { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
    { "leon5", MASK_ISA, MASK_V8|MASK_LEON3 },
    { "leon3v7", MASK_ISA, MASK_LEON3 },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
    /* UltraSPARC M7 */
    { "niagara7", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
    /* UltraSPARC M8 */
    { "m8", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS4B|MASK_FMAF|MASK_CBCOND|MASK_SUBXC }
  };
  const struct cpu_table *cpu;
  unsigned int i;
1792 if (sparc_debug_string != NULL)
1794 const char *q;
1795 char *p;
1797 p = ASTRDUP (sparc_debug_string);
1798 while ((q = strtok (p, ",")) != NULL)
1800 bool invert;
1801 int mask;
1803 p = NULL;
1804 if (*q == '!')
1806 invert = true;
1807 q++;
1809 else
1810 invert = false;
1812 if (! strcmp (q, "all"))
1813 mask = MASK_DEBUG_ALL;
1814 else if (! strcmp (q, "options"))
1815 mask = MASK_DEBUG_OPTIONS;
1816 else
1817 error ("unknown %<-mdebug-%s%> switch", q);
1819 if (invert)
1820 sparc_debug &= ~mask;
1821 else
1822 sparc_debug |= mask;
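/* For example, -mdebug=all,!options first sets every debug bit via
   MASK_DEBUG_ALL and then clears MASK_DEBUG_OPTIONS again: the
   comma-separated tokens are applied in order and a leading '!'
   inverts the named mask.  */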
1826 /* Enable the FsMULd instruction by default if not explicitly specified by
1827 the user. It may be later disabled by the CPU (explicitly or not). */
1828 if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
1829 target_flags |= MASK_FSMULD;
1831 if (TARGET_DEBUG_OPTIONS)
1833 dump_target_flags ("Initial target_flags", target_flags);
1834 dump_target_flags ("target_flags_explicit", target_flags_explicit);
1837 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1838 SUBTARGET_OVERRIDE_OPTIONS;
1839 #endif
1841 #ifndef SPARC_BI_ARCH
1842 /* Check for unsupported architecture size. */
1843 if (!TARGET_64BIT != DEFAULT_ARCH32_P)
1844 error ("%s is not supported by this configuration",
1845 DEFAULT_ARCH32_P ? "-m64" : "-m32");
1846 #endif
1848 /* We force all 64-bit archs to use 128-bit long double.  */
1849 if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
1851 error ("%<-mlong-double-64%> not allowed with %<-m64%>");
1852 target_flags |= MASK_LONG_DOUBLE_128;
1855 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1856 for (i = 8; i < 16; i++)
1857 if (!call_used_regs [i])
1859 error ("%<-fcall-saved-REG%> is not supported for out registers");
1860 call_used_regs [i] = 1;
1863 /* Set the default CPU if no -mcpu option was specified. */
1864 if (!OPTION_SET_P (sparc_cpu_and_features))
1866 for (def = &cpu_default[0]; def->cpu != -1; ++def)
1867 if (def->cpu == TARGET_CPU_DEFAULT)
1868 break;
1869 gcc_assert (def->cpu != -1);
1870 sparc_cpu_and_features = def->processor;
1873 /* Set the default CPU if no -mtune option was specified. */
1874 if (!OPTION_SET_P (sparc_cpu))
1875 sparc_cpu = sparc_cpu_and_features;
1877 cpu = &cpu_table[(int) sparc_cpu_and_features];
1879 if (TARGET_DEBUG_OPTIONS)
1881 fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
1882 dump_target_flags ("cpu->disable", cpu->disable);
1883 dump_target_flags ("cpu->enable", cpu->enable);
1886 target_flags &= ~cpu->disable;
1887 target_flags |= (cpu->enable
1888 #ifndef HAVE_AS_FMAF_HPC_VIS3
1889 & ~(MASK_FMAF | MASK_VIS3)
1890 #endif
1891 #ifndef HAVE_AS_SPARC4
1892 & ~MASK_CBCOND
1893 #endif
1894 #ifndef HAVE_AS_SPARC5_VIS4
1895 & ~(MASK_VIS4 | MASK_SUBXC)
1896 #endif
1897 #ifndef HAVE_AS_SPARC6
1898 & ~(MASK_VIS4B)
1899 #endif
1900 #ifndef HAVE_AS_LEON
1901 & ~(MASK_LEON | MASK_LEON3)
1902 #endif
1903 & ~(target_flags_explicit & MASK_FEATURES)
1906 /* FsMULd is a V8 instruction. */
1907 if (!TARGET_V8 && !TARGET_V9)
1908 target_flags &= ~MASK_FSMULD;
1910 /* -mvis2 implies -mvis. */
1911 if (TARGET_VIS2)
1912 target_flags |= MASK_VIS;
1914 /* -mvis3 implies -mvis2 and -mvis. */
1915 if (TARGET_VIS3)
1916 target_flags |= MASK_VIS2 | MASK_VIS;
1918 /* -mvis4 implies -mvis3, -mvis2 and -mvis. */
1919 if (TARGET_VIS4)
1920 target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1922 /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis */
1923 if (TARGET_VIS4B)
1924 target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;
1926 /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld if
1927 the FPU is disabled. */
1928 if (!TARGET_FPU)
1929 target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
1930 | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);
1932 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1933 are available; -m64 also implies v9. */
1934 if (TARGET_VIS || TARGET_ARCH64)
1936 target_flags |= MASK_V9;
1937 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
1940 /* -mvis also implies -mv8plus on 32-bit. */
1941 if (TARGET_VIS && !TARGET_ARCH64)
1942 target_flags |= MASK_V8PLUS;
1944 /* Use the deprecated v8 insns for sparc64 in 32-bit mode. */
1945 if (TARGET_V9 && TARGET_ARCH32)
1946 target_flags |= MASK_DEPRECATED_V8_INSNS;
1948 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
1949 if (!TARGET_V9 || TARGET_ARCH64)
1950 target_flags &= ~MASK_V8PLUS;
1952 /* Don't use stack biasing in 32-bit mode. */
1953 if (TARGET_ARCH32)
1954 target_flags &= ~MASK_STACK_BIAS;
1956 /* Use LRA instead of reload, unless otherwise instructed. */
1957 if (!(target_flags_explicit & MASK_LRA))
1958 target_flags |= MASK_LRA;
1960 /* Enable applicable errata workarounds for LEON3FT. */
1961 if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
1963 sparc_fix_b2bst = 1;
1964 sparc_fix_lost_divsqrt = 1;
1967 /* Disable FsMULd for the UT699 since it doesn't work correctly. */
1968 if (sparc_fix_ut699)
1969 target_flags &= ~MASK_FSMULD;
1971 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1972 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1973 target_flags |= MASK_LONG_DOUBLE_128;
1974 #endif
1976 if (TARGET_DEBUG_OPTIONS)
1977 dump_target_flags ("Final target_flags", target_flags);
1979 /* Set the code model if no -mcmodel option was specified. */
1980 if (OPTION_SET_P (sparc_code_model))
1982 if (TARGET_ARCH32)
1983 error ("%<-mcmodel=%> is not supported in 32-bit mode");
1985 else
1987 if (TARGET_ARCH32)
1988 sparc_code_model = CM_32;
1989 else
1990 sparc_code_model = SPARC_DEFAULT_CMODEL;
1993 /* Set the memory model if no -mmemory-model option was specified. */
1994 if (!OPTION_SET_P (sparc_memory_model))
1996 /* Choose the memory model for the operating system. */
1997 enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
1998 if (os_default != SMM_DEFAULT)
1999 sparc_memory_model = os_default;
2000 /* Choose the most relaxed model for the processor. */
2001 else if (TARGET_V9)
2002 sparc_memory_model = SMM_RMO;
2003 else if (TARGET_LEON3)
2004 sparc_memory_model = SMM_TSO;
2005 else if (TARGET_LEON)
2006 sparc_memory_model = SMM_SC;
2007 else if (TARGET_V8)
2008 sparc_memory_model = SMM_PSO;
2009 else
2010 sparc_memory_model = SMM_SC;
2013 /* Supply a default value for align_functions. */
2014 if (flag_align_functions && !str_align_functions)
2016 if (sparc_cpu == PROCESSOR_ULTRASPARC
2017 || sparc_cpu == PROCESSOR_ULTRASPARC3
2018 || sparc_cpu == PROCESSOR_NIAGARA
2019 || sparc_cpu == PROCESSOR_NIAGARA2
2020 || sparc_cpu == PROCESSOR_NIAGARA3
2021 || sparc_cpu == PROCESSOR_NIAGARA4)
2022 str_align_functions = "32";
2023 else if (sparc_cpu == PROCESSOR_NIAGARA7
2024 || sparc_cpu == PROCESSOR_M8)
2025 str_align_functions = "64";
2028 /* Validate PCC_STRUCT_RETURN. */
2029 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
2030 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
2032 /* Only use .uaxword when compiling for a 64-bit target. */
2033 if (!TARGET_ARCH64)
2034 targetm.asm_out.unaligned_op.di = NULL;
2036 /* Set the processor costs. */
2037 switch (sparc_cpu)
2039 case PROCESSOR_V7:
2040 case PROCESSOR_CYPRESS:
2041 sparc_costs = &cypress_costs;
2042 break;
2043 case PROCESSOR_V8:
2044 case PROCESSOR_SPARCLITE:
2045 case PROCESSOR_SUPERSPARC:
2046 sparc_costs = &supersparc_costs;
2047 break;
2048 case PROCESSOR_F930:
2049 case PROCESSOR_F934:
2050 case PROCESSOR_HYPERSPARC:
2051 case PROCESSOR_SPARCLITE86X:
2052 sparc_costs = &hypersparc_costs;
2053 break;
2054 case PROCESSOR_LEON:
2055 sparc_costs = &leon_costs;
2056 break;
2057 case PROCESSOR_LEON3:
2058 case PROCESSOR_LEON3V7:
2059 sparc_costs = &leon3_costs;
2060 break;
2061 case PROCESSOR_LEON5:
2062 sparc_costs = &leon5_costs;
2063 break;
2064 case PROCESSOR_SPARCLET:
2065 case PROCESSOR_TSC701:
2066 sparc_costs = &sparclet_costs;
2067 break;
2068 case PROCESSOR_V9:
2069 case PROCESSOR_ULTRASPARC:
2070 sparc_costs = &ultrasparc_costs;
2071 break;
2072 case PROCESSOR_ULTRASPARC3:
2073 sparc_costs = &ultrasparc3_costs;
2074 break;
2075 case PROCESSOR_NIAGARA:
2076 sparc_costs = &niagara_costs;
2077 break;
2078 case PROCESSOR_NIAGARA2:
2079 sparc_costs = &niagara2_costs;
2080 break;
2081 case PROCESSOR_NIAGARA3:
2082 sparc_costs = &niagara3_costs;
2083 break;
2084 case PROCESSOR_NIAGARA4:
2085 sparc_costs = &niagara4_costs;
2086 break;
2087 case PROCESSOR_NIAGARA7:
2088 sparc_costs = &niagara7_costs;
2089 break;
2090 case PROCESSOR_M8:
2091 sparc_costs = &m8_costs;
2092 break;
2093 case PROCESSOR_NATIVE:
2094 gcc_unreachable ();
2097 /* param_simultaneous_prefetches is the number of prefetches that
2098 can run at the same time. More important, it is the threshold
2099 defining when additional prefetches will be dropped by the
2100 hardware.
2102 The UltraSPARC-III features a documented prefetch queue with a
2103 size of 8. Additional prefetches issued in the cpu are
2104 dropped.
2106 Niagara processors are different. In these processors prefetches
2107 are handled much like regular loads. The L1 miss buffer is 32
2108 entries, but prefetches start getting affected when 30 entries
2109 become occupied. That occupation could be a mix of regular loads
2110 and prefetches though. And that buffer is shared by all threads.
2111 Once the threshold is reached, if the core is running a single
2112 thread the prefetch will retry. If more than one thread is
2113 running, the prefetch will be dropped.
2115 All this makes it very difficult to determine how many
2116 prefetches can be issued simultaneously, even in a
2117 single-threaded program. Experimental results show that setting
2118 this parameter to 32 works well when the number of threads is not
2119 high. */
2120 SET_OPTION_IF_UNSET (&global_options, &global_options_set,
2121 param_simultaneous_prefetches,
2122 ((sparc_cpu == PROCESSOR_ULTRASPARC
2123 || sparc_cpu == PROCESSOR_NIAGARA
2124 || sparc_cpu == PROCESSOR_NIAGARA2
2125 || sparc_cpu == PROCESSOR_NIAGARA3
2126 || sparc_cpu == PROCESSOR_NIAGARA4)
2127 ? 2
2128 : (sparc_cpu == PROCESSOR_ULTRASPARC3
2129 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
2130 || sparc_cpu == PROCESSOR_M8)
2131 ? 32 : 3))));
2133 /* param_l1_cache_line_size is the size of the L1 cache line, in
2134 bytes.
2136 The Oracle SPARC Architecture (previously the UltraSPARC
2137 Architecture) specification states that when a PREFETCH[A]
2138 instruction is executed an implementation-specific amount of data
2139 is prefetched, and that it is at least 64 bytes long (aligned to
2140 at least 64 bytes).
2142 However, this is not correct. The M7 (and implementations prior
2143 to that) does not guarantee a 64B prefetch into a cache if the
2144 line size is smaller. A single cache line is all that is ever
2145 prefetched. So for the M7, where the L1D$ has 32B lines and the
2146 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
2147 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
2148 is a read_n prefetch, which is the only type which allocates to
2149 the L1.) */
2150 SET_OPTION_IF_UNSET (&global_options, &global_options_set,
2151 param_l1_cache_line_size,
2152 (sparc_cpu == PROCESSOR_M8 ? 64 : 32));
2154 /* param_l1_cache_size is the size of the L1D$ (most SPARC chips use
2155 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
2156 Niagara processors feature a L1D$ of 16KB. */
2157 SET_OPTION_IF_UNSET (&global_options, &global_options_set,
2158 param_l1_cache_size,
2159 ((sparc_cpu == PROCESSOR_ULTRASPARC
2160 || sparc_cpu == PROCESSOR_ULTRASPARC3
2161 || sparc_cpu == PROCESSOR_NIAGARA
2162 || sparc_cpu == PROCESSOR_NIAGARA2
2163 || sparc_cpu == PROCESSOR_NIAGARA3
2164 || sparc_cpu == PROCESSOR_NIAGARA4
2165 || sparc_cpu == PROCESSOR_NIAGARA7
2166 || sparc_cpu == PROCESSOR_M8)
2167 ? 16 : 64));
2169 /* param_l2_cache_size is the size of the L2 in kilobytes. Note
2170 that 512 is the default in params.def. */
2171 SET_OPTION_IF_UNSET (&global_options, &global_options_set,
2172 param_l2_cache_size,
2173 ((sparc_cpu == PROCESSOR_NIAGARA4
2174 || sparc_cpu == PROCESSOR_M8)
2175 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
2176 ? 256 : 512)));
2179 /* Disable save slot sharing for call-clobbered registers by default.
2180 The IRA sharing algorithm works on single registers only and this
2181 pessimizes for double floating-point registers. */
2182 if (!OPTION_SET_P (flag_ira_share_save_slots))
2183 flag_ira_share_save_slots = 0;
2185 /* Only enable REE by default in 64-bit mode where it helps to eliminate
2186 redundant 32-to-64-bit extensions. */
2187 if (!OPTION_SET_P (flag_ree) && TARGET_ARCH32)
2188 flag_ree = 0;
2190 /* Do various machine-dependent initializations. */
2191 sparc_init_modes ();
2193 /* Set up function hooks. */
2194 init_machine_status = sparc_init_machine_status;
2197 /* Miscellaneous utilities. */
2199 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
2200 or branch on register contents instructions. */
2202 int
2203 v9_regcmp_p (enum rtx_code code)
2205 return (code == EQ || code == NE || code == GE || code == LT
2206 || code == LE || code == GT);
2209 /* Nonzero if OP is a floating point constant which can
2210 be loaded into an integer register using a single
2211 sethi instruction. */
2213 int
2214 fp_sethi_p (rtx op)
2216 if (GET_CODE (op) == CONST_DOUBLE)
2218 long i;
2220 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2221 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
2224 return 0;
2227 /* Nonzero if OP is a floating point constant which can
2228 be loaded into an integer register using a single
2229 mov instruction. */
2231 int
2232 fp_mov_p (rtx op)
2234 if (GET_CODE (op) == CONST_DOUBLE)
2236 long i;
2238 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2239 return SPARC_SIMM13_P (i);
2242 return 0;
2245 /* Nonzero if OP is a floating point constant which can
2246 be loaded into an integer register using a high/losum
2247 instruction sequence. */
2249 int
2250 fp_high_losum_p (rtx op)
2252 /* The constraints calling this should only be in
2253 SFmode move insns, so any constant which cannot
2254 be moved using a single insn will do. */
2255 if (GET_CODE (op) == CONST_DOUBLE)
2257 long i;
2259 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
2260 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
2263 return 0;
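/* Taken together, fp_mov_p, fp_sethi_p and fp_high_losum_p partition
   the SFmode constants: a bit pattern that fits in a 13-bit signed
   immediate takes one mov, one whose low 10 bits are clear takes one
   sethi, and anything else needs the two-instruction high/losum
   sequence.  */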
2266 /* Return true if the address of LABEL can be loaded by means of the
2267 mov{si,di}_pic_label_ref patterns in PIC mode. */
2269 static bool
2270 can_use_mov_pic_label_ref (rtx label)
2272 /* VxWorks does not impose a fixed gap between segments; the run-time
2273 gap can be different from the object-file gap. We therefore can't
2274 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
2275 are absolutely sure that X is in the same segment as the GOT.
2276 Unfortunately, the flexibility of linker scripts means that we
2277 can't be sure of that in general, so assume that GOT-relative
2278 accesses are never valid on VxWorks. */
2279 if (TARGET_VXWORKS_RTP)
2280 return false;
2282 /* Similarly, if the label is non-local, it might end up being placed
2283 in a different section than the current one; now mov_pic_label_ref
2284 requires the label and the code to be in the same section. */
2285 if (LABEL_REF_NONLOCAL_P (label))
2286 return false;
2288 /* Finally, if we are reordering basic blocks and partitioning into hot
2289 and cold sections, this might happen for any label. */
2290 if (flag_reorder_blocks_and_partition)
2291 return false;
2293 return true;
2296 /* Expand a move instruction. Return true if all work is done. */
2298 bool
2299 sparc_expand_move (machine_mode mode, rtx *operands)
2301 /* Handle sets of MEM first. */
2302 if (GET_CODE (operands[0]) == MEM)
2304 /* 0 is a register (or a pair of registers) on SPARC. */
2305 if (register_or_zero_operand (operands[1], mode))
2306 return false;
2308 if (!reload_in_progress)
2310 operands[0] = validize_mem (operands[0]);
2311 operands[1] = force_reg (mode, operands[1]);
2315 /* Fix up TLS cases. */
2316 if (TARGET_HAVE_TLS
2317 && CONSTANT_P (operands[1])
2318 && sparc_tls_referenced_p (operands [1]))
2320 operands[1] = sparc_legitimize_tls_address (operands[1]);
2321 return false;
2324 /* Fix up PIC cases. */
2325 if (flag_pic && CONSTANT_P (operands[1]))
2327 if (pic_address_needs_scratch (operands[1]))
2328 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
2330 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2331 if ((GET_CODE (operands[1]) == LABEL_REF
2332 && can_use_mov_pic_label_ref (operands[1]))
2333 || (GET_CODE (operands[1]) == CONST
2334 && GET_CODE (XEXP (operands[1], 0)) == PLUS
2335 && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
2336 && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT
2337 && can_use_mov_pic_label_ref (XEXP (XEXP (operands[1], 0), 0))))
2339 if (mode == SImode)
2341 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2342 return true;
2345 if (mode == DImode)
2347 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2348 return true;
2352 if (symbolic_operand (operands[1], mode))
2354 operands[1]
2355 = sparc_legitimize_pic_address (operands[1],
2356 reload_in_progress
2357 ? operands[0] : NULL_RTX);
2358 return false;
2362 /* If we are trying to toss an integer constant into FP registers,
2363 or loading a FP or vector constant, force it into memory. */
2364 if (CONSTANT_P (operands[1])
2365 && REG_P (operands[0])
2366 && (SPARC_FP_REG_P (REGNO (operands[0]))
2367 || SCALAR_FLOAT_MODE_P (mode)
2368 || VECTOR_MODE_P (mode)))
2370 /* emit_group_store will send such bogosity to us when it is
2371 not storing directly into memory. So fix this up to avoid
2372 crashes in output_constant_pool. */
2373 if (operands [1] == const0_rtx)
2374 operands[1] = CONST0_RTX (mode);
2376 /* We can clear or set to all-ones FP registers if TARGET_VIS, and
2377 always other regs. */
2378 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2379 && (const_zero_operand (operands[1], mode)
2380 || const_all_ones_operand (operands[1], mode)))
2381 return false;
2383 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2384 /* We are able to build any SF constant in integer registers
2385 with at most 2 instructions. */
2386 && (mode == SFmode
2387 /* And any DF constant in integer registers if needed. */
2388 || (mode == DFmode && !can_create_pseudo_p ())))
2389 return false;
2391 operands[1] = force_const_mem (mode, operands[1]);
2392 if (!reload_in_progress)
2393 operands[1] = validize_mem (operands[1]);
2394 return false;
2397 /* Accept non-constants and valid constants unmodified. */
2398 if (!CONSTANT_P (operands[1])
2399 || GET_CODE (operands[1]) == HIGH
2400 || input_operand (operands[1], mode))
2401 return false;
2403 switch (mode)
2405 case E_QImode:
2406 /* All QImode constants require only one insn, so proceed. */
2407 break;
2409 case E_HImode:
2410 case E_SImode:
2411 sparc_emit_set_const32 (operands[0], operands[1]);
2412 return true;
2414 case E_DImode:
2415 /* input_operand should have filtered out 32-bit mode. */
2416 sparc_emit_set_const64 (operands[0], operands[1]);
2417 return true;
2419 case E_TImode:
2421 rtx high, low;
2422 /* TImode isn't available in 32-bit mode. */
2423 split_double (operands[1], &high, &low);
2424 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2425 high));
2426 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2427 low));
2429 return true;
2431 default:
2432 gcc_unreachable ();
2435 return false;
2438 /* Load OP1, a 32-bit constant, into OP0, a register.
2439 We know it can't be done in one insn when we get
2440 here; the move expander guarantees this. */
2442 static void
2443 sparc_emit_set_const32 (rtx op0, rtx op1)
2445 machine_mode mode = GET_MODE (op0);
2446 rtx temp = op0;
2448 if (can_create_pseudo_p ())
2449 temp = gen_reg_rtx (mode);
2451 if (GET_CODE (op1) == CONST_INT)
2453 gcc_assert (!small_int_operand (op1, mode)
2454 && !const_high_operand (op1, mode));
2456 /* Emit them as real moves instead of a HIGH/LO_SUM,
2457 this way CSE can see everything and reuse intermediate
2458 values if it wants. */
2459 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2460 & ~(HOST_WIDE_INT) 0x3ff)));
2462 emit_insn (gen_rtx_SET (op0,
2463 gen_rtx_IOR (mode, temp,
2464 GEN_INT (INTVAL (op1) & 0x3ff))));
2466 else
2468 /* A symbol, emit in the traditional way. */
2469 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2470 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
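/* As a concrete illustration, loading 0x12345678 this way yields
     temp = 0x12345400   (the constant with its low 10 bits cleared)
     op0  = temp | 0x278 (the low 10 bits or'd back in)
   i.e. a sethi/or pair at the assembly level.  */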
2474 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2475 If TEMP is nonzero, we are forbidden to use any other scratch
2476 registers. Otherwise, we are allowed to generate them as needed.
2478 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2479 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2481 void
2482 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2484 rtx cst, temp1, temp2, temp3, temp4, temp5;
2485 rtx ti_temp = 0;
2487 /* Deal with too large offsets. */
2488 if (GET_CODE (op1) == CONST
2489 && GET_CODE (XEXP (op1, 0)) == PLUS
2490 && CONST_INT_P (cst = XEXP (XEXP (op1, 0), 1))
2491 && trunc_int_for_mode (INTVAL (cst), SImode) != INTVAL (cst))
2493 gcc_assert (!temp);
2494 temp1 = gen_reg_rtx (DImode);
2495 temp2 = gen_reg_rtx (DImode);
2496 sparc_emit_set_const64 (temp2, cst);
2497 sparc_emit_set_symbolic_const64 (temp1, XEXP (XEXP (op1, 0), 0),
2498 NULL_RTX);
2499 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp1, temp2)));
2500 return;
2503 if (temp && GET_MODE (temp) == TImode)
2505 ti_temp = temp;
2506 temp = gen_rtx_REG (DImode, REGNO (temp));
2509 /* SPARC-V9 code model support. */
2510 switch (sparc_code_model)
2512 case CM_MEDLOW:
2513 /* The range spanned by all instructions in the object is less
2514 than 2^31 bytes (2GB) and the distance from any instruction
2515 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2516 than 2^31 bytes (2GB).
2518 The executable must be in the low 4TB of the virtual address
2519 space.
2521 sethi %hi(symbol), %temp1
2522 or %temp1, %lo(symbol), %reg */
2523 if (temp)
2524 temp1 = temp; /* op0 is allowed. */
2525 else
2526 temp1 = gen_reg_rtx (DImode);
2528 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2529 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2530 break;
2532 case CM_MEDMID:
2533 /* The range spanned by all instructions in the object is less
2534 than 2^31 bytes (2GB) and the distance from any instruction
2535 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2536 than 2^31 bytes (2GB).
2538 The executable must be in the low 16TB of the virtual address
2539 space.
2541 sethi %h44(symbol), %temp1
2542 or %temp1, %m44(symbol), %temp2
2543 sllx %temp2, 12, %temp3
2544 or %temp3, %l44(symbol), %reg */
2545 if (temp)
2547 temp1 = op0;
2548 temp2 = op0;
2549 temp3 = temp; /* op0 is allowed. */
2551 else
2553 temp1 = gen_reg_rtx (DImode);
2554 temp2 = gen_reg_rtx (DImode);
2555 temp3 = gen_reg_rtx (DImode);
2558 emit_insn (gen_seth44 (temp1, op1));
2559 emit_insn (gen_setm44 (temp2, temp1, op1));
2560 emit_insn (gen_rtx_SET (temp3,
2561 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2562 emit_insn (gen_setl44 (op0, temp3, op1));
2563 break;
2565 case CM_MEDANY:
2566 /* The range spanned by all instructions in the object is less
2567 than 2^31 bytes (2GB) and the distance from any instruction
2568 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2569 than 2^31 bytes (2GB).
2571 The executable can be placed anywhere in the virtual address
2572 space.
2574 sethi %hh(symbol), %temp1
2575 sethi %lm(symbol), %temp2
2576 or %temp1, %hm(symbol), %temp3
2577 sllx %temp3, 32, %temp4
2578 or %temp4, %temp2, %temp5
2579 or %temp5, %lo(symbol), %reg */
2580 if (temp)
2582 /* It is possible that one of the registers we got for operands[2]
2583 might coincide with that of operands[0] (which is why we made
2584 it TImode). Pick the other one to use as our scratch. */
2585 if (rtx_equal_p (temp, op0))
2587 gcc_assert (ti_temp);
2588 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2590 temp1 = op0;
2591 temp2 = temp; /* op0 is _not_ allowed, see above. */
2592 temp3 = op0;
2593 temp4 = op0;
2594 temp5 = op0;
2596 else
2598 temp1 = gen_reg_rtx (DImode);
2599 temp2 = gen_reg_rtx (DImode);
2600 temp3 = gen_reg_rtx (DImode);
2601 temp4 = gen_reg_rtx (DImode);
2602 temp5 = gen_reg_rtx (DImode);
2605 emit_insn (gen_sethh (temp1, op1));
2606 emit_insn (gen_setlm (temp2, op1));
2607 emit_insn (gen_sethm (temp3, temp1, op1));
2608 emit_insn (gen_rtx_SET (temp4,
2609 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2610 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2611 emit_insn (gen_setlo (op0, temp5, op1));
2612 break;
2614 case CM_EMBMEDANY:
2615 /* Old old old backwards compatibility cruft here.
2616 Essentially it is MEDLOW with a fixed 64-bit
2617 virtual base added to all data segment addresses.
2618 Text-segment stuff is computed like MEDANY, we can't
2619 reuse the code above because the relocation knobs
2620 look different.
2622 Data segment: sethi %hi(symbol), %temp1
2623 add %temp1, EMBMEDANY_BASE_REG, %temp2
2624 or %temp2, %lo(symbol), %reg */
2625 if (data_segment_operand (op1, GET_MODE (op1)))
2627 if (temp)
2629 temp1 = temp; /* op0 is allowed. */
2630 temp2 = op0;
2632 else
2634 temp1 = gen_reg_rtx (DImode);
2635 temp2 = gen_reg_rtx (DImode);
2638 emit_insn (gen_embmedany_sethi (temp1, op1));
2639 emit_insn (gen_embmedany_brsum (temp2, temp1));
2640 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2643 /* Text segment: sethi %uhi(symbol), %temp1
2644 sethi %hi(symbol), %temp2
2645 or %temp1, %ulo(symbol), %temp3
2646 sllx %temp3, 32, %temp4
2647 or %temp4, %temp2, %temp5
2648 or %temp5, %lo(symbol), %reg */
2649 else
2651 if (temp)
2653 /* It is possible that one of the registers we got for operands[2]
2654 might coincide with that of operands[0] (which is why we made
2655 it TImode). Pick the other one to use as our scratch. */
2656 if (rtx_equal_p (temp, op0))
2658 gcc_assert (ti_temp);
2659 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2661 temp1 = op0;
2662 temp2 = temp; /* op0 is _not_ allowed, see above. */
2663 temp3 = op0;
2664 temp4 = op0;
2665 temp5 = op0;
2667 else
2669 temp1 = gen_reg_rtx (DImode);
2670 temp2 = gen_reg_rtx (DImode);
2671 temp3 = gen_reg_rtx (DImode);
2672 temp4 = gen_reg_rtx (DImode);
2673 temp5 = gen_reg_rtx (DImode);
2676 emit_insn (gen_embmedany_textuhi (temp1, op1));
2677 emit_insn (gen_embmedany_texthi (temp2, op1));
2678 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2679 emit_insn (gen_rtx_SET (temp4,
2680 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2681 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2682 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2684 break;
2686 default:
2687 gcc_unreachable ();
2691 /* These avoid problems when cross compiling. If we do not
2692 go through all this hair then the optimizer will see
2693 invalid REG_EQUAL notes or in some cases none at all. */
2694 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2695 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2696 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2697 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2699 /* The optimizer is not to assume anything about exactly
2700 which bits are set for a HIGH; they are unspecified.
2701 Unfortunately this leads to many missed optimizations
2702 during CSE. We mask out the non-HIGH bits and match
2703 a plain movdi to alleviate this problem. */
2704 static rtx
2705 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2707 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2710 static rtx
2711 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2713 return gen_rtx_SET (dest, GEN_INT (val));
2716 static rtx
2717 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2719 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2722 static rtx
2723 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2725 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2728 /* Worker routines for 64-bit constant formation on arch64.
2729 One of the key things to be doing in these emissions is
2730 to create as many temp REGs as possible. This makes it
2731 possible for half-built constants to be reused when
2732 such values are similar to something required later on.
2733 Without doing this, the optimizer cannot see such
2734 opportunities. */
2736 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2737 unsigned HOST_WIDE_INT, int);
2739 static void
2740 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2741 unsigned HOST_WIDE_INT low_bits, int is_neg)
2743 unsigned HOST_WIDE_INT high_bits;
2745 if (is_neg)
2746 high_bits = (~low_bits) & 0xffffffff;
2747 else
2748 high_bits = low_bits;
2750 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2751 if (!is_neg)
2753 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2755 else
2757 /* If we are XOR'ing with -1, then we should emit a one's complement
2758 instead. This way the combiner will notice logical operations
2759 such as ANDN later on and substitute. */
2760 if ((low_bits & 0x3ff) == 0x3ff)
2762 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2764 else
2766 emit_insn (gen_rtx_SET (op0,
2767 gen_safe_XOR64 (temp,
2768 (-(HOST_WIDE_INT)0x400
2769 | (low_bits & 0x3ff)))));
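/* For instance, with low_bits == 0x87654321 and is_neg set, this emits
   temp = ~0x87654321 & ~0x3ff and then xors temp with -0x400 | 0x321,
   recreating the sign-extended value 0xffffffff87654321 in two
   instructions.  */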
2774 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2775 unsigned HOST_WIDE_INT, int);
2777 static void
2778 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2779 unsigned HOST_WIDE_INT high_bits,
2780 unsigned HOST_WIDE_INT low_immediate,
2781 int shift_count)
2783 rtx temp2 = op0;
2785 if ((high_bits & 0xfffffc00) != 0)
2787 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2788 if ((high_bits & ~0xfffffc00) != 0)
2789 emit_insn (gen_rtx_SET (op0,
2790 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2791 else
2792 temp2 = temp;
2794 else
2796 emit_insn (gen_safe_SET64 (temp, high_bits));
2797 temp2 = temp;
2800 /* Now shift it up into place. */
2801 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2802 GEN_INT (shift_count))));
2804 /* If there is a low immediate piece, finish up by
2805 putting that in as well. */
2806 if (low_immediate != 0)
2807 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
2810 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2811 unsigned HOST_WIDE_INT);
2813 /* Full 64-bit constant decomposition. Even though this is the
2814 'worst' case, we still optimize a few things away. */
2815 static void
2816 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2817 unsigned HOST_WIDE_INT high_bits,
2818 unsigned HOST_WIDE_INT low_bits)
2820 rtx sub_temp = op0;
2822 if (can_create_pseudo_p ())
2823 sub_temp = gen_reg_rtx (DImode);
2825 if ((high_bits & 0xfffffc00) != 0)
2827 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2828 if ((high_bits & ~0xfffffc00) != 0)
2829 emit_insn (gen_rtx_SET (sub_temp,
2830 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2831 else
2832 sub_temp = temp;
2834 else
2836 emit_insn (gen_safe_SET64 (temp, high_bits));
2837 sub_temp = temp;
2840 if (can_create_pseudo_p ())
2842 rtx temp2 = gen_reg_rtx (DImode);
2843 rtx temp3 = gen_reg_rtx (DImode);
2844 rtx temp4 = gen_reg_rtx (DImode);
2846 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2847 GEN_INT (32))));
2849 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2850 if ((low_bits & ~0xfffffc00) != 0)
2852 emit_insn (gen_rtx_SET (temp3,
2853 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2854 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2856 else
2858 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2861 else
2863 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2864 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2865 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2866 int to_shift = 12;
2868 /* We are in the middle of reload, so this is really
2869 painful. However, we still make an attempt to
2870 avoid emitting truly stupid code. */
2871 if (low1 != const0_rtx)
2873 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2874 GEN_INT (to_shift))));
2875 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2876 sub_temp = op0;
2877 to_shift = 12;
2879 else
2881 to_shift += 12;
2883 if (low2 != const0_rtx)
2885 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2886 GEN_INT (to_shift))));
2887 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2888 sub_temp = op0;
2889 to_shift = 8;
2891 else
2893 to_shift += 8;
2895 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2896 GEN_INT (to_shift))));
2897 if (low3 != const0_rtx)
2898 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2899 /* phew... */
2903 /* Analyze a 64-bit constant for certain properties. */
2904 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2905 unsigned HOST_WIDE_INT,
2906 int *, int *, int *);
2908 static void
2909 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2910 unsigned HOST_WIDE_INT low_bits,
2911 int *hbsp, int *lbsp, int *abbasp)
2913 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2914 int i;
2916 lowest_bit_set = highest_bit_set = -1;
2917 i = 0;
2918 do
2920 if ((lowest_bit_set == -1)
2921 && ((low_bits >> i) & 1))
2922 lowest_bit_set = i;
2923 if ((highest_bit_set == -1)
2924 && ((high_bits >> (32 - i - 1)) & 1))
2925 highest_bit_set = (64 - i - 1);
2927 while (++i < 32
2928 && ((highest_bit_set == -1)
2929 || (lowest_bit_set == -1)));
2930 if (i == 32)
2932 i = 0;
2933 do
2935 if ((lowest_bit_set == -1)
2936 && ((high_bits >> i) & 1))
2937 lowest_bit_set = i + 32;
2938 if ((highest_bit_set == -1)
2939 && ((low_bits >> (32 - i - 1)) & 1))
2940 highest_bit_set = 32 - i - 1;
2942 while (++i < 32
2943 && ((highest_bit_set == -1)
2944 || (lowest_bit_set == -1)));
2946 /* If there are no bits set, this should have gone out
2947 as one instruction! */
2948 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2949 all_bits_between_are_set = 1;
2950 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2952 if (i < 32)
2954 if ((low_bits & (1 << i)) != 0)
2955 continue;
2957 else
2959 if ((high_bits & (1 << (i - 32))) != 0)
2960 continue;
2962 all_bits_between_are_set = 0;
2963 break;
2965 *hbsp = highest_bit_set;
2966 *lbsp = lowest_bit_set;
2967 *abbasp = all_bits_between_are_set;
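/* For example, for the constant 0x000000000000ff00 this computes
   lowest_bit_set == 8, highest_bit_set == 15 and
   all_bits_between_are_set == 1, which the callers below use to pick
   the cheapest sequence.  */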
2970 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2972 static int
2973 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2974 unsigned HOST_WIDE_INT low_bits)
2976 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2978 if (high_bits == 0
2979 || high_bits == 0xffffffff)
2980 return 1;
2982 analyze_64bit_constant (high_bits, low_bits,
2983 &highest_bit_set, &lowest_bit_set,
2984 &all_bits_between_are_set);
2986 if ((highest_bit_set == 63
2987 || lowest_bit_set == 0)
2988 && all_bits_between_are_set != 0)
2989 return 1;
2991 if ((highest_bit_set - lowest_bit_set) < 21)
2992 return 1;
2994 return 0;
2997 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2998 unsigned HOST_WIDE_INT,
2999 int, int);
3001 static unsigned HOST_WIDE_INT
3002 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
3003 unsigned HOST_WIDE_INT low_bits,
3004 int lowest_bit_set, int shift)
3006 HOST_WIDE_INT hi, lo;
3008 if (lowest_bit_set < 32)
3010 lo = (low_bits >> lowest_bit_set) << shift;
3011 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
3013 else
3015 lo = 0;
3016 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
3018 gcc_assert (! (hi & lo));
3019 return (hi | lo);
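/* Continuing the example above, create_simple_focus_bits (0, 0xff00, 8, 0)
   right-justifies the set bits and returns 0xff.  */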
3022 /* Here we are sure to be arch64 and this is an integer constant
3023 being loaded into a register. Emit the most efficient
3024 insn sequence possible. Detection of all the 1-insn cases
3025 has been done already. */
3026 static void
3027 sparc_emit_set_const64 (rtx op0, rtx op1)
3029 unsigned HOST_WIDE_INT high_bits, low_bits;
3030 int lowest_bit_set, highest_bit_set;
3031 int all_bits_between_are_set;
3032 rtx temp = 0;
3034 /* Sanity check that we know what we are working with. */
3035 gcc_assert (TARGET_ARCH64
3036 && (GET_CODE (op0) == SUBREG
3037 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
3039 if (! can_create_pseudo_p ())
3040 temp = op0;
3042 if (GET_CODE (op1) != CONST_INT)
3044 sparc_emit_set_symbolic_const64 (op0, op1, temp);
3045 return;
3048 if (! temp)
3049 temp = gen_reg_rtx (DImode);
3051 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
3052 low_bits = (INTVAL (op1) & 0xffffffff);
3054 /* low_bits bits 0 --> 31
3055 high_bits bits 32 --> 63 */
3057 analyze_64bit_constant (high_bits, low_bits,
3058 &highest_bit_set, &lowest_bit_set,
3059 &all_bits_between_are_set);
3061 /* First try for a 2-insn sequence. */
3063 /* These situations are preferred because the optimizer can
3064 * do more things with them:
3065 * 1) mov -1, %reg
3066 * sllx %reg, shift, %reg
3067 * 2) mov -1, %reg
3068 * srlx %reg, shift, %reg
3069 * 3) mov some_small_const, %reg
3070 * sllx %reg, shift, %reg
3072 if (((highest_bit_set == 63
3073 || lowest_bit_set == 0)
3074 && all_bits_between_are_set != 0)
3075 || ((highest_bit_set - lowest_bit_set) < 12))
3077 HOST_WIDE_INT the_const = -1;
3078 int shift = lowest_bit_set;
3080 if ((highest_bit_set != 63
3081 && lowest_bit_set != 0)
3082 || all_bits_between_are_set == 0)
3084 the_const =
3085 create_simple_focus_bits (high_bits, low_bits,
3086 lowest_bit_set, 0);
3088 else if (lowest_bit_set == 0)
3089 shift = -(63 - highest_bit_set);
3091 gcc_assert (SPARC_SIMM13_P (the_const));
3092 gcc_assert (shift != 0);
3094 emit_insn (gen_safe_SET64 (temp, the_const));
3095 if (shift > 0)
3096 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
3097 GEN_INT (shift))));
3098 else if (shift < 0)
3099 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
3100 GEN_INT (-shift))));
3101 return;
3104 /* Now a range of 22 or fewer bits set somewhere.
3105 * 1) sethi %hi(focus_bits), %reg
3106 * sllx %reg, shift, %reg
3107 * 2) sethi %hi(focus_bits), %reg
3108 * srlx %reg, shift, %reg
3110 if ((highest_bit_set - lowest_bit_set) < 21)
3112 unsigned HOST_WIDE_INT focus_bits =
3113 create_simple_focus_bits (high_bits, low_bits,
3114 lowest_bit_set, 10);
3116 gcc_assert (SPARC_SETHI_P (focus_bits));
3117 gcc_assert (lowest_bit_set != 10);
3119 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
3121 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
3122 if (lowest_bit_set < 10)
3123 emit_insn (gen_rtx_SET (op0,
3124 gen_rtx_LSHIFTRT (DImode, temp,
3125 GEN_INT (10 - lowest_bit_set))));
3126 else if (lowest_bit_set > 10)
3127 emit_insn (gen_rtx_SET (op0,
3128 gen_rtx_ASHIFT (DImode, temp,
3129 GEN_INT (lowest_bit_set - 10))));
3130 return;
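/* For example, 0x00000000ffff0000 lands here: focus_bits is 0xffff << 10,
   which a single sethi can build, and the ashift by 6 then moves the
   field up to bit 16.  */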
3133 /* 1) sethi %hi(low_bits), %reg
3134 * or %reg, %lo(low_bits), %reg
3135 * 2) sethi %hi(~low_bits), %reg
3136 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
3138 if (high_bits == 0
3139 || high_bits == 0xffffffff)
3141 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
3142 (high_bits == 0xffffffff));
3143 return;
3146 /* Now, try 3-insn sequences. */
3148 /* 1) sethi %hi(high_bits), %reg
3149 * or %reg, %lo(high_bits), %reg
3150 * sllx %reg, 32, %reg
3152 if (low_bits == 0)
3154 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
3155 return;
3158 /* We may be able to do something quick
3159 when the constant is negated, so try that. */
3160 if (const64_is_2insns ((~high_bits) & 0xffffffff,
3161 (~low_bits) & 0xfffffc00))
3163 /* NOTE: The trailing bits get XOR'd so we need the
3164 non-negated bits, not the negated ones. */
3165 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
3167 if ((((~high_bits) & 0xffffffff) == 0
3168 && ((~low_bits) & 0x80000000) == 0)
3169 || (((~high_bits) & 0xffffffff) == 0xffffffff
3170 && ((~low_bits) & 0x80000000) != 0))
3172 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
3174 if ((SPARC_SETHI_P (fast_int)
3175 && (~high_bits & 0xffffffff) == 0)
3176 || SPARC_SIMM13_P (fast_int))
3177 emit_insn (gen_safe_SET64 (temp, fast_int));
3178 else
3179 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
3181 else
3183 rtx negated_const;
3184 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
3185 (((HOST_WIDE_INT) ((~high_bits) & 0xffffffff)) << 32));
3186 sparc_emit_set_const64 (temp, negated_const);
3189 /* If we are XOR'ing with -1, then we should emit a one's complement
3190 instead. This way the combiner will notice logical operations
3191 such as ANDN later on and substitute. */
3192 if (trailing_bits == 0x3ff)
3194 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
3196 else
3198 emit_insn (gen_rtx_SET (op0,
3199 gen_safe_XOR64 (temp,
3200 (-0x400 | trailing_bits))));
3202 return;
3205 /* 1) sethi %hi(xxx), %reg
3206 * or %reg, %lo(xxx), %reg
3207 * sllx %reg, yyy, %reg
3209 * ??? This is just a generalized version of the low_bits==0
3210 * thing above, FIXME...
3212 if ((highest_bit_set - lowest_bit_set) < 32)
3214 unsigned HOST_WIDE_INT focus_bits =
3215 create_simple_focus_bits (high_bits, low_bits,
3216 lowest_bit_set, 0);
3218 /* We can't get here in this state. */
3219 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
3221 /* So what we know is that the set bits straddle the
3222 middle of the 64-bit word. */
3223 sparc_emit_set_const64_quick2 (op0, temp,
3224 focus_bits, 0,
3225 lowest_bit_set);
3226 return;
3229 /* 1) sethi %hi(high_bits), %reg
3230 * or %reg, %lo(high_bits), %reg
3231 * sllx %reg, 32, %reg
3232 * or %reg, low_bits, %reg
3234 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
3236 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
3237 return;
3240 /* The easiest way when all else fails, is full decomposition. */
3241 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
3244 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
3246 static bool
3247 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
3249 *p1 = SPARC_ICC_REG;
3250 *p2 = SPARC_FCC_REG;
3251 return true;
3254 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
3256 static unsigned int
3257 sparc_min_arithmetic_precision (void)
3259 return 32;
3262 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
3263 return the mode to be used for the comparison. For floating-point,
3264 CCFP[E]mode is used. CCNZmode should be used when the first operand
3265 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
3266 processing is needed. */
3268 machine_mode
3269 select_cc_mode (enum rtx_code op, rtx x, rtx y)
3271 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3273 switch (op)
3275 case EQ:
3276 case NE:
3277 case UNORDERED:
3278 case ORDERED:
3279 case UNLT:
3280 case UNLE:
3281 case UNGT:
3282 case UNGE:
3283 case UNEQ:
3284 return CCFPmode;
3286 case LT:
3287 case LE:
3288 case GT:
3289 case GE:
3290 case LTGT:
3291 return CCFPEmode;
3293 default:
3294 gcc_unreachable ();
3297 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
3298 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
3299 && y == const0_rtx)
3301 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3302 return CCXNZmode;
3303 else
3304 return CCNZmode;
3306 else
3308 /* This is for the cmp<mode>_sne pattern. */
3309 if (GET_CODE (x) == NOT && y == constm1_rtx)
3311 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3312 return CCXCmode;
3313 else
3314 return CCCmode;
3317 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
3318 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
3320 if (GET_CODE (y) == UNSPEC
3321 && (XINT (y, 1) == UNSPEC_ADDV
3322 || XINT (y, 1) == UNSPEC_SUBV
3323 || XINT (y, 1) == UNSPEC_NEGV))
3324 return CCVmode;
3325 else
3326 return CCCmode;
3329 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
3330 return CCXmode;
3331 else
3332 return CCmode;
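/* So, for example, comparing (plus:SI a b) against zero yields CCNZmode,
   letting an addcc set the condition codes directly, while the same
   comparison in DImode on TARGET_ARCH64 yields CCXNZmode.  */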
3336 /* Emit the compare insn and return the CC reg for a CODE comparison
3337 with operands X and Y. */
3339 static rtx
3340 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
3342 machine_mode mode;
3343 rtx cc_reg;
3345 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
3346 return x;
3348 mode = SELECT_CC_MODE (code, x, y);
3350 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3351 fcc regs (cse can't tell they're really call clobbered regs and will
3352 remove a duplicate comparison even if there is an intervening function
3353 call - it will then try to reload the cc reg via an int reg which is why
3354 we need the movcc patterns). It is possible to provide the movcc
3355 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3356 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3357 to tell cse that CCFPE mode registers (even pseudos) are call
3358 clobbered. */
3360 /* ??? This is an experiment. Rather than making changes to cse which may
3361 or may not be easy/clean, we do our own cse. This is possible because
3362 we will generate hard registers. Cse knows they're call clobbered (it
3363 doesn't know the same thing about pseudos). If we guess wrong, no big
3364 deal, but if we win, great! */
3366 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3367 #if 1 /* experiment */
3369 int reg;
3370 /* We cycle through the registers to ensure they're all exercised. */
3371 static int next_fcc_reg = 0;
3372 /* Previous x,y for each fcc reg. */
3373 static rtx prev_args[4][2];
3375 /* Scan prev_args for x,y. */
3376 for (reg = 0; reg < 4; reg++)
3377 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3378 break;
3379 if (reg == 4)
3381 reg = next_fcc_reg;
3382 prev_args[reg][0] = x;
3383 prev_args[reg][1] = y;
3384 next_fcc_reg = (next_fcc_reg + 1) & 3;
3386 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3388 #else
3389 cc_reg = gen_reg_rtx (mode);
3390 #endif /* ! experiment */
3391 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3392 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3393 else
3394 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3396 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
3397 will only result in an unrecognizable insn so no point in asserting. */
3398 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3400 return cc_reg;
3404 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3406 rtx
3407 gen_compare_reg (rtx cmp)
3409 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3412 /* This function is used for v9 only.
3413 DEST is the target of the Scc insn.
3414 CODE is the code for an Scc's comparison.
3415 X and Y are the values we compare.
3417 This function is needed to turn
3419 (set (reg:SI 110)
3420 (gt (reg:CCX 100 %icc)
3421 (const_int 0)))
3422 into
3423 (set (reg:SI 110)
3424 (gt:DI (reg:CCX 100 %icc)
3425 (const_int 0)))
3427 I.e., the instruction recognizer needs to see the mode of the comparison to
3428 find the right instruction. We could use "gt:DI" right in the
3429 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3431 static int
3432 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3434 if (! TARGET_ARCH64
3435 && (GET_MODE (x) == DImode
3436 || GET_MODE (dest) == DImode))
3437 return 0;
3439 /* Try to use the movrCC insns. */
3440 if (TARGET_ARCH64
3441 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3442 && y == const0_rtx
3443 && v9_regcmp_p (compare_code))
3445 rtx op0 = x;
3446 rtx temp;
3448 /* Special case for op0 != 0. This can be done with one instruction if
3449 dest == x. */
3451 if (compare_code == NE
3452 && GET_MODE (dest) == DImode
3453 && rtx_equal_p (op0, dest))
3455 emit_insn (gen_rtx_SET (dest,
3456 gen_rtx_IF_THEN_ELSE (DImode,
3457 gen_rtx_fmt_ee (compare_code, DImode,
3458 op0, const0_rtx),
3459 const1_rtx,
3460 dest)));
3461 return 1;
3464 if (reg_overlap_mentioned_p (dest, op0))
3466 /* Handle the case where dest == x.
3467 We "early clobber" the result. */
3468 op0 = gen_reg_rtx (GET_MODE (x));
3469 emit_move_insn (op0, x);
3472 emit_insn (gen_rtx_SET (dest, const0_rtx));
3473 if (GET_MODE (op0) != DImode)
3475 temp = gen_reg_rtx (DImode);
3476 convert_move (temp, op0, 0);
3478 else
3479 temp = op0;
3480 emit_insn (gen_rtx_SET (dest,
3481 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3482 gen_rtx_fmt_ee (compare_code, DImode,
3483 temp, const0_rtx),
3484 const1_rtx,
3485 dest)));
3486 return 1;
3488 else
3490 x = gen_compare_reg_1 (compare_code, x, y);
3491 y = const0_rtx;
3493 emit_insn (gen_rtx_SET (dest, const0_rtx));
3494 emit_insn (gen_rtx_SET (dest,
3495 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3496 gen_rtx_fmt_ee (compare_code,
3497 GET_MODE (x), x, y),
3498 const1_rtx, dest)));
3499 return 1;
3504 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3505 without jumps using the addx/subx instructions. */
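/* The classic sltu sequence is, roughly,
     subcc %o0, %o1, %g0
     addx  %g0, 0, %o2
   where the unsigned comparison leaves its result in the carry bit and
   the addx copies that bit into the destination register.  */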
3507 bool
3508 emit_scc_insn (rtx operands[])
3510 rtx tem, x, y;
3511 enum rtx_code code;
3512 machine_mode mode;
3514 /* The quad-word fp compare library routines all return nonzero to indicate
3515 true, which is different from the equivalent libgcc routines, so we must
3516 handle them specially here. */
3517 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3519 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3520 GET_CODE (operands[1]));
3521 operands[2] = XEXP (operands[1], 0);
3522 operands[3] = XEXP (operands[1], 1);
3525 code = GET_CODE (operands[1]);
3526 x = operands[2];
3527 y = operands[3];
3528 mode = GET_MODE (x);
3530 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3531 more applications). The exception to this is "reg != 0" which can
3532 be done in one instruction on v9 (so we do it). */
3533 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3535 if (y != const0_rtx)
3536 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3538 rtx pat = gen_rtx_SET (operands[0],
3539 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3540 x, const0_rtx));
3542 /* If we can use addx/subx or addxc, add a clobber for CC. */
3543 if (mode == SImode || (code == NE && TARGET_VIS3))
3545 rtx clobber
3546 = gen_rtx_CLOBBER (VOIDmode,
3547 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3548 SPARC_ICC_REG));
3549 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3552 emit_insn (pat);
3553 return true;
3556 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3557 if (TARGET_ARCH64
3558 && mode == DImode
3559 && !((code == LTU || code == GTU) && TARGET_VIS3)
3560 && gen_v9_scc (operands[0], code, x, y))
3561 return true;
3563 /* We can do LTU and GEU using the addx/subx instructions too. And
3564 for GTU/LEU, if both operands are registers, swap them and fall
3565 back to the easy case. */
3566 if (code == GTU || code == LEU)
3568 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3569 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3571 tem = x;
3572 x = y;
3573 y = tem;
3574 code = swap_condition (code);
3578 if (code == LTU || code == GEU)
3580 emit_insn (gen_rtx_SET (operands[0],
3581 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3582 gen_compare_reg_1 (code, x, y),
3583 const0_rtx)));
3584 return true;
3587 /* All the possibilities to use addx/subx-based sequences have been
3588 exhausted; try for a 3-instruction sequence using v9 conditional
3589 moves. */
3590 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3591 return true;
3593 /* Nope, do branches. */
3594 return false;
3597 /* Emit a conditional jump insn for the v9 architecture using comparison code
3598 CODE and jump target LABEL.
3599 This function exists to take advantage of the v9 brxx insns. */
3601 static void
3602 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3604 emit_jump_insn (gen_rtx_SET (pc_rtx,
3605 gen_rtx_IF_THEN_ELSE (VOIDmode,
3606 gen_rtx_fmt_ee (code, GET_MODE (op0),
3607 op0, const0_rtx),
3608 gen_rtx_LABEL_REF (VOIDmode, label),
3609 pc_rtx)));
3612 /* Emit a conditional jump insn for the UA2011 architecture using
3613 comparison code CODE and jump target LABEL. This function exists
3614 to take advantage of the UA2011 Compare and Branch insns. */
3616 static void
3617 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3619 rtx if_then_else;
3621 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3622 gen_rtx_fmt_ee (code, GET_MODE (op0),
3623 op0, op1),
3624 gen_rtx_LABEL_REF (VOIDmode, label),
3625 pc_rtx);
3627 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3630 void
3631 emit_conditional_branch_insn (rtx operands[])
3633 /* The quad-word fp compare library routines all return nonzero to indicate
3634 true, which is different from the equivalent libgcc routines, so we must
3635 handle them specially here. */
3636 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3638 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3639 GET_CODE (operands[0]));
3640 operands[1] = XEXP (operands[0], 0);
3641 operands[2] = XEXP (operands[0], 1);
3644 /* If we can tell early on that the comparison is against a constant
3645 that won't fit in the 5-bit signed immediate field of a cbcond,
3646 use one of the other v9 conditional branch sequences. */
3647 if (TARGET_CBCOND
3648 && GET_CODE (operands[1]) == REG
3649 && (GET_MODE (operands[1]) == SImode
3650 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3651 && (GET_CODE (operands[2]) != CONST_INT
3652 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3654 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3655 return;
3658 if (TARGET_ARCH64 && operands[2] == const0_rtx
3659 && GET_CODE (operands[1]) == REG
3660 && GET_MODE (operands[1]) == DImode)
3662 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3663 return;
3666 operands[1] = gen_compare_reg (operands[0]);
3667 operands[2] = const0_rtx;
3668 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3669 operands[1], operands[2]);
3670 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3671 operands[3]));
3675 /* Generate a DFmode part of a hard TFmode register.
3676 REG is the TFmode hard register, LOW is 1 for the
3677 low 64 bits of the register and 0 otherwise.  */
3679 rtx
3680 gen_df_reg (rtx reg, int low)
3682 int regno = REGNO (reg);
3684 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3685 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3686 return gen_rtx_REG (DFmode, regno);
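/* For example, with SPARC's big-endian word order the low DFmode half
   of the TFmode register starting at %f0 is %f2, hence the adjustment
   by 2 for FP registers (and by only 1 for the 8-byte integer
   registers in 64-bit mode).  */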
3689 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3690 Unlike normal calls, TFmode operands are passed by reference. It is
3691 assumed that no more than 3 operands are required. */
3693 static void
3694 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3696 rtx ret_slot = NULL, arg[3], func_sym;
3697 int i;
3699 /* We only expect to be called for conversions, unary, and binary ops. */
3700 gcc_assert (nargs == 2 || nargs == 3);
3702 for (i = 0; i < nargs; ++i)
3704 rtx this_arg = operands[i];
3705 rtx this_slot;
3707 /* TFmode arguments and return values are passed by reference. */
3708 if (GET_MODE (this_arg) == TFmode)
3710 int force_stack_temp;
3712 force_stack_temp = 0;
3713 if (TARGET_BUGGY_QP_LIB && i == 0)
3714 force_stack_temp = 1;
3716 if (GET_CODE (this_arg) == MEM
3717 && ! force_stack_temp)
3719 tree expr = MEM_EXPR (this_arg);
3720 if (expr)
3721 mark_addressable (expr);
3722 this_arg = XEXP (this_arg, 0);
3724 else if (CONSTANT_P (this_arg)
3725 && ! force_stack_temp)
3727 this_slot = force_const_mem (TFmode, this_arg);
3728 this_arg = XEXP (this_slot, 0);
3730 else
3732 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3734 /* Operand 0 is the return value. We'll copy it out later. */
3735 if (i > 0)
3736 emit_move_insn (this_slot, this_arg);
3737 else
3738 ret_slot = this_slot;
3740 this_arg = XEXP (this_slot, 0);
3744 arg[i] = this_arg;
3747 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3749 if (GET_MODE (operands[0]) == TFmode)
3751 if (nargs == 2)
3752 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3753 arg[0], GET_MODE (arg[0]),
3754 arg[1], GET_MODE (arg[1]));
3755 else
3756 emit_library_call (func_sym, LCT_NORMAL, VOIDmode,
3757 arg[0], GET_MODE (arg[0]),
3758 arg[1], GET_MODE (arg[1]),
3759 arg[2], GET_MODE (arg[2]));
3761 if (ret_slot)
3762 emit_move_insn (operands[0], ret_slot);
3764 else
3766 rtx ret;
3768 gcc_assert (nargs == 2);
3770 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3771 GET_MODE (operands[0]),
3772 arg[1], GET_MODE (arg[1]));
3774 if (ret != operands[0])
3775 emit_move_insn (operands[0], ret);
3779 /* Expand soft-float TFmode calls to sparc abi routines. */
3781 static void
3782 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3784 const char *func;
3786 switch (code)
3788 case PLUS:
3789 func = "_Qp_add";
3790 break;
3791 case MINUS:
3792 func = "_Qp_sub";
3793 break;
3794 case MULT:
3795 func = "_Qp_mul";
3796 break;
3797 case DIV:
3798 func = "_Qp_div";
3799 break;
3800 default:
3801 gcc_unreachable ();
3804 emit_soft_tfmode_libcall (func, 3, operands);
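/* A minimal sketch of the resulting ABI call, assuming TFmode
   variables a, b, c and no hardware quad support:

     c = a + b;   =>   _Qp_add (&c, &a, &b);

   i.e. the result slot and both operands are all passed by reference,
   as arranged by emit_soft_tfmode_libcall above.  */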
3807 static void
3808 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3810 const char *func;
3812 gcc_assert (code == SQRT);
3813 func = "_Qp_sqrt";
3815 emit_soft_tfmode_libcall (func, 2, operands);
3818 static void
3819 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3821 const char *func;
3823 switch (code)
3825 case FLOAT_EXTEND:
3826 switch (GET_MODE (operands[1]))
3828 case E_SFmode:
3829 func = "_Qp_stoq";
3830 break;
3831 case E_DFmode:
3832 func = "_Qp_dtoq";
3833 break;
3834 default:
3835 gcc_unreachable ();
3837 break;
3839 case FLOAT_TRUNCATE:
3840 switch (GET_MODE (operands[0]))
3842 case E_SFmode:
3843 func = "_Qp_qtos";
3844 break;
3845 case E_DFmode:
3846 func = "_Qp_qtod";
3847 break;
3848 default:
3849 gcc_unreachable ();
3851 break;
3853 case FLOAT:
3854 switch (GET_MODE (operands[1]))
3856 case E_SImode:
3857 func = "_Qp_itoq";
3858 if (TARGET_ARCH64)
3859 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3860 break;
3861 case E_DImode:
3862 func = "_Qp_xtoq";
3863 break;
3864 default:
3865 gcc_unreachable ();
3867 break;
3869 case UNSIGNED_FLOAT:
3870 switch (GET_MODE (operands[1]))
3872 case E_SImode:
3873 func = "_Qp_uitoq";
3874 if (TARGET_ARCH64)
3875 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3876 break;
3877 case E_DImode:
3878 func = "_Qp_uxtoq";
3879 break;
3880 default:
3881 gcc_unreachable ();
3883 break;
3885 case FIX:
3886 switch (GET_MODE (operands[0]))
3888 case E_SImode:
3889 func = "_Qp_qtoi";
3890 break;
3891 case E_DImode:
3892 func = "_Qp_qtox";
3893 break;
3894 default:
3895 gcc_unreachable ();
3897 break;
3899 case UNSIGNED_FIX:
3900 switch (GET_MODE (operands[0]))
3902 case E_SImode:
3903 func = "_Qp_qtoui";
3904 break;
3905 case E_DImode:
3906 func = "_Qp_qtoux";
3907 break;
3908 default:
3909 gcc_unreachable ();
3911 break;
3913 default:
3914 gcc_unreachable ();
3917 emit_soft_tfmode_libcall (func, 2, operands);
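/* For instance, a TFmode-to-SImode conversion returns its result
   directly rather than by reference:

     i = (int) q;   =>   i = _Qp_qtoi (&q);

   while the reverse, q = (long double) i, becomes _Qp_itoq (&q, i)
   with only the TFmode result slot passed by reference.  */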
3920 /* Expand a hard-float tfmode operation. All arguments must be in
3921 registers. */
3923 static void
3924 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3926 rtx op, dest;
3928 if (GET_RTX_CLASS (code) == RTX_UNARY)
3930 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3931 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3933 else
3935 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3936 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3937 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3938 operands[1], operands[2]);
3941 if (register_operand (operands[0], VOIDmode))
3942 dest = operands[0];
3943 else
3944 dest = gen_reg_rtx (GET_MODE (operands[0]));
3946 emit_insn (gen_rtx_SET (dest, op));
3948 if (dest != operands[0])
3949 emit_move_insn (operands[0], dest);
3952 void
3953 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3955 if (TARGET_HARD_QUAD)
3956 emit_hard_tfmode_operation (code, operands);
3957 else
3958 emit_soft_tfmode_binop (code, operands);
3961 void
3962 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3964 if (TARGET_HARD_QUAD)
3965 emit_hard_tfmode_operation (code, operands);
3966 else
3967 emit_soft_tfmode_unop (code, operands);
3970 void
3971 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3973 if (TARGET_HARD_QUAD)
3974 emit_hard_tfmode_operation (code, operands);
3975 else
3976 emit_soft_tfmode_cvt (code, operands);
3979 /* Return nonzero if a branch/jump/call instruction will be emitting a
3980 nop into its delay slot. */
3983 empty_delay_slot (rtx_insn *insn)
3985 rtx seq;
3987 /* If there is no previous instruction (should not happen), return true. */
3988 if (PREV_INSN (insn) == NULL)
3989 return 1;
3991 seq = NEXT_INSN (PREV_INSN (insn));
3992 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3993 return 0;
3995 return 1;
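/* Rationale (a sketch of the underlying mechanics): once the delay
   slot has been filled, dbr_schedule wraps the branch and its delay
   insn in a SEQUENCE, and for an insn inside such a SEQUENCE the
   NEXT_INSN (PREV_INSN (insn)) expression above yields the enclosing
   SEQUENCE itself.  */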
3998 /* Return nonzero if we should emit a nop after a cbcond instruction.
3999 The cbcond instruction does not have a delay slot, however there is
4000 a severe performance penalty if a control transfer appears right
4001 after a cbcond. Therefore we emit a nop when we detect this
4002 situation. */
4005 emit_cbcond_nop (rtx_insn *insn)
4007 rtx next = next_active_insn (insn);
4009 if (!next)
4010 return 1;
4012 if (NONJUMP_INSN_P (next)
4013 && GET_CODE (PATTERN (next)) == SEQUENCE)
4014 next = XVECEXP (PATTERN (next), 0, 0);
4015 else if (CALL_P (next)
4016 && GET_CODE (PATTERN (next)) == PARALLEL)
4018 rtx delay = XVECEXP (PATTERN (next), 0, 1);
4020 if (GET_CODE (delay) == RETURN)
4022 /* It's a sibling call. Do not emit the nop if we're going
4023 to emit something other than the jump itself as the first
4024 instruction of the sibcall sequence. */
4025 if (sparc_leaf_function_p || TARGET_FLAT)
4026 return 0;
4030 if (NONJUMP_INSN_P (next))
4031 return 0;
4033 return 1;
4036 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
4037 instruction. RETURN_P is true if the v9 variant 'return' is to be
4038 considered in the test too.
4040 TRIAL must be a SET whose destination is a REG appropriate for the
4041 'restore' instruction or, if RETURN_P is true, for the 'return'
4042 instruction. */
4044 static int
4045 eligible_for_restore_insn (rtx trial, bool return_p)
4047 rtx pat = PATTERN (trial);
4048 rtx src = SET_SRC (pat);
4049 bool src_is_freg = false;
4050 rtx src_reg;
4052 /* Since we now can do moves between float and integer registers when
4053 VIS3 is enabled, we have to catch this case. We can allow such
4054 moves when doing a 'return' however. */
4055 src_reg = src;
4056 if (GET_CODE (src_reg) == SUBREG)
4057 src_reg = SUBREG_REG (src_reg);
4058 if (GET_CODE (src_reg) == REG
4059 && SPARC_FP_REG_P (REGNO (src_reg)))
4060 src_is_freg = true;
4062 /* The 'restore src,%g0,dest' pattern for word mode and below. */
4063 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4064 && arith_operand (src, GET_MODE (src))
4065 && ! src_is_freg)
4067 if (TARGET_ARCH64)
4068 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4069 else
4070 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
4073 /* The 'restore src,%g0,dest' pattern for double-word mode. */
4074 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
4075 && arith_double_operand (src, GET_MODE (src))
4076 && ! src_is_freg)
4077 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
4079 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
4080 else if (! TARGET_FPU && register_operand (src, SFmode))
4081 return 1;
4083 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
4084 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
4085 return 1;
4087 /* If we have the 'return' instruction, anything that does not use
4088 local or output registers and can go into a delay slot wins. */
4089 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
4090 return 1;
4092 /* The 'restore src1,src2,dest' pattern for SImode. */
4093 else if (GET_CODE (src) == PLUS
4094 && register_operand (XEXP (src, 0), SImode)
4095 && arith_operand (XEXP (src, 1), SImode))
4096 return 1;
4098 /* The 'restore src1,src2,dest' pattern for DImode. */
4099 else if (GET_CODE (src) == PLUS
4100 && register_operand (XEXP (src, 0), DImode)
4101 && arith_double_operand (XEXP (src, 1), DImode))
4102 return 1;
4104 /* The 'restore src1,%lo(src2),dest' pattern. */
4105 else if (GET_CODE (src) == LO_SUM
4106 && ! TARGET_CM_MEDMID
4107 && ((register_operand (XEXP (src, 0), SImode)
4108 && immediate_operand (XEXP (src, 1), SImode))
4109 || (TARGET_ARCH64
4110 && register_operand (XEXP (src, 0), DImode)
4111 && immediate_operand (XEXP (src, 1), DImode))))
4112 return 1;
4114 /* The 'restore src,src,dest' pattern. */
4115 else if (GET_CODE (src) == ASHIFT
4116 && (register_operand (XEXP (src, 0), SImode)
4117 || register_operand (XEXP (src, 0), DImode))
4118 && XEXP (src, 1) == const1_rtx)
4119 return 1;
4121 return 0;
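/* Illustrative sketch (register choice arbitrary): a return-value set
   such as (set (reg:SI %i0) (plus:SI (reg:SI %o1) (const_int 5)))
   matches the SImode PLUS case above and can be output as

     ret
      restore %o1, 5, %o0

   once epilogue_renumber has remapped the callee's %i0 to the
   caller's %o0.  */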
4124 /* Return nonzero if TRIAL can go into the function return's delay slot. */
4127 eligible_for_return_delay (rtx_insn *trial)
4129 int regno;
4130 rtx pat;
4132 /* If the function uses __builtin_eh_return, the eh_return machinery
4133 occupies the delay slot. */
4134 if (crtl->calls_eh_return)
4135 return 0;
4137 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4138 return 0;
4140 /* In the case of a leaf or flat function, anything can go into the slot. */
4141 if (sparc_leaf_function_p || TARGET_FLAT)
4142 return 1;
4144 if (!NONJUMP_INSN_P (trial))
4145 return 0;
4147 pat = PATTERN (trial);
4148 if (GET_CODE (pat) == PARALLEL)
4150 int i;
4152 if (! TARGET_V9)
4153 return 0;
4154 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
4156 rtx expr = XVECEXP (pat, 0, i);
4157 if (GET_CODE (expr) != SET)
4158 return 0;
4159 if (GET_CODE (SET_DEST (expr)) != REG)
4160 return 0;
4161 regno = REGNO (SET_DEST (expr));
4162 if (regno >= 8 && regno < 24)
4163 return 0;
4165 return !epilogue_renumber (&pat, 1);
4168 if (GET_CODE (pat) != SET)
4169 return 0;
4171 if (GET_CODE (SET_DEST (pat)) != REG)
4172 return 0;
4174 regno = REGNO (SET_DEST (pat));
4176 /* Otherwise, only operations which can be done in tandem with
4177 a `restore' or `return' insn can go into the delay slot. */
4178 if (regno >= 8 && regno < 24)
4179 return 0;
4181 /* If this instruction sets up a floating-point register and we have a return
4182 instruction, it can probably go in. But a restore will not work
4183 with FP_REGS. */
4184 if (! SPARC_INT_REG_P (regno))
4185 return TARGET_V9 && !epilogue_renumber (&pat, 1);
4187 return eligible_for_restore_insn (trial, true);
4190 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
4193 eligible_for_sibcall_delay (rtx_insn *trial)
4195 rtx pat;
4197 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
4198 return 0;
4200 if (!NONJUMP_INSN_P (trial))
4201 return 0;
4203 pat = PATTERN (trial);
4205 if (sparc_leaf_function_p || TARGET_FLAT)
4207 /* If the tail call is done using the call instruction,
4208 we have to restore %o7 in the delay slot. */
4209 if (LEAF_SIBCALL_SLOT_RESERVED_P)
4210 return 0;
4212 /* %g1 is used to build the function address. */
4213 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
4214 return 0;
4216 return 1;
4219 if (GET_CODE (pat) != SET)
4220 return 0;
4222 /* Otherwise, only operations which can be done in tandem with
4223 a `restore' insn can go into the delay slot. */
4224 if (GET_CODE (SET_DEST (pat)) != REG
4225 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
4226 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
4227 return 0;
4229 /* If it mentions %o7, it can't go in, because sibcall will clobber it
4230 in most cases. */
4231 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
4232 return 0;
4234 return eligible_for_restore_insn (trial, false);
4237 /* Determine if it's legal to put X into the constant pool. This
4238 is not possible if X contains the address of a symbol that is
4239 not constant (TLS) or not known at final link time (PIC). */
4241 static bool
4242 sparc_cannot_force_const_mem (machine_mode mode, rtx x)
4244 switch (GET_CODE (x))
4246 case CONST_INT:
4247 case CONST_WIDE_INT:
4248 case CONST_DOUBLE:
4249 case CONST_VECTOR:
4250 /* Accept all non-symbolic constants. */
4251 return false;
4253 case LABEL_REF:
4254 /* Labels are OK iff we are non-PIC. */
4255 return flag_pic != 0;
4257 case SYMBOL_REF:
4258 /* 'Naked' TLS symbol references are never OK;
4259 non-TLS symbols are OK iff we are non-PIC. */
4260 if (SYMBOL_REF_TLS_MODEL (x))
4261 return true;
4262 else
4263 return flag_pic != 0;
4265 case CONST:
4266 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
4267 case PLUS:
4268 case MINUS:
4269 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
4270 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
4271 case UNSPEC:
4272 return true;
4273 default:
4274 gcc_unreachable ();
4278 /* Global Offset Table support. */
4279 static GTY(()) rtx got_symbol_rtx = NULL_RTX;
4280 static GTY(()) rtx got_register_rtx = NULL_RTX;
4281 static GTY(()) rtx got_helper_rtx = NULL_RTX;
4283 static GTY(()) bool got_helper_needed = false;
4285 /* Return the SYMBOL_REF for the Global Offset Table. */
4287 static rtx
4288 sparc_got (void)
4290 if (!got_symbol_rtx)
4291 got_symbol_rtx = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
4293 return got_symbol_rtx;
4296 /* Output the load_pcrel_sym pattern. */
4298 const char *
4299 output_load_pcrel_sym (rtx *operands)
4301 if (flag_delayed_branch)
4303 output_asm_insn ("sethi\t%%hi(%a1-4), %0", operands);
4304 output_asm_insn ("call\t%a2", operands);
4305 output_asm_insn (" add\t%0, %%lo(%a1+4), %0", operands);
4307 else
4309 output_asm_insn ("sethi\t%%hi(%a1-8), %0", operands);
4310 output_asm_insn ("add\t%0, %%lo(%a1-4), %0", operands);
4311 output_asm_insn ("call\t%a2", operands);
4312 output_asm_insn (" nop", NULL);
4315 if (operands[2] == got_helper_rtx)
4316 got_helper_needed = true;
4318 return "";
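/* Why the +/-4 (resp. -8/-4) offsets: the helper roughly executes
   "retl; add %o7, %reg, %reg", so the sethi/add pair must encode the
   distance from the call (whose address ends up in %o7) to the GOT.
   In the delayed-branch sequence the sethi sits 4 bytes before the
   call and the add 4 bytes after it, hence %hi(sym-4) and %lo(sym+4)
   both resolve PC-relatively to sym minus the value of %o7; the
   straight-line sequence is offset likewise.  */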
4321 #ifdef HAVE_GAS_HIDDEN
4322 # define USE_HIDDEN_LINKONCE 1
4323 #else
4324 # define USE_HIDDEN_LINKONCE 0
4325 #endif
4327 /* Emit code to load the GOT register. */
4329 void
4330 load_got_register (void)
4332 rtx insn;
4334 if (TARGET_VXWORKS_RTP)
4336 if (!got_register_rtx)
4337 got_register_rtx = pic_offset_table_rtx;
4339 insn = gen_vxworks_load_got ();
4341 else
4343 if (!got_register_rtx)
4344 got_register_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4346 /* The GOT symbol is subject to a PC-relative relocation so we need a
4347 helper function to add the PC value and thus get the final value. */
4348 if (!got_helper_rtx)
4350 char name[32];
4352 /* Skip the leading '%' as that cannot be used in a symbol name. */
4353 if (USE_HIDDEN_LINKONCE)
4354 sprintf (name, "__sparc_get_pc_thunk.%s",
4355 reg_names[REGNO (got_register_rtx)] + 1);
4356 else
4357 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC",
4358 REGNO (got_register_rtx));
4360 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4363 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4364 const int orig_flag_pic = flag_pic;
4365 flag_pic = 0;
4366 insn = gen_load_pcrel_sym (Pmode,
4367 got_register_rtx,
4368 sparc_got (),
4369 got_helper_rtx,
4370 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM));
4371 flag_pic = orig_flag_pic;
4374 emit_insn (insn);
4377 /* Ensure that we are not using patterns that are not OK with PIC. */
4380 check_pic (int i)
4382 rtx op;
4384 switch (flag_pic)
4386 case 1:
4387 op = recog_data.operand[i];
4388 gcc_assert (GET_CODE (op) != SYMBOL_REF
4389 && (GET_CODE (op) != CONST
4390 || (GET_CODE (XEXP (op, 0)) == MINUS
4391 && XEXP (XEXP (op, 0), 0) == sparc_got ()
4392 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
4393 /* fallthrough */
4394 case 2:
4395 default:
4396 return 1;
4400 /* Return true if X is an address which needs a temporary register when
4401 reloaded while generating PIC code. */
4404 pic_address_needs_scratch (rtx x)
4406 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
4407 if (GET_CODE (x) == CONST
4408 && GET_CODE (XEXP (x, 0)) == PLUS
4409 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
4410 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4411 && !SMALL_INT (XEXP (XEXP (x, 0), 1)))
4412 return 1;
4414 return 0;
4417 /* Determine if a given RTX is a valid constant. We already know this
4418 satisfies CONSTANT_P. */
4420 static bool
4421 sparc_legitimate_constant_p (machine_mode mode, rtx x)
4423 switch (GET_CODE (x))
4425 case CONST:
4426 case SYMBOL_REF:
4427 if (sparc_tls_referenced_p (x))
4428 return false;
4429 break;
4431 case CONST_DOUBLE:
4432 /* Floating point constants are generally not ok.
4433 The only exception is 0.0 and all-ones in VIS. */
4434 if (TARGET_VIS
4435 && SCALAR_FLOAT_MODE_P (mode)
4436 && (const_zero_operand (x, mode)
4437 || const_all_ones_operand (x, mode)))
4438 return true;
4440 return false;
4442 case CONST_VECTOR:
4443 /* Vector constants are generally not ok.
4444 The only exception is 0 or -1 in VIS. */
4445 if (TARGET_VIS
4446 && (const_zero_operand (x, mode)
4447 || const_all_ones_operand (x, mode)))
4448 return true;
4450 return false;
4452 default:
4453 break;
4456 return true;
4459 /* Determine if a given RTX is a valid constant address. */
4461 bool
4462 constant_address_p (rtx x)
4464 switch (GET_CODE (x))
4466 case LABEL_REF:
4467 case CONST_INT:
4468 case HIGH:
4469 return true;
4471 case CONST:
4472 if (flag_pic && pic_address_needs_scratch (x))
4473 return false;
4474 return sparc_legitimate_constant_p (Pmode, x);
4476 case SYMBOL_REF:
4477 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
4479 default:
4480 return false;
4484 /* Nonzero if the constant value X is a legitimate general operand
4485 when generating PIC code. It is given that flag_pic is on and
4486 that X satisfies CONSTANT_P. */
4488 bool
4489 legitimate_pic_operand_p (rtx x)
4491 if (pic_address_needs_scratch (x))
4492 return false;
4493 if (sparc_tls_referenced_p (x))
4494 return false;
4495 return true;
4498 /* Return true if X is a representation of the PIC register. */
4500 static bool
4501 sparc_pic_register_p (rtx x)
4503 if (!REG_P (x) || !pic_offset_table_rtx)
4504 return false;
4506 if (x == pic_offset_table_rtx)
4507 return true;
4509 if (!HARD_REGISTER_P (pic_offset_table_rtx)
4510 && (HARD_REGISTER_P (x) || lra_in_progress || reload_in_progress)
4511 && ORIGINAL_REGNO (x) == REGNO (pic_offset_table_rtx))
4512 return true;
4514 return false;
4517 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
4518 (CONST_INT_P (X) \
4519 && INTVAL (X) >= -0x1000 \
4520 && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))
4522 #define RTX_OK_FOR_OLO10_P(X, MODE) \
4523 (CONST_INT_P (X) \
4524 && INTVAL (X) >= -0x1000 \
4525 && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
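/* Both macros enforce the 13-bit signed immediate field of the memory
   instructions.  A plain offset may use the whole [-4096, 4095] range
   (less the access size), while in the %lo() case the relocation can
   still add up to 0x3ff, so the compile-time part must stay below
   0xc00 for the final sum to remain a valid simm13.  */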
4527 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
4529 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
4530 ordinarily. This changes a bit when generating PIC. */
4532 static bool
4533 sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict,
4534 code_helper)
4536 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
4538 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
4539 rs1 = addr;
4540 else if (GET_CODE (addr) == PLUS)
4542 rs1 = XEXP (addr, 0);
4543 rs2 = XEXP (addr, 1);
4545 /* Canonicalize. REG comes first; if there are no regs,
4546 LO_SUM comes first. */
4547 if (!REG_P (rs1)
4548 && GET_CODE (rs1) != SUBREG
4549 && (REG_P (rs2)
4550 || GET_CODE (rs2) == SUBREG
4551 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
4553 rs1 = XEXP (addr, 1);
4554 rs2 = XEXP (addr, 0);
4557 if ((flag_pic == 1
4558 && sparc_pic_register_p (rs1)
4559 && !REG_P (rs2)
4560 && GET_CODE (rs2) != SUBREG
4561 && GET_CODE (rs2) != LO_SUM
4562 && GET_CODE (rs2) != MEM
4563 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
4564 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
4565 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
4566 || ((REG_P (rs1)
4567 || GET_CODE (rs1) == SUBREG)
4568 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
4570 imm1 = rs2;
4571 rs2 = NULL;
4573 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
4574 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
4576 /* We prohibit REG + REG for TFmode when there are no quad move insns
4577 and we consequently need to split. We do this because REG+REG
4578 is not an offsettable address. If we get the situation in reload
4579 where source and destination of a movtf pattern are both MEMs with
4580 REG+REG address, then only one of them gets converted to an
4581 offsettable address. */
4582 if (mode == TFmode
4583 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
4584 return 0;
4586 /* Likewise for TImode, but in all cases. */
4587 if (mode == TImode)
4588 return 0;
4590 /* We prohibit REG + REG on ARCH32 if not optimizing for
4591 DFmode/DImode because then mem_min_alignment is likely to be zero
4592 after reload and the forced split would lack a matching splitter
4593 pattern. */
4594 if (TARGET_ARCH32 && !optimize
4595 && (mode == DFmode || mode == DImode))
4596 return 0;
4598 else if (USE_AS_OFFSETABLE_LO10
4599 && GET_CODE (rs1) == LO_SUM
4600 && TARGET_ARCH64
4601 && ! TARGET_CM_MEDMID
4602 && RTX_OK_FOR_OLO10_P (rs2, mode))
4604 rs2 = NULL;
4605 imm1 = XEXP (rs1, 1);
4606 rs1 = XEXP (rs1, 0);
4607 if (!CONSTANT_P (imm1)
4608 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4609 return 0;
4612 else if (GET_CODE (addr) == LO_SUM)
4614 rs1 = XEXP (addr, 0);
4615 imm1 = XEXP (addr, 1);
4617 if (!CONSTANT_P (imm1)
4618 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
4619 return 0;
4621 /* We can't allow TFmode in 32-bit mode, because an offset greater
4622 than the alignment (8) may cause the LO_SUM to overflow. */
4623 if (mode == TFmode && TARGET_ARCH32)
4624 return 0;
4626 /* During reload, accept the HIGH+LO_SUM construct generated by
4627 sparc_legitimize_reload_address. */
4628 if (reload_in_progress
4629 && GET_CODE (rs1) == HIGH
4630 && XEXP (rs1, 0) == imm1)
4631 return 1;
4633 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
4634 return 1;
4635 else
4636 return 0;
4638 if (GET_CODE (rs1) == SUBREG)
4639 rs1 = SUBREG_REG (rs1);
4640 if (!REG_P (rs1))
4641 return 0;
4643 if (rs2)
4645 if (GET_CODE (rs2) == SUBREG)
4646 rs2 = SUBREG_REG (rs2);
4647 if (!REG_P (rs2))
4648 return 0;
4651 if (strict)
4653 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
4654 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
4655 return 0;
4657 else
4659 if ((! SPARC_INT_REG_P (REGNO (rs1))
4660 && REGNO (rs1) != FRAME_POINTER_REGNUM
4661 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
4662 || (rs2
4663 && (! SPARC_INT_REG_P (REGNO (rs2))
4664 && REGNO (rs2) != FRAME_POINTER_REGNUM
4665 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
4666 return 0;
4668 return 1;
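/* A few concrete data points, assuming 32-bit non-PIC code:
   [%l1 + %l2] and [%o3 + 124] are accepted; [%o3 + 8192] is not,
   since 8192 exceeds the simm13 range and must be legitimized via
   sethi; and REG + REG is rejected for TFmode without hard quad
   support, per the comment above.  */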
4671 /* Return the SYMBOL_REF for the tls_get_addr function. */
4673 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
4675 static rtx
4676 sparc_tls_get_addr (void)
4678 if (!sparc_tls_symbol)
4679 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
4681 return sparc_tls_symbol;
4684 /* Return the Global Offset Table to be used in TLS mode. */
4686 static rtx
4687 sparc_tls_got (void)
4689 /* In PIC mode, this is just the PIC offset table. */
4690 if (flag_pic)
4692 crtl->uses_pic_offset_table = 1;
4693 return pic_offset_table_rtx;
4696 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4697 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4698 if (TARGET_SUN_TLS && TARGET_ARCH32)
4700 load_got_register ();
4701 return got_register_rtx;
4704 /* In all other cases, we load a new pseudo with the GOT symbol. */
4705 return copy_to_reg (sparc_got ());
4708 /* Return true if X contains a thread-local symbol. */
4710 static bool
4711 sparc_tls_referenced_p (rtx x)
4713 if (!TARGET_HAVE_TLS)
4714 return false;
4716 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
4717 x = XEXP (XEXP (x, 0), 0);
4719 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
4720 return true;
4722 /* That's all we handle in sparc_legitimize_tls_address for now. */
4723 return false;
4726 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4727 this (thread-local) address. */
4729 static rtx
4730 sparc_legitimize_tls_address (rtx addr)
4732 rtx temp1, temp2, temp3, ret, o0, got;
4733 rtx_insn *insn;
4735 gcc_assert (can_create_pseudo_p ());
4737 if (GET_CODE (addr) == SYMBOL_REF)
4738 /* Although the various sethi/or sequences generate SImode values, many of
4739 them can be transformed by the linker when relaxing and, if relaxing to
4740 local-exec, will become a sethi/xor pair, which is signed and therefore
4741 a full DImode value in 64-bit mode. Thus we must use Pmode, lest these
4742 values be spilled onto the stack in 64-bit mode. */
4743 switch (SYMBOL_REF_TLS_MODEL (addr))
4745 case TLS_MODEL_GLOBAL_DYNAMIC:
4746 start_sequence ();
4747 temp1 = gen_reg_rtx (Pmode);
4748 temp2 = gen_reg_rtx (Pmode);
4749 ret = gen_reg_rtx (Pmode);
4750 o0 = gen_rtx_REG (Pmode, 8);
4751 got = sparc_tls_got ();
4752 emit_insn (gen_tgd_hi22 (Pmode, temp1, addr));
4753 emit_insn (gen_tgd_lo10 (Pmode, temp2, temp1, addr));
4754 emit_insn (gen_tgd_add (Pmode, o0, got, temp2, addr));
4755 insn = emit_call_insn (gen_tgd_call (Pmode, o0, sparc_tls_get_addr (),
4756 addr, const1_rtx));
4757 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4758 RTL_CONST_CALL_P (insn) = 1;
4759 insn = get_insns ();
4760 end_sequence ();
4761 emit_libcall_block (insn, ret, o0, addr);
4762 break;
4764 case TLS_MODEL_LOCAL_DYNAMIC:
4765 start_sequence ();
4766 temp1 = gen_reg_rtx (Pmode);
4767 temp2 = gen_reg_rtx (Pmode);
4768 temp3 = gen_reg_rtx (Pmode);
4769 ret = gen_reg_rtx (Pmode);
4770 o0 = gen_rtx_REG (Pmode, 8);
4771 got = sparc_tls_got ();
4772 emit_insn (gen_tldm_hi22 (Pmode, temp1));
4773 emit_insn (gen_tldm_lo10 (Pmode, temp2, temp1));
4774 emit_insn (gen_tldm_add (Pmode, o0, got, temp2));
4775 insn = emit_call_insn (gen_tldm_call (Pmode, o0, sparc_tls_get_addr (),
4776 const1_rtx));
4777 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4778 RTL_CONST_CALL_P (insn) = 1;
4779 insn = get_insns ();
4780 end_sequence ();
4781 /* Attach a unique REG_EQUAL to allow the RTL optimizers to
4782 share the LD_BASE result with other LD model accesses. */
4783 emit_libcall_block (insn, temp3, o0,
4784 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4785 UNSPEC_TLSLD_BASE));
4786 temp1 = gen_reg_rtx (Pmode);
4787 temp2 = gen_reg_rtx (Pmode);
4788 emit_insn (gen_tldo_hix22 (Pmode, temp1, addr));
4789 emit_insn (gen_tldo_lox10 (Pmode, temp2, temp1, addr));
4790 emit_insn (gen_tldo_add (Pmode, ret, temp3, temp2, addr));
4791 break;
4793 case TLS_MODEL_INITIAL_EXEC:
4794 temp1 = gen_reg_rtx (Pmode);
4795 temp2 = gen_reg_rtx (Pmode);
4796 temp3 = gen_reg_rtx (Pmode);
4797 got = sparc_tls_got ();
4798 emit_insn (gen_tie_hi22 (Pmode, temp1, addr));
4799 emit_insn (gen_tie_lo10 (Pmode, temp2, temp1, addr));
4800 if (TARGET_ARCH32)
4801 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4802 else
4803 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4804 if (TARGET_SUN_TLS)
4806 ret = gen_reg_rtx (Pmode);
4807 emit_insn (gen_tie_add (Pmode, ret, gen_rtx_REG (Pmode, 7),
4808 temp3, addr));
4810 else
4811 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4812 break;
4814 case TLS_MODEL_LOCAL_EXEC:
4815 temp1 = gen_reg_rtx (Pmode);
4816 temp2 = gen_reg_rtx (Pmode);
4817 emit_insn (gen_tle_hix22 (Pmode, temp1, addr));
4818 emit_insn (gen_tle_lox10 (Pmode, temp2, temp1, addr));
4819 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4820 break;
4822 default:
4823 gcc_unreachable ();
4826 else if (GET_CODE (addr) == CONST)
4828 rtx base, offset;
4830 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4832 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4833 offset = XEXP (XEXP (addr, 0), 1);
4835 base = force_operand (base, NULL_RTX);
4836 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4837 offset = force_reg (Pmode, offset);
4838 ret = gen_rtx_PLUS (Pmode, base, offset);
4841 else
4842 gcc_unreachable (); /* for now ... */
4844 return ret;
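/* As a concrete sketch, the local-exec case above amounts to the
   classic sequence (temporaries shown symbolically, %g7 being the
   thread pointer):

     sethi  %tle_hix22(sym), %t1
     xor    %t1, %tle_lox10(sym), %t2
     add    %g7, %t2, %ret

   where the hix22/lox10 pair, unlike plain hi/lo, reconstructs a full
   signed 32-bit offset from the thread pointer.  */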
4847 /* Legitimize PIC addresses. If the address is already position-independent,
4848 we return ORIG. Newly generated position-independent addresses go into a
4849 reg. This is REG if nonzero, otherwise we allocate register(s) as
4850 necessary. */
4852 static rtx
4853 sparc_legitimize_pic_address (rtx orig, rtx reg)
4855 if (GET_CODE (orig) == SYMBOL_REF
4856 /* See the comment in sparc_expand_move. */
4857 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4859 bool gotdata_op = false;
4860 rtx pic_ref, address;
4861 rtx_insn *insn;
4863 if (!reg)
4865 gcc_assert (can_create_pseudo_p ());
4866 reg = gen_reg_rtx (Pmode);
4869 if (flag_pic == 2)
4871 /* If not during reload, allocate another temp reg here for loading
4872 in the address, so that these instructions can be optimized
4873 properly. */
4874 rtx temp_reg = can_create_pseudo_p () ? gen_reg_rtx (Pmode) : reg;
4876 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4877 won't get confused into thinking that these two instructions
4878 are loading in the true address of the symbol. If in the
4879 future a PIC rtx exists, that should be used instead. */
4880 if (TARGET_ARCH64)
4882 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4883 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4885 else
4887 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4888 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4891 address = temp_reg;
4892 gotdata_op = true;
4894 else
4895 address = orig;
4897 crtl->uses_pic_offset_table = 1;
4898 if (gotdata_op)
4900 if (TARGET_ARCH64)
4901 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4902 pic_offset_table_rtx,
4903 address, orig));
4904 else
4905 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4906 pic_offset_table_rtx,
4907 address, orig));
4909 else
4911 pic_ref
4912 = gen_const_mem (Pmode,
4913 gen_rtx_PLUS (Pmode,
4914 pic_offset_table_rtx, address));
4915 insn = emit_move_insn (reg, pic_ref);
4918 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4919 by the loop optimizer. */
4920 set_unique_reg_note (insn, REG_EQUAL, orig);
4921 return reg;
4923 else if (GET_CODE (orig) == CONST)
4925 rtx base, offset;
4927 if (GET_CODE (XEXP (orig, 0)) == PLUS
4928 && sparc_pic_register_p (XEXP (XEXP (orig, 0), 0)))
4929 return orig;
4931 if (!reg)
4933 gcc_assert (can_create_pseudo_p ());
4934 reg = gen_reg_rtx (Pmode);
4937 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4938 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4939 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4940 base == reg ? NULL_RTX : reg);
4942 if (GET_CODE (offset) == CONST_INT)
4944 if (SMALL_INT (offset))
4945 return plus_constant (Pmode, base, INTVAL (offset));
4946 else if (can_create_pseudo_p ())
4947 offset = force_reg (Pmode, offset);
4948 else
4949 /* If we reach here, then something is seriously wrong. */
4950 gcc_unreachable ();
4952 return gen_rtx_PLUS (Pmode, base, offset);
4954 else if (GET_CODE (orig) == LABEL_REF)
4955 /* ??? We ought to be checking that the register is live instead, in case
4956 it is eliminated. */
4957 crtl->uses_pic_offset_table = 1;
4959 return orig;
4962 /* Try machine-dependent ways of modifying an illegitimate address X
4963 to be legitimate. If we find one, return the new, valid address.
4965 OLDX is the address as it was before break_out_memory_refs was called.
4966 In some cases it is useful to look at this to decide what needs to be done.
4968 MODE is the mode of the operand pointed to by X.
4970 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
4972 static rtx
4973 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4974 machine_mode mode)
4976 rtx orig_x = x;
4978 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4979 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4980 force_operand (XEXP (x, 0), NULL_RTX));
4981 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4982 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4983 force_operand (XEXP (x, 1), NULL_RTX));
4984 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4985 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4986 XEXP (x, 1));
4987 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4988 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4989 force_operand (XEXP (x, 1), NULL_RTX));
4991 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4992 return x;
4994 if (sparc_tls_referenced_p (x))
4995 x = sparc_legitimize_tls_address (x);
4996 else if (flag_pic)
4997 x = sparc_legitimize_pic_address (x, NULL_RTX);
4998 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4999 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
5000 copy_to_mode_reg (Pmode, XEXP (x, 1)));
5001 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
5002 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
5003 copy_to_mode_reg (Pmode, XEXP (x, 0)));
5004 else if (GET_CODE (x) == SYMBOL_REF
5005 || GET_CODE (x) == CONST
5006 || GET_CODE (x) == LABEL_REF)
5007 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
5009 return x;
5012 /* Delegitimize an address that was legitimized by the above function. */
5014 static rtx
5015 sparc_delegitimize_address (rtx x)
5017 x = delegitimize_mem_from_attrs (x);
5019 if (GET_CODE (x) == LO_SUM)
5020 x = XEXP (x, 1);
5022 if (GET_CODE (x) == UNSPEC)
5023 switch (XINT (x, 1))
5025 case UNSPEC_MOVE_PIC:
5026 case UNSPEC_TLSLE:
5027 x = XVECEXP (x, 0, 0);
5028 gcc_assert (GET_CODE (x) == SYMBOL_REF);
5029 break;
5030 case UNSPEC_MOVE_GOTDATA:
5031 x = XVECEXP (x, 0, 2);
5032 gcc_assert (GET_CODE (x) == SYMBOL_REF);
5033 break;
5034 default:
5035 break;
5038 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
5039 if (GET_CODE (x) == MINUS
5040 && (XEXP (x, 0) == got_register_rtx
5041 || sparc_pic_register_p (XEXP (x, 0))))
5043 rtx y = XEXP (x, 1);
5045 if (GET_CODE (y) == LO_SUM)
5046 y = XEXP (y, 1);
5048 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MOVE_PIC_LABEL)
5050 x = XVECEXP (y, 0, 0);
5051 gcc_assert (GET_CODE (x) == LABEL_REF
5052 || (GET_CODE (x) == CONST
5053 && GET_CODE (XEXP (x, 0)) == PLUS
5054 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
5055 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT));
5059 return x;
5062 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
5063 replace the input X, or the original X if no replacement is called for.
5064 The output parameter *WIN is 1 if the calling macro should goto WIN,
5065 0 if it should not.
5067 For SPARC, we wish to handle addresses by splitting them into
5068 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
5069 This cuts the number of extra insns by one.
5071 Do nothing when generating PIC code and the address is a symbolic
5072 operand or requires a scratch register. */
5075 sparc_legitimize_reload_address (rtx x, machine_mode mode,
5076 int opnum, int type,
5077 int ind_levels ATTRIBUTE_UNUSED, int *win)
5079 /* Decompose SImode constants into HIGH+LO_SUM. */
5080 if (CONSTANT_P (x)
5081 && (mode != TFmode || TARGET_ARCH64)
5082 && GET_MODE (x) == SImode
5083 && GET_CODE (x) != LO_SUM
5084 && GET_CODE (x) != HIGH
5085 && sparc_code_model <= CM_MEDLOW
5086 && !(flag_pic
5087 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
5089 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
5090 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5091 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5092 opnum, (enum reload_type)type);
5093 *win = 1;
5094 return x;
5097 /* We have to recognize what we have already generated above. */
5098 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
5100 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
5101 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
5102 opnum, (enum reload_type)type);
5103 *win = 1;
5104 return x;
5107 *win = 0;
5108 return x;
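/* E.g. with a hypothetical static object "var" under the medlow code
   model, reloading its address yields

     sethi  %hi(var), %g1        ! reloaded HIGH part
     ld     [%g1+%lo(var)], ...  ! LO_SUM kept in the memory reference

   instead of materializing the full address with a separate add.  */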
5111 /* Return true if ADDR (a legitimate address expression)
5112 has an effect that depends on the machine mode it is used for.
5114 In PIC mode,
5116 (mem:HI [%l7+a])
5118 is not equivalent to
5120 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
5122 because [%l7+a+1] is interpreted as the address of (a+1). */
5125 static bool
5126 sparc_mode_dependent_address_p (const_rtx addr,
5127 addr_space_t as ATTRIBUTE_UNUSED)
5129 if (GET_CODE (addr) == PLUS
5130 && sparc_pic_register_p (XEXP (addr, 0))
5131 && symbolic_operand (XEXP (addr, 1), VOIDmode))
5132 return true;
5134 return false;
5137 /* Emit a call instruction with the pattern given by PAT. ADDR is the
5138 address of the call target. */
5140 void
5141 sparc_emit_call_insn (rtx pat, rtx addr)
5143 rtx_insn *insn;
5145 insn = emit_call_insn (pat);
5147 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
5148 if (TARGET_VXWORKS_RTP
5149 && flag_pic
5150 && GET_CODE (addr) == SYMBOL_REF
5151 && (SYMBOL_REF_DECL (addr)
5152 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
5153 : !SYMBOL_REF_LOCAL_P (addr)))
5155 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5156 crtl->uses_pic_offset_table = 1;
5160 /* Return 1 if RTX is a MEM which is known to be aligned to at
5161 least a DESIRED byte boundary. */
5164 mem_min_alignment (rtx mem, int desired)
5166 rtx addr, base, offset;
5168 /* If it's not a MEM we can't accept it. */
5169 if (GET_CODE (mem) != MEM)
5170 return 0;
5172 /* Obviously... */
5173 if (!TARGET_UNALIGNED_DOUBLES
5174 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
5175 return 1;
5177 /* ??? The rest of the function predates MEM_ALIGN so
5178 there is probably a bit of redundancy. */
5179 addr = XEXP (mem, 0);
5180 base = offset = NULL_RTX;
5181 if (GET_CODE (addr) == PLUS)
5183 if (GET_CODE (XEXP (addr, 0)) == REG)
5185 base = XEXP (addr, 0);
5187 /* The point is that if the base REG is properly aligned,
5188 the compiler will make sure that any REG-based index
5189 off of it will be properly aligned
5190 as well. */
5191 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
5192 offset = XEXP (addr, 1);
5193 else
5194 offset = const0_rtx;
5197 else if (GET_CODE (addr) == REG)
5199 base = addr;
5200 offset = const0_rtx;
5203 if (base != NULL_RTX)
5205 int regno = REGNO (base);
5207 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
5209 /* Check if the compiler has recorded some information
5210 about the alignment of the base REG. If reload has
5211 completed, we already matched with proper alignments.
5212 If not running global_alloc, reload might give us an
5213 unaligned pointer to the local stack, though. */
5214 if (((cfun != 0
5215 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
5216 || (optimize && reload_completed))
5217 && (INTVAL (offset) & (desired - 1)) == 0)
5218 return 1;
5220 else
5222 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
5223 return 1;
5226 else if (! TARGET_UNALIGNED_DOUBLES
5227 || CONSTANT_P (addr)
5228 || GET_CODE (addr) == LO_SUM)
5230 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
5231 is true, in which case we can only assume that an access is aligned if
5232 it is to a constant address, or the address involves a LO_SUM. */
5233 return 1;
5236 /* An obviously unaligned address. */
5237 return 0;
5241 /* Vectors to keep interesting information about registers where it can easily
5242 be looked up. We used to use the actual mode value as the bit number, but there
5243 are more than 32 modes now. Instead we use two tables: one indexed by
5244 hard register number, and one indexed by mode. */
5246 /* The purpose of sparc_mode_class is to shrink the range of modes so that
5247 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
5248 mapped into one sparc_mode_class mode. */
5250 enum sparc_mode_class {
5251 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
5252 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
5253 CC_MODE, CCFP_MODE
5256 /* Modes for single-word and smaller quantities. */
5257 #define S_MODES \
5258 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
5260 /* Modes for double-word and smaller quantities. */
5261 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5263 /* Modes for quad-word and smaller quantities. */
5264 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
5266 /* Modes for 8-word and smaller quantities. */
5267 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
5269 /* Modes for single-float quantities. */
5270 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
5272 /* Modes for double-float and smaller quantities. */
5273 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
5275 /* Modes for quad-float and smaller quantities. */
5276 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
5278 /* Modes for quad-float pairs and smaller quantities. */
5279 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
5281 /* Modes for double-float only quantities. */
5282 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
5284 /* Modes for quad-float and double-float only quantities. */
5285 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
5287 /* Modes for quad-float pairs and double-float only quantities. */
5288 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
5290 /* Modes for condition codes. */
5291 #define CC_MODES (1 << (int) CC_MODE)
5292 #define CCFP_MODES (1 << (int) CCFP_MODE)
5294 /* Value is 1 if register/mode pair is acceptable on sparc.
5296 The funny mixture of D and T modes is because integer operations
5297 do not specially operate on tetra quantities, so non-quad-aligned
5298 registers can hold quadword quantities (except %o4 and %i4 because
5299 they cross fixed registers).
5301 ??? Note that, despite the settings, non-double-aligned parameter
5302 registers can hold double-word quantities in 32-bit mode. */
5304 /* This points to either the 32-bit or the 64-bit version. */
5305 static const int *hard_regno_mode_classes;
5307 static const int hard_32bit_mode_classes[] = {
5308 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5309 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5310 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
5311 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
5313 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5314 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5315 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5316 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5318 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5319 and none can hold SFmode/SImode values. */
5320 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5321 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5322 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5323 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5325 /* %fcc[0123] */
5326 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5328 /* %icc, %sfp, %gsr */
5329 CC_MODES, 0, D_MODES
5332 static const int hard_64bit_mode_classes[] = {
5333 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5334 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5335 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5336 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
5338 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5339 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5340 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
5341 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
5343 /* FP regs f32 to f63. Only the even numbered registers actually exist,
5344 and none can hold SFmode/SImode values. */
5345 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5346 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5347 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5348 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
5350 /* %fcc[0123] */
5351 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
5353 /* %icc, %sfp, %gsr */
5354 CC_MODES, 0, D_MODES
5357 static int sparc_mode_class [NUM_MACHINE_MODES];
5359 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
5361 static void
5362 sparc_init_modes (void)
5364 int i;
5366 for (i = 0; i < NUM_MACHINE_MODES; i++)
5368 machine_mode m = (machine_mode) i;
5369 unsigned int size = GET_MODE_SIZE (m);
5371 switch (GET_MODE_CLASS (m))
5373 case MODE_INT:
5374 case MODE_PARTIAL_INT:
5375 case MODE_COMPLEX_INT:
5376 if (size < 4)
5377 sparc_mode_class[i] = 1 << (int) H_MODE;
5378 else if (size == 4)
5379 sparc_mode_class[i] = 1 << (int) S_MODE;
5380 else if (size == 8)
5381 sparc_mode_class[i] = 1 << (int) D_MODE;
5382 else if (size == 16)
5383 sparc_mode_class[i] = 1 << (int) T_MODE;
5384 else if (size == 32)
5385 sparc_mode_class[i] = 1 << (int) O_MODE;
5386 else
5387 sparc_mode_class[i] = 0;
5388 break;
5389 case MODE_VECTOR_INT:
5390 if (size == 4)
5391 sparc_mode_class[i] = 1 << (int) SF_MODE;
5392 else if (size == 8)
5393 sparc_mode_class[i] = 1 << (int) DF_MODE;
5394 else
5395 sparc_mode_class[i] = 0;
5396 break;
5397 case MODE_FLOAT:
5398 case MODE_COMPLEX_FLOAT:
5399 if (size == 4)
5400 sparc_mode_class[i] = 1 << (int) SF_MODE;
5401 else if (size == 8)
5402 sparc_mode_class[i] = 1 << (int) DF_MODE;
5403 else if (size == 16)
5404 sparc_mode_class[i] = 1 << (int) TF_MODE;
5405 else if (size == 32)
5406 sparc_mode_class[i] = 1 << (int) OF_MODE;
5407 else
5408 sparc_mode_class[i] = 0;
5409 break;
5410 case MODE_CC:
5411 if (m == CCFPmode || m == CCFPEmode)
5412 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
5413 else
5414 sparc_mode_class[i] = 1 << (int) CC_MODE;
5415 break;
5416 default:
5417 sparc_mode_class[i] = 0;
5418 break;
5422 if (TARGET_ARCH64)
5423 hard_regno_mode_classes = hard_64bit_mode_classes;
5424 else
5425 hard_regno_mode_classes = hard_32bit_mode_classes;
5427 /* Initialize the array used by REGNO_REG_CLASS. */
5428 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5430 if (i < 16 && TARGET_V8PLUS)
5431 sparc_regno_reg_class[i] = I64_REGS;
5432 else if (i < 32 || i == FRAME_POINTER_REGNUM)
5433 sparc_regno_reg_class[i] = GENERAL_REGS;
5434 else if (i < 64)
5435 sparc_regno_reg_class[i] = FP_REGS;
5436 else if (i < 96)
5437 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
5438 else if (i < 100)
5439 sparc_regno_reg_class[i] = FPCC_REGS;
5440 else
5441 sparc_regno_reg_class[i] = NO_REGS;
5445 /* Return whether REGNO, a global or FP register, must be saved/restored. */
5447 static inline bool
5448 save_global_or_fp_reg_p (unsigned int regno,
5449 int leaf_function ATTRIBUTE_UNUSED)
5451 return !call_used_or_fixed_reg_p (regno) && df_regs_ever_live_p (regno);
5454 /* Return whether the return address register (%i7) is needed. */
5456 static inline bool
5457 return_addr_reg_needed_p (int leaf_function)
5459 /* If it is live, for example because of __builtin_return_address (0). */
5460 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
5461 return true;
5463 /* Otherwise, it is needed as save register if %o7 is clobbered. */
5464 if (!leaf_function
5465 /* Loading the GOT register clobbers %o7. */
5466 || crtl->uses_pic_offset_table
5467 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
5468 return true;
5470 return false;
5473 /* Return whether REGNO, a local or in register, must be saved/restored. */
5475 static bool
5476 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
5478 /* General case: call-saved registers live at some point. */
5479 if (!call_used_or_fixed_reg_p (regno) && df_regs_ever_live_p (regno))
5480 return true;
5482 /* Frame pointer register (%fp) if needed. */
5483 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5484 return true;
5486 /* Return address register (%i7) if needed. */
5487 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
5488 return true;
5490 /* GOT register (%l7) if needed. */
5491 if (got_register_rtx && regno == REGNO (got_register_rtx))
5492 return true;
5494 /* If the function accesses prior frames, the frame pointer and the return
5495 address of the previous frame must be saved on the stack. */
5496 if (crtl->accesses_prior_frames
5497 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
5498 return true;
5500 return false;
5503 /* Compute the frame size required by the function. This function is called
5504 during the reload pass and also by sparc_expand_prologue. */
5506 static HOST_WIDE_INT
5507 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
5509 HOST_WIDE_INT frame_size, apparent_frame_size;
5510 int args_size, n_global_fp_regs = 0;
5511 bool save_local_in_regs_p = false;
5512 unsigned int i;
5514 /* If the function allocates dynamic stack space, the dynamic offset is
5515 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
5516 if (leaf_function && !cfun->calls_alloca)
5517 args_size = 0;
5518 else
5519 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
5521 /* Calculate space needed for global registers. */
5522 if (TARGET_ARCH64)
5524 for (i = 0; i < 8; i++)
5525 if (save_global_or_fp_reg_p (i, 0))
5526 n_global_fp_regs += 2;
5528 else
5530 for (i = 0; i < 8; i += 2)
5531 if (save_global_or_fp_reg_p (i, 0)
5532 || save_global_or_fp_reg_p (i + 1, 0))
5533 n_global_fp_regs += 2;
5536 /* In the flat window model, find out which local and in registers need to
5537 be saved. We don't reserve space in the current frame for them as they
5538 will be spilled into the register window save area of the caller's frame.
5539 However, as soon as we use this register window save area, we must create
5540 that of the current frame to make it the live one. */
5541 if (TARGET_FLAT)
5542 for (i = 16; i < 32; i++)
5543 if (save_local_or_in_reg_p (i, leaf_function))
5545 save_local_in_regs_p = true;
5546 break;
5549 /* Calculate space needed for FP registers. */
5550 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
5551 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
5552 n_global_fp_regs += 2;
5554 if (size == 0
5555 && n_global_fp_regs == 0
5556 && args_size == 0
5557 && !save_local_in_regs_p)
5558 frame_size = apparent_frame_size = 0;
5559 else
5561 /* Start from the apparent frame size. */
5562 apparent_frame_size = ROUND_UP (size, 8) + n_global_fp_regs * 4;
5564 /* We need to add the size of the outgoing argument area. */
5565 frame_size = apparent_frame_size + ROUND_UP (args_size, 8);
5567 /* And that of the register window save area. */
5568 frame_size += FIRST_PARM_OFFSET (cfun->decl);
5570 /* Finally, bump to the appropriate alignment. */
5571 frame_size = SPARC_STACK_ALIGN (frame_size);
5574 /* Set up values for use in prologue and epilogue. */
5575 sparc_frame_size = frame_size;
5576 sparc_apparent_frame_size = apparent_frame_size;
5577 sparc_n_global_fp_regs = n_global_fp_regs;
5578 sparc_save_local_in_regs_p = save_local_in_regs_p;
5580 return frame_size;
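/* Worked example, assuming 32-bit code where REG_PARM_STACK_SPACE is
   24 bytes and FIRST_PARM_OFFSET is 68 bytes (the 16-word register
   window save area plus the aggregate-return slot): a non-leaf
   function with 40 bytes of locals and no saved global/FP registers
   gets apparent_frame_size = 40 and frame_size = SPARC_STACK_ALIGN
   (40 + 24 + 68) = 136.  */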
5583 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5586 sparc_initial_elimination_offset (int to)
5588 int offset;
5590 if (to == STACK_POINTER_REGNUM)
5591 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
5592 else
5593 offset = 0;
5595 offset += SPARC_STACK_BIAS;
5596 return offset;
5599 /* Output any necessary .register pseudo-ops. */
5601 void
5602 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
5604 int i;
5606 if (TARGET_ARCH32)
5607 return;
5609 /* Check if %g[2367] were used without
5610 .register being printed for them already. */
5611 for (i = 2; i < 8; i++)
5613 if (df_regs_ever_live_p (i)
5614 && ! sparc_hard_reg_printed [i])
5616 sparc_hard_reg_printed [i] = 1;
5617 /* %g7 is used as TLS base register, use #ignore
5618 for it instead of #scratch. */
5619 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
5620 i == 7 ? "ignore" : "scratch");
5622 if (i == 3) i = 5;
5626 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
5628 #if PROBE_INTERVAL > 4096
5629 #error Cannot use indexed addressing mode for stack probing
5630 #endif
5632 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
5633 inclusive. These are offsets from the current stack pointer.
5635 Note that we don't use the REG+REG addressing mode for the probes because
5636 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
5637 so the advantages of having a single code path win here. */
5639 static void
5640 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
5642 rtx g1 = gen_rtx_REG (Pmode, 1);
5644 /* See if we have a constant small number of probes to generate. If so,
5645 that's the easy case. */
5646 if (size <= PROBE_INTERVAL)
5648 emit_move_insn (g1, GEN_INT (first));
5649 emit_insn (gen_rtx_SET (g1,
5650 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5651 emit_stack_probe (plus_constant (Pmode, g1, -size));
5654 /* The run-time loop is made up of 9 insns in the generic case while the
5655 compile-time loop is made up of 4 + 2*(n-2) insns for n intervals. */
5656 else if (size <= 4 * PROBE_INTERVAL)
5658 HOST_WIDE_INT i;
5660 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
5661 emit_insn (gen_rtx_SET (g1,
5662 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5663 emit_stack_probe (g1);
5665 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5666 it exceeds SIZE. If only two probes are needed, this will not
5667 generate any code. Then probe at FIRST + SIZE. */
5668 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5670 emit_insn (gen_rtx_SET (g1,
5671 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5672 emit_stack_probe (g1);
5675 emit_stack_probe (plus_constant (Pmode, g1,
5676 (i - PROBE_INTERVAL) - size));
5679 /* Otherwise, do the same as above, but in a loop. Note that we must be
5680 extra careful with variables wrapping around because we might be at
5681 the very top (or the very bottom) of the address space and we have
5682 to be able to handle this case properly; in particular, we use an
5683 equality test for the loop condition. */
5684 else
5686 HOST_WIDE_INT rounded_size;
5687 rtx g4 = gen_rtx_REG (Pmode, 4);
5689 emit_move_insn (g1, GEN_INT (first));
5692 /* Step 1: round SIZE to the previous multiple of the interval. */
5694 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
5695 emit_move_insn (g4, GEN_INT (rounded_size));
5698 /* Step 2: compute initial and final value of the loop counter. */
5700 /* TEST_ADDR = SP + FIRST. */
5701 emit_insn (gen_rtx_SET (g1,
5702 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5704 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5705 emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));
5708 /* Step 3: the loop
5710 while (TEST_ADDR != LAST_ADDR)
5712 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5713 probe at TEST_ADDR
5716 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5717 until it is equal to ROUNDED_SIZE. */
5719 emit_insn (gen_probe_stack_range (Pmode, g1, g1, g4));
5722 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5723 that SIZE is equal to ROUNDED_SIZE. */
5725 if (size != rounded_size)
5726 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5729 /* Make sure nothing is scheduled before we are done. */
5730 emit_insn (gen_blockage ());
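/* For instance, with the default 4096-byte PROBE_INTERVAL: SIZE ==
   8192 takes the unrolled branch and probes at FIRST + 4096 and
   FIRST + 8192, while SIZE == 20000 uses the loop to probe every
   4096 bytes up to the rounded size (16384) and then once more at
   FIRST + 20000.  */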
5733 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5734 absolute addresses. */
5736 const char *
5737 output_probe_stack_range (rtx reg1, rtx reg2)
5739 static int labelno = 0;
5740 char loop_lab[32];
5741 rtx xops[2];
5743 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
5745 /* Loop. */
5746 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5748 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5749 xops[0] = reg1;
5750 xops[1] = GEN_INT (-PROBE_INTERVAL);
5751 output_asm_insn ("add\t%0, %1, %0", xops);
5753 /* Test if TEST_ADDR == LAST_ADDR. */
5754 xops[1] = reg2;
5755 output_asm_insn ("cmp\t%0, %1", xops);
5757 /* Probe at TEST_ADDR and branch. */
5758 if (TARGET_ARCH64)
5759 fputs ("\tbne,pt\t%xcc,", asm_out_file);
5760 else
5761 fputs ("\tbne\t", asm_out_file);
5762 assemble_name_raw (asm_out_file, loop_lab);
5763 fputc ('\n', asm_out_file);
5764 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5765 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5767 return "";
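/* The emitted loop thus looks roughly like this in 64-bit mode, with
   the default 4096-byte interval and the 2047-byte stack bias:

   .LPSRL0:
       add     %g1, -4096, %g1
       cmp     %g1, %g4
       bne,pt  %xcc, .LPSRL0
        st     %g0, [%g1+2047]
*/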
5770 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5771 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5772 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5773 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5774 the action to be performed if it returns false. Return the new offset. */
5776 typedef bool (*sorr_pred_t) (unsigned int, int);
5777 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5779 static int
5780 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5781 int offset, int leaf_function, sorr_pred_t save_p,
5782 sorr_act_t action_true, sorr_act_t action_false)
5784 unsigned int i;
5785 rtx mem;
5786 rtx_insn *insn;
5788 if (TARGET_ARCH64 && high <= 32)
5790 int fp_offset = -1;
5792 for (i = low; i < high; i++)
5794 if (save_p (i, leaf_function))
5796 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5797 base, offset));
5798 if (action_true == SORR_SAVE)
5800 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5801 RTX_FRAME_RELATED_P (insn) = 1;
5803 else /* action_true == SORR_RESTORE */
5805 /* The frame pointer must be restored last since its old
5806 value may be used as base address for the frame. This
5807 is problematic in 64-bit mode only because of the lack
5808 of a double-word load instruction. */
5809 if (i == HARD_FRAME_POINTER_REGNUM)
5810 fp_offset = offset;
5811 else
5812 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5814 offset += 8;
5816 else if (action_false == SORR_ADVANCE)
5817 offset += 8;
5820 if (fp_offset >= 0)
5822 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5823 emit_move_insn (hard_frame_pointer_rtx, mem);
5826 else
5828 for (i = low; i < high; i += 2)
5830 bool reg0 = save_p (i, leaf_function);
5831 bool reg1 = save_p (i + 1, leaf_function);
5832 machine_mode mode;
5833 int regno;
5835 if (reg0 && reg1)
5837 mode = SPARC_INT_REG_P (i) ? E_DImode : E_DFmode;
5838 regno = i;
5840 else if (reg0)
5842 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5843 regno = i;
5845 else if (reg1)
5847 mode = SPARC_INT_REG_P (i) ? E_SImode : E_SFmode;
5848 regno = i + 1;
5849 offset += 4;
5851 else
5853 if (action_false == SORR_ADVANCE)
5854 offset += 8;
5855 continue;
5858 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5859 if (action_true == SORR_SAVE)
5861 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5862 RTX_FRAME_RELATED_P (insn) = 1;
5863 if (mode == DImode)
5865 rtx set1, set2;
5866 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5867 offset));
5868 set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
5869 RTX_FRAME_RELATED_P (set1) = 1;
5870 mem
5871 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5872 offset + 4));
5873 set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
5874 RTX_FRAME_RELATED_P (set2) = 1;
5875 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5876 gen_rtx_PARALLEL (VOIDmode,
5877 gen_rtvec (2, set1, set2)));
5880 else /* action_true == SORR_RESTORE */
5881 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5883 /* Bump and round down to double word
5884 in case we already bumped by 4. */
5885 offset = ROUND_DOWN (offset + 8, 8);
5889 return offset;
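/* Editor's note: illustrative summary, not part of the original file.
   The 32-bit path above walks the registers two at a time so that
   adjacent saves can share a double-word slot:

     save_p (i)  save_p (i+1)   mode used    effect on the slot
	yes	    yes		DI/DFmode    both halves, one insn
	yes	    no		SI/SFmode    low half only
	no	    yes		SI/SFmode    high half (offset += 4 first)
	no	    no		none	     skipped (+8 if SORR_ADVANCE)

   The trailing ROUND_DOWN re-aligns OFFSET to a double word after the
   odd +4 bump of the third case.  */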
5892 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5894 static rtx
5895 emit_adjust_base_to_offset (rtx base, int offset)
5897 /* ??? This might be optimized a little as %g1 might already have a
5898 value close enough that a single add insn will do. */
5899 /* ??? Although, all of this is probably only a temporary fix because
5900 if %g1 can hold a function result, then sparc_expand_epilogue will
5901 lose (the result will be clobbered). */
5902 rtx new_base = gen_rtx_REG (Pmode, 1);
5903 emit_move_insn (new_base, GEN_INT (offset));
5904 emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5905 return new_base;
5908 /* Emit code to save/restore call-saved global and FP registers. */
5910 static void
5911 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5913 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5915 base = emit_adjust_base_to_offset (base, offset);
5916 offset = 0;
5919 offset
5920 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5921 save_global_or_fp_reg_p, action, SORR_NONE);
5922 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5923 save_global_or_fp_reg_p, action, SORR_NONE);
5926 /* Emit code to save/restore call-saved local and in registers. */
5928 static void
5929 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5931 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5933 base = emit_adjust_base_to_offset (base, offset);
5934 offset = 0;
5937 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5938 save_local_or_in_reg_p, action, SORR_ADVANCE);
5941 /* Emit a window_save insn. */
5943 static rtx_insn *
5944 emit_window_save (rtx increment)
5946 rtx_insn *insn = emit_insn (gen_window_save (increment));
5947 RTX_FRAME_RELATED_P (insn) = 1;
5949 /* The incoming return address (%o7) is saved in %i7. */
5950 add_reg_note (insn, REG_CFA_REGISTER,
5951 gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5952 gen_rtx_REG (Pmode,
5953 INCOMING_RETURN_ADDR_REGNUM)));
5955 /* The window save event. */
5956 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5958 /* The CFA is %fp, the hard frame pointer. */
5959 add_reg_note (insn, REG_CFA_DEF_CFA,
5960 plus_constant (Pmode, hard_frame_pointer_rtx,
5961 INCOMING_FRAME_SP_OFFSET));
5963 return insn;
5966 /* Generate an increment for the stack pointer. */
5968 static rtx
5969 gen_stack_pointer_inc (rtx increment)
5971 return gen_rtx_SET (stack_pointer_rtx,
5972 gen_rtx_PLUS (Pmode,
5973 stack_pointer_rtx,
5974 increment));
5977 /* Expand the function prologue. The prologue is responsible for reserving
5978 storage for the frame, saving the call-saved registers and loading the
5979 GOT register if needed. */
5981 void
5982 sparc_expand_prologue (void)
5984 HOST_WIDE_INT size;
5985 rtx_insn *insn;
5987 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5988 on the final value of the flag means deferring the prologue/epilogue
5989 expansion until just before the second scheduling pass, which is too
5990 late to emit multiple epilogues or return insns.
5992 Of course we are making the assumption that the value of the flag
5993 will not change between now and its final value. Of the three parts
5994 of the formula, only the last one can reasonably vary. Let's take a
5995 closer look, after assuming that the first two are set to true
5996 (otherwise the last value is effectively silenced).
5998 If only_leaf_regs_used returns false, the global predicate will also
5999 be false so the actual frame size calculated below will be positive.
6000 As a consequence, the save_register_window insn will be emitted in
6001 the instruction stream; now this insn explicitly references %fp
6002 which is not a leaf register so only_leaf_regs_used will always
6003 return false subsequently.
6005 If only_leaf_regs_used returns true, we hope that the subsequent
6006 optimization passes won't cause non-leaf registers to pop up. For
6007 example, the regrename pass has special provisions to not rename to
6008 non-leaf registers in a leaf function. */
6009 sparc_leaf_function_p
6010 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
6012 size = sparc_compute_frame_size (get_frame_size (), sparc_leaf_function_p);
6014 if (flag_stack_usage_info)
6015 current_function_static_stack_size = size;
6017 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6018 || flag_stack_clash_protection)
6020 if (crtl->is_leaf && !cfun->calls_alloca)
6022 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6023 sparc_emit_probe_stack_range (get_stack_check_protect (),
6024 size - get_stack_check_protect ());
6026 else if (size > 0)
6027 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6030 if (size == 0)
6031 ; /* do nothing. */
6032 else if (sparc_leaf_function_p)
6034 rtx size_int_rtx = GEN_INT (-size);
6036 if (size <= 4096)
6037 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6038 else if (size <= 8192)
6040 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6041 RTX_FRAME_RELATED_P (insn) = 1;
6043 /* %sp is still the CFA register. */
6044 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6046 else
6048 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6049 emit_move_insn (size_rtx, size_int_rtx);
6050 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6051 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6052 gen_stack_pointer_inc (size_int_rtx));
6055 RTX_FRAME_RELATED_P (insn) = 1;
6057 /* Ensure no memory access is done before the frame is established. */
6058 emit_insn (gen_frame_blockage ());
6060 else
6062 rtx size_int_rtx = GEN_INT (-size);
6064 if (size <= 4096)
6065 emit_window_save (size_int_rtx);
6066 else if (size <= 8192)
6068 emit_window_save (GEN_INT (-4096));
6070 /* %sp is not the CFA register anymore. */
6071 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6073 /* Likewise. */
6074 emit_insn (gen_frame_blockage ());
6076 else
6078 rtx size_rtx = gen_rtx_REG (Pmode, 1);
6079 emit_move_insn (size_rtx, size_int_rtx);
6080 emit_window_save (size_rtx);
6084 if (sparc_leaf_function_p)
6086 sparc_frame_base_reg = stack_pointer_rtx;
6087 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6089 else
6091 sparc_frame_base_reg = hard_frame_pointer_rtx;
6092 sparc_frame_base_offset = SPARC_STACK_BIAS;
6095 if (sparc_n_global_fp_regs > 0)
6096 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6097 sparc_frame_base_offset
6098 - sparc_apparent_frame_size,
6099 SORR_SAVE);
6101 /* Advertise that the data calculated just above are now valid. */
6102 sparc_prologue_data_valid_p = true;
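/* Editor's note: illustrative sketch, not part of the original file.
   The 4096/8192 thresholds above reflect the 13-bit signed immediate
   field of SPARC arithmetic instructions, which spans [-4096, 4095]:
   -SIZE fits one add for SIZE <= 4096 and two adds for SIZE <= 8192,
   after which the constant must be built in a scratch register (%g1).  */
#if 0
/* Standalone model of the stack-decrement strategy chosen above.  */
static int
model_stack_dec_insns (long size)
{
  if (size <= 4096)
    return 1;	/* add  %sp, -size, %sp */
  else if (size <= 8192)
    return 2;	/* add  %sp, -4096, %sp;  add  %sp, 4096 - size, %sp */
  else
    return 3;	/* sethi/or into %g1, then add  %sp, %g1, %sp (at least) */
}
#endif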
6105 /* Expand the function prologue for the flat register window model. The
6106 prologue is responsible for reserving storage for the frame, saving the
6107 call-saved registers and loading the GOT register if needed. */
6109 void
6110 sparc_flat_expand_prologue (void)
6112 HOST_WIDE_INT size;
6113 rtx_insn *insn;
6115 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
6117 size = sparc_compute_frame_size (get_frame_size (), sparc_leaf_function_p);
6119 if (flag_stack_usage_info)
6120 current_function_static_stack_size = size;
6122 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
6123 || flag_stack_clash_protection)
6125 if (crtl->is_leaf && !cfun->calls_alloca)
6127 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
6128 sparc_emit_probe_stack_range (get_stack_check_protect (),
6129 size - get_stack_check_protect ());
6131 else if (size > 0)
6132 sparc_emit_probe_stack_range (get_stack_check_protect (), size);
6135 if (sparc_save_local_in_regs_p)
6136 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
6137 SORR_SAVE);
6139 if (size == 0)
6140 ; /* do nothing. */
6141 else
6143 rtx size_int_rtx, size_rtx;
6145 size_rtx = size_int_rtx = GEN_INT (-size);
6147 /* We establish the frame (i.e. decrement the stack pointer) first, even
6148 if we use a frame pointer, because we cannot clobber any call-saved
6149 registers, including the frame pointer, if we haven't created a new
6150 register save area, for the sake of compatibility with the ABI. */
6151 if (size <= 4096)
6152 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
6153 else if (size <= 8192 && !frame_pointer_needed)
6155 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
6156 RTX_FRAME_RELATED_P (insn) = 1;
6157 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
6159 else
6161 size_rtx = gen_rtx_REG (Pmode, 1);
6162 emit_move_insn (size_rtx, size_int_rtx);
6163 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
6164 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6165 gen_stack_pointer_inc (size_int_rtx));
6167 RTX_FRAME_RELATED_P (insn) = 1;
6169 /* Ensure no memory access is done before the frame is established. */
6170 emit_insn (gen_frame_blockage ());
6172 if (frame_pointer_needed)
6174 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
6175 gen_rtx_MINUS (Pmode,
6176 stack_pointer_rtx,
6177 size_rtx)));
6178 RTX_FRAME_RELATED_P (insn) = 1;
6180 add_reg_note (insn, REG_CFA_ADJUST_CFA,
6181 gen_rtx_SET (hard_frame_pointer_rtx,
6182 plus_constant (Pmode, stack_pointer_rtx,
6183 size)));
6186 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6188 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
6189 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
6191 insn = emit_move_insn (i7, o7);
6192 RTX_FRAME_RELATED_P (insn) = 1;
6194 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
6196 /* Prevent this instruction from ever being considered dead,
6197 even if this function has no epilogue. */
6198 emit_use (i7);
6202 if (frame_pointer_needed)
6204 sparc_frame_base_reg = hard_frame_pointer_rtx;
6205 sparc_frame_base_offset = SPARC_STACK_BIAS;
6207 else
6209 sparc_frame_base_reg = stack_pointer_rtx;
6210 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
6213 if (sparc_n_global_fp_regs > 0)
6214 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6215 sparc_frame_base_offset
6216 - sparc_apparent_frame_size,
6217 SORR_SAVE);
6219 /* Advertise that the data calculated just above are now valid. */
6220 sparc_prologue_data_valid_p = true;
6223 /* This function generates the assembly code for function entry, which boils
6224 down to emitting the necessary .register directives. */
6226 static void
6227 sparc_asm_function_prologue (FILE *file)
6229 /* Check that the assumption we made in sparc_expand_prologue is valid. */
6230 if (!TARGET_FLAT)
6231 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
6233 sparc_output_scratch_registers (file);
6236 /* Expand the function epilogue, either normal or part of a sibcall.
6237 We emit all the instructions except the return or the call. */
6239 void
6240 sparc_expand_epilogue (bool for_eh)
6242 HOST_WIDE_INT size = sparc_frame_size;
6244 if (cfun->calls_alloca)
6245 emit_insn (gen_frame_blockage ());
6247 if (sparc_n_global_fp_regs > 0)
6248 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6249 sparc_frame_base_offset
6250 - sparc_apparent_frame_size,
6251 SORR_RESTORE);
6253 if (size == 0 || for_eh)
6254 ; /* do nothing. */
6255 else if (sparc_leaf_function_p)
6257 /* Ensure no memory access is done after the frame is destroyed. */
6258 emit_insn (gen_frame_blockage ());
6260 if (size <= 4096)
6261 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6262 else if (size <= 8192)
6264 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6265 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6267 else
6269 rtx reg = gen_rtx_REG (Pmode, 1);
6270 emit_move_insn (reg, GEN_INT (size));
6271 emit_insn (gen_stack_pointer_inc (reg));
6276 /* Expand the function epilogue for the flat model, either normal or part
6277 of a sibcall. We emit all the instructions except the return or the call. */
6279 void
6280 sparc_flat_expand_epilogue (bool for_eh)
6282 HOST_WIDE_INT size = sparc_frame_size;
6284 if (sparc_n_global_fp_regs > 0)
6285 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
6286 sparc_frame_base_offset
6287 - sparc_apparent_frame_size,
6288 SORR_RESTORE);
6290 /* If we have a frame pointer, we'll need both to restore it before the
6291 frame is destroyed and to use its current value in destroying the frame.
6292 Since we don't have an atomic way to do that in the flat window model,
6293 we save the current value into a temporary register (%g1). */
6294 if (frame_pointer_needed && !for_eh)
6295 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
6297 if (return_addr_reg_needed_p (sparc_leaf_function_p))
6298 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
6299 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
6301 if (sparc_save_local_in_regs_p)
6302 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
6303 sparc_frame_base_offset,
6304 SORR_RESTORE);
6306 if (size == 0 || for_eh)
6307 ; /* do nothing. */
6308 else if (frame_pointer_needed)
6310 /* Ensure no memory access is done after the frame is destroyed. */
6311 emit_insn (gen_frame_blockage ());
6313 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
6315 else
6317 /* Likewise. */
6318 emit_insn (gen_frame_blockage ());
6320 if (size <= 4096)
6321 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6322 else if (size <= 8192)
6324 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6325 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6327 else
6329 rtx reg = gen_rtx_REG (Pmode, 1);
6330 emit_move_insn (reg, GEN_INT (size));
6331 emit_insn (gen_stack_pointer_inc (reg));
6336 /* Return true if it is appropriate to emit `return' instructions in the
6337 body of a function. */
6339 bool
6340 sparc_can_use_return_insn_p (void)
6342 return sparc_prologue_data_valid_p
6343 && sparc_n_global_fp_regs == 0
6344 && (TARGET_FLAT
6345 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6346 : (sparc_frame_size == 0 || !sparc_leaf_function_p));
6349 /* This function generates the assembly code for function exit. */
6351 static void
6352 sparc_asm_function_epilogue (FILE *file)
6354 /* If the last two instructions of a function are "call foo; dslot;"
6355 the return address might point to the first instruction in the next
6356 function and we have to output a dummy nop for the sake of sane
6357 backtraces in such cases. This is pointless for sibling calls since
6358 the return address is explicitly adjusted. */
6360 rtx_insn *insn = get_last_insn ();
6362 rtx last_real_insn = prev_real_insn (insn);
6363 if (last_real_insn
6364 && NONJUMP_INSN_P (last_real_insn)
6365 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6366 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6368 if (last_real_insn
6369 && CALL_P (last_real_insn)
6370 && !SIBLING_CALL_P (last_real_insn))
6371 fputs ("\tnop\n", file);
6373 sparc_output_deferred_case_vectors ();
6376 /* Output a 'restore' instruction. */
6378 static void
6379 output_restore (rtx pat)
6381 rtx operands[3];
6383 if (! pat)
6385 fputs ("\t restore\n", asm_out_file);
6386 return;
6389 gcc_assert (GET_CODE (pat) == SET);
6391 operands[0] = SET_DEST (pat);
6392 pat = SET_SRC (pat);
6394 switch (GET_CODE (pat))
6396 case PLUS:
6397 operands[1] = XEXP (pat, 0);
6398 operands[2] = XEXP (pat, 1);
6399 output_asm_insn (" restore %r1, %2, %Y0", operands);
6400 break;
6401 case LO_SUM:
6402 operands[1] = XEXP (pat, 0);
6403 operands[2] = XEXP (pat, 1);
6404 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6405 break;
6406 case ASHIFT:
6407 operands[1] = XEXP (pat, 0);
6408 gcc_assert (XEXP (pat, 1) == const1_rtx);
6409 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6410 break;
6411 default:
6412 operands[1] = pat;
6413 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6414 break;
6418 /* Output a return. */
6420 const char *
6421 output_return (rtx_insn *insn)
6423 if (crtl->calls_eh_return)
6425 /* If the function uses __builtin_eh_return, the eh_return
6426 machinery occupies the delay slot. */
6427 gcc_assert (!final_sequence);
6429 if (flag_delayed_branch)
6431 if (!TARGET_FLAT && TARGET_V9)
6432 fputs ("\treturn\t%i7+8\n", asm_out_file);
6433 else
6435 if (!TARGET_FLAT)
6436 fputs ("\trestore\n", asm_out_file);
6438 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6441 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6443 else
6445 if (!TARGET_FLAT)
6446 fputs ("\trestore\n", asm_out_file);
6448 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6449 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6452 else if (sparc_leaf_function_p || TARGET_FLAT)
6454 /* This is a leaf or flat function so we don't have to bother restoring
6455 the register window, which frees us from dealing with the convoluted
6456 semantics of restore/return. We simply output the jump to the
6457 return address and the insn in the delay slot (if any). */
6459 return "jmp\t%%o7+%)%#";
6461 else
6463 /* This is a regular function so we have to restore the register window.
6464 We may have a pending insn for the delay slot, which will be either
6465 combined with the 'restore' instruction or put in the delay slot of
6466 the 'return' instruction. */
6468 if (final_sequence)
6470 rtx_insn *delay;
6471 rtx pat;
6473 delay = NEXT_INSN (insn);
6474 gcc_assert (delay);
6476 pat = PATTERN (delay);
6478 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6480 epilogue_renumber (&pat, 0);
6481 return "return\t%%i7+%)%#";
6483 else
6485 output_asm_insn ("jmp\t%%i7+%)", NULL);
6487 /* We're going to output the insn in the delay slot manually.
6488 Make sure to output its source location first. */
6489 PATTERN (delay) = gen_blockage ();
6490 INSN_CODE (delay) = -1;
6491 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6492 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6494 output_restore (pat);
6497 else
6499 /* The delay slot is empty. */
6500 if (TARGET_V9)
6501 return "return\t%%i7+%)\n\t nop";
6502 else if (flag_delayed_branch)
6503 return "jmp\t%%i7+%)\n\t restore";
6504 else
6505 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6509 return "";
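/* Editor's note: sketch of the sequences produced above for a regular
   (non-leaf, non-flat) function, assuming a V9 target with delayed
   branches enabled; %) expands to the return offset (8, or 12 on 32-bit
   when the caller placed a struct-return unimp after the call):

     empty delay slot:			return	%i7+8
					 nop

     delay insn renumberable:		return	%i7+8
					 <delay insn, registers renumbered>

     delay insn not renumberable:	jmp	%i7+8
					 restore <folding the delay insn>  */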
6512 /* Output a sibling call. */
6514 const char *
6515 output_sibcall (rtx_insn *insn, rtx call_operand)
6517 rtx operands[1];
6519 gcc_assert (flag_delayed_branch);
6521 operands[0] = call_operand;
6523 if (sparc_leaf_function_p || TARGET_FLAT)
6525 /* This is a leaf or flat function so we don't have to bother restoring
6526 the register window. We simply output the jump to the function and
6527 the insn in the delay slot (if any). */
6529 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6531 if (final_sequence)
6532 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6533 operands);
6534 else
6535 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6536 it into a branch if possible. */
6537 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6538 operands);
6540 else
6542 /* This is a regular function so we have to restore the register window.
6543 We may have a pending insn for the delay slot, which will be combined
6544 with the 'restore' instruction. */
6546 output_asm_insn ("call\t%a0, 0", operands);
6548 if (final_sequence)
6550 rtx_insn *delay;
6551 rtx pat;
6553 delay = NEXT_INSN (insn);
6554 gcc_assert (delay);
6556 pat = PATTERN (delay);
6558 /* We're going to output the insn in the delay slot manually.
6559 Make sure to output its source location first. */
6560 PATTERN (delay) = gen_blockage ();
6561 INSN_CODE (delay) = -1;
6562 final_scan_insn (delay, asm_out_file, optimize, 0, NULL);
6563 INSN_LOCATION (delay) = UNKNOWN_LOCATION;
6565 output_restore (pat);
6567 else
6568 output_restore (NULL_RTX);
6571 return "";
6574 /* Functions for handling argument passing.
6576 For 32-bit, the first 6 args are normally in registers and the rest are
6577 pushed. Any arg that starts within the first 6 words is at least
6578 partially passed in a register unless its data type forbids it.
6580 For 64-bit, the argument registers are laid out as an array of 16 elements
6581 and arguments are added sequentially. The first 6 int args and up to the
6582 first 16 fp args (depending on size) are passed in regs.
6584 Slot Stack Integral Float Float in structure Double Long Double
6585 ---- ----- -------- ----- ------------------ ------ -----------
6586 15 [SP+248] %f31 %f30,%f31 %d30
6587 14 [SP+240] %f29 %f28,%f29 %d28 %q28
6588 13 [SP+232] %f27 %f26,%f27 %d26
6589 12 [SP+224] %f25 %f24,%f25 %d24 %q24
6590 11 [SP+216] %f23 %f22,%f23 %d22
6591 10 [SP+208] %f21 %f20,%f21 %d20 %q20
6592 9 [SP+200] %f19 %f18,%f19 %d18
6593 8 [SP+192] %f17 %f16,%f17 %d16 %q16
6594 7 [SP+184] %f15 %f14,%f15 %d14
6595 6 [SP+176] %f13 %f12,%f13 %d12 %q12
6596 5 [SP+168] %o5 %f11 %f10,%f11 %d10
6597 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
6598 3 [SP+152] %o3 %f7 %f6,%f7 %d6
6599 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
6600 1 [SP+136] %o1 %f3 %f2,%f3 %d2
6601 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
6603 Here SP = %sp with -mno-stack-bias, or %sp+stack_bias otherwise.
6605 Integral arguments are always passed as 64-bit quantities appropriately
6606 extended.
6608 Passing of floating point values is handled as follows.
6609 If a prototype is in scope:
6610 If the value is in a named argument (i.e. not a stdarg function or a
6611 value not part of the `...') then the value is passed in the appropriate
6612 fp reg.
6613 If the value is part of the `...' and is passed in one of the first 6
6614 slots then the value is passed in the appropriate int reg.
6615 If the value is part of the `...' and is not passed in one of the first 6
6616 slots then the value is passed in memory.
6617 If a prototype is not in scope:
6618 If the value is one of the first 6 arguments the value is passed in the
6619 appropriate integer reg and the appropriate fp reg.
6620 If the value is not one of the first 6 arguments the value is passed in
6621 the appropriate fp reg and in memory.
6624 Summary of the calling conventions implemented by GCC on the SPARC:
6626 32-bit ABI:
6627 size argument return value
6629 small integer <4 int. reg. int. reg.
6630 word 4 int. reg. int. reg.
6631 double word 8 int. reg. int. reg.
6633 _Complex small integer <8 int. reg. int. reg.
6634 _Complex word 8 int. reg. int. reg.
6635 _Complex double word 16 memory int. reg.
6637 vector integer <=8 int. reg. FP reg.
6638 vector integer >8 memory memory
6640 float 4 int. reg. FP reg.
6641 double 8 int. reg. FP reg.
6642 long double 16 memory memory
6644 _Complex float 8 memory FP reg.
6645 _Complex double 16 memory FP reg.
6646 _Complex long double 32 memory FP reg.
6648 vector float any memory memory
6650 aggregate any memory memory
6654 64-bit ABI:
6655 size argument return value
6657 small integer <8 int. reg. int. reg.
6658 word 8 int. reg. int. reg.
6659 double word 16 int. reg. int. reg.
6661 _Complex small integer <16 int. reg. int. reg.
6662 _Complex word 16 int. reg. int. reg.
6663 _Complex double word 32 memory int. reg.
6665 vector integer <=16 FP reg. FP reg.
6666 vector integer 16<s<=32 memory FP reg.
6667 vector integer >32 memory memory
6669 float 4 FP reg. FP reg.
6670 double 8 FP reg. FP reg.
6671 long double 16 FP reg. FP reg.
6673 _Complex float 8 FP reg. FP reg.
6674 _Complex double 16 FP reg. FP reg.
6675 _Complex long double 32 memory FP reg.
6677 vector float <=16 FP reg. FP reg.
6678 vector float 16<s<=32 memory FP reg.
6679 vector float >32 memory memory
6681 aggregate <=16 reg. reg.
6682 aggregate 16<s<=32 memory reg.
6683 aggregate >32 memory memory
6687 Note #1: complex floating-point types follow the extended SPARC ABIs as
6688 implemented by the Sun compiler.
6690 Note #2: integer vector types follow the scalar floating-point types
6691 conventions to match what is implemented by the Sun VIS SDK.
6693 Note #3: floating-point vector types follow the aggregate types
6694 conventions. */
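/* Editor's note: illustrative example, not part of the original file.
   Under the 64-bit conventions above, a small mixed structure such as

     struct s { int i; float f; };	// 8 bytes, fits one slot

   is passed in registers: the int field via the slot's integer register
   and the float field via the FP register overlaying the same slot (see
   function_arg_record_value below), while a 40-byte structure, being
   larger than 32 bytes, is passed and returned in memory.  */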
6697 /* Maximum number of int regs for args. */
6698 #define SPARC_INT_ARG_MAX 6
6699 /* Maximum number of fp regs for args. */
6700 #define SPARC_FP_ARG_MAX 16
6701 /* Number of words (partially) occupied for a given size in units. */
6702 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
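/* Editor's note: e.g. with UNITS_PER_WORD == 8 (64-bit), CEIL_NWORDS (12)
   is 2, so a 12-byte argument consumes two of the 16 parameter slots.  */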
6704 /* Handle the INIT_CUMULATIVE_ARGS macro.
6705 Initialize a variable CUM of type CUMULATIVE_ARGS
6706 for a call to a function whose data type is FNTYPE.
6707 For a library call, FNTYPE is 0. */
6709 void
6710 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6712 cum->words = 0;
6713 cum->prototype_p = fntype && prototype_p (fntype);
6714 cum->libcall_p = !fntype;
6717 /* Handle promotion of pointer and integer arguments. */
6719 static machine_mode
6720 sparc_promote_function_mode (const_tree type, machine_mode mode,
6721 int *punsignedp, const_tree, int)
6723 if (type && POINTER_TYPE_P (type))
6725 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6726 return Pmode;
6729 /* Integral arguments are passed as full words, as per the ABI. */
6730 if (GET_MODE_CLASS (mode) == MODE_INT
6731 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6732 return word_mode;
6734 return mode;
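/* Editor's note: e.g. a 'short' argument or return value is widened by
   the hook above to word_mode (SImode on 32-bit, DImode on 64-bit), and
   pointer arguments are promoted to Pmode with unsigned extension.  */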
6737 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6739 static bool
6740 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6742 return TARGET_ARCH64;
6745 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6746 Specify whether to pass the argument by reference. */
6748 static bool
6749 sparc_pass_by_reference (cumulative_args_t, const function_arg_info &arg)
6751 tree type = arg.type;
6752 machine_mode mode = arg.mode;
6753 if (TARGET_ARCH32)
6754 /* Original SPARC 32-bit ABI says that structures and unions,
6755 and quad-precision floats are passed by reference.
6756 All other base types are passed in registers.
6758 Extended ABI (as implemented by the Sun compiler) says that all
6759 complex floats are passed by reference. Pass complex integers
6760 in registers up to 8 bytes. More generally, enforce the 2-word
6761 cap for passing arguments in registers.
6763 Vector ABI (as implemented by the Sun VIS SDK) says that integer
6764 vectors are passed like floats of the same size, that is in
6765 registers up to 8 bytes. Pass all vector floats by reference
6766 like structure and unions. */
6767 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6768 || mode == SCmode
6769 /* Catch CDImode, TFmode, DCmode and TCmode. */
6770 || GET_MODE_SIZE (mode) > 8
6771 || (type
6772 && VECTOR_TYPE_P (type)
6773 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6774 else
6775 /* Original SPARC 64-bit ABI says that structures and unions
6776 smaller than 16 bytes are passed in registers, as well as
6777 all other base types.
6779 Extended ABI (as implemented by the Sun compiler) says that
6780 complex floats are passed in registers up to 16 bytes. Pass
6781 all complex integers in registers up to 16 bytes. More generally,
6782 enforce the 2-word cap for passing arguments in registers.
6784 Vector ABI (as implemented by the Sun VIS SDK) says that integer
6785 vectors are passed like floats of the same size, that is in
6786 registers (up to 16 bytes). Pass all vector floats like structure
6787 and unions. */
6788 return ((type
6789 && (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
6790 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6791 /* Catch CTImode and TCmode. */
6792 || GET_MODE_SIZE (mode) > 16);
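/* Editor's note: illustrative consequences of the rules above, assuming
   the standard SPARC type sizes:

     32-bit:  all aggregates, long double (TFmode), every complex float
	      (SCmode included), float vectors, and anything wider than
	      8 bytes are passed by reference; complex integers and
	      integer vectors up to 8 bytes stay in registers.

     64-bit:  aggregates and vectors wider than 16 bytes, plus CTImode
	      and TCmode quantities, are passed by reference; long double
	      (TFmode, 16 bytes) is passed by value in FP registers.  */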
6795 /* Traverse the record TYPE recursively and call FUNC on its fields.
6796 NAMED is true if this is for a named parameter. DATA is passed
6797 to FUNC for each field. OFFSET is the starting position and
6798 PACKED is true if we are inside a packed record. */
6800 template <typename T, void Func (const_tree, int, bool, T*)>
6801 static void
6802 traverse_record_type (const_tree type, bool named, T *data,
6803 int offset = 0, bool packed = false)
6805 /* The ABI obviously doesn't specify how packed structures are passed.
6806 These are passed in integer regs if possible, otherwise memory. */
6807 if (!packed)
6808 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6809 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6811 packed = true;
6812 break;
6815 /* Walk the real fields, but skip those with no size or a zero size.
6816 ??? Fields with variable offset are handled as having zero offset. */
6817 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6818 if (TREE_CODE (field) == FIELD_DECL)
6820 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6821 continue;
6823 int bitpos = offset;
6824 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6825 bitpos += int_bit_position (field);
6827 tree field_type = TREE_TYPE (field);
6828 if (TREE_CODE (field_type) == RECORD_TYPE)
6829 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6830 packed);
6831 else
6833 const bool fp_type
6834 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6835 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6836 data);
6841 /* Handle recursive register classifying for structure layout. */
6843 typedef struct
6845 bool fp_regs; /* true if field eligible to FP registers. */
6846 bool fp_regs_in_first_word; /* true if such field in first word. */
6847 } classify_data_t;
6849 /* A subroutine of function_arg_slotno. Classify the field. */
6851 inline void
6852 classify_registers (const_tree, int bitpos, bool fp, classify_data_t *data)
6854 if (fp)
6856 data->fp_regs = true;
6857 if (bitpos < BITS_PER_WORD)
6858 data->fp_regs_in_first_word = true;
6862 /* Compute the slot number to pass an argument in.
6863 Return the slot number or -1 if passing on the stack.
6865 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6866 the preceding args and about the function being called.
6867 MODE is the argument's machine mode.
6868 TYPE is the data type of the argument (as a tree).
6869 This is null for libcalls where that information may
6870 not be available.
6871 NAMED is nonzero if this argument is a named parameter
6872 (otherwise it is an extra parameter matching an ellipsis).
6873 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6874 *PREGNO records the register number to use if scalar type.
6875 *PPADDING records the amount of padding needed in words. */
6877 static int
6878 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6879 const_tree type, bool named, bool incoming,
6880 int *pregno, int *ppadding)
6882 const int regbase
6883 = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
6884 int slotno = cum->words, regno;
6885 enum mode_class mclass = GET_MODE_CLASS (mode);
6887 /* Silence warnings in the callers. */
6888 *pregno = -1;
6889 *ppadding = -1;
6891 if (type && TREE_ADDRESSABLE (type))
6892 return -1;
6894 /* In 64-bit mode, objects requiring 16-byte alignment get it. */
6895 if (TARGET_ARCH64
6896 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6897 && (slotno & 1) != 0)
6899 slotno++;
6900 *ppadding = 1;
6902 else
6903 *ppadding = 0;
6905 /* Vector types deserve special treatment because they are polymorphic wrt
6906 their mode, depending upon whether VIS instructions are enabled. */
6907 if (type && VECTOR_TYPE_P (type))
6909 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (type)))
6911 /* The SPARC port defines no floating-point vector modes. */
6912 gcc_assert (mode == BLKmode);
6914 else
6916 /* Integer vector types should either have a vector
6917 mode or an integral mode, because we are guaranteed
6918 by pass_by_reference that their size is not greater
6919 than 16 bytes and TImode is 16-byte wide. */
6920 gcc_assert (mode != BLKmode);
6922 /* Integer vectors are handled like floats as per
6923 the Sun VIS SDK. */
6924 mclass = MODE_FLOAT;
6928 switch (mclass)
6930 case MODE_FLOAT:
6931 case MODE_COMPLEX_FLOAT:
6932 case MODE_VECTOR_INT:
6933 if (TARGET_ARCH64 && TARGET_FPU && named)
6935 /* If all arg slots are filled, then must pass on stack. */
6936 if (slotno >= SPARC_FP_ARG_MAX)
6937 return -1;
6939 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6940 /* Arguments filling only one single FP register are
6941 right-justified in the outer double FP register. */
6942 if (GET_MODE_SIZE (mode) <= 4)
6943 regno++;
6944 break;
6946 /* fallthrough */
6948 case MODE_INT:
6949 case MODE_COMPLEX_INT:
6950 /* If all arg slots are filled, then must pass on stack. */
6951 if (slotno >= SPARC_INT_ARG_MAX)
6952 return -1;
6954 regno = regbase + slotno;
6955 break;
6957 case MODE_RANDOM:
6958 /* MODE is VOIDmode when generating the actual call. */
6959 if (mode == VOIDmode)
6960 return -1;
6962 if (TARGET_ARCH64 && TARGET_FPU && named
6963 && type
6964 && (TREE_CODE (type) == RECORD_TYPE || VECTOR_TYPE_P (type)))
6966 /* If all arg slots are filled, then must pass on stack. */
6967 if (slotno >= SPARC_FP_ARG_MAX)
6968 return -1;
6970 if (TREE_CODE (type) == RECORD_TYPE)
6972 classify_data_t data = { false, false };
6973 traverse_record_type<classify_data_t, classify_registers>
6974 (type, named, &data);
6976 if (data.fp_regs)
6978 /* If all FP slots are filled except for the last one and
6979 there is no FP field in the first word, then must pass
6980 on stack. */
6981 if (slotno >= SPARC_FP_ARG_MAX - 1
6982 && !data.fp_regs_in_first_word)
6983 return -1;
6985 else
6987 /* If all int slots are filled, then must pass on stack. */
6988 if (slotno >= SPARC_INT_ARG_MAX)
6989 return -1;
6992 /* PREGNO isn't set since both int and FP regs can be used. */
6993 return slotno;
6996 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6998 else
7000 /* If all arg slots are filled, then must pass on stack. */
7001 if (slotno >= SPARC_INT_ARG_MAX)
7002 return -1;
7004 regno = regbase + slotno;
7006 break;
7008 default :
7009 gcc_unreachable ();
7012 *pregno = regno;
7013 return slotno;
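/* Editor's note: worked example for the slot computation above, assuming
   TARGET_ARCH64 and TARGET_FPU.  For f (int a, double b, float c), 'a'
   takes slot 0 (%o0), 'b' slot 1 (%d2, i.e. SPARC_FP_ARG_FIRST + 2), and
   'c' slot 2, where its 4-byte size bumps REGNO to the odd half %f5 per
   the right-justification rule; the integer registers %o1 and %o2 carry
   nothing, but their slots are consumed.  */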
7016 /* Handle recursive register counting/assigning for structure layout. */
7018 typedef struct
7020 int slotno; /* slot number of the argument. */
7021 int regbase; /* regno of the base register. */
7022 int intoffset; /* offset of the first pending integer field. */
7023 int nregs; /* number of words passed in registers. */
7024 bool stack; /* true if part of the argument is on the stack. */
7025 rtx ret; /* return expression being built. */
7026 } assign_data_t;
7028 /* A subroutine of function_arg_record_value. Compute the number of integer
7029 registers to be assigned between PARMS->intoffset and BITPOS. Return
7030 true if at least one integer register is assigned or false otherwise. */
7032 static bool
7033 compute_int_layout (int bitpos, assign_data_t *data, int *pnregs)
7035 if (data->intoffset < 0)
7036 return false;
7038 const int intoffset = data->intoffset;
7039 data->intoffset = -1;
7041 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7042 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
7043 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
7044 int nregs = (endbit - startbit) / BITS_PER_WORD;
7046 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
7048 nregs = SPARC_INT_ARG_MAX - this_slotno;
7050 /* We need to pass this field (partly) on the stack. */
7051 data->stack = true;
7054 if (nregs <= 0)
7055 return false;
7057 *pnregs = nregs;
7058 return true;
7061 /* A subroutine of function_arg_record_value. Compute the number and the mode
7062 of the FP registers to be assigned for FIELD. Return true if at least one
7063 FP register is assigned or false otherwise. */
7065 static bool
7066 compute_fp_layout (const_tree field, int bitpos, assign_data_t *data,
7067 int *pnregs, machine_mode *pmode)
7069 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7070 machine_mode mode = DECL_MODE (field);
7071 int nregs, nslots;
7073 /* Slots are counted as words while regs are counted as having the size of
7074 the (inner) mode. */
7075 if (VECTOR_TYPE_P (TREE_TYPE (field)) && mode == BLKmode)
7077 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7078 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
7080 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
7082 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
7083 nregs = 2;
7085 else
7086 nregs = 1;
7088 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
7090 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
7092 nslots = SPARC_FP_ARG_MAX - this_slotno;
7093 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
7095 /* We need to pass this field (partly) on the stack. */
7096 data->stack = true;
7098 if (nregs <= 0)
7099 return false;
7102 *pnregs = nregs;
7103 *pmode = mode;
7104 return true;
7107 /* A subroutine of function_arg_record_value. Count the number of registers
7108 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
7110 inline void
7111 count_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
7113 if (fp)
7115 int nregs;
7116 machine_mode mode;
7118 if (compute_int_layout (bitpos, data, &nregs))
7119 data->nregs += nregs;
7121 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
7122 data->nregs += nregs;
7124 else
7126 if (data->intoffset < 0)
7127 data->intoffset = bitpos;
7131 /* A subroutine of function_arg_record_value. Assign the bits of the
7132 structure between PARMS->intoffset and BITPOS to integer registers. */
7134 static void
7135 assign_int_registers (int bitpos, assign_data_t *data)
7137 int intoffset = data->intoffset;
7138 machine_mode mode;
7139 int nregs;
7141 if (!compute_int_layout (bitpos, data, &nregs))
7142 return;
7144 /* If this is the trailing part of a word, only load that much into
7145 the register. Otherwise load the whole register. Note that in
7146 the latter case we may pick up unwanted bits. It's not a problem
7147 at the moment, but we may wish to revisit this. */
7148 if (intoffset % BITS_PER_WORD != 0)
7149 mode = smallest_int_mode_for_size (BITS_PER_WORD
7150 - intoffset % BITS_PER_WORD);
7151 else
7152 mode = word_mode;
7154 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
7155 unsigned int regno = data->regbase + this_slotno;
7156 intoffset /= BITS_PER_UNIT;
7158 do
7160 rtx reg = gen_rtx_REG (mode, regno);
7161 XVECEXP (data->ret, 0, data->stack + data->nregs)
7162 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
7163 data->nregs += 1;
7164 mode = word_mode;
7165 regno += 1;
7166 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
7168 while (--nregs > 0);
7171 /* A subroutine of function_arg_record_value. Assign FIELD at position
7172 BITPOS to FP registers. */
7174 static void
7175 assign_fp_registers (const_tree field, int bitpos, assign_data_t *data)
7177 int nregs;
7178 machine_mode mode;
7180 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
7181 return;
7183 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
7184 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
7185 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
7186 regno++;
7187 int pos = bitpos / BITS_PER_UNIT;
7189 do
7191 rtx reg = gen_rtx_REG (mode, regno);
7192 XVECEXP (data->ret, 0, data->stack + data->nregs)
7193 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
7194 data->nregs += 1;
7195 regno += GET_MODE_SIZE (mode) / 4;
7196 pos += GET_MODE_SIZE (mode);
7198 while (--nregs > 0);
7201 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
7202 the structure between PARMS->intoffset and BITPOS to registers. */
7204 inline void
7205 assign_registers (const_tree field, int bitpos, bool fp, assign_data_t *data)
7207 if (fp)
7209 assign_int_registers (bitpos, data);
7211 assign_fp_registers (field, bitpos, data);
7213 else
7215 if (data->intoffset < 0)
7216 data->intoffset = bitpos;
7220 /* Used by function_arg and function_value to implement the complex
7221 conventions of the 64-bit ABI for passing and returning structures.
7222 Return an expression valid as a return value for the FUNCTION_ARG
7223 and TARGET_FUNCTION_VALUE.
7225 TYPE is the data type of the argument (as a tree).
7226 This is null for libcalls where that information may
7227 not be available.
7228 MODE is the argument's machine mode.
7229 SLOTNO is the index number of the argument's slot in the parameter array.
7230 NAMED is true if this argument is a named parameter
7231 (otherwise it is an extra parameter matching an ellipsis).
7232 REGBASE is the regno of the base register for the parameter array. */
7234 static rtx
7235 function_arg_record_value (const_tree type, machine_mode mode,
7236 int slotno, bool named, int regbase)
7238 const int size = int_size_in_bytes (type);
7239 assign_data_t data;
7240 int nregs;
7242 data.slotno = slotno;
7243 data.regbase = regbase;
7245 /* Count how many registers we need. */
7246 data.nregs = 0;
7247 data.intoffset = 0;
7248 data.stack = false;
7249 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
7251 /* Take into account pending integer fields. */
7252 if (compute_int_layout (size * BITS_PER_UNIT, &data, &nregs))
7253 data.nregs += nregs;
7255 /* Allocate the vector and handle some annoying special cases. */
7256 nregs = data.nregs;
7258 if (nregs == 0)
7260 /* ??? Empty structure has no value? Duh? */
7261 if (size <= 0)
7263 /* Though there's nothing really to store, return a word register
7264 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
7265 leads to breakage due to the fact that there are zero bytes to
7266 load. */
7267 return gen_rtx_REG (mode, regbase);
7270 /* ??? C++ has structures with no fields, and yet a size. Give up
7271 for now and pass everything back in integer registers. */
7272 nregs = CEIL_NWORDS (size);
7273 if (nregs + slotno > SPARC_INT_ARG_MAX)
7274 nregs = SPARC_INT_ARG_MAX - slotno;
7277 gcc_assert (nregs > 0);
7279 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
7281 /* If at least one field must be passed on the stack, generate
7282 (parallel [(expr_list (nil) ...) ...]) so that all fields will
7283 also be passed on the stack. We can't do much better because the
7284 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
7285 of structures for which the fields passed exclusively in registers
7286 are not at the beginning of the structure. */
7287 if (data.stack)
7288 XVECEXP (data.ret, 0, 0)
7289 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7291 /* Assign the registers. */
7292 data.nregs = 0;
7293 data.intoffset = 0;
7294 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
7296 /* Assign pending integer fields. */
7297 assign_int_registers (size * BITS_PER_UNIT, &data);
7299 gcc_assert (data.nregs == nregs);
7301 return data.ret;
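/* Editor's note: illustrative sketch, not part of the original file.
   For struct s { int i; float f; } passed in slot 0, the routine above
   yields a PARALLEL of roughly this shape:

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
		(expr_list (reg:SF %f1) (const_int 4))])

   The first word travels whole in %o0 (picking up the bits under the
   float, as noted in assign_int_registers), while the float also rides
   in %f1, the odd FP register selected because its bitpos is 32.  */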
7304 /* Used by function_arg and function_value to implement the conventions
7305 of the 64-bit ABI for passing and returning unions.
7306 Return an expression valid as a return value for the FUNCTION_ARG
7307 and TARGET_FUNCTION_VALUE.
7309 SIZE is the size in bytes of the union.
7310 MODE is the argument's machine mode.
7311 SLOTNO is the index number of the argument's slot in the parameter array.
7312 REGNO is the hard register the union will be passed in. */
7314 static rtx
7315 function_arg_union_value (int size, machine_mode mode, int slotno, int regno)
7317 unsigned int nwords;
7319 /* See comment in function_arg_record_value for empty structures. */
7320 if (size <= 0)
7321 return gen_rtx_REG (mode, regno);
7323 if (slotno == SPARC_INT_ARG_MAX - 1)
7324 nwords = 1;
7325 else
7326 nwords = CEIL_NWORDS (size);
7328 rtx regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
7330 /* Unions are passed left-justified. */
7331 for (unsigned int i = 0; i < nwords; i++)
7332 XVECEXP (regs, 0, i)
7333 = gen_rtx_EXPR_LIST (VOIDmode,
7334 gen_rtx_REG (word_mode, regno + i),
7335 GEN_INT (UNITS_PER_WORD * i));
7337 return regs;
7340 /* Used by function_arg and function_value to implement the conventions
7341 of the 64-bit ABI for passing and returning BLKmode vectors.
7342 Return an expression valid as a return value for the FUNCTION_ARG
7343 and TARGET_FUNCTION_VALUE.
7345 SIZE is the size in bytes of the vector.
7346 SLOTNO is the index number of the argument's slot in the parameter array.
7347 NAMED is true if this argument is a named parameter
7348 (otherwise it is an extra parameter matching an ellipsis).
7349 REGNO is the hard register the vector will be passed in. */
7351 static rtx
7352 function_arg_vector_value (int size, int slotno, bool named, int regno)
7354 const int mult = (named ? 2 : 1);
7355 unsigned int nwords;
7357 if (slotno == (named ? SPARC_FP_ARG_MAX : SPARC_INT_ARG_MAX) - 1)
7358 nwords = 1;
7359 else
7360 nwords = CEIL_NWORDS (size);
7362 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nwords));
7364 if (size < UNITS_PER_WORD)
7365 XVECEXP (regs, 0, 0)
7366 = gen_rtx_EXPR_LIST (VOIDmode,
7367 gen_rtx_REG (SImode, regno),
7368 const0_rtx);
7369 else
7370 for (unsigned int i = 0; i < nwords; i++)
7371 XVECEXP (regs, 0, i)
7372 = gen_rtx_EXPR_LIST (VOIDmode,
7373 gen_rtx_REG (word_mode, regno + i * mult),
7374 GEN_INT (i * UNITS_PER_WORD));
7376 return regs;
7379 /* Determine where to put an argument to a function.
7380 Value is zero to push the argument on the stack,
7381 or a hard register in which to store the argument.
7383 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7384 the preceding args and about the function being called.
7385 ARG is a description of the argument.
7386 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7387 TARGET_FUNCTION_INCOMING_ARG. */
7389 static rtx
7390 sparc_function_arg_1 (cumulative_args_t cum_v, const function_arg_info &arg,
7391 bool incoming)
7393 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7394 const int regbase
7395 = incoming ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
7396 int slotno, regno, padding;
7397 tree type = arg.type;
7398 machine_mode mode = arg.mode;
7399 enum mode_class mclass = GET_MODE_CLASS (mode);
7400 bool named = arg.named;
7402 slotno
7403 = function_arg_slotno (cum, mode, type, named, incoming, &regno, &padding);
7404 if (slotno == -1)
7405 return 0;
7407 /* Integer vectors are handled like floats as per the Sun VIS SDK. */
7408 if (type && VECTOR_INTEGER_TYPE_P (type))
7409 mclass = MODE_FLOAT;
7411 if (TARGET_ARCH32)
7412 return gen_rtx_REG (mode, regno);
7414 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7415 and are promoted to registers if possible. */
7416 if (type && TREE_CODE (type) == RECORD_TYPE)
7418 const int size = int_size_in_bytes (type);
7419 gcc_assert (size <= 16);
7421 return function_arg_record_value (type, mode, slotno, named, regbase);
7424 /* Unions up to 16 bytes in size are passed in integer registers. */
7425 else if (type && TREE_CODE (type) == UNION_TYPE)
7427 const int size = int_size_in_bytes (type);
7428 gcc_assert (size <= 16);
7430 return function_arg_union_value (size, mode, slotno, regno);
7433 /* Floating-point vectors up to 16 bytes are passed in registers. */
7434 else if (type && VECTOR_TYPE_P (type) && mode == BLKmode)
7436 const int size = int_size_in_bytes (type);
7437 gcc_assert (size <= 16);
7439 return function_arg_vector_value (size, slotno, named, regno);
7442 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
7443 but also have the slot allocated for them.
7444 If no prototype is in scope fp values in register slots get passed
7445 in two places, either fp regs and int regs or fp regs and memory. */
7446 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7447 && SPARC_FP_REG_P (regno))
7449 rtx reg = gen_rtx_REG (mode, regno);
7450 if (cum->prototype_p || cum->libcall_p)
7451 return reg;
7452 else
7454 rtx v0, v1;
7456 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7458 int intreg;
7460 /* On incoming, we don't need to know that the value
7461 is passed in %f0 and %i0, and it confuses other parts
7462 causing needless spillage in even the simplest cases. */
7463 if (incoming)
7464 return reg;
7466 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7467 + (regno - SPARC_FP_ARG_FIRST) / 2);
7469 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7470 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7471 const0_rtx);
7472 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7474 else
7476 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7477 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7478 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7483 /* All other aggregate types are passed in an integer register in a mode
7484 corresponding to the size of the type. */
7485 else if (type && AGGREGATE_TYPE_P (type))
7487 const int size = int_size_in_bytes (type);
7488 gcc_assert (size <= 16);
7490 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7493 return gen_rtx_REG (mode, regno);
7496 /* Handle the TARGET_FUNCTION_ARG target hook. */
7498 static rtx
7499 sparc_function_arg (cumulative_args_t cum, const function_arg_info &arg)
7501 return sparc_function_arg_1 (cum, arg, false);
7504 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7506 static rtx
7507 sparc_function_incoming_arg (cumulative_args_t cum,
7508 const function_arg_info &arg)
7510 return sparc_function_arg_1 (cum, arg, true);
7513 /* For sparc64, objects requiring 16-byte alignment are passed that way. */
7515 static unsigned int
7516 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7518 return ((TARGET_ARCH64
7519 && (GET_MODE_ALIGNMENT (mode) == 128
7520 || (type && TYPE_ALIGN (type) == 128)))
7521 ? 128
7522 : PARM_BOUNDARY);
7525 /* For an arg passed partly in registers and partly in memory,
7526 this is the number of bytes of registers used.
7527 For args passed entirely in registers or entirely in memory, zero.
7529 Any arg that starts in the first 6 regs but won't entirely fit in them
7530 needs partial registers on v8. On v9, structures with integer
7531 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7532 values that begin in the last fp reg [where "last fp reg" varies with the
7533 mode] will be split between that reg and memory. */
7535 static int
7536 sparc_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
7538 int slotno, regno, padding;
7540 /* We pass false for incoming here; it doesn't matter. */
7541 slotno = function_arg_slotno (get_cumulative_args (cum), arg.mode, arg.type,
7542 arg.named, false, &regno, &padding);
7544 if (slotno == -1)
7545 return 0;
7547 if (TARGET_ARCH32)
7549 /* We are guaranteed by pass_by_reference that the size of the
7550 argument is not greater than 8 bytes, so we only need to return
7551 one word if the argument is partially passed in registers. */
7552 const int size = GET_MODE_SIZE (arg.mode);
7554 if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
7555 return UNITS_PER_WORD;
7557 else
7559 /* We are guaranteed by pass_by_reference that the size of the
7560 argument is not greater than 16 bytes, so we only need to return
7561 one word if the argument is partially passed in registers. */
7562 if (arg.aggregate_type_p ())
7564 const int size = int_size_in_bytes (arg.type);
7566 if (size > UNITS_PER_WORD
7567 && (slotno == SPARC_INT_ARG_MAX - 1
7568 || slotno == SPARC_FP_ARG_MAX - 1))
7569 return UNITS_PER_WORD;
7571 else if (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_INT
7572 || ((GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
7573 || (arg.type && VECTOR_TYPE_P (arg.type)))
7574 && !(TARGET_FPU && arg.named)))
7576 const int size = (arg.type && VECTOR_FLOAT_TYPE_P (arg.type))
7577 ? int_size_in_bytes (arg.type)
7578 : GET_MODE_SIZE (arg.mode);
7580 if (size > UNITS_PER_WORD && slotno == SPARC_INT_ARG_MAX - 1)
7581 return UNITS_PER_WORD;
7583 else if (GET_MODE_CLASS (arg.mode) == MODE_COMPLEX_FLOAT
7584 || (arg.type && VECTOR_TYPE_P (arg.type)))
7586 const int size = (arg.type && VECTOR_FLOAT_TYPE_P (arg.type))
7587 ? int_size_in_bytes (arg.type)
7588 : GET_MODE_SIZE (arg.mode);
7590 if (size > UNITS_PER_WORD && slotno == SPARC_FP_ARG_MAX - 1)
7591 return UNITS_PER_WORD;
7595 return 0;
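/* Editor's note: e.g. on 64-bit, a 16-byte aggregate whose first word
   lands in the last integer slot (slot 5, %o5) is split by the logic
   above: 8 bytes go in %o5 and 8 bytes on the stack, so the hook reports
   UNITS_PER_WORD partial bytes.  */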
7598 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7599 Update the data in CUM to advance over argument ARG. */
7601 static void
7602 sparc_function_arg_advance (cumulative_args_t cum_v,
7603 const function_arg_info &arg)
7605 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7606 tree type = arg.type;
7607 machine_mode mode = arg.mode;
7608 int regno, padding;
7610 /* We pass false for incoming here; it doesn't matter. */
7611 function_arg_slotno (cum, mode, type, arg.named, false, &regno, &padding);
7613 /* If argument requires leading padding, add it. */
7614 cum->words += padding;
7616 if (TARGET_ARCH32)
7617 cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
7618 else
7620 /* For types that can have BLKmode, get the size from the type. */
7621 if (type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7623 const int size = int_size_in_bytes (type);
7625 /* See comment in function_arg_record_value for empty structures. */
7626 if (size <= 0)
7627 cum->words++;
7628 else
7629 cum->words += CEIL_NWORDS (size);
7631 else
7632 cum->words += CEIL_NWORDS (GET_MODE_SIZE (mode));
7636 /* Implement TARGET_FUNCTION_ARG_PADDING. For the 64-bit ABI structs
7637 are always stored left shifted in their argument slot. */
7639 static pad_direction
7640 sparc_function_arg_padding (machine_mode mode, const_tree type)
7642 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7643 return PAD_UPWARD;
7645 /* Fall back to the default. */
7646 return default_function_arg_padding (mode, type);
7649 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7650 Specify whether to return the return value in memory. */
7652 static bool
7653 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7655 if (TARGET_ARCH32)
7656 /* Original SPARC 32-bit ABI says that structures and unions, and
7657 quad-precision floats are returned in memory. But note that the
7658 first part is implemented through -fpcc-struct-return being the
7659 default, so here we only implement -freg-struct-return instead.
7660 All other base types are returned in registers.
7662 Extended ABI (as implemented by the Sun compiler) says that
7663 all complex floats are returned in registers (8 FP registers
7664 at most for '_Complex long double'). Return all complex integers
7665 in registers (4 at most for '_Complex long long').
7667 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7668 integers are returned like floats of the same size, that is in
7669 registers up to 8 bytes and in memory otherwise. Return all
7670 vector floats in memory like structure and unions; note that
7671 they always have BLKmode like the latter. */
7672 return (TYPE_MODE (type) == BLKmode
7673 || TYPE_MODE (type) == TFmode
7674 || (TREE_CODE (type) == VECTOR_TYPE
7675 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7676 else
7677 /* Original SPARC 64-bit ABI says that structures and unions
7678 smaller than 32 bytes are returned in registers, as well as
7679 all other base types.
7681 Extended ABI (as implemented by the Sun compiler) says that all
7682 complex floats are returned in registers (8 FP registers at most
7683 for '_Complex long double'). Return all complex integers in
7684 registers (4 at most for '_Complex TItype').
7686 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7687 integers are returned like floats of the same size, that is in
7688 registers. Return all vector floats like structure and unions;
7689 note that they always have BLKmode like the latter. */
7690 return (TYPE_MODE (type) == BLKmode
7691 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
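/* Worked examples of the ARCH32 rules above (an illustration only; the
   vector typedefs use the GNU vector_size extension):  */
#if 0
long double q;                                        /* TFmode -> memory */
_Complex double c;                                    /* -> registers */
typedef int v2si __attribute__ ((vector_size (8)));   /* 8 bytes -> registers */
typedef int v4si __attribute__ ((vector_size (16)));  /* 16 bytes -> memory */
#endif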
7694 /* Handle the TARGET_STRUCT_VALUE target hook.
7695 Return where to find the structure return value address. */
7697 static rtx
7698 sparc_struct_value_rtx (tree fndecl, int incoming)
7700 if (TARGET_ARCH64)
7701 return NULL_RTX;
7702 else
7704 rtx mem;
7706 if (incoming)
7707 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7708 STRUCT_VALUE_OFFSET));
7709 else
7710 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7711 STRUCT_VALUE_OFFSET));
7713 /* Only follow the SPARC ABI for fixed-size structure returns.
7714 Variable size structure returns are handled per the normal
7715 procedures in GCC. This is enabled by -mstd-struct-return. */
7716 if (incoming == 2
7717 && sparc_std_struct_return
7718 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7719 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7721 /* We must check and adjust the return address, as it is optional
7722 as to whether the return object is really provided. */
7723 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7724 rtx scratch = gen_reg_rtx (SImode);
7725 rtx_code_label *endlab = gen_label_rtx ();
7727 /* Calculate the return object size. */
7728 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7729 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7730 /* Construct a temporary return value. */
7731 rtx temp_val
7732 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7734 /* Implement SPARC 32-bit psABI callee return struct checking:
7736 Fetch the instruction where we will return to and see if
7737 it's an unimp instruction (the most significant 10 bits
7738 will be zero). */
7739 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7740 plus_constant (Pmode,
7741 ret_reg, 8)));
7742 /* Assume the size is valid and pre-adjust. */
7743 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7744 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7745 0, endlab);
7746 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7747 /* Write the address of the memory pointed to by temp_val into
7748 the memory pointed to by mem. */
7749 emit_move_insn (mem, XEXP (temp_val, 0));
7750 emit_label (endlab);
7753 return mem;
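/* Sketch of the -mstd-struct-return sequence emitted above (register
   names illustrative; the SPARC 32-bit psABI makes the unimp word after
   a struct-returning call optional, hence the check):

	ld	[%ret + 8], %scratch	! word following the call
	add	%ret, 4, %ret		! assume a valid unimp is there
	cmp	%scratch, size		! unimp encodes the object size
	be	endlab
	sub	%ret, 4, %ret		! no unimp: undo the adjustment
	st	&temp_val, [mem]	! and provide a dummy return object
   endlab:								*/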
7757 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7758 For v9, function return values are subject to the same rules as arguments,
7759 except that up to 32 bytes may be returned in registers. */
7761 static rtx
7762 sparc_function_value_1 (const_tree type, machine_mode mode, bool outgoing)
7764 /* Beware that the two values are swapped here wrt function_arg. */
7765 const int regbase
7766 = outgoing ? SPARC_INCOMING_INT_ARG_FIRST : SPARC_OUTGOING_INT_ARG_FIRST;
7767 enum mode_class mclass = GET_MODE_CLASS (mode);
7768 int regno;
7770 /* Integer vectors are handled like floats as per the Sun VIS SDK.
7771 Note that integer vectors larger than 16 bytes have BLKmode so
7772 they need to be handled like floating-point vectors below. */
7773 if (type && VECTOR_INTEGER_TYPE_P (type) && mode != BLKmode)
7774 mclass = MODE_FLOAT;
7776 if (TARGET_ARCH64 && type)
7778 /* Structures up to 32 bytes in size are returned in registers. */
7779 if (TREE_CODE (type) == RECORD_TYPE)
7781 const int size = int_size_in_bytes (type);
7782 gcc_assert (size <= 32);
7784 return function_arg_record_value (type, mode, 0, true, regbase);
7787 /* Unions up to 32 bytes in size are returned in integer registers. */
7788 else if (TREE_CODE (type) == UNION_TYPE)
7790 const int size = int_size_in_bytes (type);
7791 gcc_assert (size <= 32);
7793 return function_arg_union_value (size, mode, 0, regbase);
7796 /* Vectors up to 32 bytes are returned in FP registers. */
7797 else if (VECTOR_TYPE_P (type) && mode == BLKmode)
7799 const int size = int_size_in_bytes (type);
7800 gcc_assert (size <= 32);
7802 return function_arg_vector_value (size, 0, true, SPARC_FP_ARG_FIRST);
7805 /* Objects that require it are returned in FP registers. */
7806 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7807 ;
7809 /* All other aggregate types are returned in an integer register in a
7810 mode corresponding to the size of the type. */
7811 else if (AGGREGATE_TYPE_P (type))
7813 /* All other aggregate types are passed in an integer register
7814 in a mode corresponding to the size of the type. */
7815 const int size = int_size_in_bytes (type);
7816 gcc_assert (size <= 32);
7818 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).else_blk ();
7820 /* ??? We probably should have made the same ABI change in
7821 3.4.0 as the one we made for unions. The latter was
7822 required by the SCD though, while the former is not
7823 specified, so we favored compatibility and efficiency.
7825 Now we're stuck for aggregates larger than 16 bytes,
7826 because OImode vanished in the meantime. Let's not
7827 try to be unduly clever, and simply follow the ABI
7828 for unions in that case. */
7829 if (mode == BLKmode)
7830 return function_arg_union_value (size, mode, 0, regbase);
7831 else
7832 mclass = MODE_INT;
7835 /* We should only have pointer and integer types at this point. This
7836 must match sparc_promote_function_mode. */
7837 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7838 mode = word_mode;
7841 /* We should only have pointer and integer types at this point, except with
7842 -freg-struct-return. This must match sparc_promote_function_mode. */
7843 else if (TARGET_ARCH32
7844 && !(type && AGGREGATE_TYPE_P (type))
7845 && mclass == MODE_INT
7846 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7847 mode = word_mode;
7849 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7850 regno = SPARC_FP_ARG_FIRST;
7851 else
7852 regno = regbase;
7854 return gen_rtx_REG (mode, regno);
7857 /* Handle TARGET_FUNCTION_VALUE.
7858 On the SPARC, the value is found in the first "output" register, but the
7859 called function leaves it in the first "input" register. */
7861 static rtx
7862 sparc_function_value (const_tree valtype,
7863 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7864 bool outgoing)
7866 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7869 /* Handle TARGET_LIBCALL_VALUE. */
7871 static rtx
7872 sparc_libcall_value (machine_mode mode,
7873 const_rtx fun ATTRIBUTE_UNUSED)
7875 return sparc_function_value_1 (NULL_TREE, mode, false);
7878 /* Handle FUNCTION_VALUE_REGNO_P.
7879 On the SPARC, the first "output" reg is used for integer values, and the
7880 first floating point register is used for floating point values. */
7882 static bool
7883 sparc_function_value_regno_p (const unsigned int regno)
7885 return (regno == 8 || (TARGET_FPU && regno == 32));
7888 /* Do what is necessary for `va_start'. We look at the current function
7889 to determine if stdarg or varargs is used and return the address of
7890 the first unnamed parameter. */
7892 static rtx
7893 sparc_builtin_saveregs (void)
7895 int first_reg = crtl->args.info.words;
7896 rtx address;
7897 int regno;
7899 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7900 emit_move_insn (gen_rtx_MEM (word_mode,
7901 gen_rtx_PLUS (Pmode,
7902 frame_pointer_rtx,
7903 GEN_INT (FIRST_PARM_OFFSET (0)
7904 + (UNITS_PER_WORD
7905 * regno)))),
7906 gen_rtx_REG (word_mode,
7907 SPARC_INCOMING_INT_ARG_FIRST + regno));
7909 address = gen_rtx_PLUS (Pmode,
7910 frame_pointer_rtx,
7911 GEN_INT (FIRST_PARM_OFFSET (0)
7912 + UNITS_PER_WORD * first_reg));
7914 return address;
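/* Sketch of the resulting layout for a function whose named arguments
   consume two words (illustrative ARCH32 offsets; FIRST_PARM_OFFSET is
   68 there, i.e. the 64-byte register save area plus the struct-return
   slot):

	%fp + 68	%o0  (named)
	%fp + 72	%o1  (named)
	%fp + 76	%o2  <- address returned, first anonymous argument
	...
	%fp + 88	%o5						*/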
7917 /* Implement `va_start' for stdarg. */
7919 static void
7920 sparc_va_start (tree valist, rtx nextarg)
7922 nextarg = expand_builtin_saveregs ();
7923 std_expand_builtin_va_start (valist, nextarg);
7926 /* Implement `va_arg' for stdarg. */
7928 static tree
7929 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7930 gimple_seq *post_p)
7932 HOST_WIDE_INT size, rsize, align;
7933 tree addr, incr;
7934 bool indirect;
7935 tree ptrtype = build_pointer_type (type);
7937 if (pass_va_arg_by_reference (type))
7939 indirect = true;
7940 size = rsize = UNITS_PER_WORD;
7941 align = 0;
7943 else
7945 indirect = false;
7946 size = int_size_in_bytes (type);
7947 rsize = ROUND_UP (size, UNITS_PER_WORD);
7948 align = 0;
7950 if (TARGET_ARCH64)
7952 /* For SPARC64, objects requiring 16-byte alignment get it. */
7953 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7954 align = 2 * UNITS_PER_WORD;
7956 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7957 are left-justified in their slots. */
7958 if (AGGREGATE_TYPE_P (type))
7960 if (size == 0)
7961 size = rsize = UNITS_PER_WORD;
7962 else
7963 size = rsize;
7968 incr = valist;
7969 if (align)
7971 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7972 incr = fold_convert (sizetype, incr);
7973 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7974 size_int (-align));
7975 incr = fold_convert (ptr_type_node, incr);
7978 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7979 addr = incr;
7981 if (BYTES_BIG_ENDIAN && size < rsize)
7982 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7984 if (indirect)
7986 addr = fold_convert (build_pointer_type (ptrtype), addr);
7987 addr = build_va_arg_indirect_ref (addr);
7990 /* If the address isn't aligned properly for the type, we need a temporary.
7991 FIXME: This is inefficient; usually we can do this in registers. */
7992 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7994 tree tmp = create_tmp_var (type, "va_arg_tmp");
7995 tree dest_addr = build_fold_addr_expr (tmp);
7996 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7997 3, dest_addr, addr, size_int (rsize));
7998 TREE_ADDRESSABLE (tmp) = 1;
7999 gimplify_and_add (copy, pre_p);
8000 addr = dest_addr;
8003 else
8004 addr = fold_convert (ptrtype, addr);
8006 incr = fold_build_pointer_plus_hwi (incr, rsize);
8007 gimplify_assign (valist, incr, post_p);
8009 return build_va_arg_indirect_ref (addr);
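/* Illustration (not part of GCC): the pointer rounding performed above
   when a 16-byte-aligned type is fetched on ARCH64.  ALIGN must be a
   power of two; the helper name is an assumption for the example.  */
#if 0
static char *
round_up_ptr (char *p, unsigned long align)
{
  return (char *) (((unsigned long) p + align - 1) & -align);
}
#endif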
8012 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
8013 Specify whether the vector mode is supported by the hardware. */
8015 static bool
8016 sparc_vector_mode_supported_p (machine_mode mode)
8018 return TARGET_VIS && VECTOR_MODE_P (mode);
8021 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
8023 static machine_mode
8024 sparc_preferred_simd_mode (scalar_mode mode)
8026 if (TARGET_VIS)
8027 switch (mode)
8029 case E_SImode:
8030 return V2SImode;
8031 case E_HImode:
8032 return V4HImode;
8033 case E_QImode:
8034 return V8QImode;
8036 default:;
8039 return word_mode;
8042 /* Implement TARGET_CAN_FOLLOW_JUMP. */
8044 static bool
8045 sparc_can_follow_jump (const rtx_insn *follower, const rtx_insn *followee)
8047 /* Do not fold unconditional jumps that have been created for crossing
8048 partition boundaries. */
8049 if (CROSSING_JUMP_P (followee) && !CROSSING_JUMP_P (follower))
8050 return false;
8052 return true;
8055 /* Return the string to output an unconditional branch to LABEL, which is
8056 the operand number of the label.
8058 DEST is the destination insn (i.e. the label), INSN is the source. */
8060 const char *
8061 output_ubranch (rtx dest, rtx_insn *insn)
8063 static char string[64];
8064 bool v9_form = false;
8065 int delta;
8066 char *p;
8068 /* Even if we are trying to use cbcond for this, evaluate
8069 whether we can use V9 branches as our backup plan. */
8070 delta = 5000000;
8071 if (!CROSSING_JUMP_P (insn) && INSN_ADDRESSES_SET_P ())
8072 delta = (INSN_ADDRESSES (INSN_UID (dest))
8073 - INSN_ADDRESSES (INSN_UID (insn)));
8075 /* Leave some instructions for "slop". */
8076 if (TARGET_V9 && delta >= -260000 && delta < 260000)
8077 v9_form = true;
8079 if (TARGET_CBCOND)
8081 bool emit_nop = emit_cbcond_nop (insn);
8082 bool far = false;
8083 const char *rval;
8085 if (delta < -500 || delta > 500)
8086 far = true;
8088 if (far)
8090 if (v9_form)
8091 rval = "ba,a,pt\t%%xcc, %l0";
8092 else
8093 rval = "b,a\t%l0";
8095 else
8097 if (emit_nop)
8098 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
8099 else
8100 rval = "cwbe\t%%g0, %%g0, %l0";
8102 return rval;
8105 if (v9_form)
8106 strcpy (string, "ba%*,pt\t%%xcc, ");
8107 else
8108 strcpy (string, "b%*\t");
8110 p = strchr (string, '\0');
8111 *p++ = '%';
8112 *p++ = 'l';
8113 *p++ = '0';
8114 *p++ = '%';
8115 *p++ = '(';
8116 *p = '\0';
8118 return string;
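/* Examples of the strings returned above (label operand %l0):

	cwbe	%g0, %g0, %l0		! cbcond in range
	ba,a,pt	%xcc, %l0		! cbcond too far, V9 fallback
	b,a	%l0			! cbcond too far, V8 fallback
	ba%*,pt	%xcc, %l0%(		! no cbcond, V9 form
	b%*	%l0%(			! no cbcond, V8 form

   where %* and %( are the annul/nop print escapes handled in
   sparc_print_operand.							*/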
8121 /* Return the string to output a conditional branch to LABEL, which is
8122 the operand number of the label. OP is the conditional expression.
8123 XEXP (OP, 0) is assumed to be a condition code register (integer or
8124 floating point) and its mode specifies what kind of comparison we made.
8126 DEST is the destination insn (i.e. the label), INSN is the source.
8128 REVERSED is nonzero if we should reverse the sense of the comparison.
8130 ANNUL is nonzero if we should generate an annulling branch. */
8132 const char *
8133 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
8134 rtx_insn *insn)
8136 static char string[64];
8137 enum rtx_code code = GET_CODE (op);
8138 rtx cc_reg = XEXP (op, 0);
8139 machine_mode mode = GET_MODE (cc_reg);
8140 const char *labelno, *branch;
8141 int spaces = 8, far;
8142 char *p;
8144 /* v9 branches are limited to +-1MB. If it is too far away,
8145 change
8147 bne,pt %xcc, .LC30
8149 to
8151 be,pn %xcc, .+12
8152 nop
8153 ba .LC30
8155 and
8157 fbne,a,pn %fcc2, .LC29
8159 to
8161 fbe,pt %fcc2, .+16
8162 nop
8163 ba .LC29 */
8165 far = TARGET_V9 && (get_attr_length (insn) >= 3);
8166 if (reversed ^ far)
8168 /* Reversal of FP compares takes care -- an ordered compare
8169 becomes an unordered compare and vice versa. */
8170 if (mode == CCFPmode || mode == CCFPEmode)
8171 code = reverse_condition_maybe_unordered (code);
8172 else
8173 code = reverse_condition (code);
8176 /* Start by writing the branch condition. */
8177 if (mode == CCFPmode || mode == CCFPEmode)
8179 switch (code)
8181 case NE:
8182 branch = "fbne";
8183 break;
8184 case EQ:
8185 branch = "fbe";
8186 break;
8187 case GE:
8188 branch = "fbge";
8189 break;
8190 case GT:
8191 branch = "fbg";
8192 break;
8193 case LE:
8194 branch = "fble";
8195 break;
8196 case LT:
8197 branch = "fbl";
8198 break;
8199 case UNORDERED:
8200 branch = "fbu";
8201 break;
8202 case ORDERED:
8203 branch = "fbo";
8204 break;
8205 case UNGT:
8206 branch = "fbug";
8207 break;
8208 case UNLT:
8209 branch = "fbul";
8210 break;
8211 case UNEQ:
8212 branch = "fbue";
8213 break;
8214 case UNGE:
8215 branch = "fbuge";
8216 break;
8217 case UNLE:
8218 branch = "fbule";
8219 break;
8220 case LTGT:
8221 branch = "fblg";
8222 break;
8223 default:
8224 gcc_unreachable ();
8227 /* ??? !v9: FP branches cannot be preceded by another floating point
8228 insn. Because there is currently no concept of pre-delay slots,
8229 we can fix this only by always emitting a nop before a floating
8230 point branch. */
8232 string[0] = '\0';
8233 if (! TARGET_V9)
8234 strcpy (string, "nop\n\t");
8235 strcat (string, branch);
8237 else
8239 switch (code)
8241 case NE:
8242 if (mode == CCVmode || mode == CCXVmode)
8243 branch = "bvs";
8244 else
8245 branch = "bne";
8246 break;
8247 case EQ:
8248 if (mode == CCVmode || mode == CCXVmode)
8249 branch = "bvc";
8250 else
8251 branch = "be";
8252 break;
8253 case GE:
8254 if (mode == CCNZmode || mode == CCXNZmode)
8255 branch = "bpos";
8256 else
8257 branch = "bge";
8258 break;
8259 case GT:
8260 branch = "bg";
8261 break;
8262 case LE:
8263 branch = "ble";
8264 break;
8265 case LT:
8266 if (mode == CCNZmode || mode == CCXNZmode)
8267 branch = "bneg";
8268 else
8269 branch = "bl";
8270 break;
8271 case GEU:
8272 branch = "bgeu";
8273 break;
8274 case GTU:
8275 branch = "bgu";
8276 break;
8277 case LEU:
8278 branch = "bleu";
8279 break;
8280 case LTU:
8281 branch = "blu";
8282 break;
8283 default:
8284 gcc_unreachable ();
8286 strcpy (string, branch);
8288 spaces -= strlen (branch);
8289 p = strchr (string, '\0');
8291 /* Now add the annulling, the label, and a possible noop. */
8292 if (annul && ! far)
8294 strcpy (p, ",a");
8295 p += 2;
8296 spaces -= 2;
8299 if (TARGET_V9)
8301 rtx note;
8302 int v8 = 0;
8304 if (! far && insn && INSN_ADDRESSES_SET_P ())
8306 int delta = (INSN_ADDRESSES (INSN_UID (dest))
8307 - INSN_ADDRESSES (INSN_UID (insn)));
8308 /* Leave some instructions for "slop". */
8309 if (delta < -260000 || delta >= 260000)
8310 v8 = 1;
8313 switch (mode)
8315 case E_CCmode:
8316 case E_CCNZmode:
8317 case E_CCCmode:
8318 case E_CCVmode:
8319 labelno = "%%icc, ";
8320 if (v8)
8321 labelno = "";
8322 break;
8323 case E_CCXmode:
8324 case E_CCXNZmode:
8325 case E_CCXCmode:
8326 case E_CCXVmode:
8327 labelno = "%%xcc, ";
8328 gcc_assert (!v8);
8329 break;
8330 case E_CCFPmode:
8331 case E_CCFPEmode:
8333 static char v9_fcc_labelno[] = "%%fccX, ";
8334 /* Set the char indicating the number of the fcc reg to use. */
8335 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
8336 labelno = v9_fcc_labelno;
8337 if (v8)
8339 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8340 labelno = "";
8343 break;
8344 default:
8345 gcc_unreachable ();
8348 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8350 strcpy (p,
8351 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8352 >= profile_probability::even ()) ^ far)
8353 ? ",pt" : ",pn");
8354 p += 3;
8355 spaces -= 3;
8358 else
8359 labelno = "";
8361 if (spaces > 0)
8362 *p++ = '\t';
8363 else
8364 *p++ = ' ';
8365 strcpy (p, labelno);
8366 p = strchr (p, '\0');
8367 if (far)
8369 strcpy (p, ".+12\n\t nop\n\tb\t");
8370 /* Skip the next insn if requested or
8371 if we know that it will be a nop. */
8372 if (annul || ! final_sequence)
8373 p[3] = '6';
8374 p += 14;
8376 *p++ = '%';
8377 *p++ = 'l';
8378 *p++ = label + '0';
8379 *p++ = '%';
8380 *p++ = '#';
8381 *p = '\0';
8383 return string;
8386 /* Emit a library call comparison between floating point X and Y.
8387 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8388 Return the new operator to be used in the comparison sequence.
8390 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8391 values as arguments instead of the TFmode registers themselves,
8392 that's why we cannot call emit_float_lib_cmp. */
8394 rtx
8395 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8397 const char *qpfunc;
8398 rtx slot0, slot1, result, tem, tem2, libfunc;
8399 machine_mode mode;
8400 enum rtx_code new_comparison;
8402 switch (comparison)
8404 case EQ:
8405 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8406 break;
8408 case NE:
8409 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8410 break;
8412 case GT:
8413 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8414 break;
8416 case GE:
8417 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8418 break;
8420 case LT:
8421 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8422 break;
8424 case LE:
8425 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8426 break;
8428 case ORDERED:
8429 case UNORDERED:
8430 case UNGT:
8431 case UNLT:
8432 case UNEQ:
8433 case UNGE:
8434 case UNLE:
8435 case LTGT:
8436 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8437 break;
8439 default:
8440 gcc_unreachable ();
8443 if (TARGET_ARCH64)
8445 if (MEM_P (x))
8447 tree expr = MEM_EXPR (x);
8448 if (expr)
8449 mark_addressable (expr);
8450 slot0 = x;
8452 else
8454 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8455 emit_move_insn (slot0, x);
8458 if (MEM_P (y))
8460 tree expr = MEM_EXPR (y);
8461 if (expr)
8462 mark_addressable (expr);
8463 slot1 = y;
8465 else
8467 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8468 emit_move_insn (slot1, y);
8471 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8472 emit_library_call (libfunc, LCT_NORMAL,
8473 DImode,
8474 XEXP (slot0, 0), Pmode,
8475 XEXP (slot1, 0), Pmode);
8476 mode = DImode;
8478 else
8480 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8481 emit_library_call (libfunc, LCT_NORMAL,
8482 SImode,
8483 x, TFmode, y, TFmode);
8484 mode = SImode;
8488 /* Immediately move the result of the libcall into a pseudo
8489 register so reload doesn't clobber the value if it needs
8490 the return register for a spill reg. */
8491 result = gen_reg_rtx (mode);
8492 emit_move_insn (result, hard_libcall_value (mode, libfunc));
8494 switch (comparison)
8496 default:
8497 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8498 case ORDERED:
8499 case UNORDERED:
8500 new_comparison = (comparison == UNORDERED ? EQ : NE);
8501 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8502 case UNGT:
8503 case UNGE:
8504 new_comparison = (comparison == UNGT ? GT : NE);
8505 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8506 case UNLE:
8507 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8508 case UNLT:
8509 tem = gen_reg_rtx (mode);
8510 if (TARGET_ARCH32)
8511 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8512 else
8513 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8514 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8515 case UNEQ:
8516 case LTGT:
8517 tem = gen_reg_rtx (mode);
8518 if (TARGET_ARCH32)
8519 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8520 else
8521 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8522 tem2 = gen_reg_rtx (mode);
8523 if (TARGET_ARCH32)
8524 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8525 else
8526 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8527 new_comparison = (comparison == UNEQ ? EQ : NE);
8528 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8531 gcc_unreachable ();
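/* Illustration (not part of GCC): how the result of _Qp_cmp/_Q_cmp is
   decoded above, assuming the usual encoding 0 = equal, 1 = less,
   2 = greater, 3 = unordered.  Helper names are for the example only.  */
#if 0
static int is_unlt (int r) { return (r & 1) != 0; }        /* less or unord */
static int is_unle (int r) { return r != 2; }              /* not greater */
static int is_uneq (int r) { return ((r + 1) & 2) == 0; }  /* equal or unord */
static int is_ltgt (int r) { return ((r + 1) & 2) != 0; }  /* less or greater */
#endif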
8534 /* Generate an unsigned DImode to FP conversion. This is the same code
8535 optabs would emit if we didn't have TFmode patterns. */
8537 void
8538 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8540 rtx i0, i1, f0, in, out;
8542 out = operands[0];
8543 in = force_reg (DImode, operands[1]);
8544 rtx_code_label *neglab = gen_label_rtx ();
8545 rtx_code_label *donelab = gen_label_rtx ();
8546 i0 = gen_reg_rtx (DImode);
8547 i1 = gen_reg_rtx (DImode);
8548 f0 = gen_reg_rtx (mode);
8550 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8552 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8553 emit_jump_insn (gen_jump (donelab));
8554 emit_barrier ();
8556 emit_label (neglab);
8558 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8559 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8560 emit_insn (gen_iordi3 (i0, i0, i1));
8561 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8562 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8564 emit_label (donelab);
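/* Illustration (not part of GCC): the same algorithm in C.  Inputs with
   the high bit set are halved with the low bit folded back in, so that
   rounding is preserved, then converted and doubled.  */
#if 0
static double
u64_to_fp (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;
  unsigned long long half = (x >> 1) | (x & 1);
  double f = (double) (long long) half;
  return f + f;
}
#endif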
8567 /* Generate an FP to unsigned DImode conversion. This is the same code
8568 optabs would emit if we didn't have TFmode patterns. */
8570 void
8571 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8573 rtx i0, i1, f0, in, out, limit;
8575 out = operands[0];
8576 in = force_reg (mode, operands[1]);
8577 rtx_code_label *neglab = gen_label_rtx ();
8578 rtx_code_label *donelab = gen_label_rtx ();
8579 i0 = gen_reg_rtx (DImode);
8580 i1 = gen_reg_rtx (DImode);
8581 limit = gen_reg_rtx (mode);
8582 f0 = gen_reg_rtx (mode);
8584 emit_move_insn (limit,
8585 const_double_from_real_value (
8586 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8587 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8589 emit_insn (gen_rtx_SET (out,
8590 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8591 emit_jump_insn (gen_jump (donelab));
8592 emit_barrier ();
8594 emit_label (neglab);
8596 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8597 emit_insn (gen_rtx_SET (i0,
8598 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8599 emit_insn (gen_movdi (i1, const1_rtx));
8600 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8601 emit_insn (gen_xordi3 (out, i0, i1));
8603 emit_label (donelab);
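/* Illustration (not part of GCC): the same algorithm in C.  Values of
   at least 2^63 are biased down into signed range before the conversion
   and the sign bit is restored with XOR.  */
#if 0
static unsigned long long
fp_to_u64 (double x)
{
  const double two63 = 9223372036854775808.0;
  if (x < two63)
    return (unsigned long long) (long long) x;
  return (unsigned long long) (long long) (x - two63) ^ (1ULL << 63);
}
#endif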
8606 /* Return the string to output a compare and branch instruction to DEST.
8607 DEST is the destination insn (i.e. the label), INSN is the source,
8608 and OP is the conditional expression. */
8610 const char *
8611 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8613 machine_mode mode = GET_MODE (XEXP (op, 0));
8614 enum rtx_code code = GET_CODE (op);
8615 const char *cond_str, *tmpl;
8616 int far, emit_nop, len;
8617 static char string[64];
8618 char size_char;
8620 /* Compare and Branch is limited to +-2KB. If it is too far away,
8621 change
8623 cxbne X, Y, .LC30
8625 to
8627 cxbe X, Y, .+16
8628 nop
8629 ba,pt xcc, .LC30
8630 nop */
8632 len = get_attr_length (insn);
8634 far = len == 4;
8635 emit_nop = len == 2;
8637 if (far)
8638 code = reverse_condition (code);
8640 size_char = ((mode == SImode) ? 'w' : 'x');
8642 switch (code)
8644 case NE:
8645 cond_str = "ne";
8646 break;
8648 case EQ:
8649 cond_str = "e";
8650 break;
8652 case GE:
8653 cond_str = "ge";
8654 break;
8656 case GT:
8657 cond_str = "g";
8658 break;
8660 case LE:
8661 cond_str = "le";
8662 break;
8664 case LT:
8665 cond_str = "l";
8666 break;
8668 case GEU:
8669 cond_str = "cc";
8670 break;
8672 case GTU:
8673 cond_str = "gu";
8674 break;
8676 case LEU:
8677 cond_str = "leu";
8678 break;
8680 case LTU:
8681 cond_str = "cs";
8682 break;
8684 default:
8685 gcc_unreachable ();
8688 if (far)
8690 int veryfar = 1, delta;
8692 if (INSN_ADDRESSES_SET_P ())
8694 delta = (INSN_ADDRESSES (INSN_UID (dest))
8695 - INSN_ADDRESSES (INSN_UID (insn)));
8696 /* Leave some instructions for "slop". */
8697 if (delta >= -260000 && delta < 260000)
8698 veryfar = 0;
8701 if (veryfar)
8702 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8703 else
8704 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8706 else
8708 if (emit_nop)
8709 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8710 else
8711 tmpl = "c%cb%s\t%%1, %%2, %%3";
8714 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8716 return string;
8719 /* Return the string to output a conditional branch to LABEL, testing
8720 register REG. LABEL is the operand number of the label; REG is the
8721 operand number of the reg. OP is the conditional expression. The mode
8722 of REG says what kind of comparison we made.
8724 DEST is the destination insn (i.e. the label), INSN is the source.
8726 REVERSED is nonzero if we should reverse the sense of the comparison.
8728 ANNUL is nonzero if we should generate an annulling branch. */
8730 const char *
8731 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8732 int annul, rtx_insn *insn)
8734 static char string[64];
8735 enum rtx_code code = GET_CODE (op);
8736 machine_mode mode = GET_MODE (XEXP (op, 0));
8737 rtx note;
8738 int far;
8739 char *p;
8741 /* Branch-on-register instructions are limited to +-128KB. If the
8742 target is too far away, change
8744 brnz,pt %g1, .LC30
8746 to
8748 brz,pn %g1, .+12
8749 nop
8750 ba,pt %xcc, .LC30
8752 and
8754 brgez,a,pn %o1, .LC29
8756 to
8758 brlz,pt %o1, .+16
8759 nop
8760 ba,pt %xcc, .LC29 */
8762 far = get_attr_length (insn) >= 3;
8764 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8765 if (reversed ^ far)
8766 code = reverse_condition (code);
8768 /* Only 64-bit versions of these instructions exist. */
8769 gcc_assert (mode == DImode);
8771 /* Start by writing the branch condition. */
8773 switch (code)
8775 case NE:
8776 strcpy (string, "brnz");
8777 break;
8779 case EQ:
8780 strcpy (string, "brz");
8781 break;
8783 case GE:
8784 strcpy (string, "brgez");
8785 break;
8787 case LT:
8788 strcpy (string, "brlz");
8789 break;
8791 case LE:
8792 strcpy (string, "brlez");
8793 break;
8795 case GT:
8796 strcpy (string, "brgz");
8797 break;
8799 default:
8800 gcc_unreachable ();
8803 p = strchr (string, '\0');
8805 /* Now add the annulling, reg, label, and nop. */
8806 if (annul && ! far)
8808 strcpy (p, ",a");
8809 p += 2;
8812 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8814 strcpy (p,
8815 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8816 >= profile_probability::even ()) ^ far)
8817 ? ",pt" : ",pn");
8818 p += 3;
8821 *p = p < string + 8 ? '\t' : ' ';
8822 p++;
8823 *p++ = '%';
8824 *p++ = '0' + reg;
8825 *p++ = ',';
8826 *p++ = ' ';
8827 if (far)
8829 int veryfar = 1, delta;
8831 if (INSN_ADDRESSES_SET_P ())
8833 delta = (INSN_ADDRESSES (INSN_UID (dest))
8834 - INSN_ADDRESSES (INSN_UID (insn)));
8835 /* Leave some instructions for "slop". */
8836 if (delta >= -260000 && delta < 260000)
8837 veryfar = 0;
8840 strcpy (p, ".+12\n\t nop\n\t");
8841 /* Skip the next insn if requested or
8842 if we know that it will be a nop. */
8843 if (annul || ! final_sequence)
8844 p[3] = '6';
8845 p += 12;
8846 if (veryfar)
8848 strcpy (p, "b\t");
8849 p += 2;
8851 else
8853 strcpy (p, "ba,pt\t%%xcc, ");
8854 p += 13;
8857 *p++ = '%';
8858 *p++ = 'l';
8859 *p++ = '0' + label;
8860 *p++ = '%';
8861 *p++ = '#';
8862 *p = '\0';
8864 return string;
8867 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8868 Such instructions cannot be used in the delay slot of the return insn on V9.
8869 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
8872 static int
8873 epilogue_renumber (rtx *where, int test)
8875 const char *fmt;
8876 int i;
8877 enum rtx_code code;
8879 if (*where == 0)
8880 return 0;
8882 code = GET_CODE (*where);
8884 switch (code)
8886 case REG:
8887 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8888 return 1;
8889 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8891 if (ORIGINAL_REGNO (*where))
8893 rtx n = gen_raw_REG (GET_MODE (*where),
8894 OUTGOING_REGNO (REGNO (*where)));
8895 ORIGINAL_REGNO (n) = ORIGINAL_REGNO (*where);
8896 *where = n;
8898 else
8899 *where = gen_rtx_REG (GET_MODE (*where),
8900 OUTGOING_REGNO (REGNO (*where)));
8902 return 0;
8904 case SCRATCH:
8905 case PC:
8906 case CONST_INT:
8907 case CONST_WIDE_INT:
8908 case CONST_DOUBLE:
8909 return 0;
8911 /* Do not replace the frame pointer with the stack pointer because
8912 it can cause the delayed instruction to load below the stack.
8913 This occurs when instructions like:
8915 (set (reg/i:SI 24 %i0)
8916 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8917 (const_int -20 [0xffffffec])) 0))
8919 are in the return delayed slot. */
8920 case PLUS:
8921 if (GET_CODE (XEXP (*where, 0)) == REG
8922 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8923 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8924 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8925 return 1;
8926 break;
8928 case MEM:
8929 if (SPARC_STACK_BIAS
8930 && GET_CODE (XEXP (*where, 0)) == REG
8931 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8932 return 1;
8933 break;
8935 default:
8936 break;
8939 fmt = GET_RTX_FORMAT (code);
8941 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8943 if (fmt[i] == 'E')
8945 int j;
8946 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8947 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8948 return 1;
8950 else if (fmt[i] == 'e'
8951 && epilogue_renumber (&(XEXP (*where, i)), test))
8952 return 1;
8954 return 0;
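/* Example: with TEST equal to 0, (set (reg:SI %i0) (const_int 1)) is
   rewritten to (set (reg:SI %o0) (const_int 1)), while any insn
   mentioning %o0-%o7 or %l0-%l7, or certain frame-pointer addresses
   below the stack bias, makes the function return 1 and vetoes the
   delay slot.								*/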
8957 /* Leaf functions and non-leaf functions have different needs. */
8959 static const int reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8961 static const int reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8963 static const int *const reg_alloc_orders[] =
8965 reg_leaf_alloc_order,
8966 reg_nonleaf_alloc_order
8969 void
8970 sparc_order_regs_for_local_alloc (void)
8972 static int last_order_nonleaf = 1;
8974 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8976 last_order_nonleaf = !last_order_nonleaf;
8977 memcpy ((char *) reg_alloc_order,
8978 (const char *) reg_alloc_orders[last_order_nonleaf],
8979 FIRST_PSEUDO_REGISTER * sizeof (int));
8983 int
8984 sparc_leaf_reg_remap (int regno)
8986 gcc_checking_assert (regno >= 0);
8988 /* Do not remap in flat mode. */
8989 if (TARGET_FLAT)
8990 return regno;
8992 /* Do not remap global, stack pointer or floating-point registers. */
8993 if (regno < 8 || regno == STACK_POINTER_REGNUM || regno > SPARC_LAST_INT_REG)
8994 return regno;
8996 /* Neither out nor local nor frame pointer registers must appear. */
8997 if ((regno >= 8 && regno <= 23) || regno == HARD_FRAME_POINTER_REGNUM)
8998 return -1;
9000 /* Remap in to out registers. */
9001 return regno - 16;
9004 /* Return 1 if REG and MEM are legitimate enough to allow the various
9005 MEM<-->REG splits to be run. */
9007 int
9008 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
9010 /* Punt if we are here by mistake. */
9011 gcc_assert (reload_completed);
9013 /* We must have an offsettable memory reference. */
9014 if (!offsettable_memref_p (mem))
9015 return 0;
9017 /* If we have legitimate args for ldd/std, we do not want
9018 the split to happen. */
9019 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
9020 return 0;
9022 /* Success. */
9023 return 1;
9026 /* Split a REG <-- MEM move into a pair of moves in MODE. */
9028 void
9029 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
9031 rtx high_part = gen_highpart (mode, dest);
9032 rtx low_part = gen_lowpart (mode, dest);
9033 rtx word0 = adjust_address (src, mode, 0);
9034 rtx word1 = adjust_address (src, mode, 4);
9036 if (reg_overlap_mentioned_p (high_part, word1))
9038 emit_move_insn_1 (low_part, word1);
9039 emit_move_insn_1 (high_part, word0);
9041 else
9043 emit_move_insn_1 (high_part, word0);
9044 emit_move_insn_1 (low_part, word1);
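/* Example: for (set (reg:DI %o0) (mem:DI [%o0])), the high word is
   %o0 itself, so loading it first would clobber the base register used
   by the second address.  The overlap check above therefore emits

	ld	[%o0+4], %o1
	ld	[%o0], %o0

   i.e. the low part first.						*/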
9048 /* Split a MEM <-- REG move into a pair of moves in MODE. */
9050 void
9051 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
9053 rtx word0 = adjust_address (dest, mode, 0);
9054 rtx word1 = adjust_address (dest, mode, 4);
9055 rtx high_part = gen_highpart (mode, src);
9056 rtx low_part = gen_lowpart (mode, src);
9058 emit_move_insn_1 (word0, high_part);
9059 emit_move_insn_1 (word1, low_part);
9062 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
9064 int
9065 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
9067 /* Punt if we are here by mistake. */
9068 gcc_assert (reload_completed);
9070 if (GET_CODE (reg1) == SUBREG)
9071 reg1 = SUBREG_REG (reg1);
9072 if (GET_CODE (reg1) != REG)
9073 return 0;
9074 const int regno1 = REGNO (reg1);
9076 if (GET_CODE (reg2) == SUBREG)
9077 reg2 = SUBREG_REG (reg2);
9078 if (GET_CODE (reg2) != REG)
9079 return 0;
9080 const int regno2 = REGNO (reg2);
9082 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
9083 return 1;
9085 if (TARGET_VIS3)
9087 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
9088 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
9089 return 1;
9092 return 0;
9095 /* Split a REG <--> REG move into a pair of moves in MODE. */
9097 void
9098 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
9100 rtx dest1 = gen_highpart (mode, dest);
9101 rtx dest2 = gen_lowpart (mode, dest);
9102 rtx src1 = gen_highpart (mode, src);
9103 rtx src2 = gen_lowpart (mode, src);
9105 /* Now emit using the real source and destination we found, swapping
9106 the order if we detect overlap. */
9107 if (reg_overlap_mentioned_p (dest1, src2))
9109 emit_move_insn_1 (dest2, src2);
9110 emit_move_insn_1 (dest1, src1);
9112 else
9114 emit_move_insn_1 (dest1, src1);
9115 emit_move_insn_1 (dest2, src2);
9119 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
9120 This makes them candidates for using ldd and std insns.
9122 Note reg1 and reg2 *must* be hard registers. */
9124 int
9125 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
9127 /* We might have been passed a SUBREG. */
9128 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
9129 return 0;
9131 if (REGNO (reg1) % 2 != 0)
9132 return 0;
9134 /* Integer ldd is deprecated in SPARC V9. */
9135 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
9136 return 0;
9138 return (REGNO (reg1) == REGNO (reg2) - 1);
9141 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
9142 an ldd or std insn.
9144 This can only happen when addr1 and addr2, the addresses in mem1
9145 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
9146 addr1 must also be aligned on a 64-bit boundary.
9148 Also iff dependent_reg_rtx is not null it should not be used to
9149 compute the address for mem1, i.e. we cannot optimize a sequence
9150 like:
9151 ld [%o0], %o0
9152 ld [%o0 + 4], %o1
9153 to
9154 ldd [%o0], %o0
9155 nor:
9156 ld [%g3 + 4], %g3
9157 ld [%g3], %g2
9158 to
9159 ldd [%g3], %g2
9161 But, note that the transformation from:
9162 ld [%g2 + 4], %g3
9163 ld [%g2], %g2
9164 to
9165 ldd [%g2], %g2
9166 is perfectly fine. Thus, the peephole2 patterns always pass us
9167 the destination register of the first load, never the second one.
9169 For stores we don't have a similar problem, so dependent_reg_rtx is
9170 NULL_RTX. */
9172 int
9173 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
9175 rtx addr1, addr2;
9176 unsigned int reg1;
9177 HOST_WIDE_INT offset1;
9179 /* The mems cannot be volatile. */
9180 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
9181 return 0;
9183 /* MEM1 should be aligned on a 64-bit boundary. */
9184 if (MEM_ALIGN (mem1) < 64)
9185 return 0;
9187 addr1 = XEXP (mem1, 0);
9188 addr2 = XEXP (mem2, 0);
9190 /* Extract a register number and offset (if used) from the first addr. */
9191 if (GET_CODE (addr1) == PLUS)
9193 /* If not a REG, return zero. */
9194 if (GET_CODE (XEXP (addr1, 0)) != REG)
9195 return 0;
9196 else
9198 reg1 = REGNO (XEXP (addr1, 0));
9199 /* The offset must be constant! */
9200 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
9201 return 0;
9202 offset1 = INTVAL (XEXP (addr1, 1));
9205 else if (GET_CODE (addr1) != REG)
9206 return 0;
9207 else
9209 reg1 = REGNO (addr1);
9210 /* This was a simple (mem (reg)) expression. Offset is 0. */
9211 offset1 = 0;
9214 /* Make sure the second address is a (mem (plus (reg) (const_int). */
9215 if (GET_CODE (addr2) != PLUS)
9216 return 0;
9218 if (GET_CODE (XEXP (addr2, 0)) != REG
9219 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
9220 return 0;
9222 if (reg1 != REGNO (XEXP (addr2, 0)))
9223 return 0;
9225 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
9226 return 0;
9228 /* The first offset must be evenly divisible by 8 to ensure the
9229 address is 64-bit aligned. */
9230 if (offset1 % 8 != 0)
9231 return 0;
9233 /* The offset for the second addr must be 4 more than the first addr. */
9234 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
9235 return 0;
9237 /* All the tests passed. addr1 and addr2 are valid for ldd and std
9238 instructions. */
9239 return 1;
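/* Examples (assuming MEM1 is known to be 8-byte aligned):

	[%o0]     / [%o0 + 4]	-> accepted
	[%o0 + 8] / [%o0 + 12]	-> accepted
	[%o0 + 4] / [%o0 + 8]	-> rejected, offset1 not a multiple of 8
	[%o0]     / [%o1 + 4]	-> rejected, different base registers  */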
9242 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
9244 rtx
9245 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
9247 rtx x = widen_memory_access (mem1, mode, 0);
9248 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
9249 return x;
9252 /* Return 1 if reg is a pseudo, or is the first register in
9253 a hard register pair. This makes it suitable for use in
9254 ldd and std insns. */
9256 int
9257 register_ok_for_ldd (rtx reg)
9259 /* We might have been passed a SUBREG. */
9260 if (!REG_P (reg))
9261 return 0;
9263 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
9264 return (REGNO (reg) % 2 == 0);
9266 return 1;
9269 /* Return 1 if OP, a MEM, has an address which is known to be
9270 aligned to an 8-byte boundary. */
9272 int
9273 memory_ok_for_ldd (rtx op)
9275 if (!mem_min_alignment (op, 8))
9276 return 0;
9278 /* We need to perform the job of a memory constraint. */
9279 if ((reload_in_progress || reload_completed)
9280 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
9281 return 0;
9283 if (lra_in_progress && !memory_address_p (Pmode, XEXP (op, 0)))
9284 return 0;
9286 return 1;
9289 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
9291 static bool
9292 sparc_print_operand_punct_valid_p (unsigned char code)
9294 if (code == '#'
9295 || code == '*'
9296 || code == '('
9297 || code == ')'
9298 || code == '_'
9299 || code == '&')
9300 return true;
9302 return false;
9305 /* Implement TARGET_PRINT_OPERAND.
9306 Print operand X (an rtx) in assembler syntax to file FILE.
9307 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
9308 For `%' followed by punctuation, CODE is the punctuation and X is null. */
9310 static void
9311 sparc_print_operand (FILE *file, rtx x, int code)
9313 const char *s;
9315 switch (code)
9317 case '#':
9318 /* Output an insn in a delay slot. */
9319 if (final_sequence)
9320 sparc_indent_opcode = 1;
9321 else
9322 fputs ("\n\t nop", file);
9323 return;
9324 case '*':
9325 /* Output an annul flag if there's nothing for the delay slot and we
9326 are optimizing. This is always used with '(' below.
9327 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
9328 this is a dbx bug. So, we only do this when optimizing.
9329 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
9330 Always emit a nop in case the next instruction is a branch. */
9331 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
9332 fputs (",a", file);
9333 return;
9334 case '(':
9335 /* Output a 'nop' if there's nothing for the delay slot and we are
9336 not optimizing. This is always used with '*' above. */
9337 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
9338 fputs ("\n\t nop", file);
9339 else if (final_sequence)
9340 sparc_indent_opcode = 1;
9341 return;
9342 case ')':
9343 /* Output the right displacement from the saved PC on function return.
9344 The caller may have placed an "unimp" insn immediately after the call
9345 so we have to account for it. This insn is used in the 32-bit ABI
9346 when calling a function that returns a non-zero-sized structure. The
9347 64-bit ABI doesn't have it. Be careful to have this test be the same
9348 as that for the call. The exception is when sparc_std_struct_return
9349 is enabled, the psABI is followed exactly and the adjustment is made
9350 by the code in sparc_struct_value_rtx. The call emitted is the same
9351 when sparc_std_struct_return is enabled. */
9352 if (!TARGET_ARCH64
9353 && cfun->returns_struct
9354 && !sparc_std_struct_return
9355 && DECL_SIZE (DECL_RESULT (current_function_decl))
9356 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
9357 == INTEGER_CST
9358 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
9359 fputs ("12", file);
9360 else
9361 fputc ('8', file);
9362 return;
9363 case '_':
9364 /* Output the Embedded Medium/Anywhere code model base register. */
9365 fputs (EMBMEDANY_BASE_REG, file);
9366 return;
9367 case '&':
9368 /* Print some local dynamic TLS name. */
9369 if (const char *name = get_some_local_dynamic_name ())
9370 assemble_name (file, name);
9371 else
9372 output_operand_lossage ("'%%&' used without any "
9373 "local dynamic TLS references");
9374 return;
9376 case 'Y':
9377 /* Adjust the operand to take into account a RESTORE operation. */
9378 if (GET_CODE (x) == CONST_INT)
9379 break;
9380 else if (GET_CODE (x) != REG)
9381 output_operand_lossage ("invalid %%Y operand");
9382 else if (REGNO (x) < 8)
9383 fputs (reg_names[REGNO (x)], file);
9384 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9385 fputs (reg_names[REGNO (x)-16], file);
9386 else
9387 output_operand_lossage ("invalid %%Y operand");
9388 return;
9389 case 'L':
9390 /* Print out the low order register name of a register pair. */
9391 if (WORDS_BIG_ENDIAN)
9392 fputs (reg_names[REGNO (x)+1], file);
9393 else
9394 fputs (reg_names[REGNO (x)], file);
9395 return;
9396 case 'H':
9397 /* Print out the high order register name of a register pair. */
9398 if (WORDS_BIG_ENDIAN)
9399 fputs (reg_names[REGNO (x)], file);
9400 else
9401 fputs (reg_names[REGNO (x)+1], file);
9402 return;
9403 case 'R':
9404 /* Print out the second register name of a register pair or quad.
9405 I.e., R (%o0) => %o1. */
9406 fputs (reg_names[REGNO (x)+1], file);
9407 return;
9408 case 'S':
9409 /* Print out the third register name of a register quad.
9410 I.e., S (%o0) => %o2. */
9411 fputs (reg_names[REGNO (x)+2], file);
9412 return;
9413 case 'T':
9414 /* Print out the fourth register name of a register quad.
9415 I.e., T (%o0) => %o3. */
9416 fputs (reg_names[REGNO (x)+3], file);
9417 return;
9418 case 'x':
9419 /* Print a condition code register. */
9420 if (REGNO (x) == SPARC_ICC_REG)
9422 switch (GET_MODE (x))
9424 case E_CCmode:
9425 case E_CCNZmode:
9426 case E_CCCmode:
9427 case E_CCVmode:
9428 s = "%icc";
9429 break;
9430 case E_CCXmode:
9431 case E_CCXNZmode:
9432 case E_CCXCmode:
9433 case E_CCXVmode:
9434 s = "%xcc";
9435 break;
9436 default:
9437 gcc_unreachable ();
9439 fputs (s, file);
9441 else
9442 /* %fccN register */
9443 fputs (reg_names[REGNO (x)], file);
9444 return;
9445 case 'm':
9446 /* Print the operand's address only. */
9447 output_address (GET_MODE (x), XEXP (x, 0));
9448 return;
9449 case 'r':
9450 /* In this case we need a register. Use %g0 if the
9451 operand is const0_rtx. */
9452 if (x == const0_rtx
9453 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9455 fputs ("%g0", file);
9456 return;
9458 else
9459 break;
9461 case 'A':
9462 switch (GET_CODE (x))
9464 case IOR:
9465 s = "or";
9466 break;
9467 case AND:
9468 s = "and";
9469 break;
9470 case XOR:
9471 s = "xor";
9472 break;
9473 default:
9474 output_operand_lossage ("invalid %%A operand");
9475 s = "";
9476 break;
9478 fputs (s, file);
9479 return;
9481 case 'B':
9482 switch (GET_CODE (x))
9484 case IOR:
9485 s = "orn";
9486 break;
9487 case AND:
9488 s = "andn";
9489 break;
9490 case XOR:
9491 s = "xnor";
9492 break;
9493 default:
9494 output_operand_lossage ("invalid %%B operand");
9495 s = "";
9496 break;
9498 fputs (s, file);
9499 return;
9501 /* This is used by the conditional move instructions. */
9502 case 'C':
9504 machine_mode mode = GET_MODE (XEXP (x, 0));
9505 switch (GET_CODE (x))
9507 case NE:
9508 if (mode == CCVmode || mode == CCXVmode)
9509 s = "vs";
9510 else
9511 s = "ne";
9512 break;
9513 case EQ:
9514 if (mode == CCVmode || mode == CCXVmode)
9515 s = "vc";
9516 else
9517 s = "e";
9518 break;
9519 case GE:
9520 if (mode == CCNZmode || mode == CCXNZmode)
9521 s = "pos";
9522 else
9523 s = "ge";
9524 break;
9525 case GT:
9526 s = "g";
9527 break;
9528 case LE:
9529 s = "le";
9530 break;
9531 case LT:
9532 if (mode == CCNZmode || mode == CCXNZmode)
9533 s = "neg";
9534 else
9535 s = "l";
9536 break;
9537 case GEU:
9538 s = "geu";
9539 break;
9540 case GTU:
9541 s = "gu";
9542 break;
9543 case LEU:
9544 s = "leu";
9545 break;
9546 case LTU:
9547 s = "lu";
9548 break;
9549 case LTGT:
9550 s = "lg";
9551 break;
9552 case UNORDERED:
9553 s = "u";
9554 break;
9555 case ORDERED:
9556 s = "o";
9557 break;
9558 case UNLT:
9559 s = "ul";
9560 break;
9561 case UNLE:
9562 s = "ule";
9563 break;
9564 case UNGT:
9565 s = "ug";
9566 break;
9567 case UNGE:
9568 s = "uge"
9569 ; break;
9570 case UNEQ:
9571 s = "ue";
9572 break;
9573 default:
9574 output_operand_lossage ("invalid %%C operand");
9575 s = "";
9576 break;
9578 fputs (s, file);
9579 return;
9582 /* These are used by the movr instruction pattern. */
9583 case 'D':
9585 switch (GET_CODE (x))
9587 case NE:
9588 s = "ne";
9589 break;
9590 case EQ:
9591 s = "e";
9592 break;
9593 case GE:
9594 s = "gez";
9595 break;
9596 case LT:
9597 s = "lz";
9598 break;
9599 case LE:
9600 s = "lez";
9601 break;
9602 case GT:
9603 s = "gz";
9604 break;
9605 default:
9606 output_operand_lossage ("invalid %%D operand");
9607 s = "";
9608 break;
9610 fputs (s, file);
9611 return;
9614 case 'b':
9616 /* Print a sign-extended character. */
9617 int i = trunc_int_for_mode (INTVAL (x), QImode);
9618 fprintf (file, "%d", i);
9619 return;
9622 case 'f':
9623 /* Operand must be a MEM; write its address. */
9624 if (GET_CODE (x) != MEM)
9625 output_operand_lossage ("invalid %%f operand");
9626 output_address (GET_MODE (x), XEXP (x, 0));
9627 return;
9629 case 's':
9631 /* Print a sign-extended 32-bit value. */
9632 HOST_WIDE_INT i;
9633 if (GET_CODE(x) == CONST_INT)
9634 i = INTVAL (x);
9635 else
9637 output_operand_lossage ("invalid %%s operand");
9638 return;
9640 i = trunc_int_for_mode (i, SImode);
9641 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9642 return;
9645 case 0:
9646 /* Do nothing special. */
9647 break;
9649 default:
9650 /* Undocumented flag. */
9651 output_operand_lossage ("invalid operand output code");
9654 if (GET_CODE (x) == REG)
9655 fputs (reg_names[REGNO (x)], file);
9656 else if (GET_CODE (x) == MEM)
9658 fputc ('[', file);
9659 /* Poor Sun assembler doesn't understand absolute addressing. */
9660 if (CONSTANT_P (XEXP (x, 0)))
9661 fputs ("%g0+", file);
9662 output_address (GET_MODE (x), XEXP (x, 0));
9663 fputc (']', file);
9665 else if (GET_CODE (x) == HIGH)
9667 fputs ("%hi(", file);
9668 output_addr_const (file, XEXP (x, 0));
9669 fputc (')', file);
9671 else if (GET_CODE (x) == LO_SUM)
9673 sparc_print_operand (file, XEXP (x, 0), 0);
9674 if (TARGET_CM_MEDMID)
9675 fputs ("+%l44(", file);
9676 else
9677 fputs ("+%lo(", file);
9678 output_addr_const (file, XEXP (x, 1));
9679 fputc (')', file);
9681 else if (GET_CODE (x) == CONST_DOUBLE)
9682 output_operand_lossage ("floating-point constant not a valid immediate operand");
9683 else
9684 output_addr_const (file, x);
9687 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9689 static void
9690 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9692 rtx base, index = 0;
9693 int offset = 0;
9694 rtx addr = x;
9696 if (REG_P (addr))
9697 fputs (reg_names[REGNO (addr)], file);
9698 else if (GET_CODE (addr) == PLUS)
9700 if (CONST_INT_P (XEXP (addr, 0)))
9701 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9702 else if (CONST_INT_P (XEXP (addr, 1)))
9703 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9704 else
9705 base = XEXP (addr, 0), index = XEXP (addr, 1);
9706 if (GET_CODE (base) == LO_SUM)
9708 gcc_assert (USE_AS_OFFSETABLE_LO10
9709 && TARGET_ARCH64
9710 && ! TARGET_CM_MEDMID);
9711 output_operand (XEXP (base, 0), 0);
9712 fputs ("+%lo(", file);
9713 output_address (VOIDmode, XEXP (base, 1));
9714 fprintf (file, ")+%d", offset);
9716 else
9718 fputs (reg_names[REGNO (base)], file);
9719 if (index == 0)
9720 fprintf (file, "%+d", offset);
9721 else if (REG_P (index))
9722 fprintf (file, "+%s", reg_names[REGNO (index)]);
9723 else if (GET_CODE (index) == SYMBOL_REF
9724 || GET_CODE (index) == LABEL_REF
9725 || GET_CODE (index) == CONST)
9726 fputc ('+', file), output_addr_const (file, index);
9727 else gcc_unreachable ();
9730 else if (GET_CODE (addr) == MINUS
9731 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9733 output_addr_const (file, XEXP (addr, 0));
9734 fputs ("-(", file);
9735 output_addr_const (file, XEXP (addr, 1));
9736 fputs ("-.)", file);
9738 else if (GET_CODE (addr) == LO_SUM)
9740 output_operand (XEXP (addr, 0), 0);
9741 if (TARGET_CM_MEDMID)
9742 fputs ("+%l44(", file);
9743 else
9744 fputs ("+%lo(", file);
9745 output_address (VOIDmode, XEXP (addr, 1));
9746 fputc (')', file);
9748 else if (flag_pic
9749 && GET_CODE (addr) == CONST
9750 && GET_CODE (XEXP (addr, 0)) == MINUS
9751 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9752 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9753 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9755 addr = XEXP (addr, 0);
9756 output_addr_const (file, XEXP (addr, 0));
9757 /* Group the args of the second CONST in parenthesis. */
9758 fputs ("-(", file);
9759 /* Skip past the second CONST--it does nothing for us. */
9760 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9761 /* Close the parenthesis. */
9762 fputc (')', file);
9764 else
9766 output_addr_const (file, addr);
9770 /* Target hook for assembling integer objects. The sparc version has
9771 special handling for aligned DI-mode objects. */
9773 static bool
9774 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9776 /* ??? We only output .xword's for symbols and only then in environments
9777 where the assembler can handle them. */
9778 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9780 if (TARGET_V9)
9782 assemble_integer_with_op ("\t.xword\t", x);
9783 return true;
9785 else
9787 assemble_aligned_integer (4, const0_rtx);
9788 assemble_aligned_integer (4, x);
9789 return true;
9792 return default_assemble_integer (x, size, aligned_p);
9795 /* Return the value of a code used in the .proc pseudo-op that says
9796 what kind of result this function returns. For non-C types, we pick
9797 the closest C type. */
9799 #ifndef SHORT_TYPE_SIZE
9800 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9801 #endif
9803 #ifndef INT_TYPE_SIZE
9804 #define INT_TYPE_SIZE BITS_PER_WORD
9805 #endif
9807 #ifndef LONG_TYPE_SIZE
9808 #define LONG_TYPE_SIZE BITS_PER_WORD
9809 #endif
9811 #ifndef LONG_LONG_TYPE_SIZE
9812 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9813 #endif
9815 #ifndef FLOAT_TYPE_SIZE
9816 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9817 #endif
9819 #ifndef DOUBLE_TYPE_SIZE
9820 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9821 #endif
9823 #ifndef LONG_DOUBLE_TYPE_SIZE
9824 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9825 #endif
9827 unsigned long
9828 sparc_type_code (tree type)
9830 unsigned long qualifiers = 0;
9831 unsigned shift;
9833 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9834 setting more, since some assemblers will give an error for this. Also,
9835 we must be careful to avoid shifts of 32 bits or more to avoid getting
9836 unpredictable results. */
9838 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9840 switch (TREE_CODE (type))
9842 case ERROR_MARK:
9843 return qualifiers;
9845 case ARRAY_TYPE:
9846 qualifiers |= (3 << shift);
9847 break;
9849 case FUNCTION_TYPE:
9850 case METHOD_TYPE:
9851 qualifiers |= (2 << shift);
9852 break;
9854 case POINTER_TYPE:
9855 case REFERENCE_TYPE:
9856 case OFFSET_TYPE:
9857 qualifiers |= (1 << shift);
9858 break;
9860 case RECORD_TYPE:
9861 return (qualifiers | 8);
9863 case UNION_TYPE:
9864 case QUAL_UNION_TYPE:
9865 return (qualifiers | 9);
9867 case ENUMERAL_TYPE:
9868 return (qualifiers | 10);
9870 case VOID_TYPE:
9871 return (qualifiers | 16);
9873 case INTEGER_TYPE:
9874 /* If this is a range type, consider it to be the underlying
9875 type. */
9876 if (TREE_TYPE (type) != 0)
9877 break;
9879 /* Carefully distinguish all the standard types of C,
9880 without messing up if the language is not C. We do this by
9881 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9882 look at both the names and the above fields, but that's redundant.
9883 Any type whose size is between two C types will be considered
9884 to be the wider of the two types. Also, we do not have a
9885 special code to use for "long long", so anything wider than
9886 long is treated the same. Note that we can't distinguish
9887 between "int" and "long" in this code if they are the same
9888 size, but that's fine, since neither can the assembler. */
9890 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9891 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9893 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9894 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9896 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9897 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9899 else
9900 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9902 case REAL_TYPE:
9903 /* If this is a range type, consider it to be the underlying
9904 type. */
9905 if (TREE_TYPE (type) != 0)
9906 break;
9908 /* Carefully distinguish all the standard types of C,
9909 without messing up if the language is not C. */
9911 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9912 return (qualifiers | 6);
9914 else
9915 return (qualifiers | 7);
9917 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9918 /* ??? We need to distinguish between double and float complex types,
9919 but I don't know how yet because I can't reach this code from
9920 existing front-ends. */
9921 return (qualifiers | 7); /* Who knows? */
9923 case VECTOR_TYPE:
9924 case BOOLEAN_TYPE: /* Boolean truth value type. */
9925 case LANG_TYPE:
9926 case NULLPTR_TYPE:
9927 return qualifiers;
9929 default:
9930 gcc_unreachable (); /* Not a type! */
9934 return qualifiers;
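/* Worked example (editor's illustration, not from the sources): for a
   C declaration such as "unsigned short *p", the loop above first sees
   the POINTER_TYPE and sets 1 << 6, then descends to the unsigned
   short, whose precision is at most SHORT_TYPE_SIZE, and returns
   0x40 | 13 = 0x4d -- the pointer qualifier and the base-type code
   packed into one word.  */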
9937 /* Nested function support. */
9939 /* Emit RTL insns to initialize the variable parts of a trampoline.
9940 FNADDR is an RTX for the address of the function's pure code.
9941 CXT is an RTX for the static chain value for the function.
9943 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9944 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9945 (to store insns). This is a bit excessive. Perhaps a different
9946 mechanism would be better here.
9948 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9950 static void
9951 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9953 /* SPARC 32-bit trampoline:
9955 sethi %hi(fn), %g1
9956 sethi %hi(static), %g2
9957 jmp %g1+%lo(fn)
9958 or %g2, %lo(static), %g2
9960 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9961 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
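/* Sketch of the arithmetic performed below (editor's illustration):
   with the SETHI bit pattern above, the first trampoline word is

     0x03000000 | (fnaddr >> 10)     i.e. sethi %hi(fn), %g1

   and the word at offset 8 is

     0x81c06000 | (fnaddr & 0x3ff)   i.e. jmp %g1+%lo(fn)

   The expand_shift/expand_binop calls emit this same computation as
   RTL so that it also works when FNADDR is not a compile-time
   constant.  */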
9964 emit_move_insn
9965 (adjust_address (m_tramp, SImode, 0),
9966 expand_binop (SImode, ior_optab,
9967 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9968 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9969 NULL_RTX, 1, OPTAB_DIRECT));
9971 emit_move_insn
9972 (adjust_address (m_tramp, SImode, 4),
9973 expand_binop (SImode, ior_optab,
9974 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9975 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9976 NULL_RTX, 1, OPTAB_DIRECT));
9978 emit_move_insn
9979 (adjust_address (m_tramp, SImode, 8),
9980 expand_binop (SImode, ior_optab,
9981 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9982 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9983 NULL_RTX, 1, OPTAB_DIRECT));
9985 emit_move_insn
9986 (adjust_address (m_tramp, SImode, 12),
9987 expand_binop (SImode, ior_optab,
9988 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9989 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9990 NULL_RTX, 1, OPTAB_DIRECT));
9992 emit_insn
9993 (gen_flush (SImode, validize_mem (adjust_address (m_tramp, SImode, 0))));
9995 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9996 aligned on a 16 byte boundary so one flush clears it all. */
9997 if (sparc_cpu != PROCESSOR_ULTRASPARC
9998 && sparc_cpu != PROCESSOR_ULTRASPARC3
9999 && sparc_cpu != PROCESSOR_NIAGARA
10000 && sparc_cpu != PROCESSOR_NIAGARA2
10001 && sparc_cpu != PROCESSOR_NIAGARA3
10002 && sparc_cpu != PROCESSOR_NIAGARA4
10003 && sparc_cpu != PROCESSOR_NIAGARA7
10004 && sparc_cpu != PROCESSOR_M8)
10005 emit_insn
10006 (gen_flush (SImode, validize_mem (adjust_address (m_tramp, SImode, 8))));
10008 /* Call __enable_execute_stack after writing onto the stack to make sure
10009 the stack address is accessible. */
10010 #ifdef HAVE_ENABLE_EXECUTE_STACK
10011 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10012 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10013 #endif
10017 /* The 64-bit version is simpler because it makes more sense to load the
10018 values as "immediate" data out of the trampoline. It's also easier since
10019 we can read the PC without clobbering a register. */
10021 static void
10022 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
10024 /* SPARC 64-bit trampoline:
10026 rd %pc, %g1
10027 ldx [%g1+24], %g5
10028 jmp %g5
10029 ldx [%g1+16], %g5
10030 +16 bytes data
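/* Decoding the constant words stored below (editor's note):
   0x83414000 is rd %pc, %g1; 0xca586018 is ldx [%g1+24], %g5, which
   fetches FNADDR; 0x81c14000 is jmp %g5; and 0xca586010 is the
   delay-slot ldx [%g1+16], %g5 that loads the static chain CXT.  */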
10033 emit_move_insn (adjust_address (m_tramp, SImode, 0),
10034 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
10035 emit_move_insn (adjust_address (m_tramp, SImode, 4),
10036 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
10037 emit_move_insn (adjust_address (m_tramp, SImode, 8),
10038 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
10039 emit_move_insn (adjust_address (m_tramp, SImode, 12),
10040 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
10041 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
10042 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
10043 emit_insn
10044 (gen_flush (DImode, validize_mem (adjust_address (m_tramp, DImode, 0))));
10046 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
10047 aligned on a 16 byte boundary so one flush clears it all. */
10048 if (sparc_cpu != PROCESSOR_ULTRASPARC
10049 && sparc_cpu != PROCESSOR_ULTRASPARC3
10050 && sparc_cpu != PROCESSOR_NIAGARA
10051 && sparc_cpu != PROCESSOR_NIAGARA2
10052 && sparc_cpu != PROCESSOR_NIAGARA3
10053 && sparc_cpu != PROCESSOR_NIAGARA4
10054 && sparc_cpu != PROCESSOR_NIAGARA7
10055 && sparc_cpu != PROCESSOR_M8)
10056 emit_insn
10057 (gen_flush (DImode, validize_mem (adjust_address (m_tramp, DImode, 8))));
10059 /* Call __enable_execute_stack after writing onto the stack to make sure
10060 the stack address is accessible. */
10061 #ifdef HAVE_ENABLE_EXECUTE_STACK
10062 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10063 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
10064 #endif
10067 /* Worker for TARGET_TRAMPOLINE_INIT. */
10069 static void
10070 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10072 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
10073 cxt = force_reg (Pmode, cxt);
10074 if (TARGET_ARCH64)
10075 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
10076 else
10077 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
10080 /* Adjust the cost of a scheduling dependency. Return the new cost of
10081 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
10083 static int
10084 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
10085 int cost)
10087 enum attr_type insn_type;
10089 if (recog_memoized (insn) < 0)
10090 return cost;
10092 insn_type = get_attr_type (insn);
10094 if (dep_type == 0)
10096 /* Data dependency; DEP_INSN writes a register that INSN reads some
10097 cycles later. */
10099 /* If a load, then the dependence must be on the memory address;
10100 add an extra "cycle". Note that the cost could be two cycles
10101 if the reg was written late in an instruction group; we cannot tell
10102 here. */
10103 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
10104 return cost + 3;
10106 /* Get the delay only if the address of the store is the dependence. */
10107 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
10109 rtx pat = PATTERN (insn);
10110 rtx dep_pat = PATTERN (dep_insn);
10112 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10113 return cost; /* This should not happen! */
10115 /* The dependency between the two instructions was on the data that
10116 is being stored. Assume that this implies that the address of the
10117 store is not dependent. */
10118 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10119 return cost;
10121 return cost + 3; /* An approximation. */
10124 /* A shift instruction cannot receive its data from an instruction
10125 in the same cycle; add a one cycle penalty. */
10126 if (insn_type == TYPE_SHIFT)
10127 return cost + 3; /* Split before cascade into shift. */
10129 else
10131 /* Anti- or output-dependency; DEP_INSN reads/writes a register that
10132 INSN writes some cycles later. */
10134 /* These are only significant for the fpu unit; writing a fp reg before
10135 the fpu has finished with it stalls the processor. */
10137 /* Reusing an integer register causes no problems. */
10138 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10139 return 0;
10142 return cost;
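/* Illustrative case (editor's example): for the sequence

     add %o1, %o2, %o3
     st  %o4, [%o3]

   the store depends on the add through its address register %o3, so
   the code above charges cost + 3; had the add produced %o4, the
   stored data, the rtx_equal_p check would have matched and the cost
   would be left unchanged.  */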
10145 static int
10146 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10147 int cost)
10149 enum attr_type insn_type, dep_type;
10150 rtx pat = PATTERN (insn);
10151 rtx dep_pat = PATTERN (dep_insn);
10153 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10154 return cost;
10156 insn_type = get_attr_type (insn);
10157 dep_type = get_attr_type (dep_insn);
10159 switch (dtype)
10161 case 0:
10162 /* Data dependency; DEP_INSN writes a register that INSN reads some
10163 cycles later. */
10165 switch (insn_type)
10167 case TYPE_STORE:
10168 case TYPE_FPSTORE:
10169 /* Get the delay iff the address of the store is the dependence. */
10170 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10171 return cost;
10173 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10174 return cost;
10175 return cost + 3;
10177 case TYPE_LOAD:
10178 case TYPE_SLOAD:
10179 case TYPE_FPLOAD:
10180 /* If a load, then the dependence must be on the memory address. If
10181 the addresses aren't equal, then it might be a false dependency. */
10182 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
10184 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
10185 || GET_CODE (SET_DEST (dep_pat)) != MEM
10186 || GET_CODE (SET_SRC (pat)) != MEM
10187 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
10188 XEXP (SET_SRC (pat), 0)))
10189 return cost + 2;
10191 return cost + 8;
10193 break;
10195 case TYPE_BRANCH:
10196 /* Compare to branch latency is 0. There is no benefit from
10197 separating compare and branch. */
10198 if (dep_type == TYPE_COMPARE)
10199 return 0;
10200 /* Floating point compare to branch latency is less than
10201 compare to conditional move. */
10202 if (dep_type == TYPE_FPCMP)
10203 return cost - 1;
10204 break;
10205 default:
10206 break;
10208 break;
10210 case REG_DEP_ANTI:
10211 /* Anti-dependencies only penalize the fpu unit. */
10212 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
10213 return 0;
10214 break;
10216 default:
10217 break;
10220 return cost;
10223 static int
10224 leon5_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
10225 int cost)
10227 enum attr_type insn_type, dep_type;
10228 rtx pat = PATTERN (insn);
10229 rtx dep_pat = PATTERN (dep_insn);
10231 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
10232 return cost;
10234 insn_type = get_attr_type (insn);
10235 dep_type = get_attr_type (dep_insn);
10237 switch (dtype)
10239 case REG_DEP_TRUE:
10240 /* Data dependency; DEP_INSN writes a register that INSN reads some
10241 cycles later. */
10243 switch (insn_type)
10245 case TYPE_STORE:
10246 /* Try to schedule three instructions between the store and
10247 the ALU instruction that generated the data. */
10248 if (dep_type == TYPE_IALU || dep_type == TYPE_SHIFT)
10250 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
10251 break;
10253 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
10254 return 4;
10256 break;
10257 default:
10258 break;
10260 break;
10261 case REG_DEP_ANTI:
10262 /* Penalize anti-dependencies for FPU instructions. */
10263 if (fpop_insn_p (insn) || insn_type == TYPE_FPLOAD)
10264 return 4;
10265 break;
10266 default:
10267 break;
10270 return cost;
10273 static int
10274 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
10275 unsigned int)
10277 switch (sparc_cpu)
10279 case PROCESSOR_LEON5:
10280 cost = leon5_adjust_cost (insn, dep_type, dep, cost);
10281 break;
10282 case PROCESSOR_SUPERSPARC:
10283 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
10284 break;
10285 case PROCESSOR_HYPERSPARC:
10286 case PROCESSOR_SPARCLITE86X:
10287 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
10288 break;
10289 default:
10290 break;
10292 return cost;
10295 static void
10296 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
10297 int sched_verbose ATTRIBUTE_UNUSED,
10298 int max_ready ATTRIBUTE_UNUSED)
10301 static int
10302 sparc_use_sched_lookahead (void)
10304 switch (sparc_cpu)
10306 case PROCESSOR_ULTRASPARC:
10307 case PROCESSOR_ULTRASPARC3:
10308 return 4;
10309 case PROCESSOR_SUPERSPARC:
10310 case PROCESSOR_HYPERSPARC:
10311 case PROCESSOR_SPARCLITE86X:
10312 return 3;
10313 case PROCESSOR_NIAGARA4:
10314 case PROCESSOR_NIAGARA7:
10315 case PROCESSOR_M8:
10316 return 2;
10317 case PROCESSOR_NIAGARA:
10318 case PROCESSOR_NIAGARA2:
10319 case PROCESSOR_NIAGARA3:
10320 default:
10321 return 0;
10325 static int
10326 sparc_issue_rate (void)
10328 switch (sparc_cpu)
10330 case PROCESSOR_ULTRASPARC:
10331 case PROCESSOR_ULTRASPARC3:
10332 case PROCESSOR_M8:
10333 return 4;
10334 case PROCESSOR_SUPERSPARC:
10335 return 3;
10336 case PROCESSOR_HYPERSPARC:
10337 case PROCESSOR_SPARCLITE86X:
10338 case PROCESSOR_V9:
10339 /* Assume V9 processors are capable of at least dual-issue. */
10340 case PROCESSOR_NIAGARA4:
10341 case PROCESSOR_NIAGARA7:
10342 return 2;
10343 case PROCESSOR_NIAGARA:
10344 case PROCESSOR_NIAGARA2:
10345 case PROCESSOR_NIAGARA3:
10346 default:
10347 return 1;
10351 static int
10352 sparc_branch_cost (bool speed_p, bool predictable_p)
10354 if (!speed_p)
10355 return 2;
10357 /* For pre-V9 processors we use a single value (usually 3) to take into
10358 account the potential annulling of the delay slot (which ends up being
10359 a bubble in the pipeline slot) plus a cycle to take into consideration
10360 the instruction cache effects.
10362 On V9 and later processors, which have branch prediction facilities,
10363 we take into account whether the branch is (easily) predictable. */
10364 const int cost = sparc_costs->branch_cost;
10366 switch (sparc_cpu)
10368 case PROCESSOR_V9:
10369 case PROCESSOR_ULTRASPARC:
10370 case PROCESSOR_ULTRASPARC3:
10371 case PROCESSOR_NIAGARA:
10372 case PROCESSOR_NIAGARA2:
10373 case PROCESSOR_NIAGARA3:
10374 case PROCESSOR_NIAGARA4:
10375 case PROCESSOR_NIAGARA7:
10376 case PROCESSOR_M8:
10377 return cost + (predictable_p ? 0 : 2);
10379 default:
10380 return cost;
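/* Worked example with hypothetical numbers (editor's illustration):
   if sparc_costs->branch_cost is 3 and we target UltraSPARC at -O2,
   a predictable branch costs 3 and an unpredictable one 5, while at
   -Os (!speed_p) the answer is uniformly 2.  */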
10384 static int
10385 set_extends (rtx_insn *insn)
10387 rtx pat = PATTERN (insn);
10389 switch (GET_CODE (SET_SRC (pat)))
10391 /* Load and some shift instructions zero extend. */
10392 case MEM:
10393 case ZERO_EXTEND:
10394 /* sethi clears the high bits. */
10395 case HIGH:
10396 /* LO_SUM is used with sethi. sethi cleared the high
10397 bits and the values used with lo_sum are positive. */
10398 case LO_SUM:
10399 /* Store flag stores 0 or 1. */
10400 case LT: case LTU:
10401 case GT: case GTU:
10402 case LE: case LEU:
10403 case GE: case GEU:
10404 case EQ:
10405 case NE:
10406 return 1;
10407 case AND:
10409 rtx op0 = XEXP (SET_SRC (pat), 0);
10410 rtx op1 = XEXP (SET_SRC (pat), 1);
10411 if (GET_CODE (op1) == CONST_INT)
10412 return INTVAL (op1) >= 0;
10413 if (GET_CODE (op0) != REG)
10414 return 0;
10415 if (sparc_check_64 (op0, insn) == 1)
10416 return 1;
10417 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10419 case IOR:
10420 case XOR:
10422 rtx op0 = XEXP (SET_SRC (pat), 0);
10423 rtx op1 = XEXP (SET_SRC (pat), 1);
10424 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
10425 return 0;
10426 if (GET_CODE (op1) == CONST_INT)
10427 return INTVAL (op1) >= 0;
10428 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
10430 case LSHIFTRT:
10431 return GET_MODE (SET_SRC (pat)) == SImode;
10432 /* Positive integers leave the high bits zero. */
10433 case CONST_INT:
10434 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
10435 case ASHIFTRT:
10436 case SIGN_EXTEND:
10437 return - (GET_MODE (SET_SRC (pat)) == SImode);
10438 case REG:
10439 return sparc_check_64 (SET_SRC (pat), insn);
10440 default:
10441 return 0;
10445 /* We _ought_ to have only one kind per function, but... */
10446 static GTY(()) rtx sparc_addr_diff_list;
10447 static GTY(()) rtx sparc_addr_list;
10449 void
10450 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
10452 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10453 if (diff)
10454 sparc_addr_diff_list
10455 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
10456 else
10457 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
10460 static void
10461 sparc_output_addr_vec (rtx vec)
10463 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10464 int idx, vlen = XVECLEN (body, 0);
10466 #ifdef ASM_OUTPUT_ADDR_VEC_START
10467 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10468 #endif
10470 #ifdef ASM_OUTPUT_CASE_LABEL
10471 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10472 NEXT_INSN (lab));
10473 #else
10474 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10475 #endif
10477 for (idx = 0; idx < vlen; idx++)
10479 ASM_OUTPUT_ADDR_VEC_ELT
10480 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10483 #ifdef ASM_OUTPUT_ADDR_VEC_END
10484 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10485 #endif
10488 static void
10489 sparc_output_addr_diff_vec (rtx vec)
10491 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10492 rtx base = XEXP (XEXP (body, 0), 0);
10493 int idx, vlen = XVECLEN (body, 1);
10495 #ifdef ASM_OUTPUT_ADDR_VEC_START
10496 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10497 #endif
10499 #ifdef ASM_OUTPUT_CASE_LABEL
10500 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10501 NEXT_INSN (lab));
10502 #else
10503 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10504 #endif
10506 for (idx = 0; idx < vlen; idx++)
10508 ASM_OUTPUT_ADDR_DIFF_ELT
10509 (asm_out_file,
10510 body,
10511 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10512 CODE_LABEL_NUMBER (base));
10515 #ifdef ASM_OUTPUT_ADDR_VEC_END
10516 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10517 #endif
10520 static void
10521 sparc_output_deferred_case_vectors (void)
10523 rtx t;
10524 int align;
10526 if (sparc_addr_list == NULL_RTX
10527 && sparc_addr_diff_list == NULL_RTX)
10528 return;
10530 /* Align to cache line in the function's code section. */
10531 switch_to_section (current_function_section ());
10533 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10534 if (align > 0)
10535 ASM_OUTPUT_ALIGN (asm_out_file, align);
10537 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10538 sparc_output_addr_vec (XEXP (t, 0));
10539 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10540 sparc_output_addr_diff_vec (XEXP (t, 0));
10542 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
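/* Sketch of the output (editor's illustration; the exact directives
   depend on the configuration macros above): a deferred
   address-difference vector for a three-way switch might be emitted
   as

     .align 4
   .L9:
     .word .L3-.L9
     .word .L5-.L9
     .word .L7-.L9

   i.e. each element is the distance of a case label from the
   vector's base label.  */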
10545 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10546 unknown. Return 1 if the high bits are zero, -1 if the register is
10547 sign extended. */
10548 int
10549 sparc_check_64 (rtx x, rtx_insn *insn)
10551 /* If a register is set only once it is safe to ignore insns this
10552 code does not know how to handle. The loop will either recognize
10553 the single set and return the correct value or fail to recognize
10554 it and return 0. */
10555 int set_once = 0;
10556 rtx y = x;
10558 gcc_assert (GET_CODE (x) == REG);
10560 if (GET_MODE (x) == DImode)
10561 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10563 if (flag_expensive_optimizations
10564 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10565 set_once = 1;
10567 if (insn == 0)
10569 if (set_once)
10570 insn = get_last_insn_anywhere ();
10571 else
10572 return 0;
10575 while ((insn = PREV_INSN (insn)))
10577 switch (GET_CODE (insn))
10579 case JUMP_INSN:
10580 case NOTE:
10581 break;
10582 case CODE_LABEL:
10583 case CALL_INSN:
10584 default:
10585 if (! set_once)
10586 return 0;
10587 break;
10588 case INSN:
10590 rtx pat = PATTERN (insn);
10591 if (GET_CODE (pat) != SET)
10592 return 0;
10593 if (rtx_equal_p (x, SET_DEST (pat)))
10594 return set_extends (insn);
10595 if (y && rtx_equal_p (y, SET_DEST (pat)))
10596 return set_extends (insn);
10597 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10598 return 0;
10602 return 0;
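/* Example (editor's illustration): after

     sethi %hi(sym), %o1

   the insn's SET_SRC is a HIGH rtx, so set_extends returns 1 and
   sparc_check_64 (%o1, insn) reports that the high 32 bits of %o1
   are known to be zero.  */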
10605 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10606 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10608 const char *
10609 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10611 static char asm_code[60];
10613 /* The scratch register is only required when the destination
10614 register is not a 64-bit global or out register. */
10615 if (which_alternative != 2)
10616 operands[3] = operands[0];
10618 /* We can only shift by constants <= 63. */
10619 if (GET_CODE (operands[2]) == CONST_INT)
10620 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10622 if (GET_CODE (operands[1]) == CONST_INT)
10624 output_asm_insn ("mov\t%1, %3", operands);
10626 else
10628 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10629 if (sparc_check_64 (operands[1], insn) <= 0)
10630 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10631 output_asm_insn ("or\t%L1, %3, %3", operands);
10634 strcpy (asm_code, opcode);
10636 if (which_alternative != 2)
10637 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10638 else
10639 return
10640 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
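/* Illustrative expansion (editor's sketch, for the scratch-register
   alternative): a 64-bit "sllx" whose input lives in a register pair
   comes out roughly as

     sllx %H1, 32, %3    ! high word into the upper half
     srl  %L1, 0, %L1    ! zero-extend the low word if necessary
     or   %L1, %3, %3    ! reassemble the 64-bit value
     sllx %3, %2, %3     ! the shift itself
     srlx %3, 32, %H0    ! split the result back into the pair
     mov  %3, %L0

   where the srl is omitted when sparc_check_64 proves the low word
   already zero-extended.  */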
10643 /* Output rtl to increment the profiler label LABELNO
10644 for profiling a function entry. */
10646 void
10647 sparc_profile_hook (int labelno)
10649 char buf[32];
10650 rtx lab, fun;
10652 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10653 if (NO_PROFILE_COUNTERS)
10655 emit_library_call (fun, LCT_NORMAL, VOIDmode);
10657 else
10659 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10660 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10661 emit_library_call (fun, LCT_NORMAL, VOIDmode, lab, Pmode);
10665 #ifdef TARGET_SOLARIS
10666 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10668 static void
10669 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10670 tree decl ATTRIBUTE_UNUSED)
10672 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10674 solaris_elf_asm_comdat_section (name, flags, decl);
10675 return;
10678 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10680 if (!(flags & SECTION_DEBUG))
10681 fputs (",#alloc", asm_out_file);
10682 #if HAVE_GAS_SECTION_EXCLUDE
10683 if (flags & SECTION_EXCLUDE)
10684 fputs (",#exclude", asm_out_file);
10685 #endif
10686 if (flags & SECTION_WRITE)
10687 fputs (",#write", asm_out_file);
10688 if (flags & SECTION_TLS)
10689 fputs (",#tls", asm_out_file);
10690 if (flags & SECTION_CODE)
10691 fputs (",#execinstr", asm_out_file);
10693 if (flags & SECTION_NOTYPE)
10695 else if (flags & SECTION_BSS)
10696 fputs (",#nobits", asm_out_file);
10697 else
10698 fputs (",#progbits", asm_out_file);
10700 fputc ('\n', asm_out_file);
10702 #endif /* TARGET_SOLARIS */
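/* Sample of the directive emitted by the Solaris hook above (editor's
   illustration): a writable section named ".mydata" with no other
   flags comes out as

     .section ".mydata",#alloc,#write,#progbits

   using the Solaris as attribute syntax instead of the GNU as flags
   string.  */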
10704 /* We do not allow indirect calls to be optimized into sibling calls.
10706 We cannot use sibling calls when delayed branches are disabled
10707 because they will likely require the call delay slot to be filled.
10709 Also, on SPARC 32-bit we cannot emit a sibling call when the
10710 current function returns a structure. This is because the "unimp
10711 after call" convention would cause the callee to return to the
10712 wrong place. The generic code already disallows cases where the
10713 function being called returns a structure.
10715 It may seem strange how this last case could occur. Usually there
10716 is code after the call which jumps to epilogue code which dumps the
10717 return value into the struct return area. That ought to invalidate
10718 the sibling call, right? Well, in the C++ case we can end up passing
10719 the pointer to the struct return area to a constructor (which returns
10720 void) and then nothing else happens. Such a sibling call would look
10721 valid without the added check here.
10723 VxWorks PIC PLT entries require the global pointer to be initialized
10724 on entry. We therefore can't emit sibling calls to them. */
10725 static bool
10726 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10728 return (decl
10729 && flag_delayed_branch
10730 && (TARGET_ARCH64 || ! cfun->returns_struct)
10731 && !(TARGET_VXWORKS_RTP
10732 && flag_pic
10733 && !targetm.binds_local_p (decl)));
10736 /* libfunc renaming. */
10738 static void
10739 sparc_init_libfuncs (void)
10741 if (TARGET_ARCH32)
10743 /* Use the subroutines that Sun's library provides for integer
10744 multiply and divide. The `*' prevents an underscore from
10745 being prepended by the compiler. .umul is a little faster
10746 than .mul. */
10747 set_optab_libfunc (smul_optab, SImode, "*.umul");
10748 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10749 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10750 set_optab_libfunc (smod_optab, SImode, "*.rem");
10751 set_optab_libfunc (umod_optab, SImode, "*.urem");
10753 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
10754 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10755 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10756 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10757 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10758 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10760 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10761 is because with soft-float, the SFmode and DFmode sqrt
10762 instructions will be absent, and the compiler will notice and
10763 try to use the TFmode sqrt instruction for calls to the
10764 builtin function sqrt, but this fails. */
10765 if (TARGET_FPU)
10766 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10768 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10769 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10770 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10771 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10772 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10773 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10775 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10776 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10777 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10778 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10780 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10781 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10782 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10783 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10785 if (DITF_CONVERSION_LIBFUNCS)
10787 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10788 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10789 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10790 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10793 if (SUN_CONVERSION_LIBFUNCS)
10795 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10796 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10797 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10798 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10801 if (TARGET_ARCH64)
10803 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
10804 do not exist in the library. Make sure the compiler does not
10805 emit calls to them by accident. (It should always use the
10806 hardware instructions.) */
10807 set_optab_libfunc (smul_optab, SImode, 0);
10808 set_optab_libfunc (sdiv_optab, SImode, 0);
10809 set_optab_libfunc (udiv_optab, SImode, 0);
10810 set_optab_libfunc (smod_optab, SImode, 0);
10811 set_optab_libfunc (umod_optab, SImode, 0);
10813 if (SUN_INTEGER_MULTIPLY_64)
10815 set_optab_libfunc (smul_optab, DImode, "__mul64");
10816 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10817 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10818 set_optab_libfunc (smod_optab, DImode, "__rem64");
10819 set_optab_libfunc (umod_optab, DImode, "__urem64");
10822 if (SUN_CONVERSION_LIBFUNCS)
10824 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10825 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10826 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10827 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
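/* Net effect (editor's illustration): with -m32, a "long double"
   product x * y is lowered to a call to _Q_mul and a 32x32 integer
   multiply to the Sun routine .umul, instead of libgcc's generic
   __multf3 and __mulsi3 entry points.  */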
10832 /* SPARC builtins. */
10833 enum sparc_builtins
10835 /* FPU builtins. */
10836 SPARC_BUILTIN_LDFSR,
10837 SPARC_BUILTIN_STFSR,
10839 /* VIS 1.0 builtins. */
10840 SPARC_BUILTIN_FPACK16,
10841 SPARC_BUILTIN_FPACK32,
10842 SPARC_BUILTIN_FPACKFIX,
10843 SPARC_BUILTIN_FEXPAND,
10844 SPARC_BUILTIN_FPMERGE,
10845 SPARC_BUILTIN_FMUL8X16,
10846 SPARC_BUILTIN_FMUL8X16AU,
10847 SPARC_BUILTIN_FMUL8X16AL,
10848 SPARC_BUILTIN_FMUL8SUX16,
10849 SPARC_BUILTIN_FMUL8ULX16,
10850 SPARC_BUILTIN_FMULD8SUX16,
10851 SPARC_BUILTIN_FMULD8ULX16,
10852 SPARC_BUILTIN_FALIGNDATAV4HI,
10853 SPARC_BUILTIN_FALIGNDATAV8QI,
10854 SPARC_BUILTIN_FALIGNDATAV2SI,
10855 SPARC_BUILTIN_FALIGNDATADI,
10856 SPARC_BUILTIN_WRGSR,
10857 SPARC_BUILTIN_RDGSR,
10858 SPARC_BUILTIN_ALIGNADDR,
10859 SPARC_BUILTIN_ALIGNADDRL,
10860 SPARC_BUILTIN_PDIST,
10861 SPARC_BUILTIN_EDGE8,
10862 SPARC_BUILTIN_EDGE8L,
10863 SPARC_BUILTIN_EDGE16,
10864 SPARC_BUILTIN_EDGE16L,
10865 SPARC_BUILTIN_EDGE32,
10866 SPARC_BUILTIN_EDGE32L,
10867 SPARC_BUILTIN_FCMPLE16,
10868 SPARC_BUILTIN_FCMPLE32,
10869 SPARC_BUILTIN_FCMPNE16,
10870 SPARC_BUILTIN_FCMPNE32,
10871 SPARC_BUILTIN_FCMPGT16,
10872 SPARC_BUILTIN_FCMPGT32,
10873 SPARC_BUILTIN_FCMPEQ16,
10874 SPARC_BUILTIN_FCMPEQ32,
10875 SPARC_BUILTIN_FPADD16,
10876 SPARC_BUILTIN_FPADD16S,
10877 SPARC_BUILTIN_FPADD32,
10878 SPARC_BUILTIN_FPADD32S,
10879 SPARC_BUILTIN_FPSUB16,
10880 SPARC_BUILTIN_FPSUB16S,
10881 SPARC_BUILTIN_FPSUB32,
10882 SPARC_BUILTIN_FPSUB32S,
10883 SPARC_BUILTIN_ARRAY8,
10884 SPARC_BUILTIN_ARRAY16,
10885 SPARC_BUILTIN_ARRAY32,
10887 /* VIS 2.0 builtins. */
10888 SPARC_BUILTIN_EDGE8N,
10889 SPARC_BUILTIN_EDGE8LN,
10890 SPARC_BUILTIN_EDGE16N,
10891 SPARC_BUILTIN_EDGE16LN,
10892 SPARC_BUILTIN_EDGE32N,
10893 SPARC_BUILTIN_EDGE32LN,
10894 SPARC_BUILTIN_BMASK,
10895 SPARC_BUILTIN_BSHUFFLEV4HI,
10896 SPARC_BUILTIN_BSHUFFLEV8QI,
10897 SPARC_BUILTIN_BSHUFFLEV2SI,
10898 SPARC_BUILTIN_BSHUFFLEDI,
10900 /* VIS 3.0 builtins. */
10901 SPARC_BUILTIN_CMASK8,
10902 SPARC_BUILTIN_CMASK16,
10903 SPARC_BUILTIN_CMASK32,
10904 SPARC_BUILTIN_FCHKSM16,
10905 SPARC_BUILTIN_FSLL16,
10906 SPARC_BUILTIN_FSLAS16,
10907 SPARC_BUILTIN_FSRL16,
10908 SPARC_BUILTIN_FSRA16,
10909 SPARC_BUILTIN_FSLL32,
10910 SPARC_BUILTIN_FSLAS32,
10911 SPARC_BUILTIN_FSRL32,
10912 SPARC_BUILTIN_FSRA32,
10913 SPARC_BUILTIN_PDISTN,
10914 SPARC_BUILTIN_FMEAN16,
10915 SPARC_BUILTIN_FPADD64,
10916 SPARC_BUILTIN_FPSUB64,
10917 SPARC_BUILTIN_FPADDS16,
10918 SPARC_BUILTIN_FPADDS16S,
10919 SPARC_BUILTIN_FPSUBS16,
10920 SPARC_BUILTIN_FPSUBS16S,
10921 SPARC_BUILTIN_FPADDS32,
10922 SPARC_BUILTIN_FPADDS32S,
10923 SPARC_BUILTIN_FPSUBS32,
10924 SPARC_BUILTIN_FPSUBS32S,
10925 SPARC_BUILTIN_FUCMPLE8,
10926 SPARC_BUILTIN_FUCMPNE8,
10927 SPARC_BUILTIN_FUCMPGT8,
10928 SPARC_BUILTIN_FUCMPEQ8,
10929 SPARC_BUILTIN_FHADDS,
10930 SPARC_BUILTIN_FHADDD,
10931 SPARC_BUILTIN_FHSUBS,
10932 SPARC_BUILTIN_FHSUBD,
10933 SPARC_BUILTIN_FNHADDS,
10934 SPARC_BUILTIN_FNHADDD,
10935 SPARC_BUILTIN_UMULXHI,
10936 SPARC_BUILTIN_XMULX,
10937 SPARC_BUILTIN_XMULXHI,
10939 /* VIS 4.0 builtins. */
10940 SPARC_BUILTIN_FPADD8,
10941 SPARC_BUILTIN_FPADDS8,
10942 SPARC_BUILTIN_FPADDUS8,
10943 SPARC_BUILTIN_FPADDUS16,
10944 SPARC_BUILTIN_FPCMPLE8,
10945 SPARC_BUILTIN_FPCMPGT8,
10946 SPARC_BUILTIN_FPCMPULE16,
10947 SPARC_BUILTIN_FPCMPUGT16,
10948 SPARC_BUILTIN_FPCMPULE32,
10949 SPARC_BUILTIN_FPCMPUGT32,
10950 SPARC_BUILTIN_FPMAX8,
10951 SPARC_BUILTIN_FPMAX16,
10952 SPARC_BUILTIN_FPMAX32,
10953 SPARC_BUILTIN_FPMAXU8,
10954 SPARC_BUILTIN_FPMAXU16,
10955 SPARC_BUILTIN_FPMAXU32,
10956 SPARC_BUILTIN_FPMIN8,
10957 SPARC_BUILTIN_FPMIN16,
10958 SPARC_BUILTIN_FPMIN32,
10959 SPARC_BUILTIN_FPMINU8,
10960 SPARC_BUILTIN_FPMINU16,
10961 SPARC_BUILTIN_FPMINU32,
10962 SPARC_BUILTIN_FPSUB8,
10963 SPARC_BUILTIN_FPSUBS8,
10964 SPARC_BUILTIN_FPSUBUS8,
10965 SPARC_BUILTIN_FPSUBUS16,
10967 /* VIS 4.0B builtins. */
10969 /* Note that all the DICTUNPACK* entries should be kept
10970 contiguous. */
10971 SPARC_BUILTIN_FIRST_DICTUNPACK,
10972 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10973 SPARC_BUILTIN_DICTUNPACK16,
10974 SPARC_BUILTIN_DICTUNPACK32,
10975 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10977 /* Note that all the FPCMP*SHL entries should be kept
10978 contiguous. */
10979 SPARC_BUILTIN_FIRST_FPCMPSHL,
10980 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10981 SPARC_BUILTIN_FPCMPGT8SHL,
10982 SPARC_BUILTIN_FPCMPEQ8SHL,
10983 SPARC_BUILTIN_FPCMPNE8SHL,
10984 SPARC_BUILTIN_FPCMPLE16SHL,
10985 SPARC_BUILTIN_FPCMPGT16SHL,
10986 SPARC_BUILTIN_FPCMPEQ16SHL,
10987 SPARC_BUILTIN_FPCMPNE16SHL,
10988 SPARC_BUILTIN_FPCMPLE32SHL,
10989 SPARC_BUILTIN_FPCMPGT32SHL,
10990 SPARC_BUILTIN_FPCMPEQ32SHL,
10991 SPARC_BUILTIN_FPCMPNE32SHL,
10992 SPARC_BUILTIN_FPCMPULE8SHL,
10993 SPARC_BUILTIN_FPCMPUGT8SHL,
10994 SPARC_BUILTIN_FPCMPULE16SHL,
10995 SPARC_BUILTIN_FPCMPUGT16SHL,
10996 SPARC_BUILTIN_FPCMPULE32SHL,
10997 SPARC_BUILTIN_FPCMPUGT32SHL,
10998 SPARC_BUILTIN_FPCMPDE8SHL,
10999 SPARC_BUILTIN_FPCMPDE16SHL,
11000 SPARC_BUILTIN_FPCMPDE32SHL,
11001 SPARC_BUILTIN_FPCMPUR8SHL,
11002 SPARC_BUILTIN_FPCMPUR16SHL,
11003 SPARC_BUILTIN_FPCMPUR32SHL,
11004 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
11006 SPARC_BUILTIN_MAX
11009 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
11010 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
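/* Usage sketch (editor's illustration): with -mvis, user code can
   call these entry points directly, e.g.

     typedef short v4hi __attribute__ ((vector_size (8)));

     v4hi
     add_pixels (v4hi a, v4hi b)
     {
       return __builtin_vis_fpadd16 (a, b);
     }

   which should compile to a single fpadd16 instruction.  */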
11012 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
11013 The instruction should require a constant operand of some sort. The
11014 function prints an error if OPVAL is not valid. */
11016 static int
11017 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
11019 if (GET_CODE (opval) != CONST_INT)
11021 error ("%qs expects a constant argument", insn_data[icode].name);
11022 return false;
11025 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
11027 error ("constant argument out of range for %qs", insn_data[icode].name);
11028 return false;
11030 return true;
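/* Usage sketch (editor's illustration): an expander for a builtin
   whose Nth operand must be an immediate would typically do

     if (!check_constant_argument (icode, n, op))
       return const0_rtx;

   so an invalid call site produces a diagnostic rather than an
   internal error.  */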
11033 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
11034 function decl or NULL_TREE if the builtin was not added. */
11036 static tree
11037 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
11038 tree type)
11040 tree t
11041 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
11043 if (t)
11045 sparc_builtins[code] = t;
11046 sparc_builtins_icode[code] = icode;
11049 return t;
11052 /* Likewise, but also marks the function as "const". */
11054 static tree
11055 def_builtin_const (const char *name, enum insn_code icode,
11056 enum sparc_builtins code, tree type)
11058 tree t = def_builtin (name, icode, code, type);
11060 if (t)
11061 TREE_READONLY (t) = 1;
11063 return t;
11066 /* Implement the TARGET_INIT_BUILTINS target hook.
11067 Create builtin functions for special SPARC instructions. */
11069 static void
11070 sparc_init_builtins (void)
11072 if (TARGET_FPU)
11073 sparc_fpu_init_builtins ();
11075 if (TARGET_VIS)
11076 sparc_vis_init_builtins ();
11079 /* Create builtin functions for FPU instructions. */
11081 static void
11082 sparc_fpu_init_builtins (void)
11084 tree ftype
11085 = build_function_type_list (void_type_node,
11086 build_pointer_type (unsigned_type_node), 0);
11087 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
11088 SPARC_BUILTIN_LDFSR, ftype);
11089 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
11090 SPARC_BUILTIN_STFSR, ftype);
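/* Usage sketch (editor's illustration): __builtin_store_fsr writes
   %fsr to the pointed-to word (stfsr) and __builtin_load_fsr reloads
   it (ldfsr), so user code can do

     unsigned int fsr;
     __builtin_store_fsr (&fsr);   // read %fsr
     fsr &= ~0x1fU;                // e.g. clear the cexc field
     __builtin_load_fsr (&fsr);    // write it back

   to adjust rounding or exception state.  */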
11093 /* Create builtin functions for VIS instructions. */
11095 static void
11096 sparc_vis_init_builtins (void)
11098 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
11099 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
11100 tree v4hi = build_vector_type (intHI_type_node, 4);
11101 tree v2hi = build_vector_type (intHI_type_node, 2);
11102 tree v2si = build_vector_type (intSI_type_node, 2);
11103 tree v1si = build_vector_type (intSI_type_node, 1);
11105 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
11106 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
11107 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
11108 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
11109 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
11110 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
11111 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
11112 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
11113 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
11114 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
11115 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
11116 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
11117 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
11118 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
11119 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
11120 v8qi, v8qi,
11121 intDI_type_node, 0);
11122 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
11123 v8qi, v8qi, 0);
11124 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
11125 v8qi, v8qi, 0);
11126 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
11127 intSI_type_node, 0);
11128 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
11129 intSI_type_node, 0);
11130 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
11131 intSI_type_node, 0);
11132 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
11133 intDI_type_node,
11134 intDI_type_node, 0);
11135 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
11136 intSI_type_node,
11137 intSI_type_node, 0);
11138 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
11139 ptr_type_node,
11140 intSI_type_node, 0);
11141 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
11142 ptr_type_node,
11143 intDI_type_node, 0);
11144 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
11145 ptr_type_node,
11146 ptr_type_node, 0);
11147 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
11148 ptr_type_node,
11149 ptr_type_node, 0);
11150 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
11151 v4hi, v4hi, 0);
11152 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
11153 v2si, v2si, 0);
11154 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
11155 v4hi, v4hi, 0);
11156 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
11157 v2si, v2si, 0);
11158 tree void_ftype_di = build_function_type_list (void_type_node,
11159 intDI_type_node, 0);
11160 tree di_ftype_void = build_function_type_list (intDI_type_node,
11161 void_type_node, 0);
11162 tree void_ftype_si = build_function_type_list (void_type_node,
11163 intSI_type_node, 0);
11164 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
11165 float_type_node,
11166 float_type_node, 0);
11167 tree df_ftype_df_df = build_function_type_list (double_type_node,
11168 double_type_node,
11169 double_type_node, 0);
11171 /* Packing and expanding vectors. */
11172 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
11173 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
11174 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
11175 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
11176 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
11177 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
11178 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
11179 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
11180 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
11181 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
11183 /* Multiplications. */
11184 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
11185 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
11186 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
11187 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
11188 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
11189 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
11190 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
11191 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
11192 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
11193 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
11194 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
11195 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
11196 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
11197 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
11199 /* Data aligning. */
11200 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
11201 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
11202 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
11203 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
11204 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
11205 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
11206 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
11207 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
11209 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
11210 SPARC_BUILTIN_WRGSR, void_ftype_di);
11211 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
11212 SPARC_BUILTIN_RDGSR, di_ftype_void);
11214 if (TARGET_ARCH64)
11216 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
11217 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
11218 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
11219 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
11221 else
11223 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
11224 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
11225 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
11226 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
11229 /* Pixel distance. */
11230 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
11231 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
11233 /* Edge handling. */
11234 if (TARGET_ARCH64)
11236 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
11237 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
11238 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
11239 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
11240 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
11241 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
11242 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
11243 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
11244 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
11245 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
11246 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
11247 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
11249 else
11251 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
11252 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
11253 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
11254 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
11255 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
11256 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
11257 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
11258 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
11259 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
11260 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
11261 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
11262 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
11265 /* Pixel compare. */
11266 if (TARGET_ARCH64)
11268 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
11269 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
11270 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
11271 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
11272 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
11273 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
11274 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
11275 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
11276 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
11277 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
11278 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
11279 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
11280 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
11281 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
11282 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
11283 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
11285 else
11287 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
11288 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
11289 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
11290 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
11291 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
11292 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
11293 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
11294 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
11295 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
11296 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
11297 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
11298 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
11299 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
11300 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
11301 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
11302 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
11305 /* Addition and subtraction. */
11306 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
11307 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
11308 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
11309 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
11310 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
11311 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
11312 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
11313 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
11314 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
11315 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
11316 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
11317 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
11318 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
11319 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
11320 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
11321 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
11323 /* Three-dimensional array addressing. */
11324 if (TARGET_ARCH64)
11326 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
11327 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
11328 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
11329 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
11330 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
11331 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
11333 else
11335 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
11336 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
11337 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
11338 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
11339 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
11340 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
11343 if (TARGET_VIS2)
11345 /* Edge handling. */
11346 if (TARGET_ARCH64)
11348 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
11349 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
11350 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
11351 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
11352 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
11353 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
11354 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
11355 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
11356 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
11357 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
11358 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
11359 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
11361 else
11363 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
11364 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
11365 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
11366 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
11367 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
11368 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
11369 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
11370 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
11371 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
11372 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
11373 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
11374 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
11377 /* Byte mask and shuffle. */
11378 if (TARGET_ARCH64)
11379 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
11380 SPARC_BUILTIN_BMASK, di_ftype_di_di);
11381 else
11382 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
11383 SPARC_BUILTIN_BMASK, si_ftype_si_si);
11384 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
11385 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
11386 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
11387 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
11388 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
11389 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
11390 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
11391 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
11394 if (TARGET_VIS3)
11396 if (TARGET_ARCH64)
11398 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
11399 SPARC_BUILTIN_CMASK8, void_ftype_di);
11400 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
11401 SPARC_BUILTIN_CMASK16, void_ftype_di);
11402 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
11403 SPARC_BUILTIN_CMASK32, void_ftype_di);
11405 else
11407 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
11408 SPARC_BUILTIN_CMASK8, void_ftype_si);
11409 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
11410 SPARC_BUILTIN_CMASK16, void_ftype_si);
11411 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
11412 SPARC_BUILTIN_CMASK32, void_ftype_si);
11415 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
11416 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
11418 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
11419 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
11420 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
11421 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
11422 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
11423 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
11424 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
11425 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
11426 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
11427 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
11428 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
11429 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
11430 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
11431 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
11432 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
11433 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
11435 if (TARGET_ARCH64)
11436 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
11437 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
11438 else
11439 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
11440 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
11442 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
11443 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
11444 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
11445 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
11446 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
11447 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
11449 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
11450 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
11451 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
11452 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
11453 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
11454 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
11455 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
11456 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
11457 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
11458 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
11459 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
11460 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
11461 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
11462 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
11463 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
11464 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
11466 if (TARGET_ARCH64)
11468 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
11469 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
11470 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
11471 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
11472 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
11473 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11474 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11475 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11477 else
11479 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11480 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11481 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11482 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11483 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11484 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11485 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11486 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11489 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11490 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11491 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11492 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11493 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11494 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11495 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11496 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11497 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11498 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11499 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11500 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11502 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11503 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11504 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11505 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11506 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11507 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11510 if (TARGET_VIS4)
11512 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11513 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11514 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11515 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11516 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11517 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11518 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11519 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11522 if (TARGET_ARCH64)
11524 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11525 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11526 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11527 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11528 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11529 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11530 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11531 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11532 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11533 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11534 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11535 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11537 else
11539 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11540 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11541 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11542 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11543 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11544 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11545 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11546 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11547 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11548 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11549 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11550 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11553 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11554 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11555 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11556 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11557 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11558 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11559 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11560 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11561 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11562 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11563 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11564 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11565 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11566 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11567 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11568 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11569 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11570 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11571 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11572 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11573 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11574 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11575 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11576 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11577 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11578 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11579 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11580 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11581 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11582 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11583 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11584 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11587 if (TARGET_VIS4B)
11589 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11590 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11591 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11592 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11593 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11594 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11596 if (TARGET_ARCH64)
11598 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11599 v8qi, v8qi,
11600 intSI_type_node, 0);
11601 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11602 v4hi, v4hi,
11603 intSI_type_node, 0);
11604 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11605 v2si, v2si,
11606 intSI_type_node, 0);
11608 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11609 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11610 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11611 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11612 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11613 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11614 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11615 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11617 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11618 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11619 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11620 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11621 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11622 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11623 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11624 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11626 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11627 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11628 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11629 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11630 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11631 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11632 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11633 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11636 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11637 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11638 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11639 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11641 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11642 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11643 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11644 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11646 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11647 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11648 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11649 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11651 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11652 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11653 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11654 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11655 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11656 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11658 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11659 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11660 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11661 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11662 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11663 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11666 else
11668 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11669 v8qi, v8qi,
11670 intSI_type_node, 0);
11671 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11672 v4hi, v4hi,
11673 intSI_type_node, 0);
11674 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11675 v2si, v2si,
11676 intSI_type_node, 0);
11678 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11679 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11680 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11681 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11682 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11683 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11684 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11685 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11687 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11688 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11689 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11690 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11691 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11692 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11693 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11694 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11696 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11697 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11698 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11699 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11700 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11701 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11702 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11703 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11706 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11707 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11708 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11709 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11711 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11712 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11713 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11714 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11716 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11717 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11718 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11719 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11721 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11722 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11723 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11724 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11725 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11726 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11728 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11729 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11730 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11731 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11732 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11733 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
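/* As a usage illustration (a sketch, not part of this file): user code
   compiled with the appropriate -mvis* option calls the builtins
   registered above directly, e.g. with an assumed GCC vector typedef:

     typedef unsigned char vec8 __attribute__ ((vector_size (8)));

     vec8 saturating_add (vec8 a, vec8 b)
     {
       return __builtin_vis_fpaddus8 (a, b);
     }

   which expands through sparc_expand_builtin below into a single
   fpaddus8 instruction on VIS4 targets.  */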
11738 /* Implement TARGET_BUILTIN_DECL hook. */
11740 static tree
11741 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11743 if (code >= SPARC_BUILTIN_MAX)
11744 return error_mark_node;
11746 return sparc_builtins[code];
11749 /* Implement TARGET_EXPAND_BUILTIN hook. */
11751 static rtx
11752 sparc_expand_builtin (tree exp, rtx target,
11753 rtx subtarget ATTRIBUTE_UNUSED,
11754 machine_mode tmode ATTRIBUTE_UNUSED,
11755 int ignore ATTRIBUTE_UNUSED)
11757 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11758 enum sparc_builtins code
11759 = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
11760 enum insn_code icode = sparc_builtins_icode[code];
11761 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11762 call_expr_arg_iterator iter;
11763 int arg_count = 0;
11764 rtx pat, op[4];
11765 tree arg;
11767 if (nonvoid)
11769 machine_mode tmode = insn_data[icode].operand[0].mode;
11770 if (!target
11771 || GET_MODE (target) != tmode
11772 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11773 op[0] = gen_reg_rtx (tmode);
11774 else
11775 op[0] = target;
11777 else
11778 op[0] = NULL_RTX;
11780 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11782 const struct insn_operand_data *insn_op;
11783 int idx;
11785 if (arg == error_mark_node)
11786 return NULL_RTX;
11788 arg_count++;
11789 idx = arg_count - !nonvoid;
11790 insn_op = &insn_data[icode].operand[idx];
11791 op[arg_count] = expand_normal (arg);
11793 /* Some of the builtins require constant arguments. We check
11794 for this here. */
11795 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11796 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11797 && arg_count == 3)
11798 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11799 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11800 && arg_count == 2))
11802 if (!check_constant_argument (icode, idx, op[arg_count]))
11803 return const0_rtx;
11806 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11808 if (!address_operand (op[arg_count], SImode))
11810 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11811 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11813 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11816 else if (insn_op->mode == V1DImode
11817 && GET_MODE (op[arg_count]) == DImode)
11818 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11820 else if (insn_op->mode == V1SImode
11821 && GET_MODE (op[arg_count]) == SImode)
11822 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11824 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11825 insn_op->mode))
11826 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11829 switch (arg_count)
11831 case 0:
11832 pat = GEN_FCN (icode) (op[0]);
11833 break;
11834 case 1:
11835 if (nonvoid)
11836 pat = GEN_FCN (icode) (op[0], op[1]);
11837 else
11838 pat = GEN_FCN (icode) (op[1]);
11839 break;
11840 case 2:
11841 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11842 break;
11843 case 3:
11844 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11845 break;
11846 default:
11847 gcc_unreachable ();
11850 if (!pat)
11851 return NULL_RTX;
11853 emit_insn (pat);
11855 return (nonvoid ? op[0] : const0_rtx);
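/* Note that for the FPCMPxSHL and DICTUNPACK families expanded above, the
   trailing shift/selector argument must be a compile-time constant: when
   check_constant_argument rejects a variable argument, the call expands
   to plain constant 0 instead of an instruction.  Schematically (user
   types assumed):

     r = __builtin_vis_dictunpack16 (d, 3);      accepted, literal selector
     r = __builtin_vis_dictunpack16 (d, n);      rejected here, yields 0  */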
11858 /* Return the upper 16 bits of the 8x16 multiplication. */
11860 static int
11861 sparc_vis_mul8x16 (int e8, int e16)
11863 return (e8 * e16 + 128) / 256;
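/* A worked example of the rounding above: with e8 = 200 and e16 = 300,
   e8 * e16 = 60000; adding the bias 128 gives 60128, and dividing by 256
   yields 234, i.e. the 24-bit product scaled down by 2^8 with rounding
   to nearest.  */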
11866 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11867 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11869 static void
11870 sparc_handle_vis_mul8x16 (vec<tree> *n_elts, enum sparc_builtins fncode,
11871 tree inner_type, tree cst0, tree cst1)
11873 unsigned i, num = VECTOR_CST_NELTS (cst0);
11874 int scale;
11876 switch (fncode)
11878 case SPARC_BUILTIN_FMUL8X16:
11879 for (i = 0; i < num; ++i)
11881 int val
11882 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11883 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11884 n_elts->quick_push (build_int_cst (inner_type, val));
11886 break;
11888 case SPARC_BUILTIN_FMUL8X16AU:
11889 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11891 for (i = 0; i < num; ++i)
11893 int val
11894 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11895 scale);
11896 n_elts->quick_push (build_int_cst (inner_type, val));
11898 break;
11900 case SPARC_BUILTIN_FMUL8X16AL:
11901 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11903 for (i = 0; i < num; ++i)
11905 int val
11906 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11907 scale);
11908 n_elts->quick_push (build_int_cst (inner_type, val));
11910 break;
11912 default:
11913 gcc_unreachable ();
11917 /* Implement TARGET_FOLD_BUILTIN hook.
11919 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11920 result of the function call is ignored. NULL_TREE is returned if the
11921 function could not be folded. */
11923 static tree
11924 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11925 tree *args, bool ignore)
11927 enum sparc_builtins code
11928 = (enum sparc_builtins) DECL_MD_FUNCTION_CODE (fndecl);
11929 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11930 tree arg0, arg1, arg2;
11932 if (ignore)
11933 switch (code)
11935 case SPARC_BUILTIN_LDFSR:
11936 case SPARC_BUILTIN_STFSR:
11937 case SPARC_BUILTIN_ALIGNADDR:
11938 case SPARC_BUILTIN_WRGSR:
11939 case SPARC_BUILTIN_BMASK:
11940 case SPARC_BUILTIN_CMASK8:
11941 case SPARC_BUILTIN_CMASK16:
11942 case SPARC_BUILTIN_CMASK32:
11943 break;
11945 default:
11946 return build_zero_cst (rtype);
11949 switch (code)
11951 case SPARC_BUILTIN_FEXPAND:
11952 arg0 = args[0];
11953 STRIP_NOPS (arg0);
11955 if (TREE_CODE (arg0) == VECTOR_CST)
11957 tree inner_type = TREE_TYPE (rtype);
11958 unsigned i;
11960 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11961 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11963 unsigned HOST_WIDE_INT val
11964 = TREE_INT_CST_LOW (VECTOR_CST_ELT (arg0, i));
11965 n_elts.quick_push (build_int_cst (inner_type, val << 4));
11967 return n_elts.build ();
11969 break;
11971 case SPARC_BUILTIN_FMUL8X16:
11972 case SPARC_BUILTIN_FMUL8X16AU:
11973 case SPARC_BUILTIN_FMUL8X16AL:
11974 arg0 = args[0];
11975 arg1 = args[1];
11976 STRIP_NOPS (arg0);
11977 STRIP_NOPS (arg1);
11979 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11981 tree inner_type = TREE_TYPE (rtype);
11982 tree_vector_builder n_elts (rtype, VECTOR_CST_NELTS (arg0), 1);
11983 sparc_handle_vis_mul8x16 (&n_elts, code, inner_type, arg0, arg1);
11984 return n_elts.build ();
11986 break;
11988 case SPARC_BUILTIN_FPMERGE:
11989 arg0 = args[0];
11990 arg1 = args[1];
11991 STRIP_NOPS (arg0);
11992 STRIP_NOPS (arg1);
11994 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11996 tree_vector_builder n_elts (rtype, 2 * VECTOR_CST_NELTS (arg0), 1);
11997 unsigned i;
11998 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
12000 n_elts.quick_push (VECTOR_CST_ELT (arg0, i));
12001 n_elts.quick_push (VECTOR_CST_ELT (arg1, i));
12004 return n_elts.build ();
12006 break;
12008 case SPARC_BUILTIN_PDIST:
12009 case SPARC_BUILTIN_PDISTN:
12010 arg0 = args[0];
12011 arg1 = args[1];
12012 STRIP_NOPS (arg0);
12013 STRIP_NOPS (arg1);
12014 if (code == SPARC_BUILTIN_PDIST)
12016 arg2 = args[2];
12017 STRIP_NOPS (arg2);
12019 else
12020 arg2 = integer_zero_node;
12022 if (TREE_CODE (arg0) == VECTOR_CST
12023 && TREE_CODE (arg1) == VECTOR_CST
12024 && TREE_CODE (arg2) == INTEGER_CST)
12026 bool overflow = false;
12027 widest_int result = wi::to_widest (arg2);
12028 widest_int tmp;
12029 unsigned i;
12031 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
12033 tree e0 = VECTOR_CST_ELT (arg0, i);
12034 tree e1 = VECTOR_CST_ELT (arg1, i);
12036 wi::overflow_type neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
12038 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
12039 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
12040 if (wi::neg_p (tmp))
12041 tmp = wi::neg (tmp, &neg2_ovf);
12042 else
12043 neg2_ovf = wi::OVF_NONE;
12044 result = wi::add (result, tmp, SIGNED, &add2_ovf);
12045 overflow |= ((neg1_ovf != wi::OVF_NONE)
12046 | (neg2_ovf != wi::OVF_NONE)
12047 | (add1_ovf != wi::OVF_NONE)
12048 | (add2_ovf != wi::OVF_NONE));
12051 gcc_assert (!overflow);
12053 return wide_int_to_tree (rtype, result);
12056 default:
12057 break;
12060 return NULL_TREE;
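/* For instance, the PDIST case above folds a sum of absolute differences
   entirely at compile time: with arg0 = {1, 5, 3, 0, ...},
   arg1 = {4, 2, 3, 7, ...} and an accumulator arg2 = 10, the loop adds
   |1-4| + |5-2| + |3-3| + |0-7| + ... = 13 to the accumulator, giving 23
   for the elements shown.  (Illustrative values, not code from this
   file.)  */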
12063 /* ??? This duplicates information provided to the compiler by the
12064 ??? scheduler description. Some day, teach genautomata to output
12065 ??? the latencies and then CSE will just use that. */
12067 static bool
12068 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
12069 int opno ATTRIBUTE_UNUSED,
12070 int *total, bool speed ATTRIBUTE_UNUSED)
12072 int code = GET_CODE (x);
12073 bool float_mode_p = FLOAT_MODE_P (mode);
12075 switch (code)
12077 case CONST_INT:
12078 if (SMALL_INT (x))
12079 *total = 0;
12080 else
12081 *total = 2;
12082 return true;
12084 case CONST_WIDE_INT:
12085 *total = 0;
12086 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
12087 *total += 2;
12088 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
12089 *total += 2;
12090 return true;
12092 case HIGH:
12093 *total = 2;
12094 return true;
12096 case CONST:
12097 case LABEL_REF:
12098 case SYMBOL_REF:
12099 *total = 4;
12100 return true;
12102 case CONST_DOUBLE:
12103 *total = 8;
12104 return true;
12106 case MEM:
12107 /* If outer-code was a sign or zero extension, a cost
12108 of COSTS_N_INSNS (1) was already added in. This is
12109 why we are subtracting it back out. */
12110 if (outer_code == ZERO_EXTEND)
12112 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
12114 else if (outer_code == SIGN_EXTEND)
12116 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
12118 else if (float_mode_p)
12120 *total = sparc_costs->float_load;
12122 else
12124 *total = sparc_costs->int_load;
12127 return true;
12129 case PLUS:
12130 case MINUS:
12131 if (float_mode_p)
12132 *total = sparc_costs->float_plusminus;
12133 else
12134 *total = COSTS_N_INSNS (1);
12135 return false;
12137 case FMA:
12139 rtx sub;
12141 gcc_assert (float_mode_p);
12142 *total = sparc_costs->float_mul;
12144 sub = XEXP (x, 0);
12145 if (GET_CODE (sub) == NEG)
12146 sub = XEXP (sub, 0);
12147 *total += rtx_cost (sub, mode, FMA, 0, speed);
12149 sub = XEXP (x, 2);
12150 if (GET_CODE (sub) == NEG)
12151 sub = XEXP (sub, 0);
12152 *total += rtx_cost (sub, mode, FMA, 2, speed);
12153 return true;
12156 case MULT:
12157 if (float_mode_p)
12158 *total = sparc_costs->float_mul;
12159 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
12160 *total = COSTS_N_INSNS (25);
12161 else
12163 int bit_cost;
12165 bit_cost = 0;
12166 if (sparc_costs->int_mul_bit_factor)
12168 int nbits;
12170 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
12172 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
12173 for (nbits = 0; value != 0; value &= value - 1)
12174 nbits++;
12176 else
12177 nbits = 7;
12179 if (nbits < 3)
12180 nbits = 3;
12181 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
12182 bit_cost = COSTS_N_INSNS (bit_cost);
12185 if (mode == DImode || !TARGET_HARD_MUL)
12186 *total = sparc_costs->int_mulX + bit_cost;
12187 else
12188 *total = sparc_costs->int_mul + bit_cost;
12190 return false;
12192 case ASHIFT:
12193 case ASHIFTRT:
12194 case LSHIFTRT:
12195 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
12196 return false;
12198 case DIV:
12199 case UDIV:
12200 case MOD:
12201 case UMOD:
12202 if (float_mode_p)
12204 if (mode == DFmode)
12205 *total = sparc_costs->float_div_df;
12206 else
12207 *total = sparc_costs->float_div_sf;
12209 else
12211 if (mode == DImode)
12212 *total = sparc_costs->int_divX;
12213 else
12214 *total = sparc_costs->int_div;
12216 return false;
12218 case NEG:
12219 if (! float_mode_p)
12221 *total = COSTS_N_INSNS (1);
12222 return false;
12224 /* FALLTHRU */
12226 case ABS:
12227 case FLOAT:
12228 case UNSIGNED_FLOAT:
12229 case FIX:
12230 case UNSIGNED_FIX:
12231 case FLOAT_EXTEND:
12232 case FLOAT_TRUNCATE:
12233 *total = sparc_costs->float_move;
12234 return false;
12236 case SQRT:
12237 if (mode == DFmode)
12238 *total = sparc_costs->float_sqrt_df;
12239 else
12240 *total = sparc_costs->float_sqrt_sf;
12241 return false;
12243 case COMPARE:
12244 if (float_mode_p)
12245 *total = sparc_costs->float_cmp;
12246 else
12247 *total = COSTS_N_INSNS (1);
12248 return false;
12250 case IF_THEN_ELSE:
12251 if (float_mode_p)
12252 *total = sparc_costs->float_cmove;
12253 else
12254 *total = sparc_costs->int_cmove;
12255 return false;
12257 case IOR:
12258 /* Handle the NAND vector patterns. */
12259 if (sparc_vector_mode_supported_p (mode)
12260 && GET_CODE (XEXP (x, 0)) == NOT
12261 && GET_CODE (XEXP (x, 1)) == NOT)
12263 *total = COSTS_N_INSNS (1);
12264 return true;
12266 else
12267 return false;
12269 default:
12270 return false;
12274 /* Return true if RCLASS is either GENERAL_REGS or I64_REGS. */
12276 static inline bool
12277 general_or_i64_p (reg_class_t rclass)
12279 return (rclass == GENERAL_REGS || rclass == I64_REGS);
12282 /* Implement TARGET_REGISTER_MOVE_COST. */
12284 static int
12285 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
12286 reg_class_t from, reg_class_t to)
12288 bool need_memory = false;
12290 /* This helps postreload CSE to eliminate redundant comparisons. */
12291 if (from == NO_REGS || to == NO_REGS)
12292 return 100;
12294 if (from == FPCC_REGS || to == FPCC_REGS)
12295 need_memory = true;
12296 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
12297 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
12299 if (TARGET_VIS3)
12301 int size = GET_MODE_SIZE (mode);
12302 if (size == 8 || size == 4)
12304 if (! TARGET_ARCH32 || size == 4)
12305 return 4;
12306 else
12307 return 6;
12310 need_memory = true;
12313 if (need_memory)
12315 if (sparc_cpu == PROCESSOR_ULTRASPARC
12316 || sparc_cpu == PROCESSOR_ULTRASPARC3
12317 || sparc_cpu == PROCESSOR_NIAGARA
12318 || sparc_cpu == PROCESSOR_NIAGARA2
12319 || sparc_cpu == PROCESSOR_NIAGARA3
12320 || sparc_cpu == PROCESSOR_NIAGARA4
12321 || sparc_cpu == PROCESSOR_NIAGARA7
12322 || sparc_cpu == PROCESSOR_M8)
12323 return 12;
12325 return 6;
12328 return 2;
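/* Summarizing the cases above: moves within the integer registers or
   within the FP registers cost 2; an FP<->integer move costs 4 (or 6 for
   a 64-bit value in 32-bit mode) when VIS3 provides direct move
   instructions, and otherwise must go through memory, costing 12 on the
   UltraSPARC/Niagara-class processors listed and 6 elsewhere; FPCC
   moves always go through memory.  */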
12331 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
12332 This is achieved by means of a manual dynamic stack space allocation in
12333 the current frame. We make the assumption that SEQ doesn't contain any
12334 function calls, with the possible exception of calls to the GOT helper. */
12336 static void
12337 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
12339 /* We must preserve the lowest 16 words for the register save area. */
12340 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
12341 /* We really need only 2 words of fresh stack space. */
12342 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
12344 rtx slot
12345 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
12346 SPARC_STACK_BIAS + offset));
12348 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
12349 emit_insn (gen_rtx_SET (slot, reg));
12350 if (reg2)
12351 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
12352 reg2));
12353 emit_insn (seq);
12354 if (reg2)
12355 emit_insn (gen_rtx_SET (reg2,
12356 adjust_address (slot, word_mode, UNITS_PER_WORD)));
12357 emit_insn (gen_rtx_SET (reg, slot));
12358 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
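/* The transient frame layout while SEQ runs therefore looks like this
   (a sketch; OFFSET is 16*UNITS_PER_WORD and SIZE the aligned total):

     %sp + SPARC_STACK_BIAS + OFFSET + UNITS_PER_WORD:  saved REG2 (if any)
     %sp + SPARC_STACK_BIAS + OFFSET:                   saved REG
     %sp + SPARC_STACK_BIAS ... :                       16-word save area

   with %sp temporarily decremented by SIZE and restored afterwards.  */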
12361 /* Output the assembler code for a thunk function. THUNK_DECL is the
12362 declaration for the thunk function itself, FUNCTION is the decl for
12363 the target function. DELTA is an immediate constant offset to be
12364 added to THIS. If VCALL_OFFSET is nonzero, the word at address
12365 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
12367 static void
12368 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
12369 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12370 tree function)
12372 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
12373 rtx this_rtx, funexp;
12374 rtx_insn *insn;
12375 unsigned int int_arg_first;
12377 reload_completed = 1;
12378 epilogue_completed = 1;
12380 emit_note (NOTE_INSN_PROLOGUE_END);
12382 if (TARGET_FLAT)
12384 sparc_leaf_function_p = 1;
12386 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12388 else if (flag_delayed_branch)
12390 /* We will emit a regular sibcall below, so we need to instruct
12391 output_sibcall that we are in a leaf function. */
12392 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
12394 /* This will cause final.cc to invoke leaf_renumber_regs so we
12395 must behave as if we were in a not-yet-leafified function. */
12396 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
12398 else
12400 /* We will emit the sibcall manually below, so we will need to
12401 manually spill non-leaf registers. */
12402 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
12404 /* We really are in a leaf function. */
12405 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
12408 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
12409 returns a structure, the structure return pointer is there instead. */
12410 if (TARGET_ARCH64
12411 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12412 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
12413 else
12414 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
12416 /* Add DELTA. When possible use a plain add, otherwise load it into
12417 a register first. */
12418 if (delta)
12420 rtx delta_rtx = GEN_INT (delta);
12422 if (! SPARC_SIMM13_P (delta))
12424 rtx scratch = gen_rtx_REG (Pmode, 1);
12425 emit_move_insn (scratch, delta_rtx);
12426 delta_rtx = scratch;
12429 /* THIS_RTX += DELTA. */
12430 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
12433 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
12434 if (vcall_offset)
12436 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
12437 rtx scratch = gen_rtx_REG (Pmode, 1);
12439 gcc_assert (vcall_offset < 0);
12441 /* SCRATCH = *THIS_RTX. */
12442 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
12444 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
12445 may not have any available scratch register at this point. */
12446 if (SPARC_SIMM13_P (vcall_offset))
12448 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
12449 else if (! fixed_regs[5]
12450 /* The below sequence is made up of at least 2 insns,
12451 while the default method may need only one. */
12452 && vcall_offset < -8192)
12454 rtx scratch2 = gen_rtx_REG (Pmode, 5);
12455 emit_move_insn (scratch2, vcall_offset_rtx);
12456 vcall_offset_rtx = scratch2;
12458 else
12460 rtx increment = GEN_INT (-4096);
12462 /* VCALL_OFFSET is a negative number whose typical range can be
12463 estimated as -32768..0 in 32-bit mode. In almost all cases
12464 it is therefore cheaper to emit multiple add insns than
12465 spilling and loading the constant into a register (at least
12466 6 insns). */
12467 while (! SPARC_SIMM13_P (vcall_offset))
12469 emit_insn (gen_add2_insn (scratch, increment));
12470 vcall_offset += 4096;
12472 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
12475 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
12476 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
12477 gen_rtx_PLUS (Pmode,
12478 scratch,
12479 vcall_offset_rtx)));
12481 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
12482 emit_insn (gen_add2_insn (this_rtx, scratch));
12485 /* Generate a tail call to the target function. */
12486 if (! TREE_USED (function))
12488 assemble_external (function);
12489 TREE_USED (function) = 1;
12491 funexp = XEXP (DECL_RTL (function), 0);
12493 if (flag_delayed_branch)
12495 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12496 insn = emit_call_insn (gen_sibcall (funexp));
12497 SIBLING_CALL_P (insn) = 1;
12499 else
12501 /* The hoops we have to jump through in order to generate a sibcall
12502 without using delay slots... */
12503 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12505 if (flag_pic)
12507 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12508 start_sequence ();
12509 load_got_register (); /* clobbers %o7 */
12510 if (!TARGET_VXWORKS_RTP)
12511 pic_offset_table_rtx = got_register_rtx;
12512 scratch = sparc_legitimize_pic_address (funexp, scratch);
12513 seq = get_insns ();
12514 end_sequence ();
12515 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12517 else if (TARGET_ARCH32)
12519 emit_insn (gen_rtx_SET (scratch,
12520 gen_rtx_HIGH (SImode, funexp)));
12521 emit_insn (gen_rtx_SET (scratch,
12522 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12524 else /* TARGET_ARCH64 */
12526 switch (sparc_code_model)
12528 case CM_MEDLOW:
12529 case CM_MEDMID:
12530 /* The destination can serve as a temporary. */
12531 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12532 break;
12534 case CM_MEDANY:
12535 case CM_EMBMEDANY:
12536 /* The destination cannot serve as a temporary. */
12537 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12538 start_sequence ();
12539 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12540 seq = get_insns ();
12541 end_sequence ();
12542 emit_and_preserve (seq, spill_reg, 0);
12543 break;
12545 default:
12546 gcc_unreachable ();
12550 emit_jump_insn (gen_indirect_jump (scratch));
12553 emit_barrier ();
12555 /* Run just enough of rest_of_compilation to get the insns emitted.
12556 There's not really enough bulk here to make other passes such as
12557 instruction scheduling worth while. */
12558 insn = get_insns ();
12559 shorten_branches (insn);
12560 assemble_start_function (thunk_fndecl, fnname);
12561 final_start_function (insn, file, 1);
12562 final (insn, file, 1);
12563 final_end_function ();
12564 assemble_end_function (thunk_fndecl, fnname);
12566 reload_completed = 0;
12567 epilogue_completed = 0;
12570 /* Return true if sparc_output_mi_thunk would be able to output the
12571 assembler code for the thunk function specified by the arguments
12572 it is passed, and false otherwise. */
12573 static bool
12574 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12575 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12576 HOST_WIDE_INT vcall_offset,
12577 const_tree function ATTRIBUTE_UNUSED)
12579 /* Bound the loop used in the default method above. */
12580 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12583 /* How to allocate a 'struct machine_function'. */
12585 static struct machine_function *
12586 sparc_init_machine_status (void)
12588 return ggc_cleared_alloc<machine_function> ();
12591 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
12593 static unsigned HOST_WIDE_INT
12594 sparc_asan_shadow_offset (void)
12596 return TARGET_ARCH64 ? (HOST_WIDE_INT_1 << 43) : (HOST_WIDE_INT_1 << 29);
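/* With this offset, the generic ASan instrumentation computes shadow
   addresses as (addr >> 3) + offset, i.e. schematically:

     shadow = (addr >> 3) + (1ULL << 43);    64-bit
     shadow = (addr >> 3) + (1ULL << 29);    32-bit

   The >> 3 granularity is the common ASan shadow scale, noted here only
   for context; this hook supplies just the additive constant.  */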
12599 /* This is called from dwarf2out.cc via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12600 We need to emit DTP-relative relocations. */
12602 static void
12603 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12605 switch (size)
12607 case 4:
12608 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12609 break;
12610 case 8:
12611 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12612 break;
12613 default:
12614 gcc_unreachable ();
12616 output_addr_const (file, x);
12617 fputs (")", file);
12620 /* Do whatever processing is required at the end of a file. */
12622 static void
12623 sparc_file_end (void)
12625 /* If we need to emit the special GOT helper function, do so now. */
12626 if (got_helper_needed)
12628 const char *name = XSTR (got_helper_rtx, 0);
12629 #ifdef DWARF2_UNWIND_INFO
12630 bool do_cfi;
12631 #endif
12633 if (USE_HIDDEN_LINKONCE)
12635 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12636 get_identifier (name),
12637 build_function_type_list (void_type_node,
12638 NULL_TREE));
12639 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12640 NULL_TREE, void_type_node);
12641 TREE_PUBLIC (decl) = 1;
12642 TREE_STATIC (decl) = 1;
12643 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12644 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12645 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12646 resolve_unique_section (decl, 0, flag_function_sections);
12647 allocate_struct_function (decl, true);
12648 cfun->is_thunk = 1;
12649 current_function_decl = decl;
12650 init_varasm_status ();
12651 assemble_start_function (decl, name);
12653 else
12655 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12656 switch_to_section (text_section);
12657 if (align > 0)
12658 ASM_OUTPUT_ALIGN (asm_out_file, align);
12659 ASM_OUTPUT_LABEL (asm_out_file, name);
12662 #ifdef DWARF2_UNWIND_INFO
12663 do_cfi = dwarf2out_do_cfi_asm ();
12664 if (do_cfi)
12665 output_asm_insn (".cfi_startproc", NULL);
12666 #endif
12667 if (flag_delayed_branch)
12669 output_asm_insn ("jmp\t%%o7+8", NULL);
12670 output_asm_insn (" add\t%%o7, %0, %0", &got_register_rtx);
12672 else
12674 output_asm_insn ("add\t%%o7, %0, %0", &got_register_rtx);
12675 output_asm_insn ("jmp\t%%o7+8", NULL);
12676 output_asm_insn (" nop", NULL);
12678 #ifdef DWARF2_UNWIND_INFO
12679 if (do_cfi)
12680 output_asm_insn (".cfi_endproc", NULL);
12681 #endif
12684 if (NEED_INDICATE_EXEC_STACK)
12685 file_end_indicate_exec_stack ();
12687 #ifdef TARGET_SOLARIS
12688 solaris_file_end ();
12689 #endif
12692 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12693 /* Implement TARGET_MANGLE_TYPE. */
12695 static const char *
12696 sparc_mangle_type (const_tree type)
12698 if (TARGET_ARCH32
12699 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12700 && TARGET_LONG_DOUBLE_128)
12701 return "g";
12703 /* For all other types, use normal C++ mangling. */
12704 return NULL;
12706 #endif
12708 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12709 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12710 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
12712 void
12713 sparc_emit_membar_for_model (enum memmodel model,
12714 int load_store, int before_after)
12716 /* Bits for the MEMBAR mmask field. */
12717 const int LoadLoad = 1;
12718 const int StoreLoad = 2;
12719 const int LoadStore = 4;
12720 const int StoreStore = 8;
12722 int mm = 0, implied = 0;
12724 switch (sparc_memory_model)
12726 case SMM_SC:
12727 /* Sequential Consistency. All memory transactions are immediately
12728 visible in sequential execution order. No barriers needed. */
12729 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12730 break;
12732 case SMM_TSO:
12733 /* Total Store Ordering: all memory transactions with store semantics
12734 are followed by an implied StoreStore. */
12735 implied |= StoreStore;
12737 /* If we're not looking for a raw barrier (before+after), then atomic
12738 operations get the benefit of being both load and store. */
12739 if (load_store == 3 && before_after == 1)
12740 implied |= StoreLoad;
12741 /* FALLTHRU */
12743 case SMM_PSO:
12744 /* Partial Store Ordering: all memory transactions with load semantics
12745 are followed by an implied LoadLoad | LoadStore. */
12746 implied |= LoadLoad | LoadStore;
12748 /* If we're not looking for a raw barrier (before+after), then atomic
12749 operations get the benefit of being both load and store. */
12750 if (load_store == 3 && before_after == 2)
12751 implied |= StoreLoad | StoreStore;
12752 /* FALLTHRU */
12754 case SMM_RMO:
12755 /* Relaxed Memory Ordering: no implicit bits. */
12756 break;
12758 default:
12759 gcc_unreachable ();
12762 if (before_after & 1)
12764 if (is_mm_release (model) || is_mm_acq_rel (model)
12765 || is_mm_seq_cst (model))
12767 if (load_store & 1)
12768 mm |= LoadLoad | StoreLoad;
12769 if (load_store & 2)
12770 mm |= LoadStore | StoreStore;
12773 if (before_after & 2)
12775 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12776 || is_mm_seq_cst (model))
12778 if (load_store & 1)
12779 mm |= LoadLoad | LoadStore;
12780 if (load_store & 2)
12781 mm |= StoreLoad | StoreStore;
12785 /* Remove the bits implied by the system memory model. */
12786 mm &= ~implied;
12788 /* For raw barriers (before+after), always emit a barrier.
12789 This will become a compile-time barrier if needed. */
12790 if (mm || before_after == 3)
12791 emit_insn (gen_membar (GEN_INT (mm)));
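/* A worked example: under TSO, the barrier after a seq_cst store
   (BEFORE_AFTER == 2, LOAD_STORE == 2) requests StoreLoad | StoreStore;
   TSO implies StoreStore (and LoadLoad | LoadStore via the PSO
   fall-through), so only "membar #StoreLoad" is emitted.  Under SMM_SC
   everything is implied and no barrier is needed at all.  */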
12794 /* Expand code to perform an 8 or 16-bit compare and swap by doing a 32-bit
12795 compare and swap on the word containing the byte or half-word. */
12797 static void
12798 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12799 rtx oldval, rtx newval)
12801 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12802 rtx addr = gen_reg_rtx (Pmode);
12803 rtx off = gen_reg_rtx (SImode);
12804 rtx oldv = gen_reg_rtx (SImode);
12805 rtx newv = gen_reg_rtx (SImode);
12806 rtx oldvalue = gen_reg_rtx (SImode);
12807 rtx newvalue = gen_reg_rtx (SImode);
12808 rtx res = gen_reg_rtx (SImode);
12809 rtx resv = gen_reg_rtx (SImode);
12810 rtx memsi, val, mask, cc;
12812 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12814 if (Pmode != SImode)
12815 addr1 = gen_lowpart (SImode, addr1);
12816 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12818 memsi = gen_rtx_MEM (SImode, addr);
12819 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12820 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12822 val = copy_to_reg (memsi);
12824 emit_insn (gen_rtx_SET (off,
12825 gen_rtx_XOR (SImode, off,
12826 GEN_INT (GET_MODE (mem) == QImode
12827 ? 3 : 2))));
12829 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12831 if (GET_MODE (mem) == QImode)
12832 mask = force_reg (SImode, GEN_INT (0xff));
12833 else
12834 mask = force_reg (SImode, GEN_INT (0xffff));
12836 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12838 emit_insn (gen_rtx_SET (val,
12839 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12840 val)));
12842 oldval = gen_lowpart (SImode, oldval);
12843 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12845 newval = gen_lowpart_common (SImode, newval);
12846 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12848 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12850 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12852 rtx_code_label *end_label = gen_label_rtx ();
12853 rtx_code_label *loop_label = gen_label_rtx ();
12854 emit_label (loop_label);
12856 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12858 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12860 emit_move_insn (bool_result, const1_rtx);
12862 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12864 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12866 emit_insn (gen_rtx_SET (resv,
12867 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12868 res)));
12870 emit_move_insn (bool_result, const0_rtx);
12872 cc = gen_compare_reg_1 (NE, resv, val);
12873 emit_insn (gen_rtx_SET (val, resv));
12875 /* Use cbranchcc4 to separate the compare and branch! */
12876 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12877 cc, const0_rtx, loop_label));
12879 emit_label (end_label);
12881 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12883 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12885 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
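/* A C-level sketch of the sequence above (big-endian lane selection and
   a hypothetical 32-bit cas32 primitive assumed; QImode shown):

     uint32_t *wp  = (uint32_t *) ((uintptr_t) p & ~3);
     int shift     = (((uintptr_t) p & 3) ^ 3) << 3;
     uint32_t mask = 0xffu << shift;
     uint32_t bg   = *wp & ~mask;                       the other bytes
     uint32_t o    = ((uint32_t) oldval << shift) & mask;
     uint32_t n    = ((uint32_t) newval << shift) & mask;
     for (;;)
       {
         uint32_t res = cas32 (wp, bg | o, bg | n);
         if (res == (bg | o))                           success
           break;
         if ((res & ~mask) == bg)                       genuine mismatch
           break;
         bg = res & ~mask;                              bytes moved: retry
       }

   and the final result is extracted as (res & mask) >> shift.  */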
12888 /* Expand code to perform a compare-and-swap. */
12890 void
12891 sparc_expand_compare_and_swap (rtx operands[])
12893 rtx bval, retval, mem, oldval, newval;
12894 machine_mode mode;
12895 enum memmodel model;
12897 bval = operands[0];
12898 retval = operands[1];
12899 mem = operands[2];
12900 oldval = operands[3];
12901 newval = operands[4];
12902 model = (enum memmodel) INTVAL (operands[6]);
12903 mode = GET_MODE (mem);
12905 sparc_emit_membar_for_model (model, 3, 1);
12907 if (reg_overlap_mentioned_p (retval, oldval))
12908 oldval = copy_to_reg (oldval);
12910 if (mode == QImode || mode == HImode)
12911 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12912 else
12914 rtx (*gen) (rtx, rtx, rtx, rtx);
12915 rtx x;
12917 if (mode == SImode)
12918 gen = gen_atomic_compare_and_swapsi_1;
12919 else
12920 gen = gen_atomic_compare_and_swapdi_1;
12921 emit_insn (gen (retval, mem, oldval, newval));
12923 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12924 if (x != bval)
12925 convert_move (bval, x, 1);
12928 sparc_emit_membar_for_model (model, 3, 2);
12931 void
12932 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12934 rtx t_1, t_2, t_3;
12936 sel = gen_lowpart (DImode, sel);
12937 switch (vmode)
12939 case E_V2SImode:
12940 /* inp = xxxxxxxAxxxxxxxB */
12941 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12942 NULL_RTX, 1, OPTAB_DIRECT);
12943 /* t_1 = ....xxxxxxxAxxx. */
12944 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12945 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12946 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12947 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12948 /* sel = .......B */
12949 /* t_1 = ...A.... */
12950 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12951 /* sel = ...A...B */
12952 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12953 /* sel = AAAABBBB * 4 */
12954 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12955 /* sel = { A*4, A*4+1, A*4+2, ... } */
12956 break;
12958 case E_V4HImode:
12959 /* inp = xxxAxxxBxxxCxxxD */
12960 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12961 NULL_RTX, 1, OPTAB_DIRECT);
12962 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12963 NULL_RTX, 1, OPTAB_DIRECT);
12964 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12965 NULL_RTX, 1, OPTAB_DIRECT);
12966 /* t_1 = ..xxxAxxxBxxxCxx */
12967 /* t_2 = ....xxxAxxxBxxxC */
12968 /* t_3 = ......xxxAxxxBxx */
12969 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12970 GEN_INT (0x07),
12971 NULL_RTX, 1, OPTAB_DIRECT);
12972 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12973 GEN_INT (0x0700),
12974 NULL_RTX, 1, OPTAB_DIRECT);
12975 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12976 GEN_INT (0x070000),
12977 NULL_RTX, 1, OPTAB_DIRECT);
12978 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12979 GEN_INT (0x07000000),
12980 NULL_RTX, 1, OPTAB_DIRECT);
12981 /* sel = .......D */
12982 /* t_1 = .....C.. */
12983 /* t_2 = ...B.... */
12984 /* t_3 = .A...... */
12985 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12986 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12987 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12988 /* sel = .A.B.C.D */
12989 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12990 /* sel = AABBCCDD * 2 */
12991 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12992 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12993 break;
12995 case E_V8QImode:
12996 /* input = xAxBxCxDxExFxGxH */
12997 sel = expand_simple_binop (DImode, AND, sel,
12998 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12999 | 0x0f0f0f0f),
13000 NULL_RTX, 1, OPTAB_DIRECT);
13001 /* sel = .A.B.C.D.E.F.G.H */
13002 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
13003 NULL_RTX, 1, OPTAB_DIRECT);
13004 /* t_1 = ..A.B.C.D.E.F.G. */
13005 sel = expand_simple_binop (DImode, IOR, sel, t_1,
13006 NULL_RTX, 1, OPTAB_DIRECT);
13007 /* sel = .AABBCCDDEEFFGGH */
13008 sel = expand_simple_binop (DImode, AND, sel,
13009 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
13010 | 0xff00ff),
13011 NULL_RTX, 1, OPTAB_DIRECT);
13012 /* sel = ..AB..CD..EF..GH */
13013 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
13014 NULL_RTX, 1, OPTAB_DIRECT);
13015 /* t_1 = ....AB..CD..EF.. */
13016 sel = expand_simple_binop (DImode, IOR, sel, t_1,
13017 NULL_RTX, 1, OPTAB_DIRECT);
13018 /* sel = ..ABABCDCDEFEFGH */
13019 sel = expand_simple_binop (DImode, AND, sel,
13020 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
13021 NULL_RTX, 1, OPTAB_DIRECT);
13022 /* sel = ....ABCD....EFGH */
13023 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
13024 NULL_RTX, 1, OPTAB_DIRECT);
13025 /* t_1 = ........ABCD.... */
13026 sel = gen_lowpart (SImode, sel);
13027 t_1 = gen_lowpart (SImode, t_1);
13028 break;
13030 default:
13031 gcc_unreachable ();
13034 /* Always perform the final addition/merge within the bmask insn. */
13035 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
13038 /* Implement TARGET_VEC_PERM_CONST. */
13040 static bool
13041 sparc_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
13042 rtx target, rtx op0, rtx op1,
13043 const vec_perm_indices &sel)
13045 if (vmode != op_mode)
13046 return false;
13048 if (!TARGET_VIS2)
13049 return false;
13051 /* All 8-byte permutes are supported. */
13052 if (!target)
13053 return GET_MODE_SIZE (vmode) == 8;
13055 /* Force target-independent code to convert constant permutations on other
13056 modes down to V8QI. Rely on this to avoid the complexity of the byte
13057 order of the permutation. */
13058 if (vmode != V8QImode)
13059 return false;
13061 rtx nop0 = force_reg (vmode, op0);
13062 if (op0 == op1)
13063 op1 = nop0;
13064 op0 = nop0;
13065 op1 = force_reg (vmode, op1);
13067 unsigned int i, mask;
13068 for (i = mask = 0; i < 8; ++i)
13069 mask |= (sel[i] & 0xf) << (28 - i*4);
13070 rtx mask_rtx = force_reg (SImode, gen_int_mode (mask, SImode));
13072 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), mask_rtx, const0_rtx));
13073 emit_insn (gen_bshufflev8qi_vis (target, op0, op1));
13074 return true;
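/* A worked example of the mask construction above: the V8QI interleave
   selector { 0, 8, 1, 9, 2, 10, 3, 11 } packs each index (mod 16) into
   successive nibbles from bit 28 down, producing 0x08192a3b; bmask
   deposits this in %gsr and the following bshuffle picks bytes
   0,8,1,9,... from the concatenated op0:op1 pair.  */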
13077 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
13079 static bool
13080 sparc_frame_pointer_required (void)
13082 /* If the stack pointer is dynamically modified in the function, it cannot
13083 serve as the frame pointer. */
13084 if (cfun->calls_alloca)
13085 return true;
13087 /* If the function receives nonlocal gotos, it needs to save the frame
13088 pointer in the nonlocal_goto_save_area object. */
13089 if (cfun->has_nonlocal_label)
13090 return true;
13092 /* In flat mode, that's it. */
13093 if (TARGET_FLAT)
13094 return false;
13096 /* Otherwise, the frame pointer is required if the function isn't leaf, but
13097 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
13098 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
13101 /* The way this is structured, we can't eliminate SFP in favor of SP
13102 if the frame pointer is required: we want to use the SFP->HFP elimination
13103 in that case. But the test in update_eliminables doesn't know we are
13104 assuming below that we only do the former elimination. */
13106 static bool
13107 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
13109 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
13112 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
13113 they won't be allocated. */
13115 static void
13116 sparc_conditional_register_usage (void)
13118 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
13119 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13120 /* If the user has passed -f{fixed,call-{used,saved}}-g5 */
13121 /* then honor it. */
13122 if (TARGET_ARCH32 && fixed_regs[5])
13123 fixed_regs[5] = 1;
13124 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
13125 fixed_regs[5] = 0;
13126 if (! TARGET_V9)
13128 int regno;
13129 for (regno = SPARC_FIRST_V9_FP_REG;
13130 regno <= SPARC_LAST_V9_FP_REG;
13131 regno++)
13132 fixed_regs[regno] = 1;
13133 /* %fcc0 is used by v8 and v9. */
13134 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
13135 regno <= SPARC_LAST_V9_FCC_REG;
13136 regno++)
13137 fixed_regs[regno] = 1;
13139 if (! TARGET_FPU)
13141 int regno;
13142 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
13143 fixed_regs[regno] = 1;
13145 /* If the user has passed -f{fixed,call-{used,saved}}-g2 */
13146 /* then honor it. Likewise with g3 and g4. */
13147 if (fixed_regs[2] == 2)
13148 fixed_regs[2] = ! TARGET_APP_REGS;
13149 if (fixed_regs[3] == 2)
13150 fixed_regs[3] = ! TARGET_APP_REGS;
13151 if (TARGET_ARCH32 && fixed_regs[4] == 2)
13152 fixed_regs[4] = ! TARGET_APP_REGS;
13153 else if (TARGET_CM_EMBMEDANY)
13154 fixed_regs[4] = 1;
13155 else if (fixed_regs[4] == 2)
13156 fixed_regs[4] = 0;
13158 /* Disable leaf function optimization in flat mode. */
13159 if (TARGET_FLAT)
13160 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
13162 if (TARGET_VIS)
13163 global_regs[SPARC_GSR_REG] = 1;
13166 /* Implement TARGET_USE_PSEUDO_PIC_REG. */
13168 static bool
13169 sparc_use_pseudo_pic_reg (void)
13171 return !TARGET_VXWORKS_RTP && flag_pic;
13174 /* Implement TARGET_INIT_PIC_REG. */
13176 static void
13177 sparc_init_pic_reg (void)
13179 edge entry_edge;
13180 rtx_insn *seq;
13182 /* In PIC mode, we need to always initialize the PIC register if optimization
13183 is enabled, because we are called from IRA and LRA may later force things
13184 to the constant pool for optimization purposes. */
13185 if (!flag_pic || (!crtl->uses_pic_offset_table && !optimize))
13186 return;
13188 start_sequence ();
13189 load_got_register ();
13190 if (!TARGET_VXWORKS_RTP)
13191 emit_move_insn (pic_offset_table_rtx, got_register_rtx);
13192 seq = get_insns ();
13193 end_sequence ();
13195 entry_edge = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13196 insert_insn_on_edge (seq, entry_edge);
13197 commit_one_edge_insertion (entry_edge);
13200 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
13202 - We can't load constants into FP registers.
13203 - We can't load FP constants into integer registers when soft-float,
13204 because there is no soft-float pattern with a r/F constraint.
13205 - We can't load FP constants into integer registers for TFmode unless
13206 it is 0.0L, because there is no movtf pattern with a r/F constraint.
13207 - Try and reload integer constants (symbolic or otherwise) back into
13208 registers directly, rather than having them dumped to memory. */
13210 static reg_class_t
13211 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
13213 machine_mode mode = GET_MODE (x);
13214 if (CONSTANT_P (x))
13216 if (FP_REG_CLASS_P (rclass)
13217 || rclass == GENERAL_OR_FP_REGS
13218 || rclass == GENERAL_OR_EXTRA_FP_REGS
13219 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
13220 || (mode == TFmode && ! const_zero_operand (x, mode)))
13221 return NO_REGS;
13223 if (GET_MODE_CLASS (mode) == MODE_INT)
13224 return GENERAL_REGS;
13226 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13228 if (! FP_REG_CLASS_P (rclass)
13229 || !(const_zero_operand (x, mode)
13230 || const_all_ones_operand (x, mode)))
13231 return NO_REGS;
13235 if (TARGET_VIS3
13236 && ! TARGET_ARCH64
13237 && (rclass == EXTRA_FP_REGS
13238 || rclass == GENERAL_OR_EXTRA_FP_REGS))
13240 int regno = true_regnum (x);
13242 if (SPARC_INT_REG_P (regno))
13243 return (rclass == EXTRA_FP_REGS
13244 ? FP_REGS : GENERAL_OR_FP_REGS);
13247 return rclass;
13250 /* Return true if we use LRA instead of reload pass. */
13252 static bool
13253 sparc_lra_p (void)
13255 return TARGET_LRA;
13258 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
13259 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
13261 const char *
13262 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
13264 char mulstr[32];
13266 gcc_assert (! TARGET_ARCH64);
13268 if (sparc_check_64 (operands[1], insn) <= 0)
13269 output_asm_insn ("srl\t%L1, 0, %L1", operands);
13270 if (which_alternative == 1)
13271 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
13272 if (GET_CODE (operands[2]) == CONST_INT)
13274 if (which_alternative == 1)
13276 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13277 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
13278 output_asm_insn (mulstr, operands);
13279 return "srlx\t%L0, 32, %H0";
13281 else
13283 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13284 output_asm_insn ("or\t%L1, %3, %3", operands);
13285 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
13286 output_asm_insn (mulstr, operands);
13287 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13288 return "mov\t%3, %L0";
13291 else if (rtx_equal_p (operands[1], operands[2]))
13293 if (which_alternative == 1)
13295 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13296 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
13297 output_asm_insn (mulstr, operands);
13298 return "srlx\t%L0, 32, %H0";
13300 else
13302 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13303 output_asm_insn ("or\t%L1, %3, %3", operands);
13304 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
13305 output_asm_insn (mulstr, operands);
13306 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13307 return "mov\t%3, %L0";
13310 if (sparc_check_64 (operands[2], insn) <= 0)
13311 output_asm_insn ("srl\t%L2, 0, %L2", operands);
13312 if (which_alternative == 1)
13314 output_asm_insn ("or\t%L1, %H1, %H1", operands);
13315 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
13316 output_asm_insn ("or\t%L2, %L1, %L1", operands);
13317 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
13318 output_asm_insn (mulstr, operands);
13319 return "srlx\t%L0, 32, %H0";
13321 else
13323 output_asm_insn ("sllx\t%H1, 32, %3", operands);
13324 output_asm_insn ("sllx\t%H2, 32, %4", operands);
13325 output_asm_insn ("or\t%L1, %3, %3", operands);
13326 output_asm_insn ("or\t%L2, %4, %4", operands);
13327 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
13328 output_asm_insn (mulstr, operands);
13329 output_asm_insn ("srlx\t%3, 32, %H0", operands);
13330 return "mov\t%3, %L0";
/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn.  MODE
   and INNER_MODE are the modes describing TARGET.  */

static void
vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
                      machine_mode inner_mode)
{
  rtx t1, final_insn, sel;
  int bmask;

  t1 = gen_reg_rtx (mode);

  elt = convert_modes (SImode, inner_mode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  switch (mode)
    {
    case E_V2SImode:
      final_insn = gen_bshufflev2si_vis (target, t1, t1);
      bmask = 0x45674567;
      break;
    case E_V4HImode:
      final_insn = gen_bshufflev4hi_vis (target, t1, t1);
      bmask = 0x67676767;
      break;
    case E_V8QImode:
      final_insn = gen_bshufflev8qi_vis (target, t1, t1);
      bmask = 0x77777777;
      break;
    default:
      gcc_unreachable ();
    }

  sel = force_reg (SImode, GEN_INT (bmask));
  emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
  emit_insn (final_insn);
}
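/* A note on the masks, assuming the usual VIS2 semantics where BMASK
   stores its result in GSR.mask and BSHUFFLE then picks bytes out of
   the 16-byte concatenation of its two sources, one 4-bit index per
   result byte: the element was placed in the low 4 bytes of T1, so
   0x77777777 replicates byte 7 (the QI element) into all 8 bytes,
   0x67676767 replicates bytes 6-7 (the HI element) into all 4
   halfwords, and 0x45674567 replicates bytes 4-7 (the SI element)
   into both words.  */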
/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn.  */

static void
vector_init_fpmerge (rtx target, rtx elt)
{
  rtx t1, t2, t2_low, t3, t3_low;

  t1 = gen_reg_rtx (V4QImode);
  elt = convert_modes (SImode, QImode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  t2 = gen_reg_rtx (V8QImode);
  t2_low = gen_lowpart (V4QImode, t2);
  emit_insn (gen_fpmerge_vis (t2, t1, t1));

  t3 = gen_reg_rtx (V8QImode);
  t3_low = gen_lowpart (V4QImode, t3);
  emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));

  emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
}
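/* Sketch of the replication, assuming FPMERGE interleaves the bytes of
   its two V4QI sources into a V8QI result: merging {0,0,0,e} with
   itself yields {0,0,0,0,0,0,e,e}; merging the low half of that with
   itself yields {0,0,0,0,e,e,e,e}; one more merge of the low half
   gives {e,e,e,e,e,e,e,e}.  Each step doubles the number of copies of
   the element.  */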
/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn.  */

static void
vector_init_faligndata (rtx target, rtx elt)
{
  rtx t1 = gen_reg_rtx (V4HImode);
  int i;

  elt = convert_modes (SImode, HImode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
                                  force_reg (SImode, GEN_INT (6)),
                                  const0_rtx));

  for (i = 0; i < 4; i++)
    emit_insn (gen_faligndatav4hi_vis (target, t1, target));
}
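/* Rough idea, assuming ALIGNADDR leaves an alignment offset of 6 in the
   GSR and FALIGNDATA extracts 8 bytes at that offset from the 16-byte
   concatenation of its sources: each iteration shifts TARGET right by
   one halfword and inserts the element (the low halfword of T1) at the
   top, so four iterations fill all four halfwords with the element.  */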
/* Emit code to initialize TARGET to values for individual fields VALS.  */

void
sparc_expand_vector_init (rtx target, rtx vals)
{
  const machine_mode mode = GET_MODE (target);
  const machine_mode inner_mode = GET_MODE_INNER (mode);
  const int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  bool all_same = true;
  rtx mem;

  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
        n_var++;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
    {
      if (GET_MODE_SIZE (inner_mode) == 4)
        {
          emit_move_insn (gen_lowpart (SImode, target),
                          gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
          return;
        }
      else if (GET_MODE_SIZE (inner_mode) == 8)
        {
          emit_move_insn (gen_lowpart (DImode, target),
                          gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
          return;
        }
    }
  else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
           && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
    {
      emit_move_insn (gen_highpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
      emit_move_insn (gen_lowpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
      return;
    }

  if (all_same && GET_MODE_SIZE (mode) == 8)
    {
      if (TARGET_VIS2)
        {
          vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
          return;
        }
      if (mode == V8QImode)
        {
          vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
          return;
        }
      if (mode == V4HImode)
        {
          vector_init_faligndata (target, XVECEXP (vals, 0, 0));
          return;
        }
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
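/* For example, initializing a V4HI vector whose four elements are the
   same non-constant value takes the bshuffle path under -mvis2 and the
   faligndata path under plain -mvis; mixed variable elements fall
   through to the stack temporary, stored one element at a time and
   reloaded as a whole vector.  */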
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}
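/* For instance, on 32-bit VIS3 a DImode move between an integer
   register pair and an upper FP register (EXTRA_FP_REGS) is routed
   through a lower FP register first, at an extra cost of 2, since the
   4-byte fp<->int move instructions only reach the lower FP
   registers.  */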
/* Implement TARGET_SECONDARY_MEMORY_NEEDED.

   On SPARC, when VIS3 is not available, it is not possible to directly
   move data between GENERAL_REGS and FP_REGS.  */

static bool
sparc_secondary_memory_needed (machine_mode mode, reg_class_t class1,
                               reg_class_t class2)
{
  return ((FP_REG_CLASS_P (class1) != FP_REG_CLASS_P (class2))
          && (! TARGET_VIS3
              || GET_MODE_SIZE (mode) > 8
              || GET_MODE_SIZE (mode) < 4));
}
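/* Examples: an SImode or DImode move between GENERAL_REGS and FP_REGS
   needs a stack slot without VIS3 but not with it, since VIS3 adds
   direct fp<->int moves for 4- and 8-byte values; a QImode (size < 4)
   or TFmode (size > 8) move between the two classes needs a stack slot
   either way.  */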
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.

   get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
   because the movsi and movsf patterns don't handle r/f moves.
   For v8 we copy the default definition.  */

static machine_mode
sparc_secondary_memory_needed_mode (machine_mode mode)
{
  if (TARGET_ARCH64)
    {
      if (GET_MODE_BITSIZE (mode) < 32)
        return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
      return mode;
    }
  else
    {
      if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
        return mode_for_size (BITS_PER_WORD,
                              GET_MODE_CLASS (mode), 0).require ();
      return mode;
    }
}
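/* So QImode and HImode are widened to SImode on either architecture,
   but SImode itself stays SImode on v9 instead of being widened to the
   64-bit word mode, which is the whole point of the override.  */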
/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE.  OPERANDS[1] is the operator of the condition.  */

bool
sparc_expand_conditional_move (machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
    return false;

  if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  cmp_mode = GET_MODE (XEXP (cmp, 0));
  rc = GET_CODE (cmp);

  dst = operands[0];
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
        dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
        rc = reverse_condition_maybe_unordered (rc);
      else
        rc = reverse_condition (rc);
    }

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (dst,
                          gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}
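/* The conditional move is always emitted in the form "dst = cond ?
   src : dst".  So for dst = (x > y) ? a : b with dst distinct from
   both a and b, b is first copied into dst and a single "move a into
   dst if x > y" is emitted; if dst happens to be a, the condition is
   reversed instead and b becomes the moved value.  */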
/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
   into OPERANDS[0] in MODE, depending on the outcome of the comparison of
   OPERANDS[4] and OPERANDS[5].  OPERANDS[3] is the operator of the condition.
   FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
   code to be used for the condition mask.  */

void
sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
{
  enum rtx_code code = signed_condition (GET_CODE (operands[3]));
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
  if (code == LT || code == GE)
    {
      code = swap_condition (code);
      std::swap (cop0, cop1);
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
                         gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
                         fcode);

  cmask = gen_rtx_UNSPEC (DImode,
                          gen_rtvec (2, mask, gsr),
                          ccode);

  bshuf = gen_rtx_UNSPEC (mode,
                          gen_rtvec (3, operands[1], operands[2], gsr),
                          UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (mask, fcmp));
  emit_insn (gen_rtx_SET (gsr, cmask));
  emit_insn (gen_rtx_SET (operands[0], bshuf));
}
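/* The expansion is thus a fixed three-insn skeleton: a VIS compare
   producing an element mask, a BMASK-style insn transferring that mask
   into the GSR, and a BSHUFFLE selecting each element of the result
   from either OPERANDS[1] or OPERANDS[2] according to the mask.  LT
   and GE are first rewritten as GT and LE with swapped comparison
   operands, presumably because the VIS compare instructions only come
   in GT/LE/EQ/NE forms.  */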
/* On the SPARC, any mode which naturally allocates into the single float
   registers should return 4 here.  */

unsigned int
sparc_regmode_natural_size (machine_mode mode)
{
  const enum mode_class cl = GET_MODE_CLASS (mode);

  if ((cl == MODE_FLOAT || cl == MODE_VECTOR_INT) && GET_MODE_SIZE (mode) <= 4)
    return 4;

  return UNITS_PER_WORD;
}
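/* E.g. SFmode and the 4-byte integer vector modes report a natural
   size of 4, while DFmode and the scalar integer modes report
   UNITS_PER_WORD (4 on 32-bit, 8 on 64-bit).  */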
/* Implement TARGET_HARD_REGNO_NREGS.

   On SPARC, ordinary registers hold 32 bits worth; this means both
   integer and floating point registers.  On v9, integer regs hold 64
   bits worth; floating point regs hold 32 bits worth (this includes the
   new fp regs as even the odd ones are included in the hard register
   count).  */

static unsigned int
sparc_hard_regno_nregs (unsigned int regno, machine_mode mode)
{
  if (regno == SPARC_GSR_REG)
    return 1;
  if (TARGET_ARCH64)
    {
      if (SPARC_INT_REG_P (regno) || regno == FRAME_POINTER_REGNUM)
        return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
      return CEIL (GET_MODE_SIZE (mode), 4);
    }
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}
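/* So a DFmode value occupies one integer register but two FP registers
   on v9 (CEIL (8, 8) vs CEIL (8, 4)), and two registers of either kind
   on 32-bit, where UNITS_PER_WORD is 4.  */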
/* Implement TARGET_HARD_REGNO_MODE_OK.

   ??? Because of the funny way we pass parameters we should allow certain
   ??? types of float/complex values to be in integer registers during
   ??? RTL generation.  This only matters on arch32.  */

static bool
sparc_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
}
/* Implement TARGET_MODES_TIEABLE_P.

   For V9 we have to deal with the fact that only the lower 32 floating
   point registers are 32-bit addressable.  */

static bool
sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  enum mode_class mclass1, mclass2;
  unsigned short size1, size2;

  if (mode1 == mode2)
    return true;

  mclass1 = GET_MODE_CLASS (mode1);
  mclass2 = GET_MODE_CLASS (mode2);
  if (mclass1 != mclass2)
    return false;

  if (! TARGET_V9)
    return true;

  /* Classes are the same and we are V9 so we have to deal with upper
     vs. lower floating point registers.  If one of the modes is a
     4-byte mode, and the other is not, we have to mark them as not
     tieable because only the lower 32 floating point registers are
     addressable 32-bits at a time.

     We can't just test explicitly for SFmode, otherwise we won't
     cover the vector mode cases properly.  */

  if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
    return true;

  size1 = GET_MODE_SIZE (mode1);
  size2 = GET_MODE_SIZE (mode2);
  if ((size1 > 4 && size2 == 4)
      || (size2 > 4 && size1 == 4))
    return false;

  return true;
}
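/* For example, on V9 SFmode does not tie with DFmode (a 4-byte versus
   an 8-byte float mode) and V4QImode does not tie with V8QImode, while
   DFmode and DImode already fail the class check, and SImode ties with
   DImode because MODE_INT is exempted above.  */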
/* Implement TARGET_CSTORE_MODE.  */

static scalar_int_mode
sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 ? DImode : SImode);
}
/* Return the compound expression made of T1 and T2.  */

static inline tree
compound_expr (tree t1, tree t2)
{
  return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
}
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_FPU)
    return;

  const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
  const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;

  /* We generate the equivalent of feholdexcept (&fenv_var):

       unsigned int fenv_var;
       __builtin_store_fsr (&fenv_var);

       unsigned int tmp1_var;
       tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);

       __builtin_load_fsr (&tmp1_var);  */

  tree fenv_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (fenv_var) = 1;
  tree fenv_addr = build_fold_addr_expr (fenv_var);
  tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
  tree hold_stfsr
    = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
              build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);

  tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (tmp1_var) = 1;
  tree masked_fenv_var
    = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
              build_int_cst (unsigned_type_node,
                             ~(accrued_exception_mask | trap_enable_mask)));
  tree hold_mask
    = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
              NULL_TREE, NULL_TREE);

  tree tmp1_addr = build_fold_addr_expr (tmp1_var);
  tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
  tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);

  *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);

  /* We reload the value of tmp1_var to clear the exceptions:

       __builtin_load_fsr (&tmp1_var);  */

  *clear = build_call_expr (ldfsr, 1, tmp1_addr);

  /* We generate the equivalent of feupdateenv (&fenv_var):

       unsigned int tmp2_var;
       __builtin_store_fsr (&tmp2_var);

       __builtin_load_fsr (&fenv_var);

       if (SPARC_LOW_FE_EXCEPT_VALUES)
         tmp2_var >>= 5;
       __atomic_feraiseexcept ((int) tmp2_var);  */

  tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
  TREE_ADDRESSABLE (tmp2_var) = 1;
  tree tmp2_addr = build_fold_addr_expr (tmp2_var);
  tree update_stfsr
    = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
              build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);

  tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);

  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  tree update_call
    = build_call_expr (atomic_feraiseexcept, 1,
                       fold_convert (integer_type_node, tmp2_var));

  if (SPARC_LOW_FE_EXCEPT_VALUES)
    {
      tree shifted_tmp2_var
        = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
                  build_int_cst (unsigned_type_node, 5));
      tree update_shift
        = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
      update_call = compound_expr (update_shift, update_call);
    }

  *update
    = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
}
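/* The two masks above presumably correspond to the V9 FSR layout, in
   which the accrued exception (aexc) field sits in bits 5-9 and the
   trap enable (TEM) field in bits 23-27: the hold sequence clears both
   before reloading the FSR, and the update sequence re-raises whatever
   accrued exceptions the protected region produced.  */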
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  Borrowed from the PA port.

   SImode loads to floating-point registers are not zero-extended.
   The definition for LOAD_EXTEND_OP specifies that integer loads
   narrower than BITS_PER_WORD will be zero-extended.  As a result,
   we inhibit changes from SImode unless they are to a mode that is
   identical in size.

   Likewise for SFmode, since word-mode paradoxical subregs are
   problematic on big-endian architectures.  */

static bool
sparc_can_change_mode_class (machine_mode from, machine_mode to,
                             reg_class_t rclass)
{
  if (TARGET_ARCH64
      && GET_MODE_SIZE (from) == 4
      && GET_MODE_SIZE (to) != 4)
    return !reg_classes_intersect_p (rclass, FP_REGS);
  return true;
}
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
sparc_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST)
    return MAX (align, FASTEST_ALIGNMENT);
  return align;
}
/* Implement TARGET_ZERO_CALL_USED_REGS.

   Generate a sequence of instructions that zero registers specified by
   NEED_ZEROED_HARDREGS.  Return the ZEROED_HARDREGS that are actually
   zeroed.  */

static HARD_REG_SET
sparc_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
{
  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
      {
        /* Do not touch the CC registers or the FP registers if no VIS.  */
        if (regno >= SPARC_FCC_REG
            || (regno >= SPARC_FIRST_FP_REG && !TARGET_VIS))
          CLEAR_HARD_REG_BIT (need_zeroed_hardregs, regno);

        /* Do not access the odd upper FP registers individually.  */
        else if (regno >= SPARC_FIRST_V9_FP_REG && (regno & 1))
          ;

        /* Use the most natural mode for the registers, which is not given by
           regno_reg_rtx/reg_raw_mode for the FP registers on the SPARC.  */
        else
          {
            machine_mode mode;
            rtx reg;

            if (regno < SPARC_FIRST_FP_REG)
              {
                reg = regno_reg_rtx[regno];
                mode = GET_MODE (reg);
              }
            else
              {
                mode = regno < SPARC_FIRST_V9_FP_REG ? SFmode : DFmode;
                reg = gen_raw_REG (mode, regno);
              }

            emit_move_insn (reg, CONST0_RTX (mode));
          }
      }

  return need_zeroed_hardregs;
}

#include "gt-sparc.h"