/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "output.h"
#include "insn-attr.h"
#include "explow.h"
#include "expr.h"
#include "debug.h"
#include "common/common-target.h"
#include "gimplify.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"
/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;
  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     not variable.  */
  const int int_mul_bit_factor;

  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
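
/* A minimal illustrative sketch (not part of the port proper) of how the
   variable multiply cost described above could be evaluated for a known
   constant operand RS1.  The helper name is hypothetical and the block is
   deliberately compiled out.  */
#if 0
static int
example_int_mul_cost (const struct processor_costs *costs,
		      HOST_WIDE_INT rs1, bool xmul_p)
{
  /* For negative values the latency tracks the highest clear bit,
     which is the highest set bit of the complement.  */
  int highest_bit = floor_log2 (rs1 < 0 ? ~rs1 : rs1);
  if (highest_bit < 3)
    highest_bit = 3;

  const int base = xmul_p ? costs->int_mulX : costs->int_mul;

  /* A zero bit factor means the multiply cost is fixed.  */
  if (costs->int_mul_bit_factor == 0)
    return base;

  return base + (highest_bit - 3) / costs->int_mul_bit_factor;
}
#endif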
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon3_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (14), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (22), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (35), /* idiv */
  COSTS_N_INSNS (35), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara7_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (51), /* idiv, average of 42 - 61 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs m8_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (26), /* fdivs */
  COSTS_N_INSNS (30), /* fdivd */
  COSTS_N_INSNS (33), /* fsqrts */
  COSTS_N_INSNS (41), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (10), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (57), /* udiv/sdiv */
  COSTS_N_INSNS (30), /* udivx/sdivx */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   anything branches between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size cfun->machine->frame_size
#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
#define sparc_frame_base_reg cfun->machine->frame_base_reg
#define sparc_frame_base_offset cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static int function_arg_slotno (const CUMULATIVE_ARGS *, machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);
static int hypersparc_adjust_cost (rtx_insn *, int, rtx_insn *, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx_insn *);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static int sparc_register_move_cost (machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static machine_mode sparc_promote_function_mode (const_tree, machine_mode,
						 int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static machine_mode sparc_preferred_simd_mode (machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_lra_p (void);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, machine_mode, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   machine_mode,
					   secondary_reload_info *);
static machine_mode sparc_cstore_mode (enum insn_code icode);
static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
static bool sparc_fixed_condition_code_regs (unsigned int *, unsigned int *);
static unsigned int sparc_min_arithmetic_precision (void);

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
     do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif

/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sparc_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#ifdef TARGET_THREAD_SSP_OFFSET
#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
#endif

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P sparc_lra_p

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sparc_fixed_condition_code_regs

#undef TARGET_MIN_ARITHMETIC_PRECISION
#define TARGET_MIN_ARITHMETIC_PRECISION sparc_min_arithmetic_precision

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

struct gcc_target targetm = TARGET_INITIALIZER;

/* Return the memory reference contained in X if any, zero otherwise.  */

static rtx
mem_ref (rtx x)
{
  if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
    x = XEXP (x, 0);

  if (MEM_P (x))
    return x;

  return NULL_RTX;
}
/* We use a machine specific pass to enable workarounds for errata.

   We need to have the (essentially) final form of the insn stream in order
   to properly detect the various hazards.  Therefore, this machine specific
   pass runs as late as possible.  */

/* True if INSN is an md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN) \
  (NONDEBUG_INSN_P (INSN) \
   && GET_CODE (PATTERN (INSN)) != USE \
   && GET_CODE (PATTERN (INSN)) != CLOBBER)
static unsigned int
sparc_do_work_around_errata (void)
{
  rtx_insn *insn, *next;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Now look for specific patterns in the insn stream.  */
  for (insn = get_insns (); insn; insn = next)
    {
      bool insert_nop = false;
      rtx set;

      /* Look into the instruction in a delay slot.  */
      if (NONJUMP_INSN_P (insn))
	if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	  insn = seq->insn (1);

      /* Look for either of these two sequences:

	 Sequence A:
	 1. store of word size or less (e.g. st / stb / sth / stf)
	 2. any single instruction that is not a load or store
	 3. any store instruction (e.g. st / stb / sth / stf / std / stdf)

	 Sequence B:
	 1. store of double word size (e.g. std / stdf)
	 2. any store instruction (e.g. st / stb / sth / stf / std / stdf)  */
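
      /* For instance (an illustrative sketch, not lifted from real
	 compiler output):

	   Sequence A                  Sequence B
	     st   %g1, [%o0]             std  %f0, [%o0]
	     add  %o1, 1, %o1            st   %g2, [%o1]
	     st   %g2, [%o1]

	 When either pattern is recognized, the code below breaks it by
	 emitting a NOP right after the first store.  */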
      if (sparc_fix_b2bst
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && MEM_P (SET_DEST (set)))
	{
	  /* Sequence B begins with a double-word store.  */
	  bool seq_b = GET_MODE_SIZE (GET_MODE (SET_DEST (set))) == 8;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;

	  for (after = next, i = 0; i < 2; i++)
	    {
	      /* Skip empty assembly statements.  */
	      if ((GET_CODE (PATTERN (after)) == UNSPEC_VOLATILE)
		  || (USEFUL_INSN_P (after)
		      && (asm_noperands (PATTERN (after)) >= 0)
		      && !strcmp (decode_asm_operands (PATTERN (after),
						       NULL, NULL, NULL,
						       NULL, NULL), "")))
		after = next_active_insn (after);
	      if (!after)
		break;

	      /* If the insn is a branch, then it cannot be problematic.  */
	      if (!NONJUMP_INSN_P (after)
		  || GET_CODE (PATTERN (after)) == SEQUENCE)
		break;

	      /* Sequence B is only two instructions long.  */
	      if (seq_b)
		{
		  /* Add NOP if followed by a store.  */
		  if ((set = single_set (after)) != NULL_RTX
		      && MEM_P (SET_DEST (set)))
		    insert_nop = true;

		  /* Otherwise it is ok.  */
		  break;
		}

	      /* If the second instruction is a load or a store,
		 then the sequence cannot be problematic.  */
	      if (i == 0)
		{
		  if (((set = single_set (after)) != NULL_RTX)
		      && (MEM_P (SET_DEST (set)) || MEM_P (SET_SRC (set))))
		    break;

		  after = next_active_insn (after);
		  if (!after)
		    break;
		}

	      /* Add NOP if third instruction is a store.  */
	      if (i == 1
		  && ((set = single_set (after)) != NULL_RTX)
		  && MEM_P (SET_DEST (set)))
		insert_nop = true;
	    }
	}
      else
      /* Look for a single-word load into an odd-numbered FP register.  */
      if (sparc_fix_at697f
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	  && MEM_P (SET_SRC (set))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) > 31
	  && REGNO (SET_DEST (set)) % 2 != 0)
	{
	  /* The wrong dependency is on the enclosing double register.  */
	  const unsigned int x = REGNO (SET_DEST (set)) - 1;
	  unsigned int src1, src2, dest;
	  int code;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  extract_insn (next);
	  code = INSN_CODE (next);

	  switch (code)
	    {
	    case CODE_FOR_adddf3:
	    case CODE_FOR_subdf3:
	    case CODE_FOR_muldf3:
	    case CODE_FOR_divdf3:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      src2 = REGNO (recog_data.operand[2]);
	      if (src1 != src2)
		{
		  /* Case [1-4]:
		       ld [address], %fx+1
		       FPOPd %f{x,y}, %f{y,x}, %f{x,y}  */
		  if ((src1 == x || src2 == x)
		      && (dest == src1 || dest == src2))
		    insert_nop = true;
		}
	      else
		{
		  /* Case 5:
		       ld [address], %fx+1
		       FPOPd %fx, %fx, %fx  */
		  if (src1 == x
		      && dest == src1
		      && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
		    insert_nop = true;
		}
	      break;

	    case CODE_FOR_sqrtdf2:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      /* Case 6:
		   ld [address], %fx+1
		   fsqrtd %fx, %fx  */
	      if (src1 == x && dest == src1)
		insert_nop = true;
	      break;

	    default:
	      break;
	    }
	}

      /* Look for a single-word load into an integer register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
	       && mem_ref (SET_SRC (set)) != NULL_RTX
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) < 32)
	{
	  /* There is no problem if the second memory access has a data
	     dependency on the first single-cycle load.  */
	  rtx x = SET_DEST (set);

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second memory access to/from an integer register.  */
	  if ((set = single_set (next)) != NULL_RTX)
	    {
	      rtx src = SET_SRC (set);
	      rtx dest = SET_DEST (set);
	      rtx mem;

	      /* LDD is affected.  */
	      if ((mem = mem_ref (src)) != NULL_RTX
		  && REG_P (dest)
		  && REGNO (dest) < 32
		  && !reg_mentioned_p (x, XEXP (mem, 0)))
		insert_nop = true;

	      /* STD is *not* affected.  */
	      else if (MEM_P (dest)
		       && GET_MODE_SIZE (GET_MODE (dest)) <= 4
		       && (src == CONST0_RTX (GET_MODE (dest))
			   || (REG_P (src)
			       && REGNO (src) < 32
			       && REGNO (src) != REGNO (x)))
		       && !reg_mentioned_p (x, XEXP (dest, 0)))
		insert_nop = true;
	    }
	}

      /* Look for a single-word load/operation into an FP register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) > 31)
	{
	  /* Number of instructions in the problematic window.  */
	  const int n_insns = 4;
	  /* The problematic combination is with the sibling FP register.  */
	  const unsigned int x = REGNO (SET_DEST (set));
	  const unsigned int y = x ^ 1;
	  rtx_insn *after;
	  int i;

	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  /* If the insn is a branch, then it cannot be problematic.  */
	  if (!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second load/operation into the sibling FP register.  */
	  if (!((set = single_set (next)) != NULL_RTX
		&& GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
		&& REG_P (SET_DEST (set))
		&& REGNO (SET_DEST (set)) == y))
	    continue;

	  /* Look for a (possible) store from the FP register in the next N
	     instructions, but bail out if it is again modified or if there
	     is a store from the sibling FP register before this store.  */
	  for (after = next, i = 0; i < n_insns; i++)
	    {
	      bool branch_p;

	      after = next_active_insn (after);
	      if (!after)
		break;

	      /* This is a branch with an empty delay slot.  */
	      if (!NONJUMP_INSN_P (after))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = NULL;
		}
	      /* This is a branch with a filled delay slot.  */
	      else if (rtx_sequence *seq =
			 dyn_cast <rtx_sequence *> (PATTERN (after)))
		{
		  if (++i == n_insns)
		    break;
		  branch_p = true;
		  after = seq->insn (1);
		}
	      /* This is a regular instruction.  */
	      else
		branch_p = false;

	      if (after && (set = single_set (after)) != NULL_RTX)
		{
		  const rtx src = SET_SRC (set);
		  const rtx dest = SET_DEST (set);
		  const unsigned int size = GET_MODE_SIZE (GET_MODE (dest));

		  /* If the FP register is again modified before the store,
		     then the store isn't affected.  */
		  if (REG_P (dest)
		      && (REGNO (dest) == x
			  || (REGNO (dest) == y && size == 8)))
		    break;

		  if (MEM_P (dest) && REG_P (src))
		    {
		      /* If there is a store from the sibling FP register
			 before the store, then the store is not affected.  */
		      if (REGNO (src) == y || (REGNO (src) == x && size == 8))
			break;

		      /* Otherwise, the store is affected.  */
		      if (REGNO (src) == x && size == 4)
			{
			  insert_nop = true;
			  break;
			}
		    }
		}

	      /* If we have a branch in the first M instructions, then we
		 cannot see the (M+2)th instruction so we play safe.  */
	      if (branch_p && i <= (n_insns - 2))
		{
		  insert_nop = true;
		  break;
		}
	    }
	}

      else
	next = NEXT_INSN (insn);

      if (insert_nop)
	emit_insn_before (gen_nop (), next);
    }

  return 0;
}
namespace {

const pass_data pass_data_work_around_errata =
{
  RTL_PASS, /* type */
  "errata", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_work_around_errata : public rtl_opt_pass
{
public:
  pass_work_around_errata(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_work_around_errata, ctxt)
    {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return sparc_fix_at697f || sparc_fix_ut699 || sparc_fix_b2bst;
    }

  virtual unsigned int execute (function *)
    {
      return sparc_do_work_around_errata ();
    }

}; // class pass_work_around_errata

} // anon namespace

rtl_opt_pass *
make_pass_work_around_errata (gcc::context *ctxt)
{
  return new pass_work_around_errata (ctxt);
}
/* Helpers for TARGET_DEBUG_OPTIONS.  */
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FSMULD)
    fprintf (stderr, "FSMULD ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_VIS4)
    fprintf (stderr, "VIS4 ");
  if (flags & MASK_VIS4B)
    fprintf (stderr, "VIS4B ");
  if (flags & MASK_CBCOND)
    fprintf (stderr, "CBCOND ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
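
/* With -mdebug=options, the helpers above produce lines of the form
   (the flag set shown here is illustrative only, not real output):

     Initial target_flags: (00000100) [ FPU ]  */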
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_leon3, PROCESSOR_LEON3 },
    { TARGET_CPU_leon3v7, PROCESSOR_LEON3V7 },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { TARGET_CPU_niagara7, PROCESSOR_NIAGARA7 },
    { TARGET_CPU_m8, PROCESSOR_M8 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the enum processor_type in sparc-opts.h.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA|MASK_FSMULD, 0 },
    { "cypress", MASK_ISA|MASK_FSMULD, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8 },
    { "leon", MASK_ISA|MASK_FSMULD, MASK_V8|MASK_LEON },
    { "leon3", MASK_ISA, MASK_V8|MASK_LEON3 },
    { "leon3v7", MASK_ISA|MASK_FSMULD, MASK_LEON3 },
    { "sparclite", MASK_ISA|MASK_FSMULD, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA|MASK_FSMULD, MASK_SPARCLITE },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA|MASK_FSMULD, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA|MASK_FSMULD, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
    /* UltraSPARC M7 */
    { "niagara7", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC },
    /* UltraSPARC M8 */
    { "m8", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS4|MASK_FMAF|MASK_CBCOND|MASK_SUBXC|MASK_VIS4B }
  };
  const struct cpu_table *cpu;
  unsigned int i;
  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
	{
	  bool invert;
	  int mask;

	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (! strcmp (q, "all"))
	    mask = MASK_DEBUG_ALL;
	  else if (! strcmp (q, "options"))
	    mask = MASK_DEBUG_OPTIONS;
	  else
	    error ("unknown -mdebug-%s switch", q);

	  if (invert)
	    sparc_debug &= ~mask;
	  else
	    sparc_debug |= mask;
	}
    }
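
  /* For example, "-mdebug=all,!options" would first set every debug
     mask and then clear MASK_DEBUG_OPTIONS again (an illustrative
     invocation, not one required anywhere).  */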
  /* Enable the FsMULd instruction by default if not explicitly specified by
     the user.  It may be later disabled by the CPU (explicitly or not).  */
  if (TARGET_FPU && !(target_flags_explicit & MASK_FSMULD))
    target_flags |= MASK_FSMULD;

  if (TARGET_DEBUG_OPTIONS)
    {
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (!TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_ARCH64 && !TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32-bit systems");
    }

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  /* Set the default CPU if no -mcpu option was specified.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  /* Set the default CPU if no -mtune option was specified.  */
  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;
  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
    {
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);
    }

  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
		   & ~(MASK_FMAF | MASK_VIS3)
#endif
#ifndef HAVE_AS_SPARC4
		   & ~MASK_CBCOND
#endif
#ifndef HAVE_AS_SPARC5_VIS4
		   & ~(MASK_VIS4 | MASK_SUBXC)
#endif
#ifndef HAVE_AS_SPARC6
		   & ~(MASK_VIS4B)
#endif
#ifndef HAVE_AS_LEON
		   & ~(MASK_LEON | MASK_LEON3)
#endif
		   & ~(target_flags_explicit & MASK_FEATURES)
		   );
  /* -mvis2 implies -mvis.  */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis.  */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* -mvis4 implies -mvis3, -mvis2 and -mvis.  */
  if (TARGET_VIS4)
    target_flags |= MASK_VIS3 | MASK_VIS2 | MASK_VIS;

  /* -mvis4b implies -mvis4, -mvis3, -mvis2 and -mvis.  */
  if (TARGET_VIS4B)
    target_flags |= MASK_VIS4 | MASK_VIS3 | MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, -mvis4, -mvis4b, -mfmaf and -mfsmuld
     if FPU is disabled.  */
  if (!TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_VIS4
		      | MASK_VIS4B | MASK_FMAF | MASK_FSMULD);

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available; -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit.  */
  if (TARGET_VIS && !TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;

  /* Use the deprecated v8 insns for sparc64 in 32-bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9 and makes no sense in 64-bit mode.  */
  if (!TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32-bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Use LRA instead of reload, unless otherwise instructed.  */
  if (!(target_flags_explicit & MASK_LRA))
    target_flags |= MASK_LRA;

  /* Enable the back-to-back store errata workaround for LEON3FT.  */
  if (sparc_fix_ut699 || sparc_fix_ut700 || sparc_fix_gr712rc)
    sparc_fix_b2bst = 1;

  /* Disable FsMULd for the UT699 since it doesn't work correctly.  */
  if (sparc_fix_ut699)
    target_flags &= ~MASK_FSMULD;
  /* Supply a default value for align_functions.  */
  if (align_functions == 0)
    {
      if (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4)
	align_functions = 32;
      else if (sparc_cpu == PROCESSOR_NIAGARA7
	       || sparc_cpu == PROCESSOR_M8)
	align_functions = 64;
    }

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_LEON3:
    case PROCESSOR_LEON3V7:
      sparc_costs = &leon3_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara4_costs;
      break;
    case PROCESSOR_NIAGARA7:
      sparc_costs = &niagara7_costs;
      break;
    case PROCESSOR_M8:
      sparc_costs = &m8_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    }
  if (sparc_memory_model == SMM_DEFAULT)
    {
      /* Choose the memory model for the operating system.  */
      enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
      if (os_default != SMM_DEFAULT)
	sparc_memory_model = os_default;
      /* Choose the most relaxed model for the processor.  */
      else if (TARGET_V9)
	sparc_memory_model = SMM_RMO;
      else if (TARGET_LEON3)
	sparc_memory_model = SMM_TSO;
      else if (TARGET_LEON)
	sparc_memory_model = SMM_SC;
      else if (TARGET_V8)
	sparc_memory_model = SMM_PSO;
      else
	sparc_memory_model = SMM_SC;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);
1766 /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
1767 can run at the same time. More important, it is the threshold
1768 defining when additional prefetches will be dropped by the
1769 hardware.
1771 The UltraSPARC-III features a documented prefetch queue with a
1772 size of 8. Additional prefetches issued by the cpu are
1773 dropped.
1775 Niagara processors are different. In these processors prefetches
1776 are handled much like regular loads. The L1 miss buffer is 32
1777 entries, but prefetches start getting affected when 30 entries
1778 become occupied. That occupation could be a mix of regular loads
1779 and prefetches though. And that buffer is shared by all threads.
1780 Once the threshold is reached, if the core is running a single
1781 thread the prefetch will retry. If more than one thread is
1782 running, the prefetch will be dropped.
1784 All this makes it very difficult to determine how many
1785 prefetches can be issued simultaneously, even in a
1786 single-threaded program. Experimental results show that setting
1787 this parameter to 32 works well when the number of threads is not
1788 high. */
1789 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
1790 ((sparc_cpu == PROCESSOR_ULTRASPARC
1791 || sparc_cpu == PROCESSOR_NIAGARA
1792 || sparc_cpu == PROCESSOR_NIAGARA2
1793 || sparc_cpu == PROCESSOR_NIAGARA3
1794 || sparc_cpu == PROCESSOR_NIAGARA4)
1795 ? 2
1796 : (sparc_cpu == PROCESSOR_ULTRASPARC3
1797 ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
1798 || sparc_cpu == PROCESSOR_M8)
1799 ? 32 : 3))),
1800 global_options.x_param_values,
1801 global_options_set.x_param_values);
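/* For reference, the conditional above resolves to 2 simultaneous
   prefetches for UltraSPARC and Niagara-1/2/3/4, 8 for UltraSPARC-III,
   32 for Niagara-7 and M8, and 3 for everything else.  An equivalent
   sketch, for readability only (the 'prefetches' variable is purely
   illustrative):

     switch (sparc_cpu)
       {
       case PROCESSOR_ULTRASPARC:
       case PROCESSOR_NIAGARA:
       case PROCESSOR_NIAGARA2:
       case PROCESSOR_NIAGARA3:
       case PROCESSOR_NIAGARA4:
         prefetches = 2;
         break;
       case PROCESSOR_ULTRASPARC3:
         prefetches = 8;
         break;
       case PROCESSOR_NIAGARA7:
       case PROCESSOR_M8:
         prefetches = 32;
         break;
       default:
         prefetches = 3;
         break;
       }  */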
1803 /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
1804 bytes.
1806 The Oracle SPARC Architecture (previously the UltraSPARC
1807 Architecture) specification states that when a PREFETCH[A]
1808 instruction is executed an implementation-specific amount of data
1809 is prefetched, and that it is at least 64 bytes long (aligned to
1810 at least 64 bytes).
1812 However, this is not correct. The M7 (and implementations prior
1813 to that) does not guarantee a 64B prefetch into a cache if the
1814 line size is smaller. A single cache line is all that is ever
1815 prefetched. So for the M7, where the L1D$ has 32B lines and the
1816 L2D$ and L3 have 64B lines, a prefetch will prefetch 64B into the
1817 L2 and L3, but only 32B are brought into the L1D$. (Assuming it
1818 is a read_n prefetch, which is the only type which allocates to
1819 the L1.) */
1820 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
1821 (sparc_cpu == PROCESSOR_M8
1822 ? 64 : 32),
1823 global_options.x_param_values,
1824 global_options_set.x_param_values);
1826 /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
1827 Harvard level-1 caches) in kilobytes. Both UltraSPARC and
1828 Niagara processors feature an L1D$ of 16KB. */
1829 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
1830 ((sparc_cpu == PROCESSOR_ULTRASPARC
1831 || sparc_cpu == PROCESSOR_ULTRASPARC3
1832 || sparc_cpu == PROCESSOR_NIAGARA
1833 || sparc_cpu == PROCESSOR_NIAGARA2
1834 || sparc_cpu == PROCESSOR_NIAGARA3
1835 || sparc_cpu == PROCESSOR_NIAGARA4
1836 || sparc_cpu == PROCESSOR_NIAGARA7
1837 || sparc_cpu == PROCESSOR_M8)
1838 ? 16 : 64),
1839 global_options.x_param_values,
1840 global_options_set.x_param_values);
1843 /* PARAM_L2_CACHE_SIZE is the size of the L2 in kilobytes. Note
1844 that 512 is the default in params.def. */
1845 maybe_set_param_value (PARAM_L2_CACHE_SIZE,
1846 ((sparc_cpu == PROCESSOR_NIAGARA4
1847 || sparc_cpu == PROCESSOR_M8)
1848 ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
1849 ? 256 : 512)),
1850 global_options.x_param_values,
1851 global_options_set.x_param_values);
1854 /* Disable save slot sharing for call-clobbered registers by default.
1855 The IRA sharing algorithm works on single registers only and this
1856 pessimizes for double floating-point registers. */
1857 if (!global_options_set.x_flag_ira_share_save_slots)
1858 flag_ira_share_save_slots = 0;
1860 /* Only enable REE by default in 64-bit mode where it helps to eliminate
1861 redundant 32-to-64-bit extensions. */
1862 if (!global_options_set.x_flag_ree && TARGET_ARCH32)
1863 flag_ree = 0;
1866 /* Miscellaneous utilities. */
1868 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
1869 or branch on register contents instructions. */
1872 v9_regcmp_p (enum rtx_code code)
1874 return (code == EQ || code == NE || code == GE || code == LT
1875 || code == LE || code == GT);
1878 /* Nonzero if OP is a floating point constant which can
1879 be loaded into an integer register using a single
1880 sethi instruction. */
1883 fp_sethi_p (rtx op)
1885 if (GET_CODE (op) == CONST_DOUBLE)
1887 long i;
1889 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
1890 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
1893 return 0;
1896 /* Nonzero if OP is a floating point constant which can
1897 be loaded into an integer register using a single
1898 mov instruction. */
1901 fp_mov_p (rtx op)
1903 if (GET_CODE (op) == CONST_DOUBLE)
1905 long i;
1907 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
1908 return SPARC_SIMM13_P (i);
1911 return 0;
1914 /* Nonzero if OP is a floating point constant which can
1915 be loaded into an integer register using a high/losum
1916 instruction sequence. */
1919 fp_high_losum_p (rtx op)
1921 /* The constraints calling this should only be in
1922 SFmode move insns, so any constant which cannot
1923 be moved using a single insn will do. */
1924 if (GET_CODE (op) == CONST_DOUBLE)
1926 long i;
1928 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), i);
1929 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
1932 return 0;
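/* A worked example for the three predicates above: the SFmode constant
   1.0f has the bit pattern 0x3f800000.  That value is far outside the
   signed 13-bit immediate range, but its low 10 bits are all zero, so
   fp_sethi_p accepts it (a single sethi materializes it) while
   fp_mov_p and fp_high_losum_p reject it.  */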
1935 /* Return true if the address of LABEL can be loaded by means of the
1936 mov{si,di}_pic_label_ref patterns in PIC mode. */
1938 static bool
1939 can_use_mov_pic_label_ref (rtx label)
1941 /* VxWorks does not impose a fixed gap between segments; the run-time
1942 gap can be different from the object-file gap. We therefore can't
1943 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1944 are absolutely sure that X is in the same segment as the GOT.
1945 Unfortunately, the flexibility of linker scripts means that we
1946 can't be sure of that in general, so assume that GOT-relative
1947 accesses are never valid on VxWorks. */
1948 if (TARGET_VXWORKS_RTP)
1949 return false;
1951 /* Similarly, if the label is non-local, it might end up being placed
1952 in a different section than the current one; now mov_pic_label_ref
1953 requires the label and the code to be in the same section. */
1954 if (LABEL_REF_NONLOCAL_P (label))
1955 return false;
1957 /* Finally, if we are reordering basic blocks and partitioning into hot
1958 and cold sections, this might happen for any label. */
1959 if (flag_reorder_blocks_and_partition)
1960 return false;
1962 return true;
1965 /* Expand a move instruction. Return true if all work is done. */
1967 bool
1968 sparc_expand_move (machine_mode mode, rtx *operands)
1970 /* Handle sets of MEM first. */
1971 if (GET_CODE (operands[0]) == MEM)
1973 /* 0 is a register (or a pair of registers) on SPARC. */
1974 if (register_or_zero_operand (operands[1], mode))
1975 return false;
1977 if (!reload_in_progress)
1979 operands[0] = validize_mem (operands[0]);
1980 operands[1] = force_reg (mode, operands[1]);
1984 /* Fixup TLS cases. */
1985 if (TARGET_HAVE_TLS
1986 && CONSTANT_P (operands[1])
1987 && sparc_tls_referenced_p (operands [1]))
1989 operands[1] = sparc_legitimize_tls_address (operands[1]);
1990 return false;
1993 /* Fixup PIC cases. */
1994 if (flag_pic && CONSTANT_P (operands[1]))
1996 if (pic_address_needs_scratch (operands[1]))
1997 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
1999 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
2000 if (GET_CODE (operands[1]) == LABEL_REF
2001 && can_use_mov_pic_label_ref (operands[1]))
2003 if (mode == SImode)
2005 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
2006 return true;
2009 if (mode == DImode)
2011 gcc_assert (TARGET_ARCH64);
2012 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
2013 return true;
2017 if (symbolic_operand (operands[1], mode))
2019 operands[1]
2020 = sparc_legitimize_pic_address (operands[1],
2021 reload_in_progress
2022 ? operands[0] : NULL_RTX);
2023 return false;
2027 /* If we are trying to toss an integer constant into FP registers,
2028 or loading a FP or vector constant, force it into memory. */
2029 if (CONSTANT_P (operands[1])
2030 && REG_P (operands[0])
2031 && (SPARC_FP_REG_P (REGNO (operands[0]))
2032 || SCALAR_FLOAT_MODE_P (mode)
2033 || VECTOR_MODE_P (mode)))
2035 /* emit_group_store will send such bogosity to us when it is
2036 not storing directly into memory. So fix this up to avoid
2037 crashes in output_constant_pool. */
2038 if (operands [1] == const0_rtx)
2039 operands[1] = CONST0_RTX (mode);
2041 /* We can clear FP registers or set them to all-ones if TARGET_VIS,
2042 and can always do so for other regs. */
2043 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
2044 && (const_zero_operand (operands[1], mode)
2045 || const_all_ones_operand (operands[1], mode)))
2046 return false;
2048 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
2049 /* We are able to build any SF constant in integer registers
2050 with at most 2 instructions. */
2051 && (mode == SFmode
2052 /* And any DF constant in integer registers if needed. */
2053 || (mode == DFmode && !can_create_pseudo_p ())))
2054 return false;
2056 operands[1] = force_const_mem (mode, operands[1]);
2057 if (!reload_in_progress)
2058 operands[1] = validize_mem (operands[1]);
2059 return false;
2062 /* Accept non-constants and valid constants unmodified. */
2063 if (!CONSTANT_P (operands[1])
2064 || GET_CODE (operands[1]) == HIGH
2065 || input_operand (operands[1], mode))
2066 return false;
2068 switch (mode)
2070 case QImode:
2071 /* All QImode constants require only one insn, so proceed. */
2072 break;
2074 case HImode:
2075 case SImode:
2076 sparc_emit_set_const32 (operands[0], operands[1]);
2077 return true;
2079 case DImode:
2080 /* input_operand should have filtered out 32-bit mode. */
2081 sparc_emit_set_const64 (operands[0], operands[1]);
2082 return true;
2084 case TImode:
2086 rtx high, low;
2087 /* TImode isn't available in 32-bit mode. */
2088 split_double (operands[1], &high, &low);
2089 emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
2090 high));
2091 emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
2092 low));
2094 return true;
2096 default:
2097 gcc_unreachable ();
2100 return false;
2103 /* Load OP1, a 32-bit constant, into OP0, a register.
2104 We know it can't be done in one insn when we get
2105 here; the move expander guarantees this. */
2107 static void
2108 sparc_emit_set_const32 (rtx op0, rtx op1)
2110 machine_mode mode = GET_MODE (op0);
2111 rtx temp = op0;
2113 if (can_create_pseudo_p ())
2114 temp = gen_reg_rtx (mode);
2116 if (GET_CODE (op1) == CONST_INT)
2118 gcc_assert (!small_int_operand (op1, mode)
2119 && !const_high_operand (op1, mode));
2121 /* Emit them as real moves instead of a HIGH/LO_SUM;
2122 this way CSE can see everything and reuse intermediate
2123 values if it wants. */
2124 emit_insn (gen_rtx_SET (temp, GEN_INT (INTVAL (op1)
2125 & ~(HOST_WIDE_INT) 0x3ff)));
2127 emit_insn (gen_rtx_SET (op0,
2128 gen_rtx_IOR (mode, temp,
2129 GEN_INT (INTVAL (op1) & 0x3ff))));
2131 else
2133 /* A symbol, emit in the traditional way. */
2134 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, op1)));
2135 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (mode, temp, op1)));
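/* Worked example: loading the constant 0x12345 emits
     temp = 0x12000          (the sethi part: the constant with its
                              low 10 bits masked off)
     op0  = temp | 0x345     (the or part)
   which mirrors a sethi/or pair at the assembly level.  */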
2139 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
2140 If TEMP is nonzero, we are forbidden to use any other scratch
2141 registers. Otherwise, we are allowed to generate them as needed.
2143 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
2144 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
2146 void
2147 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
2149 rtx temp1, temp2, temp3, temp4, temp5;
2150 rtx ti_temp = 0;
2152 if (temp && GET_MODE (temp) == TImode)
2154 ti_temp = temp;
2155 temp = gen_rtx_REG (DImode, REGNO (temp));
2158 /* SPARC-V9 code-model support. */
2159 switch (sparc_cmodel)
2161 case CM_MEDLOW:
2162 /* The range spanned by all instructions in the object is less
2163 than 2^31 bytes (2GB) and the distance from any instruction
2164 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2165 than 2^31 bytes (2GB).
2167 The executable must be in the low 4TB of the virtual address
2168 space.
2170 sethi %hi(symbol), %temp1
2171 or %temp1, %lo(symbol), %reg */
2172 if (temp)
2173 temp1 = temp; /* op0 is allowed. */
2174 else
2175 temp1 = gen_reg_rtx (DImode);
2177 emit_insn (gen_rtx_SET (temp1, gen_rtx_HIGH (DImode, op1)));
2178 emit_insn (gen_rtx_SET (op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
2179 break;
2181 case CM_MEDMID:
2182 /* The range spanned by all instructions in the object is less
2183 than 2^31 bytes (2GB) and the distance from any instruction
2184 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2185 than 2^31 bytes (2GB).
2187 The executable must be in the low 16TB of the virtual address
2188 space.
2190 sethi %h44(symbol), %temp1
2191 or %temp1, %m44(symbol), %temp2
2192 sllx %temp2, 12, %temp3
2193 or %temp3, %l44(symbol), %reg */
2194 if (temp)
2196 temp1 = op0;
2197 temp2 = op0;
2198 temp3 = temp; /* op0 is allowed. */
2200 else
2202 temp1 = gen_reg_rtx (DImode);
2203 temp2 = gen_reg_rtx (DImode);
2204 temp3 = gen_reg_rtx (DImode);
2207 emit_insn (gen_seth44 (temp1, op1));
2208 emit_insn (gen_setm44 (temp2, temp1, op1));
2209 emit_insn (gen_rtx_SET (temp3,
2210 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
2211 emit_insn (gen_setl44 (op0, temp3, op1));
2212 break;
2214 case CM_MEDANY:
2215 /* The range spanned by all instructions in the object is less
2216 than 2^31 bytes (2GB) and the distance from any instruction
2217 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
2218 than 2^31 bytes (2GB).
2220 The executable can be placed anywhere in the virtual address
2221 space.
2223 sethi %hh(symbol), %temp1
2224 sethi %lm(symbol), %temp2
2225 or %temp1, %hm(symbol), %temp3
2226 sllx %temp3, 32, %temp4
2227 or %temp4, %temp2, %temp5
2228 or %temp5, %lo(symbol), %reg */
2229 if (temp)
2231 /* It is possible that one of the registers we got for operands[2]
2232 might coincide with that of operands[0] (which is why we made
2233 it TImode). Pick the other one to use as our scratch. */
2234 if (rtx_equal_p (temp, op0))
2236 gcc_assert (ti_temp);
2237 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2239 temp1 = op0;
2240 temp2 = temp; /* op0 is _not_ allowed, see above. */
2241 temp3 = op0;
2242 temp4 = op0;
2243 temp5 = op0;
2245 else
2247 temp1 = gen_reg_rtx (DImode);
2248 temp2 = gen_reg_rtx (DImode);
2249 temp3 = gen_reg_rtx (DImode);
2250 temp4 = gen_reg_rtx (DImode);
2251 temp5 = gen_reg_rtx (DImode);
2254 emit_insn (gen_sethh (temp1, op1));
2255 emit_insn (gen_setlm (temp2, op1));
2256 emit_insn (gen_sethm (temp3, temp1, op1));
2257 emit_insn (gen_rtx_SET (temp4,
2258 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2259 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2260 emit_insn (gen_setlo (op0, temp5, op1));
2261 break;
2263 case CM_EMBMEDANY:
2264 /* Old old old backwards-compatibility cruft here.
2265 Essentially it is MEDLOW with a fixed 64-bit
2266 virtual base added to all data segment addresses.
2267 Text-segment stuff is computed like MEDANY; we can't
2268 reuse the code above because the relocation knobs
2269 look different.
2271 Data segment: sethi %hi(symbol), %temp1
2272 add %temp1, EMBMEDANY_BASE_REG, %temp2
2273 or %temp2, %lo(symbol), %reg */
2274 if (data_segment_operand (op1, GET_MODE (op1)))
2276 if (temp)
2278 temp1 = temp; /* op0 is allowed. */
2279 temp2 = op0;
2281 else
2283 temp1 = gen_reg_rtx (DImode);
2284 temp2 = gen_reg_rtx (DImode);
2287 emit_insn (gen_embmedany_sethi (temp1, op1));
2288 emit_insn (gen_embmedany_brsum (temp2, temp1));
2289 emit_insn (gen_embmedany_losum (op0, temp2, op1));
2292 /* Text segment: sethi %uhi(symbol), %temp1
2293 sethi %hi(symbol), %temp2
2294 or %temp1, %ulo(symbol), %temp3
2295 sllx %temp3, 32, %temp4
2296 or %temp4, %temp2, %temp5
2297 or %temp5, %lo(symbol), %reg */
2298 else
2300 if (temp)
2302 /* It is possible that one of the registers we got for operands[2]
2303 might coincide with that of operands[0] (which is why we made
2304 it TImode). Pick the other one to use as our scratch. */
2305 if (rtx_equal_p (temp, op0))
2307 gcc_assert (ti_temp);
2308 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
2310 temp1 = op0;
2311 temp2 = temp; /* op0 is _not_ allowed, see above. */
2312 temp3 = op0;
2313 temp4 = op0;
2314 temp5 = op0;
2316 else
2318 temp1 = gen_reg_rtx (DImode);
2319 temp2 = gen_reg_rtx (DImode);
2320 temp3 = gen_reg_rtx (DImode);
2321 temp4 = gen_reg_rtx (DImode);
2322 temp5 = gen_reg_rtx (DImode);
2325 emit_insn (gen_embmedany_textuhi (temp1, op1));
2326 emit_insn (gen_embmedany_texthi (temp2, op1));
2327 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
2328 emit_insn (gen_rtx_SET (temp4,
2329 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
2330 emit_insn (gen_rtx_SET (temp5, gen_rtx_PLUS (DImode, temp4, temp2)));
2331 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
2333 break;
2335 default:
2336 gcc_unreachable ();
2340 /* These avoid problems when cross compiling. If we do not
2341 go through all this hair then the optimizer will see
2342 invalid REG_EQUAL notes or in some cases none at all. */
2343 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2344 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2345 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2346 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2348 /* The optimizer is not to assume anything about exactly
2349 which bits are set for a HIGH; they are unspecified.
2350 Unfortunately this leads to many missed optimizations
2351 during CSE. We mask out the non-HIGH bits and match
2352 a plain movdi to alleviate this problem. */
2353 static rtx
2354 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2356 return gen_rtx_SET (dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2359 static rtx
2360 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2362 return gen_rtx_SET (dest, GEN_INT (val));
2365 static rtx
2366 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2368 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2371 static rtx
2372 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2374 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2377 /* Worker routines for 64-bit constant formation on arch64.
2378 One of the key things to do in these emissions is
2379 to create as many temp REGs as possible. This makes it
2380 possible for half-built constants to be reused when
2381 similar values are required later on.
2382 Without doing this, the optimizer cannot see such
2383 opportunities. */
2385 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2386 unsigned HOST_WIDE_INT, int);
2388 static void
2389 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2390 unsigned HOST_WIDE_INT low_bits, int is_neg)
2392 unsigned HOST_WIDE_INT high_bits;
2394 if (is_neg)
2395 high_bits = (~low_bits) & 0xffffffff;
2396 else
2397 high_bits = low_bits;
2399 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2400 if (!is_neg)
2402 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2404 else
2406 /* If we are XOR'ing with -1, then we should emit a one's complement
2407 instead. This way the combiner will notice logical operations
2408 such as ANDN later on and substitute. */
2409 if ((low_bits & 0x3ff) == 0x3ff)
2411 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2413 else
2415 emit_insn (gen_rtx_SET (op0,
2416 gen_safe_XOR64 (temp,
2417 (-(HOST_WIDE_INT)0x400
2418 | (low_bits & 0x3ff)))));
2423 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2424 unsigned HOST_WIDE_INT, int);
2426 static void
2427 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2428 unsigned HOST_WIDE_INT high_bits,
2429 unsigned HOST_WIDE_INT low_immediate,
2430 int shift_count)
2432 rtx temp2 = op0;
2434 if ((high_bits & 0xfffffc00) != 0)
2436 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2437 if ((high_bits & ~0xfffffc00) != 0)
2438 emit_insn (gen_rtx_SET (op0,
2439 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2440 else
2441 temp2 = temp;
2443 else
2445 emit_insn (gen_safe_SET64 (temp, high_bits));
2446 temp2 = temp;
2449 /* Now shift it up into place. */
2450 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp2,
2451 GEN_INT (shift_count))));
2453 /* If there is a low immediate piece, finish up by
2454 putting that in as well. */
2455 if (low_immediate != 0)
2456 emit_insn (gen_rtx_SET (op0, gen_safe_OR64 (op0, low_immediate)));
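/* Worked example for the routine above: high_bits = 0x12345678,
   low_immediate = 0 and shift_count = 32 produce
     sethi %hi(0x12345678), %temp    ! temp = 0x12345400
     or    %temp, 0x278, %op0        ! op0  = 0x12345678
     sllx  %op0, 32, %op0            ! op0  = 0x1234567800000000
   i.e. the 64-bit constant 0x1234567800000000 in three insns.  */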
2459 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2460 unsigned HOST_WIDE_INT);
2462 /* Full 64-bit constant decomposition. Even though this is the
2463 'worst' case, we still optimize a few things away. */
2464 static void
2465 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2466 unsigned HOST_WIDE_INT high_bits,
2467 unsigned HOST_WIDE_INT low_bits)
2469 rtx sub_temp = op0;
2471 if (can_create_pseudo_p ())
2472 sub_temp = gen_reg_rtx (DImode);
2474 if ((high_bits & 0xfffffc00) != 0)
2476 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2477 if ((high_bits & ~0xfffffc00) != 0)
2478 emit_insn (gen_rtx_SET (sub_temp,
2479 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2480 else
2481 sub_temp = temp;
2483 else
2485 emit_insn (gen_safe_SET64 (temp, high_bits));
2486 sub_temp = temp;
2489 if (can_create_pseudo_p ())
2491 rtx temp2 = gen_reg_rtx (DImode);
2492 rtx temp3 = gen_reg_rtx (DImode);
2493 rtx temp4 = gen_reg_rtx (DImode);
2495 emit_insn (gen_rtx_SET (temp4, gen_rtx_ASHIFT (DImode, sub_temp,
2496 GEN_INT (32))));
2498 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2499 if ((low_bits & ~0xfffffc00) != 0)
2501 emit_insn (gen_rtx_SET (temp3,
2502 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2503 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp3)));
2505 else
2507 emit_insn (gen_rtx_SET (op0, gen_rtx_PLUS (DImode, temp4, temp2)));
2510 else
2512 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2513 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2514 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2515 int to_shift = 12;
2517 /* We are in the middle of reload, so this is really
2518 painful. However, we still make an attempt to
2519 avoid emitting truly stupid code. */
2520 if (low1 != const0_rtx)
2522 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2523 GEN_INT (to_shift))));
2524 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low1)));
2525 sub_temp = op0;
2526 to_shift = 12;
2528 else
2530 to_shift += 12;
2532 if (low2 != const0_rtx)
2534 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2535 GEN_INT (to_shift))));
2536 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low2)));
2537 sub_temp = op0;
2538 to_shift = 8;
2540 else
2542 to_shift += 8;
2544 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, sub_temp,
2545 GEN_INT (to_shift))));
2546 if (low3 != const0_rtx)
2547 emit_insn (gen_rtx_SET (op0, gen_rtx_IOR (DImode, op0, low3)));
2548 /* phew... */
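/* To recap the reload path above: the low 32 bits are merged in three
   chunks (low1 is bits 31..20, low2 bits 19..8, low3 bits 7..0), each
   shifted into place and OR'd in; a zero chunk is skipped by folding
   its shift amount into the next shift.  */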
2552 /* Analyze a 64-bit constant for certain properties. */
2553 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2554 unsigned HOST_WIDE_INT,
2555 int *, int *, int *);
2557 static void
2558 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2559 unsigned HOST_WIDE_INT low_bits,
2560 int *hbsp, int *lbsp, int *abbasp)
2562 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2563 int i;
2565 lowest_bit_set = highest_bit_set = -1;
2566 i = 0;
2569 if ((lowest_bit_set == -1)
2570 && ((low_bits >> i) & 1))
2571 lowest_bit_set = i;
2572 if ((highest_bit_set == -1)
2573 && ((high_bits >> (32 - i - 1)) & 1))
2574 highest_bit_set = (64 - i - 1);
2576 while (++i < 32
2577 && ((highest_bit_set == -1)
2578 || (lowest_bit_set == -1)));
2579 if (i == 32)
2581 i = 0;
2584 if ((lowest_bit_set == -1)
2585 && ((high_bits >> i) & 1))
2586 lowest_bit_set = i + 32;
2587 if ((highest_bit_set == -1)
2588 && ((low_bits >> (32 - i - 1)) & 1))
2589 highest_bit_set = 32 - i - 1;
2591 while (++i < 32
2592 && ((highest_bit_set == -1)
2593 || (lowest_bit_set == -1)));
2595 /* If there are no bits set, this should have gone out
2596 as one instruction! */
2597 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2598 all_bits_between_are_set = 1;
2599 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2601 if (i < 32)
2603 if ((low_bits & (1 << i)) != 0)
2604 continue;
2606 else
2608 if ((high_bits & (1 << (i - 32))) != 0)
2609 continue;
2611 all_bits_between_are_set = 0;
2612 break;
2614 *hbsp = highest_bit_set;
2615 *lbsp = lowest_bit_set;
2616 *abbasp = all_bits_between_are_set;
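/* Worked example: for the constant 0x000000ffff000000, low_bits is
   0xff000000 and high_bits is 0x000000ff, so the routine reports
   lowest_bit_set = 24, highest_bit_set = 39 and
   all_bits_between_are_set = 1.  Such a 16-bit contiguous run lets
   sparc_emit_set_const64 below use its 2-insn sethi/sllx sequence.  */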
2619 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2621 static int
2622 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2623 unsigned HOST_WIDE_INT low_bits)
2625 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2627 if (high_bits == 0
2628 || high_bits == 0xffffffff)
2629 return 1;
2631 analyze_64bit_constant (high_bits, low_bits,
2632 &highest_bit_set, &lowest_bit_set,
2633 &all_bits_between_are_set);
2635 if ((highest_bit_set == 63
2636 || lowest_bit_set == 0)
2637 && all_bits_between_are_set != 0)
2638 return 1;
2640 if ((highest_bit_set - lowest_bit_set) < 21)
2641 return 1;
2643 return 0;
2646 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2647 unsigned HOST_WIDE_INT,
2648 int, int);
2650 static unsigned HOST_WIDE_INT
2651 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2652 unsigned HOST_WIDE_INT low_bits,
2653 int lowest_bit_set, int shift)
2655 HOST_WIDE_INT hi, lo;
2657 if (lowest_bit_set < 32)
2659 lo = (low_bits >> lowest_bit_set) << shift;
2660 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2662 else
2664 lo = 0;
2665 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2667 gcc_assert (! (hi & lo));
2668 return (hi | lo);
2671 /* Here we are sure to be arch64 and this is an integer constant
2672 being loaded into a register. Emit the most efficient
2673 insn sequence possible. Detection of all the 1-insn cases
2674 has been done already. */
2675 static void
2676 sparc_emit_set_const64 (rtx op0, rtx op1)
2678 unsigned HOST_WIDE_INT high_bits, low_bits;
2679 int lowest_bit_set, highest_bit_set;
2680 int all_bits_between_are_set;
2681 rtx temp = 0;
2683 /* Sanity check that we know what we are working with. */
2684 gcc_assert (TARGET_ARCH64
2685 && (GET_CODE (op0) == SUBREG
2686 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2688 if (! can_create_pseudo_p ())
2689 temp = op0;
2691 if (GET_CODE (op1) != CONST_INT)
2693 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2694 return;
2697 if (! temp)
2698 temp = gen_reg_rtx (DImode);
2700 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2701 low_bits = (INTVAL (op1) & 0xffffffff);
2703 /* low_bits bits 0 --> 31
2704 high_bits bits 32 --> 63 */
2706 analyze_64bit_constant (high_bits, low_bits,
2707 &highest_bit_set, &lowest_bit_set,
2708 &all_bits_between_are_set);
2710 /* First try for a 2-insn sequence. */
2712 /* These situations are preferred because the optimizer can
2713 * do more things with them:
2714 * 1) mov -1, %reg
2715 * sllx %reg, shift, %reg
2716 * 2) mov -1, %reg
2717 * srlx %reg, shift, %reg
2718 * 3) mov some_small_const, %reg
2719 * sllx %reg, shift, %reg
2721 if (((highest_bit_set == 63
2722 || lowest_bit_set == 0)
2723 && all_bits_between_are_set != 0)
2724 || ((highest_bit_set - lowest_bit_set) < 12))
2726 HOST_WIDE_INT the_const = -1;
2727 int shift = lowest_bit_set;
2729 if ((highest_bit_set != 63
2730 && lowest_bit_set != 0)
2731 || all_bits_between_are_set == 0)
2733 the_const =
2734 create_simple_focus_bits (high_bits, low_bits,
2735 lowest_bit_set, 0);
2737 else if (lowest_bit_set == 0)
2738 shift = -(63 - highest_bit_set);
2740 gcc_assert (SPARC_SIMM13_P (the_const));
2741 gcc_assert (shift != 0);
2743 emit_insn (gen_safe_SET64 (temp, the_const));
2744 if (shift > 0)
2745 emit_insn (gen_rtx_SET (op0, gen_rtx_ASHIFT (DImode, temp,
2746 GEN_INT (shift))));
2747 else if (shift < 0)
2748 emit_insn (gen_rtx_SET (op0, gen_rtx_LSHIFTRT (DImode, temp,
2749 GEN_INT (-shift))));
2750 return;
2753 /* Now a range of 22 or fewer bits set somewhere.
2754 * 1) sethi %hi(focus_bits), %reg
2755 * sllx %reg, shift, %reg
2756 * 2) sethi %hi(focus_bits), %reg
2757 * srlx %reg, shift, %reg
2759 if ((highest_bit_set - lowest_bit_set) < 21)
2761 unsigned HOST_WIDE_INT focus_bits =
2762 create_simple_focus_bits (high_bits, low_bits,
2763 lowest_bit_set, 10);
2765 gcc_assert (SPARC_SETHI_P (focus_bits));
2766 gcc_assert (lowest_bit_set != 10);
2768 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2770 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2771 if (lowest_bit_set < 10)
2772 emit_insn (gen_rtx_SET (op0,
2773 gen_rtx_LSHIFTRT (DImode, temp,
2774 GEN_INT (10 - lowest_bit_set))));
2775 else if (lowest_bit_set > 10)
2776 emit_insn (gen_rtx_SET (op0,
2777 gen_rtx_ASHIFT (DImode, temp,
2778 GEN_INT (lowest_bit_set - 10))));
2779 return;
2782 /* 1) sethi %hi(low_bits), %reg
2783 * or %reg, %lo(low_bits), %reg
2784 * 2) sethi %hi(~low_bits), %reg
2785 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2787 if (high_bits == 0
2788 || high_bits == 0xffffffff)
2790 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2791 (high_bits == 0xffffffff));
2792 return;
2795 /* Now, try 3-insn sequences. */
2797 /* 1) sethi %hi(high_bits), %reg
2798 * or %reg, %lo(high_bits), %reg
2799 * sllx %reg, 32, %reg
2801 if (low_bits == 0)
2803 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2804 return;
2807 /* We may be able to do something quick
2808 when the constant is negated, so try that. */
2809 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2810 (~low_bits) & 0xfffffc00))
2812 /* NOTE: The trailing bits get XOR'd so we need the
2813 non-negated bits, not the negated ones. */
2814 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2816 if ((((~high_bits) & 0xffffffff) == 0
2817 && ((~low_bits) & 0x80000000) == 0)
2818 || (((~high_bits) & 0xffffffff) == 0xffffffff
2819 && ((~low_bits) & 0x80000000) != 0))
2821 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2823 if ((SPARC_SETHI_P (fast_int)
2824 && (~high_bits & 0xffffffff) == 0)
2825 || SPARC_SIMM13_P (fast_int))
2826 emit_insn (gen_safe_SET64 (temp, fast_int));
2827 else
2828 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2830 else
2832 rtx negated_const;
2833 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2834 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2835 sparc_emit_set_const64 (temp, negated_const);
2838 /* If we are XOR'ing with -1, then we should emit a one's complement
2839 instead. This way the combiner will notice logical operations
2840 such as ANDN later on and substitute. */
2841 if (trailing_bits == 0x3ff)
2843 emit_insn (gen_rtx_SET (op0, gen_rtx_NOT (DImode, temp)));
2845 else
2847 emit_insn (gen_rtx_SET (op0,
2848 gen_safe_XOR64 (temp,
2849 (-0x400 | trailing_bits))));
2851 return;
2854 /* 1) sethi %hi(xxx), %reg
2855 * or %reg, %lo(xxx), %reg
2856 * sllx %reg, yyy, %reg
2858 * ??? This is just a generalized version of the low_bits==0
2859 * thing above, FIXME...
2861 if ((highest_bit_set - lowest_bit_set) < 32)
2863 unsigned HOST_WIDE_INT focus_bits =
2864 create_simple_focus_bits (high_bits, low_bits,
2865 lowest_bit_set, 0);
2867 /* We can't get here in this state. */
2868 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2870 /* So what we know is that the set bits straddle the
2871 middle of the 64-bit word. */
2872 sparc_emit_set_const64_quick2 (op0, temp,
2873 focus_bits, 0,
2874 lowest_bit_set);
2875 return;
2878 /* 1) sethi %hi(high_bits), %reg
2879 * or %reg, %lo(high_bits), %reg
2880 * sllx %reg, 32, %reg
2881 * or %reg, low_bits, %reg
2883 if (SPARC_SIMM13_P (low_bits) && ((int)low_bits > 0))
2885 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2886 return;
2889 /* The easiest way when all else fails, is full decomposition. */
2890 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2893 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. */
2895 static bool
2896 sparc_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2898 *p1 = SPARC_ICC_REG;
2899 *p2 = SPARC_FCC_REG;
2900 return true;
2903 /* Implement TARGET_MIN_ARITHMETIC_PRECISION. */
2905 static unsigned int
2906 sparc_min_arithmetic_precision (void)
2908 return 32;
2911 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2912 return the mode to be used for the comparison. For floating-point,
2913 CCFP[E]mode is used. CCNZmode should be used when the first operand
2914 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2915 processing is needed. */
2917 machine_mode
2918 select_cc_mode (enum rtx_code op, rtx x, rtx y)
2920 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2922 switch (op)
2924 case EQ:
2925 case NE:
2926 case UNORDERED:
2927 case ORDERED:
2928 case UNLT:
2929 case UNLE:
2930 case UNGT:
2931 case UNGE:
2932 case UNEQ:
2933 case LTGT:
2934 return CCFPmode;
2936 case LT:
2937 case LE:
2938 case GT:
2939 case GE:
2940 return CCFPEmode;
2942 default:
2943 gcc_unreachable ();
2946 else if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2947 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2948 && y == const0_rtx)
2950 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2951 return CCXNZmode;
2952 else
2953 return CCNZmode;
2955 else
2957 /* This is for the cmp<mode>_sne pattern. */
2958 if (GET_CODE (x) == NOT && y == constm1_rtx)
2960 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2961 return CCXCmode;
2962 else
2963 return CCCmode;
2966 /* This is for the [u]addvdi4_sp32 and [u]subvdi4_sp32 patterns. */
2967 if (!TARGET_ARCH64 && GET_MODE (x) == DImode)
2969 if (GET_CODE (y) == UNSPEC
2970 && (XINT (y, 1) == UNSPEC_ADDV
2971 || XINT (y, 1) == UNSPEC_SUBV
2972 || XINT (y, 1) == UNSPEC_NEGV))
2973 return CCVmode;
2974 else
2975 return CCCmode;
2978 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2979 return CCXmode;
2980 else
2981 return CCmode;
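/* For instance, comparing (plus (reg) (reg)) against zero in SImode
   yields CCNZmode, so the N/Z flags already set by an addcc can be
   reused, while an ordinary compare of two DImode registers in 64-bit
   mode yields CCXmode.  */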
2985 /* Emit the compare insn and return the CC reg for a CODE comparison
2986 with operands X and Y. */
2988 static rtx
2989 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2991 machine_mode mode;
2992 rtx cc_reg;
2994 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2995 return x;
2997 mode = SELECT_CC_MODE (code, x, y);
2999 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
3000 fcc regs (cse can't tell they're really call clobbered regs and will
3001 remove a duplicate comparison even if there is an intervening function
3002 call - it will then try to reload the cc reg via an int reg which is why
3003 we need the movcc patterns). It is possible to provide the movcc
3004 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
3005 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
3006 to tell cse that CCFPE mode registers (even pseudos) are call
3007 clobbered. */
3009 /* ??? This is an experiment. Rather than making changes to cse which may
3010 or may not be easy/clean, we do our own cse. This is possible because
3011 we will generate hard registers. Cse knows they're call clobbered (it
3012 doesn't know the same thing about pseudos). If we guess wrong, no big
3013 deal, but if we win, great! */
3015 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3016 #if 1 /* experiment */
3018 int reg;
3019 /* We cycle through the registers to ensure they're all exercised. */
3020 static int next_fcc_reg = 0;
3021 /* Previous x,y for each fcc reg. */
3022 static rtx prev_args[4][2];
3024 /* Scan prev_args for x,y. */
3025 for (reg = 0; reg < 4; reg++)
3026 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
3027 break;
3028 if (reg == 4)
3030 reg = next_fcc_reg;
3031 prev_args[reg][0] = x;
3032 prev_args[reg][1] = y;
3033 next_fcc_reg = (next_fcc_reg + 1) & 3;
3035 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
3037 #else
3038 cc_reg = gen_reg_rtx (mode);
3039 #endif /* ! experiment */
3040 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
3041 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
3042 else
3043 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
3045 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
3046 will only result in an unrecognizable insn so no point in asserting. */
3047 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y)));
3049 return cc_reg;
3053 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
3056 gen_compare_reg (rtx cmp)
3058 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
3061 /* This function is used for v9 only.
3062 DEST is the target of the Scc insn.
3063 CODE is the code for an Scc's comparison.
3064 X and Y are the values we compare.
3066 This function is needed to turn
3068 (set (reg:SI 110)
3069 (gt (reg:CCX 100 %icc)
3070 (const_int 0)))
3071 into
3072 (set (reg:SI 110)
3073 (gt:DI (reg:CCX 100 %icc)
3074 (const_int 0)))
3076 I.e., the instruction recognizer needs to see the mode of the comparison to
3077 find the right instruction. We could use "gt:DI" right in the
3078 define_expand, but leaving it out allows us to handle DI, SI, etc. */
3080 static int
3081 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
3083 if (! TARGET_ARCH64
3084 && (GET_MODE (x) == DImode
3085 || GET_MODE (dest) == DImode))
3086 return 0;
3088 /* Try to use the movrCC insns. */
3089 if (TARGET_ARCH64
3090 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3091 && y == const0_rtx
3092 && v9_regcmp_p (compare_code))
3094 rtx op0 = x;
3095 rtx temp;
3097 /* Special case for op0 != 0. This can be done with one instruction if
3098 dest == x. */
3100 if (compare_code == NE
3101 && GET_MODE (dest) == DImode
3102 && rtx_equal_p (op0, dest))
3104 emit_insn (gen_rtx_SET (dest,
3105 gen_rtx_IF_THEN_ELSE (DImode,
3106 gen_rtx_fmt_ee (compare_code, DImode,
3107 op0, const0_rtx),
3108 const1_rtx,
3109 dest)));
3110 return 1;
3113 if (reg_overlap_mentioned_p (dest, op0))
3115 /* Handle the case where dest == x.
3116 We "early clobber" the result. */
3117 op0 = gen_reg_rtx (GET_MODE (x));
3118 emit_move_insn (op0, x);
3121 emit_insn (gen_rtx_SET (dest, const0_rtx));
3122 if (GET_MODE (op0) != DImode)
3124 temp = gen_reg_rtx (DImode);
3125 convert_move (temp, op0, 0);
3127 else
3128 temp = op0;
3129 emit_insn (gen_rtx_SET (dest,
3130 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3131 gen_rtx_fmt_ee (compare_code, DImode,
3132 temp, const0_rtx),
3133 const1_rtx,
3134 dest)));
3135 return 1;
3137 else
3139 x = gen_compare_reg_1 (compare_code, x, y);
3140 y = const0_rtx;
3142 emit_insn (gen_rtx_SET (dest, const0_rtx));
3143 emit_insn (gen_rtx_SET (dest,
3144 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
3145 gen_rtx_fmt_ee (compare_code,
3146 GET_MODE (x), x, y),
3147 const1_rtx, dest)));
3148 return 1;
3153 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
3154 without jumps using the addx/subx instructions. */
3156 bool
3157 emit_scc_insn (rtx operands[])
3159 rtx tem, x, y;
3160 enum rtx_code code;
3161 machine_mode mode;
3163 /* The quad-word fp compare library routines all return nonzero to indicate
3164 true, which is different from the equivalent libgcc routines, so we must
3165 handle them specially here. */
3166 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
3168 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
3169 GET_CODE (operands[1]));
3170 operands[2] = XEXP (operands[1], 0);
3171 operands[3] = XEXP (operands[1], 1);
3174 code = GET_CODE (operands[1]);
3175 x = operands[2];
3176 y = operands[3];
3177 mode = GET_MODE (x);
3179 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
3180 more applications). The exception to this is "reg != 0" which can
3181 be done in one instruction on v9 (so we do it). */
3182 if ((code == EQ || code == NE) && (mode == SImode || mode == DImode))
3184 if (y != const0_rtx)
3185 x = force_reg (mode, gen_rtx_XOR (mode, x, y));
3187 rtx pat = gen_rtx_SET (operands[0],
3188 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3189 x, const0_rtx));
3191 /* If we can use addx/subx or addxc, add a clobber for CC. */
3192 if (mode == SImode || (code == NE && TARGET_VIS3))
3194 rtx clobber
3195 = gen_rtx_CLOBBER (VOIDmode,
3196 gen_rtx_REG (mode == SImode ? CCmode : CCXmode,
3197 SPARC_ICC_REG));
3198 pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clobber));
3201 emit_insn (pat);
3202 return true;
3205 /* We can do LTU in DImode using the addxc instruction with VIS3. */
3206 if (TARGET_ARCH64
3207 && mode == DImode
3208 && !((code == LTU || code == GTU) && TARGET_VIS3)
3209 && gen_v9_scc (operands[0], code, x, y))
3210 return true;
3212 /* We can do LTU and GEU using the addx/subx instructions too. And
3213 for GTU/LEU, if both operands are registers, swap them and fall
3214 back to the easy case. */
3215 if (code == GTU || code == LEU)
3217 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3218 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
3220 tem = x;
3221 x = y;
3222 y = tem;
3223 code = swap_condition (code);
3227 if (code == LTU || code == GEU)
3229 emit_insn (gen_rtx_SET (operands[0],
3230 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3231 gen_compare_reg_1 (code, x, y),
3232 const0_rtx)));
3233 return true;
3236 /* All the possibilities to use addx/subx based sequences have been
3237 exhausted; try for a 3-instruction sequence using v9 conditional
3238 moves. */
3239 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
3240 return true;
3242 /* Nope, do branches. */
3243 return false;
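/* As an illustration of the addx/subx method (a sketch of the idea,
   not necessarily the exact insns emitted): for the unsigned
   dest = (x < y), a subcc x, y sets the carry flag exactly when
   x < y, and addx %g0, 0, dest then copies the carry into dest.  */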
3246 /* Emit a conditional jump insn for the v9 architecture using comparison code
3247 CODE and jump target LABEL.
3248 This function exists to take advantage of the v9 brxx insns. */
3250 static void
3251 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
3253 emit_jump_insn (gen_rtx_SET (pc_rtx,
3254 gen_rtx_IF_THEN_ELSE (VOIDmode,
3255 gen_rtx_fmt_ee (code, GET_MODE (op0),
3256 op0, const0_rtx),
3257 gen_rtx_LABEL_REF (VOIDmode, label),
3258 pc_rtx)));
3261 /* Emit a conditional jump insn for the UA2011 architecture using
3262 comparison code CODE and jump target LABEL. This function exists
3263 to take advantage of the UA2011 Compare and Branch insns. */
3265 static void
3266 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
3268 rtx if_then_else;
3270 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
3271 gen_rtx_fmt_ee(code, GET_MODE(op0),
3272 op0, op1),
3273 gen_rtx_LABEL_REF (VOIDmode, label),
3274 pc_rtx);
3276 emit_jump_insn (gen_rtx_SET (pc_rtx, if_then_else));
3279 void
3280 emit_conditional_branch_insn (rtx operands[])
3282 /* The quad-word fp compare library routines all return nonzero to indicate
3283 true, which is different from the equivalent libgcc routines, so we must
3284 handle them specially here. */
3285 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
3287 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
3288 GET_CODE (operands[0]));
3289 operands[1] = XEXP (operands[0], 0);
3290 operands[2] = XEXP (operands[0], 1);
3293 /* If we can tell early on that the comparison is against a constant
3294 that won't fit in the 5-bit signed immediate field of a cbcond,
3295 use one of the other v9 conditional branch sequences. */
3296 if (TARGET_CBCOND
3297 && GET_CODE (operands[1]) == REG
3298 && (GET_MODE (operands[1]) == SImode
3299 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
3300 && (GET_CODE (operands[2]) != CONST_INT
3301 || SPARC_SIMM5_P (INTVAL (operands[2]))))
3303 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
3304 return;
3307 if (TARGET_ARCH64 && operands[2] == const0_rtx
3308 && GET_CODE (operands[1]) == REG
3309 && GET_MODE (operands[1]) == DImode)
3311 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
3312 return;
3315 operands[1] = gen_compare_reg (operands[0]);
3316 operands[2] = const0_rtx;
3317 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
3318 operands[1], operands[2]);
3319 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
3320 operands[3]));
3324 /* Generate a DFmode part of a hard TFmode register.
3325 REG is the TFmode hard register, LOW is 1 for the
3326 low 64 bits of the register and 0 otherwise. */
3329 gen_df_reg (rtx reg, int low)
3331 int regno = REGNO (reg);
3333 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3334 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3335 return gen_rtx_REG (DFmode, regno);
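/* SPARC is big-endian at the word level, so for a TFmode value living
   in %f0..%f3 this returns %f0 for the high part (LOW == 0) and %f2
   for the low part (LOW == 1).  */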
3338 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3339 Unlike normal calls, TFmode operands are passed by reference. It is
3340 assumed that no more than 3 operands are required. */
3342 static void
3343 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3345 rtx ret_slot = NULL, arg[3], func_sym;
3346 int i;
3348 /* We only expect to be called for conversions, unary, and binary ops. */
3349 gcc_assert (nargs == 2 || nargs == 3);
3351 for (i = 0; i < nargs; ++i)
3353 rtx this_arg = operands[i];
3354 rtx this_slot;
3356 /* TFmode arguments and return values are passed by reference. */
3357 if (GET_MODE (this_arg) == TFmode)
3359 int force_stack_temp;
3361 force_stack_temp = 0;
3362 if (TARGET_BUGGY_QP_LIB && i == 0)
3363 force_stack_temp = 1;
3365 if (GET_CODE (this_arg) == MEM
3366 && ! force_stack_temp)
3368 tree expr = MEM_EXPR (this_arg);
3369 if (expr)
3370 mark_addressable (expr);
3371 this_arg = XEXP (this_arg, 0);
3373 else if (CONSTANT_P (this_arg)
3374 && ! force_stack_temp)
3376 this_slot = force_const_mem (TFmode, this_arg);
3377 this_arg = XEXP (this_slot, 0);
3379 else
3381 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3383 /* Operand 0 is the return value. We'll copy it out later. */
3384 if (i > 0)
3385 emit_move_insn (this_slot, this_arg);
3386 else
3387 ret_slot = this_slot;
3389 this_arg = XEXP (this_slot, 0);
3393 arg[i] = this_arg;
3396 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3398 if (GET_MODE (operands[0]) == TFmode)
3400 if (nargs == 2)
3401 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
3402 arg[0], GET_MODE (arg[0]),
3403 arg[1], GET_MODE (arg[1]));
3404 else
3405 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
3406 arg[0], GET_MODE (arg[0]),
3407 arg[1], GET_MODE (arg[1]),
3408 arg[2], GET_MODE (arg[2]));
3410 if (ret_slot)
3411 emit_move_insn (operands[0], ret_slot);
3413 else
3415 rtx ret;
3417 gcc_assert (nargs == 2);
3419 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3420 GET_MODE (operands[0]), 1,
3421 arg[1], GET_MODE (arg[1]));
3423 if (ret != operands[0])
3424 emit_move_insn (operands[0], ret);
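/* So a TFmode addition c = a + b expands into a call of the shape
   _Qp_add (&c, &a, &b): the address of the return slot is passed as
   the first argument, the TFmode operands are passed by reference,
   and the result is copied out of ret_slot afterwards.  */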
3428 /* Expand soft-float TFmode calls to sparc abi routines. */
3430 static void
3431 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3433 const char *func;
3435 switch (code)
3437 case PLUS:
3438 func = "_Qp_add";
3439 break;
3440 case MINUS:
3441 func = "_Qp_sub";
3442 break;
3443 case MULT:
3444 func = "_Qp_mul";
3445 break;
3446 case DIV:
3447 func = "_Qp_div";
3448 break;
3449 default:
3450 gcc_unreachable ();
3453 emit_soft_tfmode_libcall (func, 3, operands);
3456 static void
3457 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3459 const char *func;
3461 gcc_assert (code == SQRT);
3462 func = "_Qp_sqrt";
3464 emit_soft_tfmode_libcall (func, 2, operands);
3467 static void
3468 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3470 const char *func;
3472 switch (code)
3474 case FLOAT_EXTEND:
3475 switch (GET_MODE (operands[1]))
3477 case SFmode:
3478 func = "_Qp_stoq";
3479 break;
3480 case DFmode:
3481 func = "_Qp_dtoq";
3482 break;
3483 default:
3484 gcc_unreachable ();
3486 break;
3488 case FLOAT_TRUNCATE:
3489 switch (GET_MODE (operands[0]))
3491 case SFmode:
3492 func = "_Qp_qtos";
3493 break;
3494 case DFmode:
3495 func = "_Qp_qtod";
3496 break;
3497 default:
3498 gcc_unreachable ();
3500 break;
3502 case FLOAT:
3503 switch (GET_MODE (operands[1]))
3505 case SImode:
3506 func = "_Qp_itoq";
3507 if (TARGET_ARCH64)
3508 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3509 break;
3510 case DImode:
3511 func = "_Qp_xtoq";
3512 break;
3513 default:
3514 gcc_unreachable ();
3516 break;
3518 case UNSIGNED_FLOAT:
3519 switch (GET_MODE (operands[1]))
3521 case SImode:
3522 func = "_Qp_uitoq";
3523 if (TARGET_ARCH64)
3524 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3525 break;
3526 case DImode:
3527 func = "_Qp_uxtoq";
3528 break;
3529 default:
3530 gcc_unreachable ();
3532 break;
3534 case FIX:
3535 switch (GET_MODE (operands[0]))
3537 case SImode:
3538 func = "_Qp_qtoi";
3539 break;
3540 case DImode:
3541 func = "_Qp_qtox";
3542 break;
3543 default:
3544 gcc_unreachable ();
3546 break;
3548 case UNSIGNED_FIX:
3549 switch (GET_MODE (operands[0]))
3551 case SImode:
3552 func = "_Qp_qtoui";
3553 break;
3554 case DImode:
3555 func = "_Qp_qtoux";
3556 break;
3557 default:
3558 gcc_unreachable ();
3560 break;
3562 default:
3563 gcc_unreachable ();
3566 emit_soft_tfmode_libcall (func, 2, operands);
3569 /* Expand a hard-float tfmode operation. All arguments must be in
3570 registers. */
3572 static void
3573 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3575 rtx op, dest;
3577 if (GET_RTX_CLASS (code) == RTX_UNARY)
3579 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3580 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3582 else
3584 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3585 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3586 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3587 operands[1], operands[2]);
3590 if (register_operand (operands[0], VOIDmode))
3591 dest = operands[0];
3592 else
3593 dest = gen_reg_rtx (GET_MODE (operands[0]));
3595 emit_insn (gen_rtx_SET (dest, op));
3597 if (dest != operands[0])
3598 emit_move_insn (operands[0], dest);
3601 void
3602 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3604 if (TARGET_HARD_QUAD)
3605 emit_hard_tfmode_operation (code, operands);
3606 else
3607 emit_soft_tfmode_binop (code, operands);
3610 void
3611 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3613 if (TARGET_HARD_QUAD)
3614 emit_hard_tfmode_operation (code, operands);
3615 else
3616 emit_soft_tfmode_unop (code, operands);
3619 void
3620 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3622 if (TARGET_HARD_QUAD)
3623 emit_hard_tfmode_operation (code, operands);
3624 else
3625 emit_soft_tfmode_cvt (code, operands);
3628 /* Return nonzero if a branch/jump/call instruction will be emitting
3629 a nop into its delay slot. */
3632 empty_delay_slot (rtx_insn *insn)
3634 rtx seq;
3636 /* If no previous instruction (should not happen), return true. */
3637 if (PREV_INSN (insn) == NULL)
3638 return 1;
3640 seq = NEXT_INSN (PREV_INSN (insn));
3641 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3642 return 0;
3644 return 1;
3647 /* Return nonzero if we should emit a nop after a cbcond instruction.
3648 The cbcond instruction does not have a delay slot; however, there is
3649 a severe performance penalty if a control transfer appears right
3650 after a cbcond. Therefore we emit a nop when we detect this
3651 situation. */
3654 emit_cbcond_nop (rtx_insn *insn)
3656 rtx next = next_active_insn (insn);
3658 if (!next)
3659 return 1;
3661 if (NONJUMP_INSN_P (next)
3662 && GET_CODE (PATTERN (next)) == SEQUENCE)
3663 next = XVECEXP (PATTERN (next), 0, 0);
3664 else if (CALL_P (next)
3665 && GET_CODE (PATTERN (next)) == PARALLEL)
3667 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3669 if (GET_CODE (delay) == RETURN)
3671 /* It's a sibling call. Do not emit the nop if we're going
3672 to emit something other than the jump itself as the first
3673 instruction of the sibcall sequence. */
3674 if (sparc_leaf_function_p || TARGET_FLAT)
3675 return 0;
3679 if (NONJUMP_INSN_P (next))
3680 return 0;
3682 return 1;
3685 /* Return nonzero if TRIAL can go into the call delay slot. */
3688 eligible_for_call_delay (rtx_insn *trial)
3690 rtx pat;
3692 if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
3693 return 0;
3695 /* Binutils allows
3696 call __tls_get_addr, %tgd_call (foo)
3697 add %l7, %o0, %o0, %tgd_add (foo)
3698 while Sun as/ld does not. */
3699 if (TARGET_GNU_TLS || !TARGET_TLS)
3700 return 1;
3702 pat = PATTERN (trial);
3704 /* We must reject tgd_add{32|64}, i.e.
3705 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3706 and tldm_add{32|64}, i.e.
3707 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3708 for Sun as/ld. */
3709 if (GET_CODE (pat) == SET
3710 && GET_CODE (SET_SRC (pat)) == PLUS)
3712 rtx unspec = XEXP (SET_SRC (pat), 1);
3714 if (GET_CODE (unspec) == UNSPEC
3715 && (XINT (unspec, 1) == UNSPEC_TLSGD
3716 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3717 return 0;
3720 return 1;
3723 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3724 instruction. RETURN_P is true if the v9 variant 'return' is to be
3725 considered in the test too.
3727 TRIAL must be a SET whose destination is a REG appropriate for the
3728 'restore' instruction or, if RETURN_P is true, for the 'return'
3729 instruction. */
3731 static int
3732 eligible_for_restore_insn (rtx trial, bool return_p)
3734 rtx pat = PATTERN (trial);
3735 rtx src = SET_SRC (pat);
3736 bool src_is_freg = false;
3737 rtx src_reg;
3739 /* Since we now can do moves between float and integer registers when
3740 VIS3 is enabled, we have to catch this case. We can allow such
3741 moves when doing a 'return', however. */
3742 src_reg = src;
3743 if (GET_CODE (src_reg) == SUBREG)
3744 src_reg = SUBREG_REG (src_reg);
3745 if (GET_CODE (src_reg) == REG
3746 && SPARC_FP_REG_P (REGNO (src_reg)))
3747 src_is_freg = true;
3749 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3750 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3751 && arith_operand (src, GET_MODE (src))
3752 && ! src_is_freg)
3754 if (TARGET_ARCH64)
3755 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3756 else
3757 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3760 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3761 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3762 && arith_double_operand (src, GET_MODE (src))
3763 && ! src_is_freg)
3764 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3766 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3767 else if (! TARGET_FPU && register_operand (src, SFmode))
3768 return 1;
3770 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3771 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3772 return 1;
3774 /* If we have the 'return' instruction, anything that does not use
3775 local or output registers and can go into a delay slot wins. */
3776 else if (return_p && TARGET_V9 && !epilogue_renumber (&pat, 1))
3777 return 1;
3779 /* The 'restore src1,src2,dest' pattern for SImode. */
3780 else if (GET_CODE (src) == PLUS
3781 && register_operand (XEXP (src, 0), SImode)
3782 && arith_operand (XEXP (src, 1), SImode))
3783 return 1;
3785 /* The 'restore src1,src2,dest' pattern for DImode. */
3786 else if (GET_CODE (src) == PLUS
3787 && register_operand (XEXP (src, 0), DImode)
3788 && arith_double_operand (XEXP (src, 1), DImode))
3789 return 1;
3791 /* The 'restore src1,%lo(src2),dest' pattern. */
3792 else if (GET_CODE (src) == LO_SUM
3793 && ! TARGET_CM_MEDMID
3794 && ((register_operand (XEXP (src, 0), SImode)
3795 && immediate_operand (XEXP (src, 1), SImode))
3796 || (TARGET_ARCH64
3797 && register_operand (XEXP (src, 0), DImode)
3798 && immediate_operand (XEXP (src, 1), DImode))))
3799 return 1;
3801 /* The 'restore src,src,dest' pattern. */
3802 else if (GET_CODE (src) == ASHIFT
3803 && (register_operand (XEXP (src, 0), SImode)
3804 || register_operand (XEXP (src, 0), DImode))
3805 && XEXP (src, 1) == const1_rtx)
3806 return 1;
3808 return 0;
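
/* For illustration (not part of the original sources): under the rules
   above, a delay-slot candidate such as "mov %o1, %i0" can be combined
   with the epilogue into "ret; restore %o1, %g0, %o0", because 'restore'
   reads its source operands in the callee's register window and writes
   its destination in the caller's window, where the callee's %i0 is
   visible as %o0.  */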
/* Return nonzero if TRIAL can go into the function return's delay slot.  */

int
eligible_for_return_delay (rtx_insn *trial)
{
  int regno;
  rtx pat;

  /* If the function uses __builtin_eh_return, the eh_return machinery
     occupies the delay slot.  */
  if (crtl->calls_eh_return)
    return 0;

  if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
    return 0;

  /* In the case of a leaf or flat function, anything can go into the slot.  */
  if (sparc_leaf_function_p || TARGET_FLAT)
    return 1;

  if (!NONJUMP_INSN_P (trial))
    return 0;

  pat = PATTERN (trial);
  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      if (! TARGET_V9)
	return 0;
      for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
	{
	  rtx expr = XVECEXP (pat, 0, i);
	  if (GET_CODE (expr) != SET)
	    return 0;
	  if (GET_CODE (SET_DEST (expr)) != REG)
	    return 0;
	  regno = REGNO (SET_DEST (expr));
	  if (regno >= 8 && regno < 24)
	    return 0;
	}
      return !epilogue_renumber (&pat, 1);
    }

  if (GET_CODE (pat) != SET)
    return 0;

  if (GET_CODE (SET_DEST (pat)) != REG)
    return 0;

  regno = REGNO (SET_DEST (pat));

  /* Otherwise, only operations which can be done in tandem with
     a `restore' or `return' insn can go into the delay slot.  */
  if (regno >= 8 && regno < 24)
    return 0;

  /* If this instruction sets up a floating-point register and we have a
     return instruction, it can probably go in.  But restore will not work
     with FP_REGS.  */
  if (! SPARC_INT_REG_P (regno))
    return TARGET_V9 && !epilogue_renumber (&pat, 1);

  return eligible_for_restore_insn (trial, true);
}
/* Return nonzero if TRIAL can go into the sibling call's delay slot.  */

int
eligible_for_sibcall_delay (rtx_insn *trial)
{
  rtx pat;

  if (get_attr_in_branch_delay (trial) == IN_BRANCH_DELAY_FALSE)
    return 0;

  if (!NONJUMP_INSN_P (trial))
    return 0;

  pat = PATTERN (trial);

  if (sparc_leaf_function_p || TARGET_FLAT)
    {
      /* If the tail call is done using the call instruction,
	 we have to restore %o7 in the delay slot.  */
      if (LEAF_SIBCALL_SLOT_RESERVED_P)
	return 0;

      /* %g1 is used to build the function address.  */
      if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
	return 0;

      return 1;
    }

  if (GET_CODE (pat) != SET)
    return 0;

  /* Otherwise, only operations which can be done in tandem with
     a `restore' insn can go into the delay slot.  */
  if (GET_CODE (SET_DEST (pat)) != REG
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
      || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
    return 0;

  /* If it mentions %o7, it can't go in, because sibcall will clobber it
     in most cases.  */
  if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
    return 0;

  return eligible_for_restore_insn (trial, false);
}
/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
sparc_cannot_force_const_mem (machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_WIDE_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
	 non-TLS symbols are OK iff we are non-PIC.  */
      if (SYMBOL_REF_TLS_MODEL (x))
	return true;
      else
	return flag_pic != 0;

    case CONST:
      return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
    case PLUS:
    case MINUS:
      return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
	 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
    case UNSPEC:
      return true;
    default:
      gcc_unreachable ();
    }
}
/* Global Offset Table support.  */
static GTY(()) rtx got_helper_rtx = NULL_RTX;
static GTY(()) rtx global_offset_table_rtx = NULL_RTX;

/* Return the SYMBOL_REF for the Global Offset Table.  */

static GTY(()) rtx sparc_got_symbol = NULL_RTX;

static rtx
sparc_got (void)
{
  if (!sparc_got_symbol)
    sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");

  return sparc_got_symbol;
}
/* Ensure that we are not using patterns that are not OK with PIC.  */

int
check_pic (int i)
{
  rtx op;

  switch (flag_pic)
    {
    case 1:
      op = recog_data.operand[i];
      gcc_assert (GET_CODE (op) != SYMBOL_REF
		  && (GET_CODE (op) != CONST
		      || (GET_CODE (XEXP (op, 0)) == MINUS
			  && XEXP (XEXP (op, 0), 0) == sparc_got ()
			  && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
      /* fallthrough */
    case 2:
    default:
      return 1;
    }
}
/* Return true if X is an address which needs a temporary register when
   reloaded while generating PIC code.  */

int
pic_address_needs_scratch (rtx x)
{
  /* An address which is a symbolic plus a non SMALL_INT needs a temp reg.  */
  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
    return 1;

  return 0;
}
/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

static bool
sparc_legitimate_constant_p (machine_mode mode, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
      if (sparc_tls_referenced_p (x))
	return false;
      break;

    case CONST_DOUBLE:
      /* Floating point constants are generally not ok.
	 The only exception is 0.0 and all-ones in VIS.  */
      if (TARGET_VIS
	  && SCALAR_FLOAT_MODE_P (mode)
	  && (const_zero_operand (x, mode)
	      || const_all_ones_operand (x, mode)))
	return true;

      return false;

    case CONST_VECTOR:
      /* Vector constants are generally not ok.
	 The only exception is 0 or -1 in VIS.  */
      if (TARGET_VIS
	  && (const_zero_operand (x, mode)
	      || const_all_ones_operand (x, mode)))
	return true;

      return false;

    default:
      break;
    }

  return true;
}
/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case CONST_INT:
    case HIGH:
      return true;

    case CONST:
      if (flag_pic && pic_address_needs_scratch (x))
	return false;
      return sparc_legitimate_constant_p (Pmode, x);

    case SYMBOL_REF:
      return !flag_pic && sparc_legitimate_constant_p (Pmode, x);

    default:
      return false;
    }
}
/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P.  */

bool
legitimate_pic_operand_p (rtx x)
{
  if (pic_address_needs_scratch (x))
    return false;
  if (sparc_tls_referenced_p (x))
    return false;
  return true;
}
#define RTX_OK_FOR_OFFSET_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) <= (0x1000 - GET_MODE_SIZE (MODE)))

#define RTX_OK_FOR_OLO10_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) <= (0xc00 - GET_MODE_SIZE (MODE)))
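
/* Note: both bounds come from the 13-bit signed immediate field of SPARC
   memory instructions, which covers -4096..4095.  The OLO10 variant is
   used when the offset is later combined with a %lo() term; %lo()
   contributes at most 0x3ff, so capping the constant at 0xc00 keeps the
   combined value within the simm13 range.  */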
/* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.

   On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
   ordinarily.  This changes a bit when generating PIC.  */

static bool
sparc_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
{
  rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;

  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
    rs1 = addr;
  else if (GET_CODE (addr) == PLUS)
    {
      rs1 = XEXP (addr, 0);
      rs2 = XEXP (addr, 1);

      /* Canonicalize.  REG comes first, if there are no regs,
	 LO_SUM comes first.  */
      if (!REG_P (rs1)
	  && GET_CODE (rs1) != SUBREG
	  && (REG_P (rs2)
	      || GET_CODE (rs2) == SUBREG
	      || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
	{
	  rs1 = XEXP (addr, 1);
	  rs2 = XEXP (addr, 0);
	}

      if ((flag_pic == 1
	   && rs1 == pic_offset_table_rtx
	   && !REG_P (rs2)
	   && GET_CODE (rs2) != SUBREG
	   && GET_CODE (rs2) != LO_SUM
	   && GET_CODE (rs2) != MEM
	   && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
	   && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
	   && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
	  || ((REG_P (rs1)
	       || GET_CODE (rs1) == SUBREG)
	      && RTX_OK_FOR_OFFSET_P (rs2, mode)))
	{
	  imm1 = rs2;
	  rs2 = NULL;
	}
      else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
	       && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
	{
	  /* We prohibit REG + REG for TFmode when there are no quad move insns
	     and we consequently need to split.  We do this because REG+REG
	     is not an offsettable address.  If we get the situation in reload
	     where source and destination of a movtf pattern are both MEMs with
	     REG+REG address, then only one of them gets converted to an
	     offsettable address.  */
	  if (mode == TFmode
	      && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
	    return 0;

	  /* Likewise for TImode, but in all cases.  */
	  if (mode == TImode)
	    return 0;

	  /* We prohibit REG + REG on ARCH32 if not optimizing for
	     DFmode/DImode because then mem_min_alignment is likely to be zero
	     after reload and the forced split would lack a matching splitter
	     pattern.  */
	  if (TARGET_ARCH32 && !optimize
	      && (mode == DFmode || mode == DImode))
	    return 0;
	}
      else if (USE_AS_OFFSETABLE_LO10
	       && GET_CODE (rs1) == LO_SUM
	       && TARGET_ARCH64
	       && ! TARGET_CM_MEDMID
	       && RTX_OK_FOR_OLO10_P (rs2, mode))
	{
	  rs2 = NULL;
	  imm1 = XEXP (rs1, 1);
	  rs1 = XEXP (rs1, 0);
	  if (!CONSTANT_P (imm1)
	      || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
	    return 0;
	}
    }
  else if (GET_CODE (addr) == LO_SUM)
    {
      rs1 = XEXP (addr, 0);
      imm1 = XEXP (addr, 1);

      if (!CONSTANT_P (imm1)
	  || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
	return 0;

      /* We can't allow TFmode in 32-bit mode, because an offset greater
	 than the alignment (8) may cause the LO_SUM to overflow.  */
      if (mode == TFmode && TARGET_ARCH32)
	return 0;

      /* During reload, accept the HIGH+LO_SUM construct generated by
	 sparc_legitimize_reload_address.  */
      if (reload_in_progress
	  && GET_CODE (rs1) == HIGH
	  && XEXP (rs1, 0) == imm1)
	return 1;
    }
  else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
    return 1;
  else
    return 0;

  if (GET_CODE (rs1) == SUBREG)
    rs1 = SUBREG_REG (rs1);
  if (!REG_P (rs1))
    return 0;

  if (rs2)
    {
      if (GET_CODE (rs2) == SUBREG)
	rs2 = SUBREG_REG (rs2);
      if (!REG_P (rs2))
	return 0;
    }

  if (strict)
    {
      if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
	  || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
	return 0;
    }
  else
    {
      if ((! SPARC_INT_REG_P (REGNO (rs1))
	   && REGNO (rs1) != FRAME_POINTER_REGNUM
	   && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
	  || (rs2
	      && (! SPARC_INT_REG_P (REGNO (rs2))
		  && REGNO (rs2) != FRAME_POINTER_REGNUM
		  && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
	return 0;
    }

  return 1;
}
/* Return the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx sparc_tls_symbol = NULL_RTX;

static rtx
sparc_tls_get_addr (void)
{
  if (!sparc_tls_symbol)
    sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");

  return sparc_tls_symbol;
}

/* Return the Global Offset Table to be used in TLS mode.  */

static rtx
sparc_tls_got (void)
{
  /* In PIC mode, this is just the PIC offset table.  */
  if (flag_pic)
    {
      crtl->uses_pic_offset_table = 1;
      return pic_offset_table_rtx;
    }

  /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
     the GOT symbol with the 32-bit ABI, so we reload the GOT register.  */
  if (TARGET_SUN_TLS && TARGET_ARCH32)
    {
      load_got_register ();
      return global_offset_table_rtx;
    }

  /* In all other cases, we load a new pseudo with the GOT symbol.  */
  return copy_to_reg (sparc_got ());
}

/* Return true if X contains a thread-local symbol.  */

static bool
sparc_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
    x = XEXP (XEXP (x, 0), 0);

  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
    return true;

  /* That's all we handle in sparc_legitimize_tls_address for now.  */
  return false;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */
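
/* For reference (illustrative, 32-bit global-dynamic case, assuming %l7
   holds the GOT pointer and %t1/%t2 stand for scratch registers), the
   sequence emitted below corresponds to:

       sethi  %tgd_hi22(sym), %t1
       add    %t1, %tgd_lo10(sym), %t2
       add    %l7, %t2, %o0, %tgd_add(sym)
       call   __tls_get_addr, %tgd_call(sym)
        nop

   with the resulting address returned in %o0.  */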
static rtx
sparc_legitimize_tls_address (rtx addr)
{
  rtx temp1, temp2, temp3, ret, o0, got;
  rtx_insn *insn;

  gcc_assert (can_create_pseudo_p ());

  if (GET_CODE (addr) == SYMBOL_REF)
    switch (SYMBOL_REF_TLS_MODEL (addr))
      {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	start_sequence ();
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	ret = gen_reg_rtx (Pmode);
	o0 = gen_rtx_REG (Pmode, 8);
	got = sparc_tls_got ();
	emit_insn (gen_tgd_hi22 (temp1, addr));
	emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
	if (TARGET_ARCH32)
	  {
	    emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
	    insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
						   addr, const1_rtx));
	  }
	else
	  {
	    emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
	    insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
						   addr, const1_rtx));
	  }
	use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
	insn = get_insns ();
	end_sequence ();
	emit_libcall_block (insn, ret, o0, addr);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	start_sequence ();
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	temp3 = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	o0 = gen_rtx_REG (Pmode, 8);
	got = sparc_tls_got ();
	emit_insn (gen_tldm_hi22 (temp1));
	emit_insn (gen_tldm_lo10 (temp2, temp1));
	if (TARGET_ARCH32)
	  {
	    emit_insn (gen_tldm_add32 (o0, got, temp2));
	    insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
						    const1_rtx));
	  }
	else
	  {
	    emit_insn (gen_tldm_add64 (o0, got, temp2));
	    insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
						    const1_rtx));
	  }
	use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
	insn = get_insns ();
	end_sequence ();
	emit_libcall_block (insn, temp3, o0,
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					    UNSPEC_TLSLD_BASE));
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	emit_insn (gen_tldo_hix22 (temp1, addr));
	emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
	if (TARGET_ARCH32)
	  emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
	else
	  emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	temp1 = gen_reg_rtx (SImode);
	temp2 = gen_reg_rtx (SImode);
	temp3 = gen_reg_rtx (Pmode);
	got = sparc_tls_got ();
	emit_insn (gen_tie_hi22 (temp1, addr));
	emit_insn (gen_tie_lo10 (temp2, temp1, addr));
	if (TARGET_ARCH32)
	  emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
	else
	  emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
	if (TARGET_SUN_TLS)
	  {
	    ret = gen_reg_rtx (Pmode);
	    if (TARGET_ARCH32)
	      emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
					temp3, addr));
	    else
	      emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
					temp3, addr));
	  }
	else
	  ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
	break;

      case TLS_MODEL_LOCAL_EXEC:
	temp1 = gen_reg_rtx (Pmode);
	temp2 = gen_reg_rtx (Pmode);
	if (TARGET_ARCH32)
	  {
	    emit_insn (gen_tle_hix22_sp32 (temp1, addr));
	    emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
	  }
	else
	  {
	    emit_insn (gen_tle_hix22_sp64 (temp1, addr));
	    emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
	  }
	ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
	break;

      default:
	gcc_unreachable ();
      }

  else if (GET_CODE (addr) == CONST)
    {
      rtx base, offset;

      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);

      base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
      offset = XEXP (XEXP (addr, 0), 1);

      base = force_operand (base, NULL_RTX);
      if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
	offset = force_reg (Pmode, offset);
      ret = gen_rtx_PLUS (Pmode, base, offset);
    }

  else
    gcc_unreachable ();  /* for now ... */

  return ret;
}
/* Legitimize PIC addresses.  If the address is already position-independent,
   we return ORIG.  Newly generated position-independent addresses go into a
   reg.  This is REG if nonzero, otherwise we allocate register(s) as
   necessary.  */

static rtx
sparc_legitimize_pic_address (rtx orig, rtx reg)
{
  bool gotdata_op = false;

  if (GET_CODE (orig) == SYMBOL_REF
      /* See the comment in sparc_expand_move.  */
      || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
    {
      rtx pic_ref, address;
      rtx_insn *insn;

      if (reg == 0)
	{
	  gcc_assert (can_create_pseudo_p ());
	  reg = gen_reg_rtx (Pmode);
	}

      if (flag_pic == 2)
	{
	  /* If not during reload, allocate another temp reg here for loading
	     in the address, so that these instructions can be optimized
	     properly.  */
	  rtx temp_reg = (! can_create_pseudo_p ()
			  ? reg : gen_reg_rtx (Pmode));

	  /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
	     won't get confused into thinking that these two instructions
	     are loading in the true address of the symbol.  If in the
	     future a PIC rtx exists, that should be used instead.  */
	  if (TARGET_ARCH64)
	    {
	      emit_insn (gen_movdi_high_pic (temp_reg, orig));
	      emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
	    }
	  else
	    {
	      emit_insn (gen_movsi_high_pic (temp_reg, orig));
	      emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
	    }

	  address = temp_reg;
	  gotdata_op = true;
	}
      else
	address = orig;

      crtl->uses_pic_offset_table = 1;
      if (gotdata_op)
	{
	  if (TARGET_ARCH64)
	    insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
							pic_offset_table_rtx,
							address, orig));
	  else
	    insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
							pic_offset_table_rtx,
							address, orig));
	}
      else
	{
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_PLUS (Pmode,
					   pic_offset_table_rtx, address));
	  insn = emit_move_insn (reg, pic_ref);
	}

      /* Put a REG_EQUAL note on this insn, so that it can be optimized
	 by loop.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base, offset;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      if (reg == 0)
	{
	  gcc_assert (can_create_pseudo_p ());
	  reg = gen_reg_rtx (Pmode);
	}

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
      base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
      offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
					     base == reg ? NULL_RTX : reg);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (Pmode, base, INTVAL (offset));
	  else if (can_create_pseudo_p ())
	    offset = force_reg (Pmode, offset);
	  else
	    /* If we reach here, then something is seriously wrong.  */
	    gcc_unreachable ();
	}
      return gen_rtx_PLUS (Pmode, base, offset);
    }
  else if (GET_CODE (orig) == LABEL_REF)
    /* ??? We ought to be checking that the register is live instead, in case
       it is eliminated.  */
    crtl->uses_pic_offset_table = 1;

  return orig;
}
/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.

   On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG.  */

static rtx
sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  machine_mode mode)
{
  rtx orig_x = x;

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
		      force_operand (XEXP (x, 0), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      force_operand (XEXP (x, 1), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
    x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
		      XEXP (x, 1));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      force_operand (XEXP (x, 1), NULL_RTX));

  if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
    return x;

  if (sparc_tls_referenced_p (x))
    x = sparc_legitimize_tls_address (x);
  else if (flag_pic)
    x = sparc_legitimize_pic_address (x, NULL_RTX);
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      copy_to_mode_reg (Pmode, XEXP (x, 1)));
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
		      copy_to_mode_reg (Pmode, XEXP (x, 0)));
  else if (GET_CODE (x) == SYMBOL_REF
	   || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    x = copy_to_suggested_reg (x, NULL_RTX, Pmode);

  return x;
}
/* Delegitimize an address that was legitimized by the above function.  */

static rtx
sparc_delegitimize_address (rtx x)
{
  x = delegitimize_mem_from_attrs (x);

  if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
    switch (XINT (XEXP (x, 1), 1))
      {
      case UNSPEC_MOVE_PIC:
      case UNSPEC_TLSLE:
	x = XVECEXP (XEXP (x, 1), 0, 0);
	gcc_assert (GET_CODE (x) == SYMBOL_REF);
	break;
      default:
	break;
      }

  /* This is generated by mov{si,di}_pic_label_ref in PIC mode.  */
  if (GET_CODE (x) == MINUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
      && GET_CODE (XEXP (x, 1)) == LO_SUM
      && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
      && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
    {
      x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
      gcc_assert (GET_CODE (x) == LABEL_REF);
    }

  return x;
}
/* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For SPARC, we wish to handle addresses by splitting them into
   HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
   This cuts the number of extra insns by one.

   Do nothing when generating PIC code and the address is a symbolic
   operand or requires a scratch register.  */

rtx
sparc_legitimize_reload_address (rtx x, machine_mode mode,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  /* Decompose SImode constants into HIGH+LO_SUM.  */
  if (CONSTANT_P (x)
      && (mode != TFmode || TARGET_ARCH64)
      && GET_MODE (x) == SImode
      && GET_CODE (x) != LO_SUM
      && GET_CODE (x) != HIGH
      && sparc_cmodel <= CM_MEDLOW
      && !(flag_pic
	   && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
    {
      x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type)type);
      *win = 1;
      return x;
    }

  /* We have to recognize what we have already generated above.  */
  if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type)type);
      *win = 1;
      return x;
    }

  *win = 0;
  return x;
}
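
/* For instance (illustrative), a symbolic SImode address 'sym' handled
   above is split into a HIGH part that gets reloaded into a register and
   a LO_SUM that stays in the memory reference:

       sethi  %hi(sym), %g1
       ld     [%g1 + %lo(sym)], ...

   instead of first materializing the full address in a register.  */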
/* Return true if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   In PIC mode,

      (mem:HI [%l7+a])

   is not equivalent to

      (mem:QI [%l7+a]) (mem:QI [%l7+a+1])

   because [%l7+a+1] is interpreted as the address of (a+1).  */

static bool
sparc_mode_dependent_address_p (const_rtx addr,
				addr_space_t as ATTRIBUTE_UNUSED)
{
  if (flag_pic && GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      if (op0 == pic_offset_table_rtx
	  && symbolic_operand (op1, VOIDmode))
	return true;
    }

  return false;
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  const char *reg_name = reg_names[regno];

  /* Skip the leading '%' as that cannot be used in a
     symbol name.  */
  reg_name += 1;

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
}
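
/* For example, for the GOT register %l7 with USE_HIDDEN_LINKONCE, the
   generated name is "__sparc_get_pc_thunk.l7".  */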
/* Wrapper around the load_pcrel_sym{si,di} patterns.  */

static rtx
gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
{
  int orig_flag_pic = flag_pic;
  rtx insn;

  /* The load_pcrel_sym{si,di} patterns require absolute addressing.  */
  flag_pic = 0;
  if (TARGET_ARCH64)
    insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
  else
    insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
  flag_pic = orig_flag_pic;

  return insn;
}
/* Emit code to load the GOT register.  */

void
load_got_register (void)
{
  /* In PIC mode, this will retrieve pic_offset_table_rtx.  */
  if (!global_offset_table_rtx)
    global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);

  if (TARGET_VXWORKS_RTP)
    emit_insn (gen_vxworks_load_got ());
  else
    {
      /* The GOT symbol is subject to a PC-relative relocation so we need a
	 helper function to add the PC value and thus get the final value.  */
      if (!got_helper_rtx)
	{
	  char name[32];
	  get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
	  got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
	}

      emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
				     got_helper_rtx,
				     GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
    }

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.
     ??? In the case where we don't obey regdecls, this is not sufficient
     since we may not fall out the bottom.  */
  emit_use (global_offset_table_rtx);
}
/* Emit a call instruction with the pattern given by PAT.  ADDR is the
   address of the call target.  */

void
sparc_emit_call_insn (rtx pat, rtx addr)
{
  rtx_insn *insn;

  insn = emit_call_insn (pat);

  /* The PIC register is live on entry to VxWorks PIC PLT entries.  */
  if (TARGET_VXWORKS_RTP
      && flag_pic
      && GET_CODE (addr) == SYMBOL_REF
      && (SYMBOL_REF_DECL (addr)
	  ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
	  : !SYMBOL_REF_LOCAL_P (addr)))
    {
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
      crtl->uses_pic_offset_table = 1;
    }
}
/* Return 1 if RTX is a MEM which is known to be aligned to at
   least a DESIRED byte boundary.  */

int
mem_min_alignment (rtx mem, int desired)
{
  rtx addr, base, offset;

  /* If it's not a MEM we can't accept it.  */
  if (GET_CODE (mem) != MEM)
    return 0;

  /* Obviously...  */
  if (!TARGET_UNALIGNED_DOUBLES
      && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
    return 1;

  /* ??? The rest of the function predates MEM_ALIGN so
     there is probably a bit of redundancy.  */
  addr = XEXP (mem, 0);
  base = offset = NULL_RTX;
  if (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	{
	  base = XEXP (addr, 0);

	  /* What we are saying here is that if the base
	     REG is aligned properly, the compiler will make
	     sure any REG based index upon it will be so
	     as well.  */
	  if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	    offset = XEXP (addr, 1);
	  else
	    offset = const0_rtx;
	}
    }
  else if (GET_CODE (addr) == REG)
    {
      base = addr;
      offset = const0_rtx;
    }

  if (base != NULL_RTX)
    {
      int regno = REGNO (base);

      if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
	{
	  /* Check if the compiler has recorded some information
	     about the alignment of the base REG.  If reload has
	     completed, we already matched with proper alignments.
	     If not running global_alloc, reload might give us
	     unaligned pointer to local stack though.  */
	  if (((cfun != 0
		&& REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
	       || (optimize && reload_completed))
	      && (INTVAL (offset) & (desired - 1)) == 0)
	    return 1;
	}
      else
	{
	  if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
	    return 1;
	}
    }
  else if (! TARGET_UNALIGNED_DOUBLES
	   || CONSTANT_P (addr)
	   || GET_CODE (addr) == LO_SUM)
    {
      /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
	 is true, in which case we can only assume that an access is aligned if
	 it is to a constant address, or the address involves a LO_SUM.  */
      return 1;
    }

  /* An obviously unaligned address.  */
  return 0;
}
/* Vectors to keep interesting information about registers where it can easily
   be got.  We used to use the actual mode value as the bit number, but there
   are more than 32 modes now.  Instead we use two tables: one indexed by
   hard register number, and one indexed by mode.  */

/* The purpose of sparc_mode_class is to shrink the range of modes so that
   they all fit (as bit numbers) in a 32-bit word (again).  Each real mode is
   mapped into one sparc_mode_class mode.  */

enum sparc_mode_class {
  H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
  SF_MODE, DF_MODE, TF_MODE, OF_MODE,
  CC_MODE, CCFP_MODE
};

/* Modes for single-word and smaller quantities.  */
#define S_MODES \
  ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities.  */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-word and smaller quantities.  */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Modes for 8-word and smaller quantities.  */
#define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))

/* Modes for single-float quantities.  */
#define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-float and smaller quantities.  */
#define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-float and smaller quantities.  */
#define TF_MODES (DF_MODES | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and smaller quantities.  */
#define OF_MODES (TF_MODES | (1 << (int) OF_MODE))

/* Modes for double-float only quantities.  */
#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-float and double-float only quantities.  */
#define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and double-float only quantities.  */
#define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))

/* Modes for condition codes.  */
#define CC_MODES (1 << (int) CC_MODE)
#define CCFP_MODES (1 << (int) CCFP_MODE)
/* Value is 1 if register/mode pair is acceptable on sparc.

   The funny mixture of D and T modes is because integer operations
   do not specially operate on tetra quantities, so non-quad-aligned
   registers can hold quadword quantities (except %o4 and %i4 because
   they cross fixed registers).

   ??? Note that, despite the settings, non-double-aligned parameter
   registers can hold double-word quantities in 32-bit mode.  */

/* This points to either the 32-bit or the 64-bit version.  */
const int *hard_regno_mode_classes;

static const int hard_32bit_mode_classes[] = {
  S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc, %sfp, %gsr */
  CC_MODES, 0, D_MODES
};

static const int hard_64bit_mode_classes[] = {
  D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc, %sfp, %gsr */
  CC_MODES, 0, D_MODES
};
int sparc_mode_class [NUM_MACHINE_MODES];

enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];

static void
sparc_init_modes (void)
{
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      machine_mode m = (machine_mode) i;
      unsigned int size = GET_MODE_SIZE (m);

      switch (GET_MODE_CLASS (m))
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	case MODE_COMPLEX_INT:
	  if (size < 4)
	    sparc_mode_class[i] = 1 << (int) H_MODE;
	  else if (size == 4)
	    sparc_mode_class[i] = 1 << (int) S_MODE;
	  else if (size == 8)
	    sparc_mode_class[i] = 1 << (int) D_MODE;
	  else if (size == 16)
	    sparc_mode_class[i] = 1 << (int) T_MODE;
	  else if (size == 32)
	    sparc_mode_class[i] = 1 << (int) O_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_VECTOR_INT:
	  if (size == 4)
	    sparc_mode_class[i] = 1 << (int) SF_MODE;
	  else if (size == 8)
	    sparc_mode_class[i] = 1 << (int) DF_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_FLOAT:
	case MODE_COMPLEX_FLOAT:
	  if (size == 4)
	    sparc_mode_class[i] = 1 << (int) SF_MODE;
	  else if (size == 8)
	    sparc_mode_class[i] = 1 << (int) DF_MODE;
	  else if (size == 16)
	    sparc_mode_class[i] = 1 << (int) TF_MODE;
	  else if (size == 32)
	    sparc_mode_class[i] = 1 << (int) OF_MODE;
	  else
	    sparc_mode_class[i] = 0;
	  break;
	case MODE_CC:
	  if (m == CCFPmode || m == CCFPEmode)
	    sparc_mode_class[i] = 1 << (int) CCFP_MODE;
	  else
	    sparc_mode_class[i] = 1 << (int) CC_MODE;
	  break;
	default:
	  sparc_mode_class[i] = 0;
	  break;
	}
    }

  if (TARGET_ARCH64)
    hard_regno_mode_classes = hard_64bit_mode_classes;
  else
    hard_regno_mode_classes = hard_32bit_mode_classes;

  /* Initialize the array used by REGNO_REG_CLASS.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (i < 16 && TARGET_V8PLUS)
	sparc_regno_reg_class[i] = I64_REGS;
      else if (i < 32 || i == FRAME_POINTER_REGNUM)
	sparc_regno_reg_class[i] = GENERAL_REGS;
      else if (i < 64)
	sparc_regno_reg_class[i] = FP_REGS;
      else if (i < 96)
	sparc_regno_reg_class[i] = EXTRA_FP_REGS;
      else if (i < 100)
	sparc_regno_reg_class[i] = FPCC_REGS;
      else
	sparc_regno_reg_class[i] = NO_REGS;
    }
}
/* Return whether REGNO, a global or FP register, must be saved/restored.  */

static inline bool
save_global_or_fp_reg_p (unsigned int regno,
			 int leaf_function ATTRIBUTE_UNUSED)
{
  return !call_used_regs[regno] && df_regs_ever_live_p (regno);
}

/* Return whether the return address register (%i7) is needed.  */

static inline bool
return_addr_reg_needed_p (int leaf_function)
{
  /* If it is live, for example because of __builtin_return_address (0).  */
  if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
    return true;

  /* Otherwise, it is needed as save register if %o7 is clobbered.  */
  if (!leaf_function
      /* Loading the GOT register clobbers %o7.  */
      || crtl->uses_pic_offset_table
      || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
    return true;

  return false;
}

/* Return whether REGNO, a local or in register, must be saved/restored.  */

static bool
save_local_or_in_reg_p (unsigned int regno, int leaf_function)
{
  /* General case: call-saved registers live at some point.  */
  if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
    return true;

  /* Frame pointer register (%fp) if needed.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return true;

  /* Return address register (%i7) if needed.  */
  if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
    return true;

  /* GOT register (%l7) if needed.  */
  if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
    return true;

  /* If the function accesses prior frames, the frame pointer and the return
     address of the previous frame must be saved on the stack.  */
  if (crtl->accesses_prior_frames
      && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
    return true;

  return false;
}
/* Compute the frame size required by the function.  This function is called
   during the reload pass and also by sparc_expand_prologue.  */

HOST_WIDE_INT
sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
{
  HOST_WIDE_INT frame_size, apparent_frame_size;
  int args_size, n_global_fp_regs = 0;
  bool save_local_in_regs_p = false;
  unsigned int i;

  /* If the function allocates dynamic stack space, the dynamic offset is
     computed early and contains REG_PARM_STACK_SPACE, so we need to cope.  */
  if (leaf_function && !cfun->calls_alloca)
    args_size = 0;
  else
    args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);

  /* Calculate space needed for global registers.  */
  if (TARGET_ARCH64)
    {
      for (i = 0; i < 8; i++)
	if (save_global_or_fp_reg_p (i, 0))
	  n_global_fp_regs += 2;
    }
  else
    {
      for (i = 0; i < 8; i += 2)
	if (save_global_or_fp_reg_p (i, 0)
	    || save_global_or_fp_reg_p (i + 1, 0))
	  n_global_fp_regs += 2;
    }

  /* In the flat window model, find out which local and in registers need to
     be saved.  We don't reserve space in the current frame for them as they
     will be spilled into the register window save area of the caller's frame.
     However, as soon as we use this register window save area, we must create
     that of the current frame to make it the live one.  */
  if (TARGET_FLAT)
    for (i = 16; i < 32; i++)
      if (save_local_or_in_reg_p (i, leaf_function))
	{
	  save_local_in_regs_p = true;
	  break;
	}

  /* Calculate space needed for FP registers.  */
  for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
    if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
      n_global_fp_regs += 2;

  if (size == 0
      && n_global_fp_regs == 0
      && args_size == 0
      && !save_local_in_regs_p)
    frame_size = apparent_frame_size = 0;
  else
    {
      /* We subtract STARTING_FRAME_OFFSET, remember it's negative.  */
      apparent_frame_size = ROUND_UP (size - STARTING_FRAME_OFFSET, 8);
      apparent_frame_size += n_global_fp_regs * 4;

      /* We need to add the size of the outgoing argument area.  */
      frame_size = apparent_frame_size + ROUND_UP (args_size, 8);

      /* And that of the register window save area.  */
      frame_size += FIRST_PARM_OFFSET (cfun->decl);

      /* Finally, bump to the appropriate alignment.  */
      frame_size = SPARC_STACK_ALIGN (frame_size);
    }

  /* Set up values for use in prologue and epilogue.  */
  sparc_frame_size = frame_size;
  sparc_apparent_frame_size = apparent_frame_size;
  sparc_n_global_fp_regs = n_global_fp_regs;
  sparc_save_local_in_regs_p = save_local_in_regs_p;

  return frame_size;
}
/* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET.  */

HOST_WIDE_INT
sparc_initial_elimination_offset (int to)
{
  int offset;

  if (to == STACK_POINTER_REGNUM)
    offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
  else
    offset = 0;

  offset += SPARC_STACK_BIAS;
  return offset;
}
/* Output any necessary .register pseudo-ops.  */

void
sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
{
#ifdef HAVE_AS_REGISTER_PSEUDO_OP
  int i;

  if (TARGET_ARCH32)
    return;

  /* Check if %g[2367] were used without
     .register being printed for them already.  */
  for (i = 2; i < 8; i++)
    {
      if (df_regs_ever_live_p (i)
	  && ! sparc_hard_reg_printed [i])
	{
	  sparc_hard_reg_printed [i] = 1;
	  /* %g7 is used as TLS base register, use #ignore
	     for it instead of #scratch.  */
	  fprintf (file, "\t.register\t%%g%d, #%s\n", i,
		   i == 7 ? "ignore" : "scratch");
	}
      if (i == 3) i = 5;
    }
#endif
}
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 4096
#error Cannot use indexed addressing mode for stack probing
#endif
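
/* Note: STACK_CHECK_PROBE_INTERVAL_EXP defaults to 12 in GCC, so
   PROBE_INTERVAL is normally 4096, which also stays within the reach of
   the 13-bit signed immediate used by the probe stores below.  */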
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.

   Note that we don't use the REG+REG addressing mode for the probes because
   of the stack bias in 64-bit mode.  And it doesn't really buy us anything
   so the advantages of having a single code path win here.  */

static void
sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  rtx g1 = gen_rtx_REG (Pmode, 1);

  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (size <= PROBE_INTERVAL)
    {
      emit_move_insn (g1, GEN_INT (first));
      emit_insn (gen_rtx_SET (g1,
			      gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
      emit_stack_probe (plus_constant (Pmode, g1, -size));
    }

  /* The run-time loop is made up of 9 insns in the generic case while the
     compile-time loop is made up of 4+2*(n-2) insns for n # of intervals.  */
  else if (size <= 4 * PROBE_INTERVAL)
    {
      HOST_WIDE_INT i;

      emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
      emit_insn (gen_rtx_SET (g1,
			      gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
      emit_stack_probe (g1);

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
	 it exceeds SIZE.  If only two probes are needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	{
	  emit_insn (gen_rtx_SET (g1,
				  plus_constant (Pmode, g1, -PROBE_INTERVAL)));
	  emit_stack_probe (g1);
	}

      emit_stack_probe (plus_constant (Pmode, g1,
				       (i - PROBE_INTERVAL) - size));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx g4 = gen_rtx_REG (Pmode, 4);

      emit_move_insn (g1, GEN_INT (first));


      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
      emit_move_insn (g4, GEN_INT (rounded_size));


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (g1,
			      gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      emit_insn (gen_rtx_SET (g4, gen_rtx_MINUS (Pmode, g1, g4)));


      /* Step 3: the loop

	 while (TEST_ADDR != LAST_ADDR)
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_ARCH64)
	emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
      else
	emit_insn (gen_probe_stack_rangesi (g1, g1, g4));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("add\t%0, %1, %0", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[1] = reg2;
  output_asm_insn ("cmp\t%0, %1", xops);

  /* Probe at TEST_ADDR and branch.  */
  if (TARGET_ARCH64)
    fputs ("\tbne,pt\t%xcc,", asm_out_file);
  else
    fputs ("\tbne\t", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);
  xops[1] = GEN_INT (SPARC_STACK_BIAS);
  output_asm_insn (" st\t%%g0, [%0+%1]", xops);

  return "";
}
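
/* For reference (illustrative, assuming PROBE_INTERVAL == 4096 and the
   32-bit stack bias of 0), the loop emitted above looks like:

   .LPSRL0:
	add	%g1, -4096, %g1
	cmp	%g1, %g4
	bne	.LPSRL0
	 st	%g0, [%g1+0]

   with the probe store sitting in the branch delay slot.  */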
/* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
   needed.  LOW is supposed to be double-word aligned for 32-bit registers.
   SAVE_P decides whether a register must be saved/restored.  ACTION_TRUE
   is the action to be performed if SAVE_P returns true and ACTION_FALSE
   the action to be performed if it returns false.  Return the new offset.  */

typedef bool (*sorr_pred_t) (unsigned int, int);
typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;

static int
emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
			   int offset, int leaf_function, sorr_pred_t save_p,
			   sorr_act_t action_true, sorr_act_t action_false)
{
  unsigned int i;
  rtx mem;
  rtx_insn *insn;

  if (TARGET_ARCH64 && high <= 32)
    {
      int fp_offset = -1;

      for (i = low; i < high; i++)
	{
	  if (save_p (i, leaf_function))
	    {
	      mem = gen_frame_mem (DImode, plus_constant (Pmode,
							  base, offset));
	      if (action_true == SORR_SAVE)
		{
		  insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
		  RTX_FRAME_RELATED_P (insn) = 1;
		}
	      else  /* action_true == SORR_RESTORE */
		{
		  /* The frame pointer must be restored last since its old
		     value may be used as base address for the frame.  This
		     is problematic in 64-bit mode only because of the lack
		     of double-word load instruction.  */
		  if (i == HARD_FRAME_POINTER_REGNUM)
		    fp_offset = offset;
		  else
		    emit_move_insn (gen_rtx_REG (DImode, i), mem);
		}
	      offset += 8;
	    }
	  else if (action_false == SORR_ADVANCE)
	    offset += 8;
	}

      if (fp_offset >= 0)
	{
	  mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
	  emit_move_insn (hard_frame_pointer_rtx, mem);
	}
    }
  else
    {
      for (i = low; i < high; i += 2)
	{
	  bool reg0 = save_p (i, leaf_function);
	  bool reg1 = save_p (i + 1, leaf_function);
	  machine_mode mode;
	  int regno;

	  if (reg0 && reg1)
	    {
	      mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
	      regno = i;
	    }
	  else if (reg0)
	    {
	      mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
	      regno = i;
	    }
	  else if (reg1)
	    {
	      mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
	      regno = i + 1;
	      offset += 4;
	    }
	  else
	    {
	      if (action_false == SORR_ADVANCE)
		offset += 8;
	      continue;
	    }

	  mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
	  if (action_true == SORR_SAVE)
	    {
	      insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      if (mode == DImode)
		{
		  rtx set1, set2;
		  mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
							      offset));
		  set1 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno));
		  RTX_FRAME_RELATED_P (set1) = 1;
		  mem
		    = gen_frame_mem (SImode, plus_constant (Pmode, base,
							    offset + 4));
		  set2 = gen_rtx_SET (mem, gen_rtx_REG (SImode, regno + 1));
		  RTX_FRAME_RELATED_P (set2) = 1;
		  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
				gen_rtx_PARALLEL (VOIDmode,
						  gen_rtvec (2, set1, set2)));
		}
	    }
	  else  /* action_true == SORR_RESTORE */
	    emit_move_insn (gen_rtx_REG (mode, regno), mem);

	  /* Bump and round down to double word
	     in case we already bumped by 4.  */
	  offset = ROUND_DOWN (offset + 8, 8);
	}
    }

  return offset;
}
/* Emit code to adjust BASE to OFFSET.  Return the new base.  */

static rtx
emit_adjust_base_to_offset (rtx base, int offset)
{
  /* ??? This might be optimized a little as %g1 might already have a
     value close enough that a single add insn will do.  */
  /* ??? Although, all of this is probably only a temporary fix because
     if %g1 can hold a function result, then sparc_expand_epilogue will
     lose (the result will be clobbered).  */
  rtx new_base = gen_rtx_REG (Pmode, 1);
  emit_move_insn (new_base, GEN_INT (offset));
  emit_insn (gen_rtx_SET (new_base, gen_rtx_PLUS (Pmode, base, new_base)));
  return new_base;
}
/* Emit code to save/restore call-saved global and FP registers.  */

static void
emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
{
  if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
    {
      base = emit_adjust_base_to_offset (base, offset);
      offset = 0;
    }

  offset
    = emit_save_or_restore_regs (0, 8, base, offset, 0,
				 save_global_or_fp_reg_p, action, SORR_NONE);
  emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
			     save_global_or_fp_reg_p, action, SORR_NONE);
}

/* Emit code to save/restore call-saved local and in registers.  */

static void
emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
{
  if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
    {
      base = emit_adjust_base_to_offset (base, offset);
      offset = 0;
    }

  emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
			     save_local_or_in_reg_p, action, SORR_ADVANCE);
}
/* Emit a window_save insn.  */

static rtx_insn *
emit_window_save (rtx increment)
{
  rtx_insn *insn = emit_insn (gen_window_save (increment));
  RTX_FRAME_RELATED_P (insn) = 1;

  /* The incoming return address (%o7) is saved in %i7.  */
  add_reg_note (insn, REG_CFA_REGISTER,
		gen_rtx_SET (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
			     gen_rtx_REG (Pmode,
					  INCOMING_RETURN_ADDR_REGNUM)));

  /* The window save event.  */
  add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);

  /* The CFA is %fp, the hard frame pointer.  */
  add_reg_note (insn, REG_CFA_DEF_CFA,
		plus_constant (Pmode, hard_frame_pointer_rtx,
			       INCOMING_FRAME_SP_OFFSET));

  return insn;
}
/* Generate an increment for the stack pointer.  */

static rtx
gen_stack_pointer_inc (rtx increment)
{
  return gen_rtx_SET (stack_pointer_rtx,
		      gen_rtx_PLUS (Pmode,
				    stack_pointer_rtx,
				    increment));
}
/* Expand the function prologue.  The prologue is responsible for reserving
   storage for the frame, saving the call-saved registers and loading the
   GOT register if needed.  */

void
sparc_expand_prologue (void)
{
  HOST_WIDE_INT size;
  rtx_insn *insn;

  /* Compute a snapshot of crtl->uses_only_leaf_regs.  Relying
     on the final value of the flag means deferring the prologue/epilogue
     expansion until just before the second scheduling pass, which is too
     late to emit multiple epilogues or return insns.

     Of course we are making the assumption that the value of the flag
     will not change between now and its final value.  Of the three parts
     of the formula, only the last one can reasonably vary.  Let's take a
     closer look, after assuming that the first two ones are set to true
     (otherwise the last value is effectively silenced).

     If only_leaf_regs_used returns false, the global predicate will also
     be false so the actual frame size calculated below will be positive.
     As a consequence, the save_register_window insn will be emitted in
     the instruction stream; now this insn explicitly references %fp
     which is not a leaf register so only_leaf_regs_used will always
     return false subsequently.

     If only_leaf_regs_used returns true, we hope that the subsequent
     optimization passes won't cause non-leaf registers to pop up.  For
     example, the regrename pass has special provisions to not rename to
     non-leaf registers in a leaf function.  */
  sparc_leaf_function_p
    = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();

  size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      if (crtl->is_leaf && !cfun->calls_alloca)
	{
	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
	    sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
					  size - STACK_CHECK_PROTECT);
	}
      else if (size > 0)
	sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
    }

  if (size == 0)
    ; /* do nothing.  */
  else if (sparc_leaf_function_p)
    {
      rtx size_int_rtx = GEN_INT (-size);
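
      /* Note: the stack pointer adjustment must fit in the 13-bit signed
	 immediate field of the add instruction (-4096..4095), hence the
	 three cases below: a single adjustment for sizes up to 4096, two
	 adjustments up to 8192, and a register-based adjustment beyond
	 that.  */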
5712 if (size <= 4096)
5713 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5714 else if (size <= 8192)
5716 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5717 RTX_FRAME_RELATED_P (insn) = 1;
5719 /* %sp is still the CFA register. */
5720 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5722 else
5724 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5725 emit_move_insn (size_rtx, size_int_rtx);
5726 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5727 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5728 gen_stack_pointer_inc (size_int_rtx));
5731 RTX_FRAME_RELATED_P (insn) = 1;
5733 else
5735 rtx size_int_rtx = GEN_INT (-size);
5737 if (size <= 4096)
5738 emit_window_save (size_int_rtx);
5739 else if (size <= 8192)
5741 emit_window_save (GEN_INT (-4096));
5743 /* %sp is not the CFA register anymore. */
5744 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5746 /* Make sure no %fp-based store is issued until after the frame is
5747 established. The offset between the frame pointer and the stack
5748 pointer is calculated relative to the value of the stack pointer
5749 at the end of the function prologue, and moving instructions that
5750 access the stack via the frame pointer between the instructions
5751 that decrement the stack pointer could result in accessing the
5752 register window save area, which is volatile. */
5753 emit_insn (gen_frame_blockage ());
5755 else
5757 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5758 emit_move_insn (size_rtx, size_int_rtx);
5759 emit_window_save (size_rtx);
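/* To illustrate the size regimes above (a sketch, not taken from an
   actual build): a leaf function establishes its frame with

     size <= 4096:          add   %sp, -size, %sp
     4096 < size <= 8192:   add   %sp, -4096, %sp
                            add   %sp, 4096-size, %sp
     size > 8192:           sethi %hi(-size), %g1
                            or    %g1, %lo(-size), %g1
                            add   %sp, %g1, %sp

   while a non-leaf function uses "save %sp, -size, %sp" (via
   emit_window_save) in place of the first add.  The 4096 cutoff stems
   from the 13-bit signed immediate field of add/save, which covers
   [-4096, 4095].  */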
5763 if (sparc_leaf_function_p)
5765 sparc_frame_base_reg = stack_pointer_rtx;
5766 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5768 else
5770 sparc_frame_base_reg = hard_frame_pointer_rtx;
5771 sparc_frame_base_offset = SPARC_STACK_BIAS;
5774 if (sparc_n_global_fp_regs > 0)
5775 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5776 sparc_frame_base_offset
5777 - sparc_apparent_frame_size,
5778 SORR_SAVE);
5780 /* Load the GOT register if needed. */
5781 if (crtl->uses_pic_offset_table)
5782 load_got_register ();
5784 /* Advertise that the data calculated just above are now valid. */
5785 sparc_prologue_data_valid_p = true;
5788 /* Expand the function prologue for the -mflat model. The prologue is
5789 responsible for reserving storage for the frame, saving the call-saved
5790 registers and loading the GOT register if needed. */
5792 void
5793 sparc_flat_expand_prologue (void)
5795 HOST_WIDE_INT size;
5796 rtx_insn *insn;
5798 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
5800 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5802 if (flag_stack_usage_info)
5803 current_function_static_stack_size = size;
5805 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5807 if (crtl->is_leaf && !cfun->calls_alloca)
5809 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
5810 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
5811 size - STACK_CHECK_PROTECT);
5813 else if (size > 0)
5814 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5817 if (sparc_save_local_in_regs_p)
5818 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5819 SORR_SAVE);
5821 if (size == 0)
5822 ; /* do nothing. */
5823 else
5825 rtx size_int_rtx, size_rtx;
5827 size_rtx = size_int_rtx = GEN_INT (-size);
5829 /* We establish the frame (i.e. decrement the stack pointer) first, even
5830 if we use a frame pointer, because for the sake of ABI compatibility
5831 we cannot clobber any call-saved registers, including the frame
5832 pointer, before a new register save area has been created. */
5833 if (size <= 4096)
5834 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5835 else if (size <= 8192 && !frame_pointer_needed)
5837 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5838 RTX_FRAME_RELATED_P (insn) = 1;
5839 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5841 else
5843 size_rtx = gen_rtx_REG (Pmode, 1);
5844 emit_move_insn (size_rtx, size_int_rtx);
5845 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5846 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5847 gen_stack_pointer_inc (size_int_rtx));
5849 RTX_FRAME_RELATED_P (insn) = 1;
5851 /* Ensure nothing is scheduled until after the frame is established. */
5852 emit_insn (gen_blockage ());
5854 if (frame_pointer_needed)
5856 insn = emit_insn (gen_rtx_SET (hard_frame_pointer_rtx,
5857 gen_rtx_MINUS (Pmode,
5858 stack_pointer_rtx,
5859 size_rtx)));
5860 RTX_FRAME_RELATED_P (insn) = 1;
5862 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5863 gen_rtx_SET (hard_frame_pointer_rtx,
5864 plus_constant (Pmode, stack_pointer_rtx,
5865 size)));
5868 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5870 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5871 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5873 insn = emit_move_insn (i7, o7);
5874 RTX_FRAME_RELATED_P (insn) = 1;
5876 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (i7, o7));
5878 /* Prevent this instruction from ever being considered dead,
5879 even if this function has no epilogue. */
5880 emit_use (i7);
5884 if (frame_pointer_needed)
5886 sparc_frame_base_reg = hard_frame_pointer_rtx;
5887 sparc_frame_base_offset = SPARC_STACK_BIAS;
5889 else
5891 sparc_frame_base_reg = stack_pointer_rtx;
5892 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5895 if (sparc_n_global_fp_regs > 0)
5896 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5897 sparc_frame_base_offset
5898 - sparc_apparent_frame_size,
5899 SORR_SAVE);
5901 /* Load the GOT register if needed. */
5902 if (crtl->uses_pic_offset_table)
5903 load_got_register ();
5905 /* Advertise that the data calculated just above are now valid. */
5906 sparc_prologue_data_valid_p = true;
5909 /* This function generates the assembly code for function entry, which boils
5910 down to emitting the necessary .register directives. */
5912 static void
5913 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5915 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5916 if (!TARGET_FLAT)
5917 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
5919 sparc_output_scratch_registers (file);
5922 /* Expand the function epilogue, either normal or part of a sibcall.
5923 We emit all the instructions except the return or the call. */
5925 void
5926 sparc_expand_epilogue (bool for_eh)
5928 HOST_WIDE_INT size = sparc_frame_size;
5930 if (cfun->calls_alloca)
5931 emit_insn (gen_frame_blockage ());
5933 if (sparc_n_global_fp_regs > 0)
5934 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5935 sparc_frame_base_offset
5936 - sparc_apparent_frame_size,
5937 SORR_RESTORE);
5939 if (size == 0 || for_eh)
5940 ; /* do nothing. */
5941 else if (sparc_leaf_function_p)
5943 if (size <= 4096)
5944 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
5945 else if (size <= 8192)
5947 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5948 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
5950 else
5952 rtx reg = gen_rtx_REG (Pmode, 1);
5953 emit_move_insn (reg, GEN_INT (size));
5954 emit_insn (gen_stack_pointer_inc (reg));
5959 /* Expand the function epilogue for the -mflat model, either normal or
5960 part of a sibcall. We emit all the instructions except the return or the call. */
5962 void
5963 sparc_flat_expand_epilogue (bool for_eh)
5965 HOST_WIDE_INT size = sparc_frame_size;
5967 if (sparc_n_global_fp_regs > 0)
5968 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5969 sparc_frame_base_offset
5970 - sparc_apparent_frame_size,
5971 SORR_RESTORE);
5973 /* If we have a frame pointer, we'll need both to restore it before the
5974 frame is destroyed and to use its current value in destroying the frame.
5975 Since we don't have an atomic way to do that in the flat window model,
5976 we save the current value into a temporary register (%g1). */
5977 if (frame_pointer_needed && !for_eh)
5978 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5980 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5981 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5982 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5984 if (sparc_save_local_in_regs_p)
5985 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5986 sparc_frame_base_offset,
5987 SORR_RESTORE);
5989 if (size == 0 || for_eh)
5990 ; /* do nothing. */
5991 else if (frame_pointer_needed)
5993 /* Make sure the frame is destroyed after everything else is done. */
5994 emit_insn (gen_blockage ());
5996 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5998 else
6000 /* Likewise. */
6001 emit_insn (gen_blockage ());
6003 if (size <= 4096)
6004 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
6005 else if (size <= 8192)
6007 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
6008 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
6010 else
6012 rtx reg = gen_rtx_REG (Pmode, 1);
6013 emit_move_insn (reg, GEN_INT (size));
6014 emit_insn (gen_stack_pointer_inc (reg));
6019 /* Return true if it is appropriate to emit `return' instructions in the
6020 body of a function. */
6022 bool
6023 sparc_can_use_return_insn_p (void)
6025 return sparc_prologue_data_valid_p
6026 && sparc_n_global_fp_regs == 0
6027 && (TARGET_FLAT
6028 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
6029 : (sparc_frame_size == 0 || !sparc_leaf_function_p));
6032 /* This function generates the assembly code for function exit. */
6034 static void
6035 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6037 /* If the last two instructions of a function are "call foo; dslot;"
6038 the return address might point to the first instruction in the next
6039 function and we have to output a dummy nop for the sake of sane
6040 backtraces in such cases. This is pointless for sibling calls since
6041 the return address is explicitly adjusted. */
6043 rtx_insn *insn = get_last_insn ();
6045 rtx last_real_insn = prev_real_insn (insn);
6046 if (last_real_insn
6047 && NONJUMP_INSN_P (last_real_insn)
6048 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
6049 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
6051 if (last_real_insn
6052 && CALL_P (last_real_insn)
6053 && !SIBLING_CALL_P (last_real_insn))
6054 fputs ("\tnop\n", file);
6056 sparc_output_deferred_case_vectors ();
6059 /* Output a 'restore' instruction. */
6061 static void
6062 output_restore (rtx pat)
6064 rtx operands[3];
6066 if (! pat)
6068 fputs ("\t restore\n", asm_out_file);
6069 return;
6072 gcc_assert (GET_CODE (pat) == SET);
6074 operands[0] = SET_DEST (pat);
6075 pat = SET_SRC (pat);
6077 switch (GET_CODE (pat))
6079 case PLUS:
6080 operands[1] = XEXP (pat, 0);
6081 operands[2] = XEXP (pat, 1);
6082 output_asm_insn (" restore %r1, %2, %Y0", operands);
6083 break;
6084 case LO_SUM:
6085 operands[1] = XEXP (pat, 0);
6086 operands[2] = XEXP (pat, 1);
6087 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
6088 break;
6089 case ASHIFT:
6090 operands[1] = XEXP (pat, 0);
6091 gcc_assert (XEXP (pat, 1) == const1_rtx);
6092 output_asm_insn (" restore %r1, %r1, %Y0", operands);
6093 break;
6094 default:
6095 operands[1] = pat;
6096 output_asm_insn (" restore %%g0, %1, %Y0", operands);
6097 break;
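/* For example (illustrative): for PAT = (set (reg:SI %i0) (plus:SI
   (reg:SI %o0) (const_int 8))), the PLUS case above folds the addition
   into the restore itself, emitting "restore %o0, 8, %o0"; the
   destination is printed with the %Y code, which adjusts %i0 for the
   register window shift performed by the restore.  */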
6101 /* Output a return. */
6103 const char *
6104 output_return (rtx_insn *insn)
6106 if (crtl->calls_eh_return)
6108 /* If the function uses __builtin_eh_return, the eh_return
6109 machinery occupies the delay slot. */
6110 gcc_assert (!final_sequence);
6112 if (flag_delayed_branch)
6114 if (!TARGET_FLAT && TARGET_V9)
6115 fputs ("\treturn\t%i7+8\n", asm_out_file);
6116 else
6118 if (!TARGET_FLAT)
6119 fputs ("\trestore\n", asm_out_file);
6121 fputs ("\tjmp\t%o7+8\n", asm_out_file);
6124 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
6126 else
6128 if (!TARGET_FLAT)
6129 fputs ("\trestore\n", asm_out_file);
6131 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
6132 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
6135 else if (sparc_leaf_function_p || TARGET_FLAT)
6137 /* This is a leaf or flat function so we don't have to bother restoring
6138 the register window, which frees us from dealing with the convoluted
6139 semantics of restore/return. We simply output the jump to the
6140 return address and the insn in the delay slot (if any). */
6142 return "jmp\t%%o7+%)%#";
6144 else
6146 /* This is a regular function so we have to restore the register window.
6147 We may have a pending insn for the delay slot, which will be either
6148 combined with the 'restore' instruction or put in the delay slot of
6149 the 'return' instruction. */
6151 if (final_sequence)
6153 rtx delay, pat;
6155 delay = NEXT_INSN (insn);
6156 gcc_assert (delay);
6158 pat = PATTERN (delay);
6160 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
6162 epilogue_renumber (&pat, 0);
6163 return "return\t%%i7+%)%#";
6165 else
6167 output_asm_insn ("jmp\t%%i7+%)", NULL);
6168 output_restore (pat);
6169 PATTERN (delay) = gen_blockage ();
6170 INSN_CODE (delay) = -1;
6173 else
6175 /* The delay slot is empty. */
6176 if (TARGET_V9)
6177 return "return\t%%i7+%)\n\t nop";
6178 else if (flag_delayed_branch)
6179 return "jmp\t%%i7+%)\n\t restore";
6180 else
6181 return "restore\n\tjmp\t%%o7+%)\n\t nop";
6185 return "";
6188 /* Output a sibling call. */
6190 const char *
6191 output_sibcall (rtx_insn *insn, rtx call_operand)
6193 rtx operands[1];
6195 gcc_assert (flag_delayed_branch);
6197 operands[0] = call_operand;
6199 if (sparc_leaf_function_p || TARGET_FLAT)
6201 /* This is a leaf or flat function so we don't have to bother restoring
6202 the register window. We simply output the jump to the function and
6203 the insn in the delay slot (if any). */
6205 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
6207 if (final_sequence)
6208 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
6209 operands);
6210 else
6211 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6212 it into a branch if possible. */
6213 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6214 operands);
6216 else
6218 /* This is a regular function so we have to restore the register window.
6219 We may have a pending insn for the delay slot, which will be combined
6220 with the 'restore' instruction. */
6222 output_asm_insn ("call\t%a0, 0", operands);
6224 if (final_sequence)
6226 rtx_insn *delay = NEXT_INSN (insn);
6227 gcc_assert (delay);
6229 output_restore (PATTERN (delay));
6231 PATTERN (delay) = gen_blockage ();
6232 INSN_CODE (delay) = -1;
6234 else
6235 output_restore (NULL_RTX);
6238 return "";
6241 /* Functions for handling argument passing.
6243 For 32-bit, the first 6 args are normally in registers and the rest are
6244 pushed. Any arg that starts within the first 6 words is at least
6245 partially passed in a register unless its data type forbids it.
6247 For 64-bit, the argument registers are laid out as an array of 16 elements
6248 and arguments are added sequentially. The first 6 int args and up to the
6249 first 16 fp args (depending on size) are passed in regs.
6251 Slot  Stack     Integral   Float   Float in structure   Double   Long Double
6252 ----  -----     --------   -----   ------------------   ------   -----------
6253  15  [SP+248]              %f31       %f30,%f31          %d30
6254  14  [SP+240]              %f29       %f28,%f29          %d28        %q28
6255  13  [SP+232]              %f27       %f26,%f27          %d26
6256  12  [SP+224]              %f25       %f24,%f25          %d24        %q24
6257  11  [SP+216]              %f23       %f22,%f23          %d22
6258  10  [SP+208]              %f21       %f20,%f21          %d20        %q20
6259   9  [SP+200]              %f19       %f18,%f19          %d18
6260   8  [SP+192]              %f17       %f16,%f17          %d16        %q16
6261   7  [SP+184]              %f15       %f14,%f15          %d14
6262   6  [SP+176]              %f13       %f12,%f13          %d12        %q12
6263   5  [SP+168]     %o5      %f11       %f10,%f11          %d10
6264   4  [SP+160]     %o4      %f9        %f8,%f9            %d8         %q8
6265   3  [SP+152]     %o3      %f7        %f6,%f7            %d6
6266   2  [SP+144]     %o2      %f5        %f4,%f5            %d4         %q4
6267   1  [SP+136]     %o1      %f3        %f2,%f3            %d2
6268   0  [SP+128]     %o0      %f1        %f0,%f1            %d0         %q0
6270 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
6272 Integral arguments are always passed as 64-bit quantities appropriately
6273 extended.
6275 Passing of floating point values is handled as follows.
6276 If a prototype is in scope:
6277 If the value is in a named argument (i.e. not a stdarg function or a
6278 value not part of the `...') then the value is passed in the appropriate
6279 fp reg.
6280 If the value is part of the `...' and is passed in one of the first 6
6281 slots then the value is passed in the appropriate int reg.
6282 If the value is part of the `...' and is not passed in one of the first 6
6283 slots then the value is passed in memory.
6284 If a prototype is not in scope:
6285 If the value is one of the first 6 arguments the value is passed in the
6286 appropriate integer reg and the appropriate fp reg.
6287 If the value is not one of the first 6 arguments the value is passed in
6288 the appropriate fp reg and in memory.
6291 Summary of the calling conventions implemented by GCC on the SPARC:
6293 32-bit ABI:
6294                            size    argument    return value
6296 small integer               <4     int. reg.    int. reg.
6297 word                         4     int. reg.    int. reg.
6298 double word                  8     int. reg.    int. reg.
6300 _Complex small integer      <8     int. reg.    int. reg.
6301 _Complex word                8     int. reg.    int. reg.
6302 _Complex double word        16     memory       int. reg.
6304 vector integer             <=8     int. reg.    FP reg.
6305 vector integer              >8     memory       memory
6307 float                        4     int. reg.    FP reg.
6308 double                       8     int. reg.    FP reg.
6309 long double                 16     memory       memory
6311 _Complex float               8     memory       FP reg.
6312 _Complex double             16     memory       FP reg.
6313 _Complex long double        32     memory       FP reg.
6315 vector float               any     memory       memory
6317 aggregate                  any     memory       memory
6321 64-bit ABI:
6322                            size    argument    return value
6324 small integer               <8     int. reg.    int. reg.
6325 word                         8     int. reg.    int. reg.
6326 double word                 16     int. reg.    int. reg.
6328 _Complex small integer     <16     int. reg.    int. reg.
6329 _Complex word               16     int. reg.    int. reg.
6330 _Complex double word        32     memory       int. reg.
6332 vector integer            <=16     FP reg.      FP reg.
6333 vector integer         16<s<=32    memory       FP reg.
6334 vector integer             >32     memory       memory
6336 float                        4     FP reg.      FP reg.
6337 double                       8     FP reg.      FP reg.
6338 long double                 16     FP reg.      FP reg.
6340 _Complex float               8     FP reg.      FP reg.
6341 _Complex double             16     FP reg.      FP reg.
6342 _Complex long double        32     memory       FP reg.
6344 vector float              <=16     FP reg.      FP reg.
6345 vector float           16<s<=32    memory       FP reg.
6346 vector float               >32     memory       memory
6348 aggregate                 <=16     reg.         reg.
6349 aggregate              16<s<=32    memory       reg.
6350 aggregate                  >32     memory       memory
6354 Note #1: complex floating-point types follow the extended SPARC ABIs as
6355 implemented by the Sun compiler.
6357 Note #2: integral vector types follow the scalar floating-point types
6358 conventions to match what is implemented by the Sun VIS SDK.
6360 Note #3: floating-point vector types follow the aggregate types
6361 conventions. */
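/* As a worked example of the 64-bit conventions above (illustrative,
   assuming a prototype is in scope):

     double f (int a, double b, struct { float x; float y; } s);

   'a' occupies slot 0 and is passed in %o0, 'b' occupies slot 1 and
   is passed in %d2, 's' occupies slot 2 with x in %f4 and y in %f5,
   and the result is returned in %d0, as per the table above.  */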
6364 /* Maximum number of int regs for args. */
6365 #define SPARC_INT_ARG_MAX 6
6366 /* Maximum number of fp regs for args. */
6367 #define SPARC_FP_ARG_MAX 16
6368 /* Number of words (partially) occupied for a given size in units. */
6369 #define CEIL_NWORDS(SIZE) CEIL((SIZE), UNITS_PER_WORD)
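/* E.g. (illustrative) CEIL_NWORDS (10) is 3 with 4-byte words and 2
   with 8-byte words.  */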
6371 /* Handle the INIT_CUMULATIVE_ARGS macro.
6372 Initialize a variable CUM of type CUMULATIVE_ARGS
6373 for a call to a function whose data type is FNTYPE.
6374 For a library call, FNTYPE is 0. */
6376 void
6377 init_cumulative_args (struct sparc_args *cum, tree fntype, rtx, tree)
6379 cum->words = 0;
6380 cum->prototype_p = fntype && prototype_p (fntype);
6381 cum->libcall_p = !fntype;
6384 /* Handle promotion of pointer and integer arguments. */
6386 static machine_mode
6387 sparc_promote_function_mode (const_tree type, machine_mode mode,
6388 int *punsignedp, const_tree, int)
6390 if (type && POINTER_TYPE_P (type))
6392 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6393 return Pmode;
6396 /* Integral arguments are passed as full words, as per the ABI. */
6397 if (GET_MODE_CLASS (mode) == MODE_INT
6398 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6399 return word_mode;
6401 return mode;
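/* Illustrative example: a 'short' argument (HImode) is promoted to
   word_mode, i.e. SImode in 32-bit mode and DImode in 64-bit mode,
   while a pointer argument is extended to Pmode with the signedness
   given by POINTERS_EXTEND_UNSIGNED.  */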
6404 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6406 static bool
6407 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6409 return TARGET_ARCH64;
6412 /* Traverse the record TYPE recursively and call FUNC on its fields.
6413 NAMED is true if this is for a named parameter. DATA is passed
6414 to FUNC for each field. OFFSET is the starting position and
6415 PACKED is true if we are inside a packed record. */
6417 template <typename T, void Func (const_tree, HOST_WIDE_INT, bool, T*)>
6418 static void
6419 traverse_record_type (const_tree type, bool named, T *data,
6420 HOST_WIDE_INT offset = 0, bool packed = false)
6422 /* The ABI obviously doesn't specify how packed structures are passed.
6423 These are passed in integer regs if possible, otherwise memory. */
6424 if (!packed)
6425 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6426 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6428 packed = true;
6429 break;
6432 /* Walk the real fields, but skip those with no size or a zero size.
6433 ??? Fields with variable offset are handled as having zero offset. */
6434 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6435 if (TREE_CODE (field) == FIELD_DECL)
6437 if (!DECL_SIZE (field) || integer_zerop (DECL_SIZE (field)))
6438 continue;
6440 HOST_WIDE_INT bitpos = offset;
6441 if (TREE_CODE (DECL_FIELD_OFFSET (field)) == INTEGER_CST)
6442 bitpos += int_bit_position (field);
6444 tree field_type = TREE_TYPE (field);
6445 if (TREE_CODE (field_type) == RECORD_TYPE)
6446 traverse_record_type<T, Func> (field_type, named, data, bitpos,
6447 packed);
6448 else
6450 const bool fp_type
6451 = FLOAT_TYPE_P (field_type) || VECTOR_TYPE_P (field_type);
6452 Func (field, bitpos, fp_type && named && !packed && TARGET_FPU,
6453 data);
6458 /* Handle recursive register classifying for structure layout. */
6460 typedef struct
6462 bool fp_regs; /* true if field is eligible for FP registers. */
6463 bool fp_regs_in_first_word; /* true if such field in first word. */
6464 } classify_data_t;
6466 /* A subroutine of function_arg_slotno. Classify the field. */
6468 inline void
6469 classify_registers (const_tree, HOST_WIDE_INT bitpos, bool fp,
6470 classify_data_t *data)
6472 if (fp)
6474 data->fp_regs = true;
6475 if (bitpos < BITS_PER_WORD)
6476 data->fp_regs_in_first_word = true;
6480 /* Compute the slot number to pass an argument in.
6481 Return the slot number or -1 if passing on the stack.
6483 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6484 the preceding args and about the function being called.
6485 MODE is the argument's machine mode.
6486 TYPE is the data type of the argument (as a tree).
6487 This is null for libcalls where that information may
6488 not be available.
6489 NAMED is nonzero if this argument is a named parameter
6490 (otherwise it is an extra parameter matching an ellipsis).
6491 INCOMING is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6492 *PREGNO records the register number to use if scalar type.
6493 *PPADDING records the amount of padding needed in words. */
6495 static int
6496 function_arg_slotno (const struct sparc_args *cum, machine_mode mode,
6497 const_tree type, bool named, bool incoming,
6498 int *pregno, int *ppadding)
6500 int regbase = (incoming
6501 ? SPARC_INCOMING_INT_ARG_FIRST
6502 : SPARC_OUTGOING_INT_ARG_FIRST);
6503 int slotno = cum->words;
6504 enum mode_class mclass;
6505 int regno;
6507 *ppadding = 0;
6509 if (type && TREE_ADDRESSABLE (type))
6510 return -1;
6512 if (TARGET_ARCH32
6513 && mode == BLKmode
6514 && type
6515 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
6516 return -1;
6518 /* For SPARC64, objects requiring 16-byte alignment get it. */
6519 if (TARGET_ARCH64
6520 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6521 && (slotno & 1) != 0)
6522 slotno++, *ppadding = 1;
6524 mclass = GET_MODE_CLASS (mode);
6525 if (type && TREE_CODE (type) == VECTOR_TYPE)
6527 /* Vector types deserve special treatment because they are
6528 polymorphic wrt their mode, depending upon whether VIS
6529 instructions are enabled. */
6530 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6532 /* The SPARC port defines no floating-point vector modes. */
6533 gcc_assert (mode == BLKmode);
6535 else
6537 /* Integral vector types should either have a vector
6538 mode or an integral mode, because we are guaranteed
6539 by pass_by_reference that their size is not greater
6540 than 16 bytes and TImode is 16-byte wide. */
6541 gcc_assert (mode != BLKmode);
6543 /* Vector integers are handled like floats according to
6544 the Sun VIS SDK. */
6545 mclass = MODE_FLOAT;
6549 switch (mclass)
6551 case MODE_FLOAT:
6552 case MODE_COMPLEX_FLOAT:
6553 case MODE_VECTOR_INT:
6554 if (TARGET_ARCH64 && TARGET_FPU && named)
6556 /* If all arg slots are filled, then must pass on stack. */
6557 if (slotno >= SPARC_FP_ARG_MAX)
6558 return -1;
6560 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6561 /* Arguments filling only a single FP register are
6562 right-justified in the outer double FP register. */
6563 if (GET_MODE_SIZE (mode) <= 4)
6564 regno++;
6565 break;
6567 /* fallthrough */
6569 case MODE_INT:
6570 case MODE_COMPLEX_INT:
6571 /* If all arg slots are filled, then must pass on stack. */
6572 if (slotno >= SPARC_INT_ARG_MAX)
6573 return -1;
6575 regno = regbase + slotno;
6576 break;
6578 case MODE_RANDOM:
6579 if (mode == VOIDmode)
6580 /* MODE is VOIDmode when generating the actual call. */
6581 return -1;
6583 gcc_assert (mode == BLKmode);
6585 if (TARGET_ARCH32
6586 || !type
6587 || (TREE_CODE (type) != RECORD_TYPE
6588 && TREE_CODE (type) != VECTOR_TYPE))
6590 /* If all arg slots are filled, then must pass on stack. */
6591 if (slotno >= SPARC_INT_ARG_MAX)
6592 return -1;
6594 regno = regbase + slotno;
6596 else /* TARGET_ARCH64 && type */
6598 /* If all arg slots are filled, then must pass on stack. */
6599 if (slotno >= SPARC_FP_ARG_MAX)
6600 return -1;
6602 if (TREE_CODE (type) == RECORD_TYPE)
6604 classify_data_t data = { false, false };
6605 traverse_record_type<classify_data_t, classify_registers>
6606 (type, named, &data);
6608 if (data.fp_regs)
6610 /* If all FP slots are filled except for the last one and
6611 there is no FP field in the first word, then must pass
6612 on stack. */
6613 if (slotno >= SPARC_FP_ARG_MAX - 1
6614 && !data.fp_regs_in_first_word)
6615 return -1;
6617 else
6619 /* If all int slots are filled, then must pass on stack. */
6620 if (slotno >= SPARC_INT_ARG_MAX)
6621 return -1;
6625 /* PREGNO isn't set since both int and FP regs can be used. */
6626 return slotno;
6628 break;
6630 default :
6631 gcc_unreachable ();
6634 *pregno = regno;
6635 return slotno;
6638 /* Handle recursive register counting/assigning for structure layout. */
6640 typedef struct
6642 int slotno; /* slot number of the argument. */
6643 int regbase; /* regno of the base register. */
6644 int intoffset; /* offset of the first pending integer field. */
6645 int nregs; /* number of words passed in registers. */
6646 bool stack; /* true if part of the argument is on the stack. */
6647 rtx ret; /* return expression being built. */
6648 } assign_data_t;
6650 /* A subroutine of function_arg_record_value. Compute the number of integer
6651 registers to be assigned between PARMS->intoffset and BITPOS. Return
6652 true if at least one integer register is assigned or false otherwise. */
6654 static bool
6655 compute_int_layout (HOST_WIDE_INT bitpos, assign_data_t *data, int *pnregs)
6657 if (data->intoffset < 0)
6658 return false;
6660 const int intoffset = data->intoffset;
6661 data->intoffset = -1;
6663 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6664 const unsigned int startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
6665 const unsigned int endbit = ROUND_UP (bitpos, BITS_PER_WORD);
6666 int nregs = (endbit - startbit) / BITS_PER_WORD;
6668 if (nregs > 0 && nregs > SPARC_INT_ARG_MAX - this_slotno)
6670 nregs = SPARC_INT_ARG_MAX - this_slotno;
6672 /* We need to pass this field (partly) on the stack. */
6673 data->stack = 1;
6676 if (nregs <= 0)
6677 return false;
6679 *pnregs = nregs;
6680 return true;
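/* Worked example (illustrative): for struct { int a; int b; } on
   SPARC64, intoffset is 0 and BITPOS is 64 at the end of the record,
   so startbit = 0, endbit = 64 and a single 8-byte integer register
   covers both fields.  */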
6683 /* A subroutine of function_arg_record_value. Compute the number and the mode
6684 of the FP registers to be assigned for FIELD. Return true if at least one
6685 FP register is assigned or false otherwise. */
6687 static bool
6688 compute_fp_layout (const_tree field, HOST_WIDE_INT bitpos,
6689 assign_data_t *data,
6690 int *pnregs, machine_mode *pmode)
6692 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
6693 machine_mode mode = DECL_MODE (field);
6694 int nregs, nslots;
6696 /* Slots are counted as words while regs are counted as having the size of
6697 the (inner) mode. */
6698 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE && mode == BLKmode)
6700 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6701 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6703 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6705 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6706 nregs = 2;
6708 else
6709 nregs = 1;
6711 nslots = CEIL_NWORDS (nregs * GET_MODE_SIZE (mode));
6713 if (nslots > SPARC_FP_ARG_MAX - this_slotno)
6715 nslots = SPARC_FP_ARG_MAX - this_slotno;
6716 nregs = (nslots * UNITS_PER_WORD) / GET_MODE_SIZE (mode);
6718 /* We need to pass this field (partly) on the stack. */
6719 data->stack = 1;
6721 if (nregs <= 0)
6722 return false;
6725 *pnregs = nregs;
6726 *pmode = mode;
6727 return true;
6730 /* A subroutine of function_arg_record_value. Count the number of registers
6731 to be assigned for FIELD and between PARMS->intoffset and BITPOS. */
6733 inline void
6734 count_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
6735 assign_data_t *data)
6737 if (fp)
6739 int nregs;
6740 machine_mode mode;
6742 if (compute_int_layout (bitpos, data, &nregs))
6743 data->nregs += nregs;
6745 if (compute_fp_layout (field, bitpos, data, &nregs, &mode))
6746 data->nregs += nregs;
6748 else
6750 if (data->intoffset < 0)
6751 data->intoffset = bitpos;
6755 /* A subroutine of function_arg_record_value. Assign the bits of the
6756 structure between PARMS->intoffset and BITPOS to integer registers. */
6758 static void
6759 assign_int_registers (HOST_WIDE_INT bitpos, assign_data_t *data)
6761 int intoffset = data->intoffset;
6762 machine_mode mode;
6763 int nregs;
6765 if (!compute_int_layout (bitpos, data, &nregs))
6766 return;
6768 /* If this is the trailing part of a word, only load that much into
6769 the register. Otherwise load the whole register. Note that in
6770 the latter case we may pick up unwanted bits. It's not a problem
6771 at the moment, but we may wish to revisit this. */
6772 if (intoffset % BITS_PER_WORD != 0)
6773 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
6774 MODE_INT);
6775 else
6776 mode = word_mode;
6778 const int this_slotno = data->slotno + intoffset / BITS_PER_WORD;
6779 unsigned int regno = data->regbase + this_slotno;
6780 intoffset /= BITS_PER_UNIT;
6784 rtx reg = gen_rtx_REG (mode, regno);
6785 XVECEXP (data->ret, 0, data->stack + data->nregs)
6786 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6787 data->nregs += 1;
6788 mode = word_mode;
6789 regno += 1;
6790 intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
6792 while (--nregs > 0);
6795 /* A subroutine of function_arg_record_value. Assign FIELD at position
6796 BITPOS to FP registers. */
6798 static void
6799 assign_fp_registers (const_tree field, HOST_WIDE_INT bitpos,
6800 assign_data_t *data)
6802 int nregs;
6803 machine_mode mode;
6805 if (!compute_fp_layout (field, bitpos, data, &nregs, &mode))
6806 return;
6808 const int this_slotno = data->slotno + bitpos / BITS_PER_WORD;
6809 int regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6810 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6811 regno++;
6812 int pos = bitpos / BITS_PER_UNIT;
6816 rtx reg = gen_rtx_REG (mode, regno);
6817 XVECEXP (data->ret, 0, data->stack + data->nregs)
6818 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6819 data->nregs += 1;
6820 regno += GET_MODE_SIZE (mode) / 4;
6821 pos += GET_MODE_SIZE (mode);
6823 while (--nregs > 0);
6826 /* A subroutine of function_arg_record_value. Assign FIELD and the bits of
6827 the structure between PARMS->intoffset and BITPOS to registers. */
6829 inline void
6830 assign_registers (const_tree field, HOST_WIDE_INT bitpos, bool fp,
6831 assign_data_t *data)
6833 if (fp)
6835 assign_int_registers (bitpos, data);
6837 assign_fp_registers (field, bitpos, data);
6839 else
6841 if (data->intoffset < 0)
6842 data->intoffset = bitpos;
6846 /* Used by function_arg and sparc_function_value_1 to implement the complex
6847 conventions of the 64-bit ABI for passing and returning structures.
6848 Return an expression valid as a return value for the FUNCTION_ARG
6849 and TARGET_FUNCTION_VALUE.
6851 TYPE is the data type of the argument (as a tree).
6852 This is null for libcalls where that information may
6853 not be available.
6854 MODE is the argument's machine mode.
6855 SLOTNO is the index number of the argument's slot in the parameter array.
6856 NAMED is true if this argument is a named parameter
6857 (otherwise it is an extra parameter matching an ellipsis).
6858 REGBASE is the regno of the base register for the parameter array. */
6860 static rtx
6861 function_arg_record_value (const_tree type, machine_mode mode,
6862 int slotno, bool named, int regbase)
6864 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6865 assign_data_t data;
6866 int nregs;
6868 data.slotno = slotno;
6869 data.regbase = regbase;
6871 /* Count how many registers we need. */
6872 data.nregs = 0;
6873 data.intoffset = 0;
6874 data.stack = false;
6875 traverse_record_type<assign_data_t, count_registers> (type, named, &data);
6877 /* Take into account pending integer fields. */
6878 if (compute_int_layout (typesize * BITS_PER_UNIT, &data, &nregs))
6879 data.nregs += nregs;
6881 /* Allocate the vector and handle some annoying special cases. */
6882 nregs = data.nregs;
6884 if (nregs == 0)
6886 /* ??? Empty structure has no value? Duh? */
6887 if (typesize <= 0)
6889 /* Though there's nothing really to store, return a word register
6890 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6891 leads to breakage because there are zero bytes to load. */
6893 return gen_rtx_REG (mode, regbase);
6896 /* ??? C++ has structures with no fields, and yet a size. Give up
6897 for now and pass everything back in integer registers. */
6898 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6899 if (nregs + slotno > SPARC_INT_ARG_MAX)
6900 nregs = SPARC_INT_ARG_MAX - slotno;
6903 gcc_assert (nregs > 0);
6905 data.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (data.stack + nregs));
6907 /* If at least one field must be passed on the stack, generate
6908 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6909 also be passed on the stack. We can't do much better because the
6910 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6911 of structures for which the fields passed exclusively in registers
6912 are not at the beginning of the structure. */
6913 if (data.stack)
6914 XVECEXP (data.ret, 0, 0)
6915 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6917 /* Assign the registers. */
6918 data.nregs = 0;
6919 data.intoffset = 0;
6920 traverse_record_type<assign_data_t, assign_registers> (type, named, &data);
6922 /* Assign pending integer fields. */
6923 assign_int_registers (typesize * BITS_PER_UNIT, &data);
6925 gcc_assert (data.nregs == nregs);
6927 return data.ret;
6930 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6931 of the 64-bit ABI for passing and returning unions.
6932 Return an expression valid as a return value for the FUNCTION_ARG
6933 and TARGET_FUNCTION_VALUE.
6935 SIZE is the size in bytes of the union.
6936 MODE is the argument's machine mode.
6937 REGNO is the hard register the union will be passed in. */
6939 static rtx
6940 function_arg_union_value (int size, machine_mode mode, int slotno,
6941 int regno)
6943 int nwords = CEIL_NWORDS (size), i;
6944 rtx regs;
6946 /* See comment in previous function for empty structures. */
6947 if (nwords == 0)
6948 return gen_rtx_REG (mode, regno);
6950 if (slotno == SPARC_INT_ARG_MAX - 1)
6951 nwords = 1;
6953 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6955 for (i = 0; i < nwords; i++)
6957 /* Unions are passed left-justified. */
6958 XVECEXP (regs, 0, i)
6959 = gen_rtx_EXPR_LIST (VOIDmode,
6960 gen_rtx_REG (word_mode, regno),
6961 GEN_INT (UNITS_PER_WORD * i));
6962 regno++;
6965 return regs;
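/* For example (illustrative): a 12-byte union passed in slot 0 gives
   nwords = 2 and the PARALLEL ((%o0, 0) (%o1, 8)), i.e. the union is
   left-justified in two consecutive integer registers.  */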
6968 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6969 for passing and returning BLKmode vectors.
6970 Return an expression valid as a return value for the FUNCTION_ARG
6971 and TARGET_FUNCTION_VALUE.
6973 SIZE is the size in bytes of the vector.
6974 REGNO is the FP hard register the vector will be passed in. */
6976 static rtx
6977 function_arg_vector_value (int size, int regno)
6979 const int nregs = MAX (1, size / 8);
6980 rtx regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6982 if (size < 8)
6983 XVECEXP (regs, 0, 0)
6984 = gen_rtx_EXPR_LIST (VOIDmode,
6985 gen_rtx_REG (SImode, regno),
6986 const0_rtx);
6987 else
6988 for (int i = 0; i < nregs; i++)
6989 XVECEXP (regs, 0, i)
6990 = gen_rtx_EXPR_LIST (VOIDmode,
6991 gen_rtx_REG (DImode, regno + 2*i),
6992 GEN_INT (i*8));
6994 return regs;
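/* For example (illustrative): a 16-byte vector starting at %f0 yields
   the PARALLEL ((%d0, 0) (%d2, 8)), consecutive DImode FP registers
   being 2 regnos apart, while a 4-byte vector is a single SImode FP
   register at offset 0.  */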
6997 /* Determine where to put an argument to a function.
6998 Value is zero to push the argument on the stack,
6999 or a hard register in which to store the argument.
7001 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7002 the preceding args and about the function being called.
7003 MODE is the argument's machine mode.
7004 TYPE is the data type of the argument (as a tree).
7005 This is null for libcalls where that information may
7006 not be available.
7007 NAMED is true if this argument is a named parameter
7008 (otherwise it is an extra parameter matching an ellipsis).
7009 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
7010 TARGET_FUNCTION_INCOMING_ARG. */
7012 static rtx
7013 sparc_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
7014 const_tree type, bool named, bool incoming)
7016 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7018 int regbase = (incoming
7019 ? SPARC_INCOMING_INT_ARG_FIRST
7020 : SPARC_OUTGOING_INT_ARG_FIRST);
7021 int slotno, regno, padding;
7022 enum mode_class mclass = GET_MODE_CLASS (mode);
7024 slotno = function_arg_slotno (cum, mode, type, named, incoming,
7025 &regno, &padding);
7026 if (slotno == -1)
7027 return 0;
7029 /* Vector types deserve special treatment because they are polymorphic wrt
7030 their mode, depending upon whether VIS instructions are enabled. */
7031 if (type && TREE_CODE (type) == VECTOR_TYPE)
7033 HOST_WIDE_INT size = int_size_in_bytes (type);
7034 gcc_assert ((TARGET_ARCH32 && size <= 8)
7035 || (TARGET_ARCH64 && size <= 16));
7037 if (mode == BLKmode)
7038 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST + 2*slotno);
7040 mclass = MODE_FLOAT;
7043 if (TARGET_ARCH32)
7044 return gen_rtx_REG (mode, regno);
7046 /* Structures up to 16 bytes in size are passed in arg slots on the stack
7047 and are promoted to registers if possible. */
7048 if (type && TREE_CODE (type) == RECORD_TYPE)
7050 HOST_WIDE_INT size = int_size_in_bytes (type);
7051 gcc_assert (size <= 16);
7053 return function_arg_record_value (type, mode, slotno, named, regbase);
7056 /* Unions up to 16 bytes in size are passed in integer registers. */
7057 else if (type && TREE_CODE (type) == UNION_TYPE)
7059 HOST_WIDE_INT size = int_size_in_bytes (type);
7060 gcc_assert (size <= 16);
7062 return function_arg_union_value (size, mode, slotno, regno);
7065 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
7066 but also have the slot allocated for them.
7067 If no prototype is in scope fp values in register slots get passed
7068 in two places, either fp regs and int regs or fp regs and memory. */
7069 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7070 && SPARC_FP_REG_P (regno))
7072 rtx reg = gen_rtx_REG (mode, regno);
7073 if (cum->prototype_p || cum->libcall_p)
7074 return reg;
7075 else
7077 rtx v0, v1;
7079 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
7081 int intreg;
7083 /* On incoming, we don't need to know that the value is passed
7084 in both %f0 and %i0; advertising this confuses other parts of the
7085 compiler and causes needless spillage even in the simplest cases. */
7086 if (incoming)
7087 return reg;
7089 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
7090 + (regno - SPARC_FP_ARG_FIRST) / 2);
7092 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7093 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
7094 const0_rtx);
7095 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7097 else
7099 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
7100 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
7101 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
7106 /* All other aggregate types are passed in an integer register in a mode
7107 corresponding to the size of the type. */
7108 else if (type && AGGREGATE_TYPE_P (type))
7110 HOST_WIDE_INT size = int_size_in_bytes (type);
7111 gcc_assert (size <= 16);
7113 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7116 return gen_rtx_REG (mode, regno);
7119 /* Handle the TARGET_FUNCTION_ARG target hook. */
7121 static rtx
7122 sparc_function_arg (cumulative_args_t cum, machine_mode mode,
7123 const_tree type, bool named)
7125 return sparc_function_arg_1 (cum, mode, type, named, false);
7128 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
7130 static rtx
7131 sparc_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
7132 const_tree type, bool named)
7134 return sparc_function_arg_1 (cum, mode, type, named, true);
7137 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
7139 static unsigned int
7140 sparc_function_arg_boundary (machine_mode mode, const_tree type)
7142 return ((TARGET_ARCH64
7143 && (GET_MODE_ALIGNMENT (mode) == 128
7144 || (type && TYPE_ALIGN (type) == 128)))
7145 ? 128
7146 : PARM_BOUNDARY);
7149 /* For an arg passed partly in registers and partly in memory,
7150 this is the number of bytes of registers used.
7151 For args passed entirely in registers or entirely in memory, zero.
7153 Any arg that starts in the first 6 regs but won't entirely fit in them
7154 needs partial registers on v8. On v9, structures with integer
7155 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
7156 values that begin in the last fp reg [where "last fp reg" varies with the
7157 mode] will be split between that reg and memory. */
7159 static int
7160 sparc_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
7161 tree type, bool named)
7163 int slotno, regno, padding;
7165 /* We pass false for incoming here; it doesn't matter. */
7166 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
7167 false, &regno, &padding);
7169 if (slotno == -1)
7170 return 0;
7172 if (TARGET_ARCH32)
7174 if ((slotno + (mode == BLKmode
7175 ? CEIL_NWORDS (int_size_in_bytes (type))
7176 : CEIL_NWORDS (GET_MODE_SIZE (mode))))
7177 > SPARC_INT_ARG_MAX)
7178 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
7180 else
7182 /* We are guaranteed by pass_by_reference that the size of the
7183 argument is not greater than 16 bytes, so we only need to return
7184 one word if the argument is partially passed in registers. */
7186 if (type && AGGREGATE_TYPE_P (type))
7188 int size = int_size_in_bytes (type);
7190 if (size > UNITS_PER_WORD
7191 && (slotno == SPARC_INT_ARG_MAX - 1
7192 || slotno == SPARC_FP_ARG_MAX - 1))
7193 return UNITS_PER_WORD;
7195 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
7196 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7197 && ! (TARGET_FPU && named)))
7199 /* The complex types are passed as packed types. */
7200 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7201 && slotno == SPARC_INT_ARG_MAX - 1)
7202 return UNITS_PER_WORD;
7204 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7206 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
7207 > SPARC_FP_ARG_MAX)
7208 return UNITS_PER_WORD;
7212 return 0;
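/* Worked example (illustrative): in 32-bit mode a 'double' (2 words)
   whose first word lands in slot 5 overflows the 6 register slots, so
   the TARGET_ARCH32 branch above returns (6 - 5) * 4 = 4 bytes passed
   in %o5, the remaining 4 bytes going on the stack.  */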
7215 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
7216 Specify whether to pass the argument by reference. */
7218 static bool
7219 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
7220 machine_mode mode, const_tree type,
7221 bool named ATTRIBUTE_UNUSED)
7223 if (TARGET_ARCH32)
7224 /* Original SPARC 32-bit ABI says that structures and unions,
7225 and quad-precision floats are passed by reference. For Pascal,
7226 also pass arrays by reference. All other base types are passed
7227 in registers.
7229 Extended ABI (as implemented by the Sun compiler) says that all
7230 complex floats are passed by reference. Pass complex integers
7231 in registers up to 8 bytes. More generally, enforce the 2-word
7232 cap for passing arguments in registers.
7234 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7235 integers are passed like floats of the same size, that is in
7236 registers up to 8 bytes. Pass all vector floats by reference
7237 like structure and unions. */
7238 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
7239 || mode == SCmode
7240 /* Catch CDImode, TFmode, DCmode and TCmode. */
7241 || GET_MODE_SIZE (mode) > 8
7242 || (type
7243 && TREE_CODE (type) == VECTOR_TYPE
7244 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7245 else
7246 /* Original SPARC 64-bit ABI says that structures and unions
7247 smaller than 16 bytes are passed in registers, as well as
7248 all other base types.
7250 Extended ABI (as implemented by the Sun compiler) says that
7251 complex floats are passed in registers up to 16 bytes. Pass
7252 all complex integers in registers up to 16 bytes. More generally,
7253 enforce the 2-word cap for passing arguments in registers.
7255 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7256 integers are passed like floats of the same size, that is in
7257 registers (up to 16 bytes). Pass all vector floats like structure
7258 and unions. */
7259 return ((type
7260 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
7261 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
7262 /* Catch CTImode and TCmode. */
7263 || GET_MODE_SIZE (mode) > 16);
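/* A few illustrative data points for the rules above: in 32-bit mode
   a 'struct { int i; }' and a '_Complex float' (SCmode) are passed by
   reference while a plain 'double' goes in registers; in 64-bit mode
   a 16-byte struct still goes in registers but a 32-byte one is
   passed by reference.  */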
7266 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7267 Update the data in CUM to advance over an argument
7268 of mode MODE and data type TYPE.
7269 TYPE is null for libcalls where that information may not be available. */
7271 static void
7272 sparc_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
7273 const_tree type, bool named)
7275 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7276 int regno, padding;
7278 /* We pass false for incoming here; it doesn't matter. */
7279 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7281 /* If argument requires leading padding, add it. */
7282 cum->words += padding;
7284 if (TARGET_ARCH32)
7285 cum->words += (mode == BLKmode
7286 ? CEIL_NWORDS (int_size_in_bytes (type))
7287 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7288 else
7290 if (type && AGGREGATE_TYPE_P (type))
7292 int size = int_size_in_bytes (type);
7294 if (size <= 8)
7295 ++cum->words;
7296 else if (size <= 16)
7297 cum->words += 2;
7298 else /* passed by reference */
7299 ++cum->words;
7301 else
7302 cum->words += (mode == BLKmode
7303 ? CEIL_NWORDS (int_size_in_bytes (type))
7304 : CEIL_NWORDS (GET_MODE_SIZE (mode)));
7308 /* Handle the FUNCTION_ARG_PADDING macro.
7309 For the 64-bit ABI structs are always stored left shifted in their
7310 argument slot. */
7312 enum direction
7313 function_arg_padding (machine_mode mode, const_tree type)
7315 if (TARGET_ARCH64 && type && AGGREGATE_TYPE_P (type))
7316 return upward;
7318 /* Fall back to the default. */
7319 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
7322 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7323 Specify whether to return the return value in memory. */
7325 static bool
7326 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7328 if (TARGET_ARCH32)
7329 /* Original SPARC 32-bit ABI says that structures and unions,
7330 and quad-precision floats are returned in memory. All other
7331 base types are returned in registers.
7333 Extended ABI (as implemented by the Sun compiler) says that
7334 all complex floats are returned in registers (8 FP registers
7335 at most for '_Complex long double'). Return all complex integers
7336 in registers (4 at most for '_Complex long long').
7338 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7339 integers are returned like floats of the same size, that is in
7340 registers up to 8 bytes and in memory otherwise. Return all
7341 vector floats in memory like structure and unions; note that
7342 they always have BLKmode like the latter. */
7343 return (TYPE_MODE (type) == BLKmode
7344 || TYPE_MODE (type) == TFmode
7345 || (TREE_CODE (type) == VECTOR_TYPE
7346 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7347 else
7348 /* Original SPARC 64-bit ABI says that structures and unions
7349 smaller than 32 bytes are returned in registers, as well as
7350 all other base types.
7352 Extended ABI (as implemented by the Sun compiler) says that all
7353 complex floats are returned in registers (8 FP registers at most
7354 for '_Complex long double'). Return all complex integers in
7355 registers (4 at most for '_Complex TItype').
7357 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7358 integers are returned like floats of the same size, that is in
7359 registers. Return all vector floats like structure and unions;
7360 note that they always have BLKmode like the latter. */
7361 return (TYPE_MODE (type) == BLKmode
7362 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
7365 /* Handle the TARGET_STRUCT_VALUE target hook.
7366 Return where to find the structure return value address. */
7368 static rtx
7369 sparc_struct_value_rtx (tree fndecl, int incoming)
7371 if (TARGET_ARCH64)
7372 return 0;
7373 else
7375 rtx mem;
7377 if (incoming)
7378 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7379 STRUCT_VALUE_OFFSET));
7380 else
7381 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7382 STRUCT_VALUE_OFFSET));
7384 /* Only follow the SPARC ABI for fixed-size structure returns.
7385 Variable size structure returns are handled per the normal
7386 procedures in GCC. This is enabled by -mstd-struct-return. */
7387 if (incoming == 2
7388 && sparc_std_struct_return
7389 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7390 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7392 /* We must check and adjust the return address, as it is optional
7393 as to whether the return object is really provided. */
7394 rtx ret_reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
7395 rtx scratch = gen_reg_rtx (SImode);
7396 rtx_code_label *endlab = gen_label_rtx ();
7398 /* Calculate the return object size. */
7399 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7400 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7401 /* Construct a temporary return value. */
7402 rtx temp_val
7403 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7405 /* Implement SPARC 32-bit psABI callee return struct checking:
7407 Fetch the instruction where we will return to and see if
7408 it's an unimp instruction (the most significant 10 bits
7409 will be zero). */
7410 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7411 plus_constant (Pmode,
7412 ret_reg, 8)));
7413 /* Assume the size is valid and pre-adjust. */
7414 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7415 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7416 0, endlab);
7417 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7418 /* Write the address of the memory pointed to by temp_val into
7419 the memory pointed to by mem. */
7420 emit_move_insn (mem, XEXP (temp_val, 0));
7421 emit_label (endlab);
7424 return mem;
7428 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7429 For v9, function return values are subject to the same rules as arguments,
7430 except that up to 32 bytes may be returned in registers. */
7432 static rtx
7433 sparc_function_value_1 (const_tree type, machine_mode mode,
7434 bool outgoing)
7436 /* Beware that the two values are swapped here wrt function_arg. */
7437 int regbase = (outgoing
7438 ? SPARC_INCOMING_INT_ARG_FIRST
7439 : SPARC_OUTGOING_INT_ARG_FIRST);
7440 enum mode_class mclass = GET_MODE_CLASS (mode);
7441 int regno;
7443 /* Vector types deserve special treatment because they are polymorphic wrt
7444 their mode, depending upon whether VIS instructions are enabled. */
7445 if (type && TREE_CODE (type) == VECTOR_TYPE)
7447 HOST_WIDE_INT size = int_size_in_bytes (type);
7448 gcc_assert ((TARGET_ARCH32 && size <= 8)
7449 || (TARGET_ARCH64 && size <= 32));
7451 if (mode == BLKmode)
7452 return function_arg_vector_value (size, SPARC_FP_ARG_FIRST);
7454 mclass = MODE_FLOAT;
7457 if (TARGET_ARCH64 && type)
7459 /* Structures up to 32 bytes in size are returned in registers. */
7460 if (TREE_CODE (type) == RECORD_TYPE)
7462 HOST_WIDE_INT size = int_size_in_bytes (type);
7463 gcc_assert (size <= 32);
7465 return function_arg_record_value (type, mode, 0, 1, regbase);
7468 /* Unions up to 32 bytes in size are returned in integer registers. */
7469 else if (TREE_CODE (type) == UNION_TYPE)
7471 HOST_WIDE_INT size = int_size_in_bytes (type);
7472 gcc_assert (size <= 32);
7474 return function_arg_union_value (size, mode, 0, regbase);
7477 /* Objects that require it are returned in FP registers. */
7478 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7481 /* All other aggregate types are returned in an integer register in a
7482 mode corresponding to the size of the type. */
7483 else if (AGGREGATE_TYPE_P (type))
7487 HOST_WIDE_INT size = int_size_in_bytes (type);
7488 gcc_assert (size <= 32);
7490 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7492 /* ??? We probably should have made the same ABI change in
7493 3.4.0 as the one we made for unions. The latter was
7494 required by the SCD though, while the former is not
7495 specified, so we favored compatibility and efficiency.
7497 Now we're stuck for aggregates larger than 16 bytes,
7498 because OImode vanished in the meantime. Let's not
7499 try to be unduly clever, and simply follow the ABI
7500 for unions in that case. */
7501 if (mode == BLKmode)
7502 return function_arg_union_value (size, mode, 0, regbase);
7503 else
7504 mclass = MODE_INT;
7507 /* We should only have pointer and integer types at this point. This
7508 must match sparc_promote_function_mode. */
7509 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7510 mode = word_mode;
7513 /* We should only have pointer and integer types at this point, except with
7514 -freg-struct-return. This must match sparc_promote_function_mode. */
7515 else if (TARGET_ARCH32
7516 && !(type && AGGREGATE_TYPE_P (type))
7517 && mclass == MODE_INT
7518 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7519 mode = word_mode;
7521 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7522 regno = SPARC_FP_ARG_FIRST;
7523 else
7524 regno = regbase;
7526 return gen_rtx_REG (mode, regno);
7529 /* Handle TARGET_FUNCTION_VALUE.
7530 On the SPARC, the value is found in the first "output" register, but the
7531 called function leaves it in the first "input" register. */
7533 static rtx
7534 sparc_function_value (const_tree valtype,
7535 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7536 bool outgoing)
7538 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7541 /* Handle TARGET_LIBCALL_VALUE. */
7543 static rtx
7544 sparc_libcall_value (machine_mode mode,
7545 const_rtx fun ATTRIBUTE_UNUSED)
7547 return sparc_function_value_1 (NULL_TREE, mode, false);
7550 /* Handle FUNCTION_VALUE_REGNO_P.
7551 On the SPARC, the first "output" reg is used for integer values, and the
7552 first floating point register is used for floating point values. */
7554 static bool
7555 sparc_function_value_regno_p (const unsigned int regno)
7557 return (regno == 8 || (TARGET_FPU && regno == 32));
7560 /* Do what is necessary for `va_start'. We look at the current function
7561 to determine if stdarg or varargs is used and return the address of
7562 the first unnamed parameter. */
7564 static rtx
7565 sparc_builtin_saveregs (void)
7567 int first_reg = crtl->args.info.words;
7568 rtx address;
7569 int regno;
7571 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7572 emit_move_insn (gen_rtx_MEM (word_mode,
7573 gen_rtx_PLUS (Pmode,
7574 frame_pointer_rtx,
7575 GEN_INT (FIRST_PARM_OFFSET (0)
7576 + (UNITS_PER_WORD
7577 * regno)))),
7578 gen_rtx_REG (word_mode,
7579 SPARC_INCOMING_INT_ARG_FIRST + regno));
7581 address = gen_rtx_PLUS (Pmode,
7582 frame_pointer_rtx,
7583 GEN_INT (FIRST_PARM_OFFSET (0)
7584 + UNITS_PER_WORD * first_reg));
7586 return address;
7589 /* Implement `va_start' for stdarg. */
7591 static void
7592 sparc_va_start (tree valist, rtx nextarg)
7594 nextarg = expand_builtin_saveregs ();
7595 std_expand_builtin_va_start (valist, nextarg);
7598 /* Implement `va_arg' for stdarg. */
7600 static tree
7601 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7602 gimple_seq *post_p)
7604 HOST_WIDE_INT size, rsize, align;
7605 tree addr, incr;
7606 bool indirect;
7607 tree ptrtype = build_pointer_type (type);
7609 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7611 indirect = true;
7612 size = rsize = UNITS_PER_WORD;
7613 align = 0;
7615 else
7617 indirect = false;
7618 size = int_size_in_bytes (type);
7619 rsize = ROUND_UP (size, UNITS_PER_WORD);
7620 align = 0;
7622 if (TARGET_ARCH64)
7624 /* For SPARC64, objects requiring 16-byte alignment get it. */
7625 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7626 align = 2 * UNITS_PER_WORD;
7628 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7629 are left-justified in their slots. */
7630 if (AGGREGATE_TYPE_P (type))
7632 if (size == 0)
7633 size = rsize = UNITS_PER_WORD;
7634 else
7635 size = rsize;
7640 incr = valist;
7641 if (align)
7643 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7644 incr = fold_convert (sizetype, incr);
7645 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7646 size_int (-align));
7647 incr = fold_convert (ptr_type_node, incr);
7650 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7651 addr = incr;
7653 if (BYTES_BIG_ENDIAN && size < rsize)
7654 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7656 if (indirect)
7658 addr = fold_convert (build_pointer_type (ptrtype), addr);
7659 addr = build_va_arg_indirect_ref (addr);
7662 /* If the address isn't aligned properly for the type, we need a temporary.
7663 FIXME: This is inefficient, usually we can do this in registers. */
7664 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7666 tree tmp = create_tmp_var (type, "va_arg_tmp");
7667 tree dest_addr = build_fold_addr_expr (tmp);
7668 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7669 3, dest_addr, addr, size_int (rsize));
7670 TREE_ADDRESSABLE (tmp) = 1;
7671 gimplify_and_add (copy, pre_p);
7672 addr = dest_addr;
7675 else
7676 addr = fold_convert (ptrtype, addr);
7678 incr = fold_build_pointer_plus_hwi (incr, rsize);
7679 gimplify_assign (valist, incr, post_p);
7681 return build_va_arg_indirect_ref (addr);
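/* Worked examples of the gimplification above, a sketch assuming the
   64-bit path: va_arg (ap, int) reads the low-order 4 bytes of an
   8-byte slot (the big-endian adjustment of rsize - size) and advances
   AP by 8; va_arg (ap, long double) first rounds AP up to a 16-byte
   boundary, because TYPE_ALIGN is 128, and advances it by 16.  */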
7684 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7685 Specify whether the vector mode is supported by the hardware. */
7687 static bool
7688 sparc_vector_mode_supported_p (machine_mode mode)
7690 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
7693 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7695 static machine_mode
7696 sparc_preferred_simd_mode (machine_mode mode)
7698 if (TARGET_VIS)
7699 switch (mode)
7701 case SImode:
7702 return V2SImode;
7703 case HImode:
7704 return V4HImode;
7705 case QImode:
7706 return V8QImode;
7708 default:;
7711 return word_mode;
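/* Sketch: with VIS, the 64-bit floating-point registers double as
   vector registers, so the vectorizer is steered to V2SImode for
   32-bit elements, V4HImode for 16-bit elements and V8QImode for
   8-bit elements -- in each case filling one 64-bit register.  */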
7714 /* Return the string to output an unconditional branch to LABEL, which is
7715 the operand number of the label.
7717 DEST is the destination insn (i.e. the label), INSN is the source. */
7719 const char *
7720 output_ubranch (rtx dest, rtx_insn *insn)
7722 static char string[64];
7723 bool v9_form = false;
7724 int delta;
7725 char *p;
7727 /* Even if we are trying to use cbcond for this, evaluate
7728 whether we can use V9 branches as our backup plan. */
7730 delta = 5000000;
7731 if (INSN_ADDRESSES_SET_P ())
7732 delta = (INSN_ADDRESSES (INSN_UID (dest))
7733 - INSN_ADDRESSES (INSN_UID (insn)));
7735 /* Leave some instructions for "slop". */
7736 if (TARGET_V9 && delta >= -260000 && delta < 260000)
7737 v9_form = true;
7739 if (TARGET_CBCOND)
7741 bool emit_nop = emit_cbcond_nop (insn);
7742 bool far = false;
7743 const char *rval;
7745 if (delta < -500 || delta > 500)
7746 far = true;
7748 if (far)
7750 if (v9_form)
7751 rval = "ba,a,pt\t%%xcc, %l0";
7752 else
7753 rval = "b,a\t%l0";
7755 else
7757 if (emit_nop)
7758 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
7759 else
7760 rval = "cwbe\t%%g0, %%g0, %l0";
7762 return rval;
7765 if (v9_form)
7766 strcpy (string, "ba%*,pt\t%%xcc, ");
7767 else
7768 strcpy (string, "b%*\t");
7770 p = strchr (string, '\0');
7771 *p++ = '%';
7772 *p++ = 'l';
7773 *p++ = '0';
7774 *p++ = '%';
7775 *p++ = '(';
7776 *p = '\0';
7778 return string;
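/* For illustration, the V9 form built above is the template
   "ba%*,pt\t%%xcc, %l0%(": the %* punctuation may append ",a" (annul)
   and %( may append a trailing nop when the delay slot is empty, both
   handled by sparc_print_operand below.  */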
7781 /* Return the string to output a conditional branch to LABEL, which is
7782 the operand number of the label. OP is the conditional expression.
7783 XEXP (OP, 0) is assumed to be a condition code register (integer or
7784 floating point) and its mode specifies what kind of comparison we made.
7786 DEST is the destination insn (i.e. the label), INSN is the source.
7788 REVERSED is nonzero if we should reverse the sense of the comparison.
7790 ANNUL is nonzero if we should generate an annulling branch. */
7792 const char *
7793 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7794 rtx_insn *insn)
7796 static char string[64];
7797 enum rtx_code code = GET_CODE (op);
7798 rtx cc_reg = XEXP (op, 0);
7799 machine_mode mode = GET_MODE (cc_reg);
7800 const char *labelno, *branch;
7801 int spaces = 8, far;
7802 char *p;
7804 /* v9 branches are limited to +-1MB. If it is too far away,
7805 change
7807 bne,pt %xcc, .LC30
7809 to
7811 be,pn %xcc, .+12
7812 nop
7813 ba .LC30
7815 and
7817 fbne,a,pn %fcc2, .LC29
7819 to
7821 fbe,pt %fcc2, .+16
7822 nop
7823 ba .LC29 */
7825 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7826 if (reversed ^ far)
7828 /* Reversal of FP compares takes special care -- an ordered compare
7829 becomes an unordered compare and vice versa. */
7830 if (mode == CCFPmode || mode == CCFPEmode)
7831 code = reverse_condition_maybe_unordered (code);
7832 else
7833 code = reverse_condition (code);
7836 /* Start by writing the branch condition. */
7837 if (mode == CCFPmode || mode == CCFPEmode)
7839 switch (code)
7841 case NE:
7842 branch = "fbne";
7843 break;
7844 case EQ:
7845 branch = "fbe";
7846 break;
7847 case GE:
7848 branch = "fbge";
7849 break;
7850 case GT:
7851 branch = "fbg";
7852 break;
7853 case LE:
7854 branch = "fble";
7855 break;
7856 case LT:
7857 branch = "fbl";
7858 break;
7859 case UNORDERED:
7860 branch = "fbu";
7861 break;
7862 case ORDERED:
7863 branch = "fbo";
7864 break;
7865 case UNGT:
7866 branch = "fbug";
7867 break;
7868 case UNLT:
7869 branch = "fbul";
7870 break;
7871 case UNEQ:
7872 branch = "fbue";
7873 break;
7874 case UNGE:
7875 branch = "fbuge";
7876 break;
7877 case UNLE:
7878 branch = "fbule";
7879 break;
7880 case LTGT:
7881 branch = "fblg";
7882 break;
7883 default:
7884 gcc_unreachable ();
7887 /* ??? !v9: FP branches cannot be preceded by another floating point
7888 insn. Because there is currently no concept of pre-delay slots,
7889 we can fix this only by always emitting a nop before a floating
7890 point branch. */
7892 string[0] = '\0';
7893 if (! TARGET_V9)
7894 strcpy (string, "nop\n\t");
7895 strcat (string, branch);
7897 else
7899 switch (code)
7901 case NE:
7902 if (mode == CCVmode || mode == CCXVmode)
7903 branch = "bvs";
7904 else
7905 branch = "bne";
7906 break;
7907 case EQ:
7908 if (mode == CCVmode || mode == CCXVmode)
7909 branch = "bvc";
7910 else
7911 branch = "be";
7912 break;
7913 case GE:
7914 if (mode == CCNZmode || mode == CCXNZmode)
7915 branch = "bpos";
7916 else
7917 branch = "bge";
7918 break;
7919 case GT:
7920 branch = "bg";
7921 break;
7922 case LE:
7923 branch = "ble";
7924 break;
7925 case LT:
7926 if (mode == CCNZmode || mode == CCXNZmode)
7927 branch = "bneg";
7928 else
7929 branch = "bl";
7930 break;
7931 case GEU:
7932 branch = "bgeu";
7933 break;
7934 case GTU:
7935 branch = "bgu";
7936 break;
7937 case LEU:
7938 branch = "bleu";
7939 break;
7940 case LTU:
7941 branch = "blu";
7942 break;
7943 default:
7944 gcc_unreachable ();
7946 strcpy (string, branch);
7948 spaces -= strlen (branch);
7949 p = strchr (string, '\0');
7951 /* Now add the annulling, the label, and a possible noop. */
7952 if (annul && ! far)
7954 strcpy (p, ",a");
7955 p += 2;
7956 spaces -= 2;
7959 if (TARGET_V9)
7961 rtx note;
7962 int v8 = 0;
7964 if (! far && insn && INSN_ADDRESSES_SET_P ())
7966 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7967 - INSN_ADDRESSES (INSN_UID (insn)));
7968 /* Leave some instructions for "slop". */
7969 if (delta < -260000 || delta >= 260000)
7970 v8 = 1;
7973 switch (mode)
7975 case CCmode:
7976 case CCNZmode:
7977 case CCCmode:
7978 case CCVmode:
7979 labelno = "%%icc, ";
7980 if (v8)
7981 labelno = "";
7982 break;
7983 case CCXmode:
7984 case CCXNZmode:
7985 case CCXCmode:
7986 case CCXVmode:
7987 labelno = "%%xcc, ";
7988 gcc_assert (!v8);
7989 break;
7990 case CCFPmode:
7991 case CCFPEmode:
7993 static char v9_fcc_labelno[] = "%%fccX, ";
7994 /* Set the char indicating the number of the fcc reg to use. */
7995 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7996 labelno = v9_fcc_labelno;
7997 if (v8)
7999 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
8000 labelno = "";
8003 break;
8004 default:
8005 gcc_unreachable ();
8008 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8010 strcpy (p,
8011 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8012 >= profile_probability::even ()) ^ far)
8013 ? ",pt" : ",pn");
8014 p += 3;
8015 spaces -= 3;
8018 else
8019 labelno = "";
8021 if (spaces > 0)
8022 *p++ = '\t';
8023 else
8024 *p++ = ' ';
8025 strcpy (p, labelno);
8026 p = strchr (p, '\0');
8027 if (far)
8029 strcpy (p, ".+12\n\t nop\n\tb\t");
8030 /* Skip the next insn if requested or
8031 if we know that it will be a nop. */
8032 if (annul || ! final_sequence)
8033 p[3] = '6';
8034 p += 14;
8036 *p++ = '%';
8037 *p++ = 'l';
8038 *p++ = label + '0';
8039 *p++ = '%';
8040 *p++ = '#';
8041 *p = '\0';
8043 return string;
8046 /* Emit a library call comparison between floating point X and Y.
8047 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
8048 Return the new operator to be used in the comparison sequence.
8050 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
8051 values as arguments instead of the TFmode registers themselves,
8052 that's why we cannot call emit_float_lib_cmp. */
8055 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
8057 const char *qpfunc;
8058 rtx slot0, slot1, result, tem, tem2, libfunc;
8059 machine_mode mode;
8060 enum rtx_code new_comparison;
8062 switch (comparison)
8064 case EQ:
8065 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
8066 break;
8068 case NE:
8069 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
8070 break;
8072 case GT:
8073 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
8074 break;
8076 case GE:
8077 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
8078 break;
8080 case LT:
8081 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
8082 break;
8084 case LE:
8085 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
8086 break;
8088 case ORDERED:
8089 case UNORDERED:
8090 case UNGT:
8091 case UNLT:
8092 case UNEQ:
8093 case UNGE:
8094 case UNLE:
8095 case LTGT:
8096 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
8097 break;
8099 default:
8100 gcc_unreachable ();
8103 if (TARGET_ARCH64)
8105 if (MEM_P (x))
8107 tree expr = MEM_EXPR (x);
8108 if (expr)
8109 mark_addressable (expr);
8110 slot0 = x;
8112 else
8114 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8115 emit_move_insn (slot0, x);
8118 if (MEM_P (y))
8120 tree expr = MEM_EXPR (y);
8121 if (expr)
8122 mark_addressable (expr);
8123 slot1 = y;
8125 else
8127 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
8128 emit_move_insn (slot1, y);
8131 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8132 emit_library_call (libfunc, LCT_NORMAL,
8133 DImode, 2,
8134 XEXP (slot0, 0), Pmode,
8135 XEXP (slot1, 0), Pmode);
8136 mode = DImode;
8138 else
8140 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
8141 emit_library_call (libfunc, LCT_NORMAL,
8142 SImode, 2,
8143 x, TFmode, y, TFmode);
8144 mode = SImode;
8148 /* Immediately move the result of the libcall into a pseudo
8149 register so reload doesn't clobber the value if it needs
8150 the return register for a spill reg. */
8151 result = gen_reg_rtx (mode);
8152 emit_move_insn (result, hard_libcall_value (mode, libfunc));
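/* The selections below are consistent with the customary soft-float
   comparison encoding for _Q_cmp/_Qp_cmp, namely 0 for equal, 1 for
   less, 2 for greater and 3 for unordered: e.g. UNGT tests
   result > 1 (greater or unordered) and UNLT tests the low bit
   (less or unordered).  */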
8154 switch (comparison)
8156 default:
8157 return gen_rtx_NE (VOIDmode, result, const0_rtx);
8158 case ORDERED:
8159 case UNORDERED:
8160 new_comparison = (comparison == UNORDERED ? EQ : NE);
8161 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
8162 case UNGT:
8163 case UNGE:
8164 new_comparison = (comparison == UNGT ? GT : NE);
8165 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
8166 case UNLE:
8167 return gen_rtx_NE (VOIDmode, result, const2_rtx);
8168 case UNLT:
8169 tem = gen_reg_rtx (mode);
8170 if (TARGET_ARCH32)
8171 emit_insn (gen_andsi3 (tem, result, const1_rtx));
8172 else
8173 emit_insn (gen_anddi3 (tem, result, const1_rtx));
8174 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
8175 case UNEQ:
8176 case LTGT:
8177 tem = gen_reg_rtx (mode);
8178 if (TARGET_ARCH32)
8179 emit_insn (gen_addsi3 (tem, result, const1_rtx));
8180 else
8181 emit_insn (gen_adddi3 (tem, result, const1_rtx));
8182 tem2 = gen_reg_rtx (mode);
8183 if (TARGET_ARCH32)
8184 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
8185 else
8186 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
8187 new_comparison = (comparison == UNEQ ? EQ : NE);
8188 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
8191 gcc_unreachable ();
8194 /* Generate an unsigned DImode to FP conversion. This is the same code
8195 optabs would emit if we didn't have TFmode patterns. */
8197 void
8198 sparc_emit_floatunsdi (rtx *operands, machine_mode mode)
8200 rtx i0, i1, f0, in, out;
8202 out = operands[0];
8203 in = force_reg (DImode, operands[1]);
8204 rtx_code_label *neglab = gen_label_rtx ();
8205 rtx_code_label *donelab = gen_label_rtx ();
8206 i0 = gen_reg_rtx (DImode);
8207 i1 = gen_reg_rtx (DImode);
8208 f0 = gen_reg_rtx (mode);
8210 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
8212 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
8213 emit_jump_insn (gen_jump (donelab));
8214 emit_barrier ();
8216 emit_label (neglab);
8218 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
8219 emit_insn (gen_anddi3 (i1, in, const1_rtx));
8220 emit_insn (gen_iordi3 (i0, i0, i1));
8221 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
8222 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
8224 emit_label (donelab);
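/* A sketch of the negative path above: if bit 63 of IN is set, the
   value does not fit a signed DImode conversion, so IN is halved with
   a logical right shift while OR-ing back the discarded low bit to
   keep rounding correct; the halved value converts exactly as a
   signed number and the final f0 + f0 restores the magnitude.  */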
8227 /* Generate an FP to unsigned DImode conversion. This is the same code
8228 optabs would emit if we didn't have TFmode patterns. */
8230 void
8231 sparc_emit_fixunsdi (rtx *operands, machine_mode mode)
8233 rtx i0, i1, f0, in, out, limit;
8235 out = operands[0];
8236 in = force_reg (mode, operands[1]);
8237 rtx_code_label *neglab = gen_label_rtx ();
8238 rtx_code_label *donelab = gen_label_rtx ();
8239 i0 = gen_reg_rtx (DImode);
8240 i1 = gen_reg_rtx (DImode);
8241 limit = gen_reg_rtx (mode);
8242 f0 = gen_reg_rtx (mode);
8244 emit_move_insn (limit,
8245 const_double_from_real_value (
8246 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
8247 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
8249 emit_insn (gen_rtx_SET (out,
8250 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
8251 emit_jump_insn (gen_jump (donelab));
8252 emit_barrier ();
8254 emit_label (neglab);
8256 emit_insn (gen_rtx_SET (f0, gen_rtx_MINUS (mode, in, limit)));
8257 emit_insn (gen_rtx_SET (i0,
8258 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
8259 emit_insn (gen_movdi (i1, const1_rtx));
8260 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
8261 emit_insn (gen_xordi3 (out, i0, i1));
8263 emit_label (donelab);
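/* Likewise a sketch of the overflow path above: inputs below 2^63
   convert directly; otherwise 2^63 (LIMIT) is subtracted first so the
   signed fix is in range, and the missing 2^63 is added back by
   XOR-ing bit 63 into the integer result.  */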
8266 /* Return the string to output a compare and branch instruction to DEST.
8267 DEST is the destination insn (i.e. the label), INSN is the source,
8268 and OP is the conditional expression. */
8270 const char *
8271 output_cbcond (rtx op, rtx dest, rtx_insn *insn)
8273 machine_mode mode = GET_MODE (XEXP (op, 0));
8274 enum rtx_code code = GET_CODE (op);
8275 const char *cond_str, *tmpl;
8276 int far, emit_nop, len;
8277 static char string[64];
8278 char size_char;
8280 /* Compare and Branch is limited to +-2KB. If it is too far away,
8281 change
8283 cxbne X, Y, .LC30
8285 to
8287 cxbe X, Y, .+16
8288 nop
8289 ba,pt xcc, .LC30
8290 nop */
8292 len = get_attr_length (insn);
8294 far = len == 4;
8295 emit_nop = len == 2;
8297 if (far)
8298 code = reverse_condition (code);
8300 size_char = ((mode == SImode) ? 'w' : 'x');
8302 switch (code)
8304 case NE:
8305 cond_str = "ne";
8306 break;
8308 case EQ:
8309 cond_str = "e";
8310 break;
8312 case GE:
8313 cond_str = "ge";
8314 break;
8316 case GT:
8317 cond_str = "g";
8318 break;
8320 case LE:
8321 cond_str = "le";
8322 break;
8324 case LT:
8325 cond_str = "l";
8326 break;
8328 case GEU:
8329 cond_str = "cc";
8330 break;
8332 case GTU:
8333 cond_str = "gu";
8334 break;
8336 case LEU:
8337 cond_str = "leu";
8338 break;
8340 case LTU:
8341 cond_str = "cs";
8342 break;
8344 default:
8345 gcc_unreachable ();
8348 if (far)
8350 int veryfar = 1, delta;
8352 if (INSN_ADDRESSES_SET_P ())
8354 delta = (INSN_ADDRESSES (INSN_UID (dest))
8355 - INSN_ADDRESSES (INSN_UID (insn)));
8356 /* Leave some instructions for "slop". */
8357 if (delta >= -260000 && delta < 260000)
8358 veryfar = 0;
8361 if (veryfar)
8362 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8363 else
8364 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8366 else
8368 if (emit_nop)
8369 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8370 else
8371 tmpl = "c%cb%s\t%%1, %%2, %%3";
8374 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8376 return string;
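/* Purely illustrative expansions of the snprintf above: SImode
   operands with code EQ give "cwbe\t%1, %2, %3" in the short case,
   while DImode operands would give "cxbe"; the far case emits the
   reversed compare-and-branch over a "b" or "ba,pt" to the real
   target, as described in the comment at the top.  */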
8379 /* Return the string to output a conditional branch to LABEL, testing
8380 register REG. LABEL is the operand number of the label; REG is the
8381 operand number of the reg. OP is the conditional expression. The mode
8382 of REG says what kind of comparison we made.
8384 DEST is the destination insn (i.e. the label), INSN is the source.
8386 REVERSED is nonzero if we should reverse the sense of the comparison.
8388 ANNUL is nonzero if we should generate an annulling branch. */
8390 const char *
8391 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8392 int annul, rtx_insn *insn)
8394 static char string[64];
8395 enum rtx_code code = GET_CODE (op);
8396 machine_mode mode = GET_MODE (XEXP (op, 0));
8397 rtx note;
8398 int far;
8399 char *p;
8401 /* Branch-on-register instructions are limited to +-128KB. If it is too far away,
8402 change
8404 brnz,pt %g1, .LC30
8406 to
8408 brz,pn %g1, .+12
8409 nop
8410 ba,pt %xcc, .LC30
8412 and
8414 brgez,a,pn %o1, .LC29
8416 to
8418 brlz,pt %o1, .+16
8419 nop
8420 ba,pt %xcc, .LC29 */
8422 far = get_attr_length (insn) >= 3;
8424 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8425 if (reversed ^ far)
8426 code = reverse_condition (code);
8428 /* Only 64-bit versions of these instructions exist. */
8429 gcc_assert (mode == DImode);
8431 /* Start by writing the branch condition. */
8433 switch (code)
8435 case NE:
8436 strcpy (string, "brnz");
8437 break;
8439 case EQ:
8440 strcpy (string, "brz");
8441 break;
8443 case GE:
8444 strcpy (string, "brgez");
8445 break;
8447 case LT:
8448 strcpy (string, "brlz");
8449 break;
8451 case LE:
8452 strcpy (string, "brlez");
8453 break;
8455 case GT:
8456 strcpy (string, "brgz");
8457 break;
8459 default:
8460 gcc_unreachable ();
8463 p = strchr (string, '\0');
8465 /* Now add the annulling, reg, label, and nop. */
8466 if (annul && ! far)
8468 strcpy (p, ",a");
8469 p += 2;
8472 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8474 strcpy (p,
8475 ((profile_probability::from_reg_br_prob_note (XINT (note, 0))
8476 >= profile_probability::even ()) ^ far)
8477 ? ",pt" : ",pn");
8478 p += 3;
8481 *p = p < string + 8 ? '\t' : ' ';
8482 p++;
8483 *p++ = '%';
8484 *p++ = '0' + reg;
8485 *p++ = ',';
8486 *p++ = ' ';
8487 if (far)
8489 int veryfar = 1, delta;
8491 if (INSN_ADDRESSES_SET_P ())
8493 delta = (INSN_ADDRESSES (INSN_UID (dest))
8494 - INSN_ADDRESSES (INSN_UID (insn)));
8495 /* Leave some instructions for "slop". */
8496 if (delta >= -260000 && delta < 260000)
8497 veryfar = 0;
8500 strcpy (p, ".+12\n\t nop\n\t");
8501 /* Skip the next insn if requested or
8502 if we know that it will be a nop. */
8503 if (annul || ! final_sequence)
8504 p[3] = '6';
8505 p += 12;
8506 if (veryfar)
8508 strcpy (p, "b\t");
8509 p += 2;
8511 else
8513 strcpy (p, "ba,pt\t%%xcc, ");
8514 p += 13;
8517 *p++ = '%';
8518 *p++ = 'l';
8519 *p++ = '0' + label;
8520 *p++ = '%';
8521 *p++ = '#';
8522 *p = '\0';
8524 return string;
8527 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8528 Such instructions cannot be used in the delay slot of return insn on v9.
8529 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
8532 static int
8533 epilogue_renumber (register rtx *where, int test)
8535 register const char *fmt;
8536 register int i;
8537 register enum rtx_code code;
8539 if (*where == 0)
8540 return 0;
8542 code = GET_CODE (*where);
8544 switch (code)
8546 case REG:
8547 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8548 return 1;
8549 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8550 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8551 /* fallthrough */
8552 case SCRATCH:
8553 case CC0:
8554 case PC:
8555 case CONST_INT:
8556 case CONST_WIDE_INT:
8557 case CONST_DOUBLE:
8558 return 0;
8560 /* Do not replace the frame pointer with the stack pointer because
8561 it can cause the delayed instruction to load below the stack.
8562 This occurs when instructions like:
8564 (set (reg/i:SI 24 %i0)
8565 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8566 (const_int -20 [0xffffffec])) 0))
8568 are in the return delayed slot. */
8569 case PLUS:
8570 if (GET_CODE (XEXP (*where, 0)) == REG
8571 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8572 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8573 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8574 return 1;
8575 break;
8577 case MEM:
8578 if (SPARC_STACK_BIAS
8579 && GET_CODE (XEXP (*where, 0)) == REG
8580 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8581 return 1;
8582 break;
8584 default:
8585 break;
8588 fmt = GET_RTX_FORMAT (code);
8590 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8592 if (fmt[i] == 'E')
8594 register int j;
8595 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8596 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8597 return 1;
8599 else if (fmt[i] == 'e'
8600 && epilogue_renumber (&(XEXP (*where, i)), test))
8601 return 1;
8603 return 0;
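/* Example of the renaming performed when TEST == 0: a delay-slot insn
   such as "mov %i1, %i0" is rewritten to use %o1 and %o0, since the
   V9 return instruction has already switched back to the caller's
   register window when the delay slot executes, making the old input
   registers visible as the caller's outputs (assuming the usual
   OUTGOING_REGNO mapping of %iN to %oN).  */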
8606 /* Leaf functions and non-leaf functions have different needs. */
8608 static const int
8609 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8611 static const int
8612 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8614 static const int *const reg_alloc_orders[] = {
8615 reg_leaf_alloc_order,
8616 reg_nonleaf_alloc_order};
8618 void
8619 order_regs_for_local_alloc (void)
8621 static int last_order_nonleaf = 1;
8623 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8625 last_order_nonleaf = !last_order_nonleaf;
8626 memcpy ((char *) reg_alloc_order,
8627 (const char *) reg_alloc_orders[last_order_nonleaf],
8628 FIRST_PSEUDO_REGISTER * sizeof (int));
8632 /* Return 1 if REG and MEM are legitimate enough to allow the various
8633 MEM<-->REG splits to be run. */
8636 sparc_split_reg_mem_legitimate (rtx reg, rtx mem)
8638 /* Punt if we are here by mistake. */
8639 gcc_assert (reload_completed);
8641 /* We must have an offsettable memory reference. */
8642 if (!offsettable_memref_p (mem))
8643 return 0;
8645 /* If we have legitimate args for ldd/std, we do not want
8646 the split to happen. */
8647 if ((REGNO (reg) % 2) == 0 && mem_min_alignment (mem, 8))
8648 return 0;
8650 /* Success. */
8651 return 1;
8654 /* Split a REG <-- MEM move into a pair of moves in MODE. */
8656 void
8657 sparc_split_reg_mem (rtx dest, rtx src, machine_mode mode)
8659 rtx high_part = gen_highpart (mode, dest);
8660 rtx low_part = gen_lowpart (mode, dest);
8661 rtx word0 = adjust_address (src, mode, 0);
8662 rtx word1 = adjust_address (src, mode, 4);
8664 if (reg_overlap_mentioned_p (high_part, word1))
8666 emit_move_insn_1 (low_part, word1);
8667 emit_move_insn_1 (high_part, word0);
8669 else
8671 emit_move_insn_1 (high_part, word0);
8672 emit_move_insn_1 (low_part, word1);
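/* E.g. a DImode load of %o0:%o1 that cannot use ldd is split into
   "ld [addr], %o0; ld [addr+4], %o1"; if the address itself uses %o0,
   the overlap test above emits the two loads in the opposite order so
   the base register is not clobbered before the second access.  */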
8676 /* Split a MEM <-- REG move into a pair of moves in MODE. */
8678 void
8679 sparc_split_mem_reg (rtx dest, rtx src, machine_mode mode)
8681 rtx word0 = adjust_address (dest, mode, 0);
8682 rtx word1 = adjust_address (dest, mode, 4);
8683 rtx high_part = gen_highpart (mode, src);
8684 rtx low_part = gen_lowpart (mode, src);
8686 emit_move_insn_1 (word0, high_part);
8687 emit_move_insn_1 (word1, low_part);
8690 /* Like sparc_split_reg_mem_legitimate but for REG <--> REG moves. */
8693 sparc_split_reg_reg_legitimate (rtx reg1, rtx reg2)
8695 /* Punt if we are here by mistake. */
8696 gcc_assert (reload_completed);
8698 if (GET_CODE (reg1) == SUBREG)
8699 reg1 = SUBREG_REG (reg1);
8700 if (GET_CODE (reg1) != REG)
8701 return 0;
8702 const int regno1 = REGNO (reg1);
8704 if (GET_CODE (reg2) == SUBREG)
8705 reg2 = SUBREG_REG (reg2);
8706 if (GET_CODE (reg2) != REG)
8707 return 0;
8708 const int regno2 = REGNO (reg2);
8710 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
8711 return 1;
8713 if (TARGET_VIS3)
8715 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
8716 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
8717 return 1;
8720 return 0;
8723 /* Split a REG <--> REG move into a pair of moves in MODE. */
8725 void
8726 sparc_split_reg_reg (rtx dest, rtx src, machine_mode mode)
8728 rtx dest1 = gen_highpart (mode, dest);
8729 rtx dest2 = gen_lowpart (mode, dest);
8730 rtx src1 = gen_highpart (mode, src);
8731 rtx src2 = gen_lowpart (mode, src);
8733 /* Now emit using the real source and destination we found, swapping
8734 the order if we detect overlap. */
8735 if (reg_overlap_mentioned_p (dest1, src2))
8737 emit_move_insn_1 (dest2, src2);
8738 emit_move_insn_1 (dest1, src1);
8740 else
8742 emit_move_insn_1 (dest1, src1);
8743 emit_move_insn_1 (dest2, src2);
8747 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
8748 This makes them candidates for using ldd and std insns.
8750 Note reg1 and reg2 *must* be hard registers. */
8753 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
8755 /* We might have been passed a SUBREG. */
8756 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
8757 return 0;
8759 if (REGNO (reg1) % 2 != 0)
8760 return 0;
8762 /* Integer ldd is deprecated in SPARC V9 */
8763 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
8764 return 0;
8766 return (REGNO (reg1) == REGNO (reg2) - 1);
8769 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
8770 an ldd or std insn.
8772 This can only happen when addr1 and addr2, the addresses in mem1
8773 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
8774 addr1 must also be aligned on a 64-bit boundary.
8776 Also iff dependent_reg_rtx is not null it should not be used to
8777 compute the address for mem1, i.e. we cannot optimize a sequence
8778 like:
8779 ld [%o0], %o0
8780 ld [%o0 + 4], %o1
8781 to
8782 ldd [%o0], %o0
8783 nor:
8784 ld [%g3 + 4], %g3
8785 ld [%g3], %g2
8786 to
8787 ldd [%g3], %g2
8789 But, note that the transformation from:
8790 ld [%g2 + 4], %g3
8791 ld [%g2], %g2
8792 to
8793 ldd [%g2], %g2
8794 is perfectly fine. Thus, the peephole2 patterns always pass us
8795 the destination register of the first load, never the second one.
8797 For stores we don't have a similar problem, so dependent_reg_rtx is
8798 NULL_RTX. */
8801 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
8803 rtx addr1, addr2;
8804 unsigned int reg1;
8805 HOST_WIDE_INT offset1;
8807 /* The mems cannot be volatile. */
8808 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
8809 return 0;
8811 /* MEM1 should be aligned on a 64-bit boundary. */
8812 if (MEM_ALIGN (mem1) < 64)
8813 return 0;
8815 addr1 = XEXP (mem1, 0);
8816 addr2 = XEXP (mem2, 0);
8818 /* Extract a register number and offset (if used) from the first addr. */
8819 if (GET_CODE (addr1) == PLUS)
8821 /* If not a REG, return zero. */
8822 if (GET_CODE (XEXP (addr1, 0)) != REG)
8823 return 0;
8824 else
8826 reg1 = REGNO (XEXP (addr1, 0));
8827 /* The offset must be constant! */
8828 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
8829 return 0;
8830 offset1 = INTVAL (XEXP (addr1, 1));
8833 else if (GET_CODE (addr1) != REG)
8834 return 0;
8835 else
8837 reg1 = REGNO (addr1);
8838 /* This was a simple (mem (reg)) expression. Offset is 0. */
8839 offset1 = 0;
8842 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
8843 if (GET_CODE (addr2) != PLUS)
8844 return 0;
8846 if (GET_CODE (XEXP (addr2, 0)) != REG
8847 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
8848 return 0;
8850 if (reg1 != REGNO (XEXP (addr2, 0)))
8851 return 0;
8853 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
8854 return 0;
8856 /* The first offset must be evenly divisible by 8 to ensure the
8857 address is 64-bit aligned. */
8858 if (offset1 % 8 != 0)
8859 return 0;
8861 /* The offset for the second addr must be 4 more than the first addr. */
8862 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
8863 return 0;
8865 /* All the tests passed. addr1 and addr2 are valid for ldd and std
8866 instructions. */
8867 return 1;
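/* To illustrate the checks above: [%o0 + 8] and [%o0 + 12] qualify
   (same base register, first offset a multiple of 8, second offset
   4 higher), assuming MEM_ALIGN reports 64-bit alignment, so the
   pair may become a single ldd/std at [%o0 + 8]; [%o0 + 4] and
   [%o0 + 8] fail on the first offset.  */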
8870 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
8873 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, machine_mode mode)
8875 rtx x = widen_memory_access (mem1, mode, 0);
8876 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
8877 return x;
8880 /* Return 1 if reg is a pseudo, or is the first register in
8881 a hard register pair. This makes it suitable for use in
8882 ldd and std insns. */
8885 register_ok_for_ldd (rtx reg)
8887 /* We might have been passed a SUBREG. */
8888 if (!REG_P (reg))
8889 return 0;
8891 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
8892 return (REGNO (reg) % 2 == 0);
8894 return 1;
8897 /* Return 1 if OP, a MEM, has an address which is known to be
8898 aligned to an 8-byte boundary. */
8901 memory_ok_for_ldd (rtx op)
8903 /* In 64-bit mode, we assume that the address is word-aligned. */
8904 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
8905 return 0;
8907 if (! can_create_pseudo_p ()
8908 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
8909 return 0;
8911 return 1;
8914 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8916 static bool
8917 sparc_print_operand_punct_valid_p (unsigned char code)
8919 if (code == '#'
8920 || code == '*'
8921 || code == '('
8922 || code == ')'
8923 || code == '_'
8924 || code == '&')
8925 return true;
8927 return false;
8930 /* Implement TARGET_PRINT_OPERAND.
8931 Print operand X (an rtx) in assembler syntax to file FILE.
8932 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8933 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8935 static void
8936 sparc_print_operand (FILE *file, rtx x, int code)
8938 const char *s;
8940 switch (code)
8942 case '#':
8943 /* Output an insn in a delay slot. */
8944 if (final_sequence)
8945 sparc_indent_opcode = 1;
8946 else
8947 fputs ("\n\t nop", file);
8948 return;
8949 case '*':
8950 /* Output an annul flag if there's nothing for the delay slot and we
8951 are optimizing. This is always used with '(' below.
8952 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8953 this is a dbx bug. So, we only do this when optimizing.
8954 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8955 Always emit a nop in case the next instruction is a branch. */
8956 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8957 fputs (",a", file);
8958 return;
8959 case '(':
8960 /* Output a 'nop' if there's nothing for the delay slot and we are
8961 not optimizing. This is always used with '*' above. */
8962 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8963 fputs ("\n\t nop", file);
8964 else if (final_sequence)
8965 sparc_indent_opcode = 1;
8966 return;
8967 case ')':
8968 /* Output the right displacement from the saved PC on function return.
8969 The caller may have placed an "unimp" insn immediately after the call
8970 so we have to account for it. This insn is used in the 32-bit ABI
8971 when calling a function that returns a non zero-sized structure. The
8972 64-bit ABI doesn't have it. Be careful to have this test be the same
8973 as that for the call. The exception is when sparc_std_struct_return
8974 is enabled, the psABI is followed exactly and the adjustment is made
8975 by the code in sparc_struct_value_rtx. The call emitted is the same
8976 when sparc_std_struct_return is enabled. */
8977 if (!TARGET_ARCH64
8978 && cfun->returns_struct
8979 && !sparc_std_struct_return
8980 && DECL_SIZE (DECL_RESULT (current_function_decl))
8981 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8982 == INTEGER_CST
8983 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8984 fputs ("12", file);
8985 else
8986 fputc ('8', file);
8987 return;
8988 case '_':
8989 /* Output the Embedded Medium/Anywhere code model base register. */
8990 fputs (EMBMEDANY_BASE_REG, file);
8991 return;
8992 case '&':
8993 /* Print some local dynamic TLS name. */
8994 if (const char *name = get_some_local_dynamic_name ())
8995 assemble_name (file, name);
8996 else
8997 output_operand_lossage ("'%%&' used without any "
8998 "local dynamic TLS references");
8999 return;
9001 case 'Y':
9002 /* Adjust the operand to take into account a RESTORE operation. */
9003 if (GET_CODE (x) == CONST_INT)
9004 break;
9005 else if (GET_CODE (x) != REG)
9006 output_operand_lossage ("invalid %%Y operand");
9007 else if (REGNO (x) < 8)
9008 fputs (reg_names[REGNO (x)], file);
9009 else if (REGNO (x) >= 24 && REGNO (x) < 32)
9010 fputs (reg_names[REGNO (x)-16], file);
9011 else
9012 output_operand_lossage ("invalid %%Y operand");
9013 return;
9014 case 'L':
9015 /* Print out the low order register name of a register pair. */
9016 if (WORDS_BIG_ENDIAN)
9017 fputs (reg_names[REGNO (x)+1], file);
9018 else
9019 fputs (reg_names[REGNO (x)], file);
9020 return;
9021 case 'H':
9022 /* Print out the high order register name of a register pair. */
9023 if (WORDS_BIG_ENDIAN)
9024 fputs (reg_names[REGNO (x)], file);
9025 else
9026 fputs (reg_names[REGNO (x)+1], file);
9027 return;
9028 case 'R':
9029 /* Print out the second register name of a register pair or quad.
9030 I.e., R (%o0) => %o1. */
9031 fputs (reg_names[REGNO (x)+1], file);
9032 return;
9033 case 'S':
9034 /* Print out the third register name of a register quad.
9035 I.e., S (%o0) => %o2. */
9036 fputs (reg_names[REGNO (x)+2], file);
9037 return;
9038 case 'T':
9039 /* Print out the fourth register name of a register quad.
9040 I.e., T (%o0) => %o3. */
9041 fputs (reg_names[REGNO (x)+3], file);
9042 return;
9043 case 'x':
9044 /* Print a condition code register. */
9045 if (REGNO (x) == SPARC_ICC_REG)
9047 switch (GET_MODE (x))
9049 case CCmode:
9050 case CCNZmode:
9051 case CCCmode:
9052 case CCVmode:
9053 s = "%icc";
9054 break;
9055 case CCXmode:
9056 case CCXNZmode:
9057 case CCXCmode:
9058 case CCXVmode:
9059 s = "%xcc";
9060 break;
9061 default:
9062 gcc_unreachable ();
9064 fputs (s, file);
9066 else
9067 /* %fccN register */
9068 fputs (reg_names[REGNO (x)], file);
9069 return;
9070 case 'm':
9071 /* Print the operand's address only. */
9072 output_address (GET_MODE (x), XEXP (x, 0));
9073 return;
9074 case 'r':
9075 /* In this case we need a register. Use %g0 if the
9076 operand is const0_rtx. */
9077 if (x == const0_rtx
9078 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
9080 fputs ("%g0", file);
9081 return;
9083 else
9084 break;
9086 case 'A':
9087 switch (GET_CODE (x))
9089 case IOR:
9090 s = "or";
9091 break;
9092 case AND:
9093 s = "and";
9094 break;
9095 case XOR:
9096 s = "xor";
9097 break;
9098 default:
9099 output_operand_lossage ("invalid %%A operand");
9100 s = "";
9101 break;
9103 fputs (s, file);
9104 return;
9106 case 'B':
9107 switch (GET_CODE (x))
9109 case IOR:
9110 s = "orn";
9111 break;
9112 case AND:
9113 s = "andn";
9114 break;
9115 case XOR:
9116 s = "xnor";
9117 break;
9118 default:
9119 output_operand_lossage ("invalid %%B operand");
9120 s = "";
9121 break;
9123 fputs (s, file);
9124 return;
9126 /* This is used by the conditional move instructions. */
9127 case 'C':
9129 machine_mode mode = GET_MODE (XEXP (x, 0));
9130 switch (GET_CODE (x))
9132 case NE:
9133 if (mode == CCVmode || mode == CCXVmode)
9134 s = "vs";
9135 else
9136 s = "ne";
9137 break;
9138 case EQ:
9139 if (mode == CCVmode || mode == CCXVmode)
9140 s = "vc";
9141 else
9142 s = "e";
9143 break;
9144 case GE:
9145 if (mode == CCNZmode || mode == CCXNZmode)
9146 s = "pos";
9147 else
9148 s = "ge";
9149 break;
9150 case GT:
9151 s = "g";
9152 break;
9153 case LE:
9154 s = "le";
9155 break;
9156 case LT:
9157 if (mode == CCNZmode || mode == CCXNZmode)
9158 s = "neg";
9159 else
9160 s = "l";
9161 break;
9162 case GEU:
9163 s = "geu";
9164 break;
9165 case GTU:
9166 s = "gu";
9167 break;
9168 case LEU:
9169 s = "leu";
9170 break;
9171 case LTU:
9172 s = "lu";
9173 break;
9174 case LTGT:
9175 s = "lg";
9176 break;
9177 case UNORDERED:
9178 s = "u";
9179 break;
9180 case ORDERED:
9181 s = "o";
9182 break;
9183 case UNLT:
9184 s = "ul";
9185 break;
9186 case UNLE:
9187 s = "ule";
9188 break;
9189 case UNGT:
9190 s = "ug";
9191 break;
9192 case UNGE:
9193 s = "uge";
9194 break;
9195 case UNEQ:
9196 s = "ue";
9197 break;
9198 default:
9199 output_operand_lossage ("invalid %%C operand");
9200 s = "";
9201 break;
9203 fputs (s, file);
9204 return;
9207 /* These are used by the movr instruction pattern. */
9208 case 'D':
9210 switch (GET_CODE (x))
9212 case NE:
9213 s = "ne";
9214 break;
9215 case EQ:
9216 s = "e";
9217 break;
9218 case GE:
9219 s = "gez";
9220 break;
9221 case LT:
9222 s = "lz";
9223 break;
9224 case LE:
9225 s = "lez";
9226 break;
9227 case GT:
9228 s = "gz";
9229 break;
9230 default:
9231 output_operand_lossage ("invalid %%D operand");
9232 s = "";
9233 break;
9235 fputs (s, file);
9236 return;
9239 case 'b':
9241 /* Print a sign-extended character. */
9242 int i = trunc_int_for_mode (INTVAL (x), QImode);
9243 fprintf (file, "%d", i);
9244 return;
9247 case 'f':
9248 /* Operand must be a MEM; write its address. */
9249 if (GET_CODE (x) != MEM)
9250 output_operand_lossage ("invalid %%f operand");
9251 output_address (GET_MODE (x), XEXP (x, 0));
9252 return;
9254 case 's':
9256 /* Print a sign-extended 32-bit value. */
9257 HOST_WIDE_INT i;
9258 if (GET_CODE(x) == CONST_INT)
9259 i = INTVAL (x);
9260 else
9262 output_operand_lossage ("invalid %%s operand");
9263 return;
9265 i = trunc_int_for_mode (i, SImode);
9266 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
9267 return;
9270 case 0:
9271 /* Do nothing special. */
9272 break;
9274 default:
9275 /* Undocumented flag. */
9276 output_operand_lossage ("invalid operand output code");
9279 if (GET_CODE (x) == REG)
9280 fputs (reg_names[REGNO (x)], file);
9281 else if (GET_CODE (x) == MEM)
9283 fputc ('[', file);
9284 /* Poor Sun assembler doesn't understand absolute addressing. */
9285 if (CONSTANT_P (XEXP (x, 0)))
9286 fputs ("%g0+", file);
9287 output_address (GET_MODE (x), XEXP (x, 0));
9288 fputc (']', file);
9290 else if (GET_CODE (x) == HIGH)
9292 fputs ("%hi(", file);
9293 output_addr_const (file, XEXP (x, 0));
9294 fputc (')', file);
9296 else if (GET_CODE (x) == LO_SUM)
9298 sparc_print_operand (file, XEXP (x, 0), 0);
9299 if (TARGET_CM_MEDMID)
9300 fputs ("+%l44(", file);
9301 else
9302 fputs ("+%lo(", file);
9303 output_addr_const (file, XEXP (x, 1));
9304 fputc (')', file);
9306 else if (GET_CODE (x) == CONST_DOUBLE)
9307 output_operand_lossage ("floating-point constant not a valid immediate operand");
9308 else
9309 output_addr_const (file, x);
9312 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9314 static void
9315 sparc_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9317 register rtx base, index = 0;
9318 int offset = 0;
9319 register rtx addr = x;
9321 if (REG_P (addr))
9322 fputs (reg_names[REGNO (addr)], file);
9323 else if (GET_CODE (addr) == PLUS)
9325 if (CONST_INT_P (XEXP (addr, 0)))
9326 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
9327 else if (CONST_INT_P (XEXP (addr, 1)))
9328 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
9329 else
9330 base = XEXP (addr, 0), index = XEXP (addr, 1);
9331 if (GET_CODE (base) == LO_SUM)
9333 gcc_assert (USE_AS_OFFSETABLE_LO10
9334 && TARGET_ARCH64
9335 && ! TARGET_CM_MEDMID);
9336 output_operand (XEXP (base, 0), 0);
9337 fputs ("+%lo(", file);
9338 output_address (VOIDmode, XEXP (base, 1));
9339 fprintf (file, ")+%d", offset);
9341 else
9343 fputs (reg_names[REGNO (base)], file);
9344 if (index == 0)
9345 fprintf (file, "%+d", offset);
9346 else if (REG_P (index))
9347 fprintf (file, "+%s", reg_names[REGNO (index)]);
9348 else if (GET_CODE (index) == SYMBOL_REF
9349 || GET_CODE (index) == LABEL_REF
9350 || GET_CODE (index) == CONST)
9351 fputc ('+', file), output_addr_const (file, index);
9352 else gcc_unreachable ();
9355 else if (GET_CODE (addr) == MINUS
9356 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
9358 output_addr_const (file, XEXP (addr, 0));
9359 fputs ("-(", file);
9360 output_addr_const (file, XEXP (addr, 1));
9361 fputs ("-.)", file);
9363 else if (GET_CODE (addr) == LO_SUM)
9365 output_operand (XEXP (addr, 0), 0);
9366 if (TARGET_CM_MEDMID)
9367 fputs ("+%l44(", file);
9368 else
9369 fputs ("+%lo(", file);
9370 output_address (VOIDmode, XEXP (addr, 1));
9371 fputc (')', file);
9373 else if (flag_pic
9374 && GET_CODE (addr) == CONST
9375 && GET_CODE (XEXP (addr, 0)) == MINUS
9376 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
9377 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
9378 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
9380 addr = XEXP (addr, 0);
9381 output_addr_const (file, XEXP (addr, 0));
9382 /* Group the args of the second CONST in parenthesis. */
9383 fputs ("-(", file);
9384 /* Skip past the second CONST--it does nothing for us. */
9385 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
9386 /* Close the parenthesis. */
9387 fputc (')', file);
9389 else
9391 output_addr_const (file, addr);
9395 /* Target hook for assembling integer objects. The sparc version has
9396 special handling for aligned DI-mode objects. */
9398 static bool
9399 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9401 /* ??? We only output .xword's for symbols and only then in environments
9402 where the assembler can handle them. */
9403 if (aligned_p && size == 8 && GET_CODE (x) != CONST_INT)
9405 if (TARGET_V9)
9407 assemble_integer_with_op ("\t.xword\t", x);
9408 return true;
9410 else
9412 assemble_aligned_integer (4, const0_rtx);
9413 assemble_aligned_integer (4, x);
9414 return true;
9417 return default_assemble_integer (x, size, aligned_p);
9420 /* Return the value of a code used in the .proc pseudo-op that says
9421 what kind of result this function returns. For non-C types, we pick
9422 the closest C type. */
9424 #ifndef SHORT_TYPE_SIZE
9425 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9426 #endif
9428 #ifndef INT_TYPE_SIZE
9429 #define INT_TYPE_SIZE BITS_PER_WORD
9430 #endif
9432 #ifndef LONG_TYPE_SIZE
9433 #define LONG_TYPE_SIZE BITS_PER_WORD
9434 #endif
9436 #ifndef LONG_LONG_TYPE_SIZE
9437 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9438 #endif
9440 #ifndef FLOAT_TYPE_SIZE
9441 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9442 #endif
9444 #ifndef DOUBLE_TYPE_SIZE
9445 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9446 #endif
9448 #ifndef LONG_DOUBLE_TYPE_SIZE
9449 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9450 #endif
9452 unsigned long
9453 sparc_type_code (register tree type)
9455 register unsigned long qualifiers = 0;
9456 register unsigned shift;
9458 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9459 setting more, since some assemblers will give an error for this. Also,
9460 we must be careful to avoid shifts of 32 bits or more to avoid getting
9461 unpredictable results. */
9463 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9465 switch (TREE_CODE (type))
9467 case ERROR_MARK:
9468 return qualifiers;
9470 case ARRAY_TYPE:
9471 qualifiers |= (3 << shift);
9472 break;
9474 case FUNCTION_TYPE:
9475 case METHOD_TYPE:
9476 qualifiers |= (2 << shift);
9477 break;
9479 case POINTER_TYPE:
9480 case REFERENCE_TYPE:
9481 case OFFSET_TYPE:
9482 qualifiers |= (1 << shift);
9483 break;
9485 case RECORD_TYPE:
9486 return (qualifiers | 8);
9488 case UNION_TYPE:
9489 case QUAL_UNION_TYPE:
9490 return (qualifiers | 9);
9492 case ENUMERAL_TYPE:
9493 return (qualifiers | 10);
9495 case VOID_TYPE:
9496 return (qualifiers | 16);
9498 case INTEGER_TYPE:
9499 /* If this is a range type, consider it to be the underlying
9500 type. */
9501 if (TREE_TYPE (type) != 0)
9502 break;
9504 /* Carefully distinguish all the standard types of C,
9505 without messing up if the language is not C. We do this by
9506 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9507 look at both the names and the above fields, but that's redundant.
9508 Any type whose size is between two C types will be considered
9509 to be the wider of the two types. Also, we do not have a
9510 special code to use for "long long", so anything wider than
9511 long is treated the same. Note that we can't distinguish
9512 between "int" and "long" in this code if they are the same
9513 size, but that's fine, since neither can the assembler. */
9515 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9516 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9518 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9519 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9521 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9522 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9524 else
9525 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9527 case REAL_TYPE:
9528 /* If this is a range type, consider it to be the underlying
9529 type. */
9530 if (TREE_TYPE (type) != 0)
9531 break;
9533 /* Carefully distinguish all the standard types of C,
9534 without messing up if the language is not C. */
9536 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9537 return (qualifiers | 6);
9539 else
9540 return (qualifiers | 7);
9542 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9543 /* ??? We need to distinguish between double and float complex types,
9544 but I don't know how yet because I can't reach this code from
9545 existing front-ends. */
9546 return (qualifiers | 7); /* Who knows? */
9548 case VECTOR_TYPE:
9549 case BOOLEAN_TYPE: /* Boolean truth value type. */
9550 case LANG_TYPE:
9551 case NULLPTR_TYPE:
9552 return qualifiers;
9554 default:
9555 gcc_unreachable (); /* Not a type! */
9559 return qualifiers;
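/* A worked example of the encoding above: for the C type "int *", the
   first iteration adds the pointer qualifier (1 << 6), the second
   reaches the underlying int and returns base code 4, so the .proc
   value is (1 << 6) | 4 == 0x44.  */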
9562 /* Nested function support. */
9564 /* Emit RTL insns to initialize the variable parts of a trampoline.
9565 FNADDR is an RTX for the address of the function's pure code.
9566 CXT is an RTX for the static chain value for the function.
9568 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9569 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9570 (to store insns). This is a bit excessive. Perhaps a different
9571 mechanism would be better here.
9573 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9575 static void
9576 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9578 /* SPARC 32-bit trampoline:
9580 sethi %hi(fn), %g1
9581 sethi %hi(static), %g2
9582 jmp %g1+%lo(fn)
9583 or %g2, %lo(static), %g2
9585 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9586 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9587 */
9589 emit_move_insn
9590 (adjust_address (m_tramp, SImode, 0),
9591 expand_binop (SImode, ior_optab,
9592 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9593 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9594 NULL_RTX, 1, OPTAB_DIRECT));
9596 emit_move_insn
9597 (adjust_address (m_tramp, SImode, 4),
9598 expand_binop (SImode, ior_optab,
9599 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9600 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9601 NULL_RTX, 1, OPTAB_DIRECT));
9603 emit_move_insn
9604 (adjust_address (m_tramp, SImode, 8),
9605 expand_binop (SImode, ior_optab,
9606 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9607 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9608 NULL_RTX, 1, OPTAB_DIRECT));
9610 emit_move_insn
9611 (adjust_address (m_tramp, SImode, 12),
9612 expand_binop (SImode, ior_optab,
9613 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9614 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9615 NULL_RTX, 1, OPTAB_DIRECT));
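/* Decoded under standard SPARC opcode encodings, the magic constants
   above are the instruction templates from the comment at the top of
   this function: 0x03000000 is "sethi %hi(0), %g1", 0x05000000 is
   "sethi %hi(0), %g2", 0x81c06000 is "jmp %g1+%lo(0)" and 0x8410a000
   is "or %g2, %lo(0), %g2".  The top 22 bits of FNADDR/CXT are OR-ed
   into the sethi immediates, the low 10 bits into the jmp/or ones.  */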
9617 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9618 aligned on a 16 byte boundary so one flush clears it all. */
9619 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 0))));
9620 if (sparc_cpu != PROCESSOR_ULTRASPARC
9621 && sparc_cpu != PROCESSOR_ULTRASPARC3
9622 && sparc_cpu != PROCESSOR_NIAGARA
9623 && sparc_cpu != PROCESSOR_NIAGARA2
9624 && sparc_cpu != PROCESSOR_NIAGARA3
9625 && sparc_cpu != PROCESSOR_NIAGARA4
9626 && sparc_cpu != PROCESSOR_NIAGARA7
9627 && sparc_cpu != PROCESSOR_M8)
9628 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp, SImode, 8))));
9630 /* Call __enable_execute_stack after writing onto the stack to make sure
9631 the stack address is accessible. */
9632 #ifdef HAVE_ENABLE_EXECUTE_STACK
9633 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9634 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
9635 #endif
9639 /* The 64-bit version is simpler because it makes more sense to load the
9640 values as "immediate" data out of the trampoline. It's also easier since
9641 we can read the PC without clobbering a register. */
9643 static void
9644 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9646 /* SPARC 64-bit trampoline:
9648 rd %pc, %g1
9649 ldx [%g1+24], %g5
9650 jmp %g5
9651 ldx [%g1+16], %g5
9652 +16 bytes data
9654 */
9655 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9656 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9657 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9658 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
9659 emit_move_insn (adjust_address (m_tramp, SImode, 8),
9660 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
9661 emit_move_insn (adjust_address (m_tramp, SImode, 12),
9662 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
9663 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
9664 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
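/* Decoded likewise: 0x83414000 is "rd %pc, %g1", 0xca586018 is
   "ldx [%g1+24], %g5", 0x81c14000 is "jmp %g5" and 0xca586010 is
   "ldx [%g1+16], %g5", matching the template above; the two DImode
   stores then place CXT and FNADDR in the data words that the ldx
   instructions fetch at run time.  */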
9665 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
9667 if (sparc_cpu != PROCESSOR_ULTRASPARC
9668 && sparc_cpu != PROCESSOR_ULTRASPARC3
9669 && sparc_cpu != PROCESSOR_NIAGARA
9670 && sparc_cpu != PROCESSOR_NIAGARA2
9671 && sparc_cpu != PROCESSOR_NIAGARA3
9672 && sparc_cpu != PROCESSOR_NIAGARA4
9673 && sparc_cpu != PROCESSOR_NIAGARA7
9674 && sparc_cpu != PROCESSOR_M8)
9675 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
9677 /* Call __enable_execute_stack after writing onto the stack to make sure
9678 the stack address is accessible. */
9679 #ifdef HAVE_ENABLE_EXECUTE_STACK
9680 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9681 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
9682 #endif
9685 /* Worker for TARGET_TRAMPOLINE_INIT. */
9687 static void
9688 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9690 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
9691 cxt = force_reg (Pmode, cxt);
9692 if (TARGET_ARCH64)
9693 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
9694 else
9695 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
9698 /* Adjust the cost of a scheduling dependency. Return the new cost of
9699 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
9701 static int
9702 supersparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
9703 int cost)
9705 enum attr_type insn_type;
9707 if (recog_memoized (insn) < 0)
9708 return cost;
9710 insn_type = get_attr_type (insn);
9712 if (dep_type == 0)
9714 /* Data dependency; DEP_INSN writes a register that INSN reads some
9715 cycles later. */
9717 /* If a load, then the dependence must be on the memory address;
9718 add an extra "cycle". Note that the cost could be two cycles
9719 if the reg was written late in an instruction group; we cannot tell
9720 here. */
9721 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
9722 return cost + 3;
9724 /* Get the delay only if the address of the store is the dependence. */
9725 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
9727 rtx pat = PATTERN(insn);
9728 rtx dep_pat = PATTERN (dep_insn);
9730 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
9731 return cost; /* This should not happen! */
9733 /* The dependency between the two instructions was on the data that
9734 is being stored. Assume that this implies that the address of the
9735 store is not dependent. */
9736 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
9737 return cost;
9739 return cost + 3; /* An approximation. */
9742 /* A shift instruction cannot receive its data from an instruction
9743 in the same cycle; add a one cycle penalty. */
9744 if (insn_type == TYPE_SHIFT)
9745 return cost + 3; /* Split before cascade into shift. */
9747 else
9749 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
9750 INSN writes some cycles later. */
9752 /* These are only significant for the fpu unit; writing a fp reg before
9753 the fpu has finished with it stalls the processor. */
9755 /* Reusing an integer register causes no problems. */
9756 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
9757 return 0;
9760 return cost;
9763 static int
9764 hypersparc_adjust_cost (rtx_insn *insn, int dtype, rtx_insn *dep_insn,
9765 int cost)
9767 enum attr_type insn_type, dep_type;
9768 rtx pat = PATTERN(insn);
9769 rtx dep_pat = PATTERN (dep_insn);
9771 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
9772 return cost;
9774 insn_type = get_attr_type (insn);
9775 dep_type = get_attr_type (dep_insn);
9777 switch (dtype)
9779 case 0:
9780 /* Data dependency; DEP_INSN writes a register that INSN reads some
9781 cycles later. */
9783 switch (insn_type)
9785 case TYPE_STORE:
9786 case TYPE_FPSTORE:
9787 /* Get the delay iff the address of the store is the dependence. */
9788 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
9789 return cost;
9791 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
9792 return cost;
9793 return cost + 3;
9795 case TYPE_LOAD:
9796 case TYPE_SLOAD:
9797 case TYPE_FPLOAD:
9798 /* If a load, then the dependence must be on the memory address. If
9799 the addresses aren't equal, then it might be a false dependency. */
9800 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
9802 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
9803 || GET_CODE (SET_DEST (dep_pat)) != MEM
9804 || GET_CODE (SET_SRC (pat)) != MEM
9805 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
9806 XEXP (SET_SRC (pat), 0)))
9807 return cost + 2;
9809 return cost + 8;
9811 break;
9813 case TYPE_BRANCH:
9814 /* Compare to branch latency is 0. There is no benefit from
9815 separating compare and branch. */
9816 if (dep_type == TYPE_COMPARE)
9817 return 0;
9818 /* Floating point compare to branch latency is less than
9819 compare to conditional move. */
9820 if (dep_type == TYPE_FPCMP)
9821 return cost - 1;
9822 break;
9823 default:
9824 break;
9826 break;
9828 case REG_DEP_ANTI:
9829 /* Anti-dependencies only penalize the fpu unit. */
9830 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
9831 return 0;
9832 break;
9834 default:
9835 break;
9838 return cost;
9841 static int
9842 sparc_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
9843 unsigned int)
9845 switch (sparc_cpu)
9847 case PROCESSOR_SUPERSPARC:
9848 cost = supersparc_adjust_cost (insn, dep_type, dep, cost);
9849 break;
9850 case PROCESSOR_HYPERSPARC:
9851 case PROCESSOR_SPARCLITE86X:
9852 cost = hypersparc_adjust_cost (insn, dep_type, dep, cost);
9853 break;
9854 default:
9855 break;
9857 return cost;
9860 static void
9861 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
9862 int sched_verbose ATTRIBUTE_UNUSED,
9863 int max_ready ATTRIBUTE_UNUSED)
9866 static int
9867 sparc_use_sched_lookahead (void)
9869 if (sparc_cpu == PROCESSOR_NIAGARA
9870 || sparc_cpu == PROCESSOR_NIAGARA2
9871 || sparc_cpu == PROCESSOR_NIAGARA3)
9872 return 0;
9873 if (sparc_cpu == PROCESSOR_NIAGARA4
9874 || sparc_cpu == PROCESSOR_NIAGARA7
9875 || sparc_cpu == PROCESSOR_M8)
9876 return 2;
9877 if (sparc_cpu == PROCESSOR_ULTRASPARC
9878 || sparc_cpu == PROCESSOR_ULTRASPARC3)
9879 return 4;
9880 if ((1 << sparc_cpu) &
9881 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
9882 (1 << PROCESSOR_SPARCLITE86X)))
9883 return 3;
9884 return 0;
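/* Editorial sketch: the final test above is the bitmask-membership
   idiom -- (1 << value) & set -- answering "is sparc_cpu one of these
   three processors?" with a single shift and AND.  A minimal
   standalone version (the constants 3, 5, 7 are arbitrary):

     static int in_set (int value)
     {
       return ((1 << value) & ((1 << 3) | (1 << 5) | (1 << 7))) != 0;
     }  */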
9887 static int
9888 sparc_issue_rate (void)
9890 switch (sparc_cpu)
9892 case PROCESSOR_NIAGARA:
9893 case PROCESSOR_NIAGARA2:
9894 case PROCESSOR_NIAGARA3:
9895 default:
9896 return 1;
9897 case PROCESSOR_NIAGARA4:
9898 case PROCESSOR_NIAGARA7:
9899 case PROCESSOR_V9:
9900 /* Assume V9 processors are capable of at least dual-issue. */
9901 return 2;
9902 case PROCESSOR_SUPERSPARC:
9903 return 3;
9904 case PROCESSOR_HYPERSPARC:
9905 case PROCESSOR_SPARCLITE86X:
9906 return 2;
9907 case PROCESSOR_ULTRASPARC:
9908 case PROCESSOR_ULTRASPARC3:
9909 case PROCESSOR_M8:
9910 return 4;
9914 static int
9915 set_extends (rtx_insn *insn)
9917 register rtx pat = PATTERN (insn);
9919 switch (GET_CODE (SET_SRC (pat)))
9921 /* Load and some shift instructions zero extend. */
9922 case MEM:
9923 case ZERO_EXTEND:
9924 /* sethi clears the high bits */
9925 case HIGH:
9926 /* LO_SUM is used with sethi. sethi cleared the high
9927 bits and the values used with lo_sum are positive */
9928 case LO_SUM:
9929 /* Store flag stores 0 or 1 */
9930 case LT: case LTU:
9931 case GT: case GTU:
9932 case LE: case LEU:
9933 case GE: case GEU:
9934 case EQ:
9935 case NE:
9936 return 1;
9937 case AND:
9939 rtx op0 = XEXP (SET_SRC (pat), 0);
9940 rtx op1 = XEXP (SET_SRC (pat), 1);
9941 if (GET_CODE (op1) == CONST_INT)
9942 return INTVAL (op1) >= 0;
9943 if (GET_CODE (op0) != REG)
9944 return 0;
9945 if (sparc_check_64 (op0, insn) == 1)
9946 return 1;
9947 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9949 case IOR:
9950 case XOR:
9952 rtx op0 = XEXP (SET_SRC (pat), 0);
9953 rtx op1 = XEXP (SET_SRC (pat), 1);
9954 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
9955 return 0;
9956 if (GET_CODE (op1) == CONST_INT)
9957 return INTVAL (op1) >= 0;
9958 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9960 case LSHIFTRT:
9961 return GET_MODE (SET_SRC (pat)) == SImode;
9962 /* Positive integers leave the high bits zero. */
9963 case CONST_INT:
9964 return !(INTVAL (SET_SRC (pat)) & 0x80000000);
9965 case ASHIFTRT:
9966 case SIGN_EXTEND:
9967 return - (GET_MODE (SET_SRC (pat)) == SImode);
9968 case REG:
9969 return sparc_check_64 (SET_SRC (pat), insn);
9970 default:
9971 return 0;
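/* Editorial examples of the classification above:

     (set (reg) (zero_extend ...))               ->  1  high bits zero
     (set (reg) (and (reg) (const_int 255)))     ->  1  positive mask
     (set (reg:SI) (ashiftrt:SI ...))            -> -1  sign extended
     any pattern the switch does not recognize   ->  0  unknown  */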
9975 /* We _ought_ to have only one kind per function, but... */
9976 static GTY(()) rtx sparc_addr_diff_list;
9977 static GTY(()) rtx sparc_addr_list;
9979 void
9980 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
9982 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9983 if (diff)
9984 sparc_addr_diff_list
9985 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
9986 else
9987 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
9990 static void
9991 sparc_output_addr_vec (rtx vec)
9993 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9994 int idx, vlen = XVECLEN (body, 0);
9996 #ifdef ASM_OUTPUT_ADDR_VEC_START
9997 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9998 #endif
10000 #ifdef ASM_OUTPUT_CASE_LABEL
10001 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10002 NEXT_INSN (lab));
10003 #else
10004 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10005 #endif
10007 for (idx = 0; idx < vlen; idx++)
10009 ASM_OUTPUT_ADDR_VEC_ELT
10010 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10013 #ifdef ASM_OUTPUT_ADDR_VEC_END
10014 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10015 #endif
10018 static void
10019 sparc_output_addr_diff_vec (rtx vec)
10021 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
10022 rtx base = XEXP (XEXP (body, 0), 0);
10023 int idx, vlen = XVECLEN (body, 1);
10025 #ifdef ASM_OUTPUT_ADDR_VEC_START
10026 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
10027 #endif
10029 #ifdef ASM_OUTPUT_CASE_LABEL
10030 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
10031 NEXT_INSN (lab));
10032 #else
10033 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10034 #endif
10036 for (idx = 0; idx < vlen; idx++)
10038 ASM_OUTPUT_ADDR_DIFF_ELT
10039 (asm_out_file,
10040 body,
10041 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10042 CODE_LABEL_NUMBER (base));
10045 #ifdef ASM_OUTPUT_ADDR_VEC_END
10046 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
10047 #endif
10050 static void
10051 sparc_output_deferred_case_vectors (void)
10053 rtx t;
10054 int align;
10056 if (sparc_addr_list == NULL_RTX
10057 && sparc_addr_diff_list == NULL_RTX)
10058 return;
10060 /* Align to cache line in the function's code section. */
10061 switch_to_section (current_function_section ());
10063 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10064 if (align > 0)
10065 ASM_OUTPUT_ALIGN (asm_out_file, align);
10067 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
10068 sparc_output_addr_vec (XEXP (t, 0));
10069 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
10070 sparc_output_addr_diff_vec (XEXP (t, 0));
10072 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
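/* Editorial sketch of the output: each deferred vector becomes an
   aligned label followed by one entry per case target, e.g. with
   32-bit ADDR_VEC elements (label numbers are made up):

     .align 4
   .L34:
     .word .L27
     .word .L29  */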
10075 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
10076 unknown. Return 1 if the high bits are zero, -1 if the register is
10077 sign extended. */
10078 int
10079 sparc_check_64 (rtx x, rtx_insn *insn)
10081 /* If a register is set only once it is safe to ignore insns this
10082 code does not know how to handle. The loop will either recognize
10083 the single set and return the correct value or fail to recognize
10084 it and return 0. */
10085 int set_once = 0;
10086 rtx y = x;
10088 gcc_assert (GET_CODE (x) == REG);
10090 if (GET_MODE (x) == DImode)
10091 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
10093 if (flag_expensive_optimizations
10094 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
10095 set_once = 1;
10097 if (insn == 0)
10099 if (set_once)
10100 insn = get_last_insn_anywhere ();
10101 else
10102 return 0;
10105 while ((insn = PREV_INSN (insn)))
10107 switch (GET_CODE (insn))
10109 case JUMP_INSN:
10110 case NOTE:
10111 break;
10112 case CODE_LABEL:
10113 case CALL_INSN:
10114 default:
10115 if (! set_once)
10116 return 0;
10117 break;
10118 case INSN:
10120 rtx pat = PATTERN (insn);
10121 if (GET_CODE (pat) != SET)
10122 return 0;
10123 if (rtx_equal_p (x, SET_DEST (pat)))
10124 return set_extends (insn);
10125 if (y && rtx_equal_p (y, SET_DEST (pat)))
10126 return set_extends (insn);
10127 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
10128 return 0;
10132 return 0;
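/* Editorial usage sketch: callers treat the result as a tri-state and
   usually act only on a definite "high bits are zero", e.g. in
   output_v8plus_shift below:

     if (sparc_check_64 (operands[1], insn) <= 0)
       output_asm_insn ("srl\t%L1, 0, %L1", operands);

   i.e. re-zero the low word unless it is already known clean.  */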
10135 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
10136 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
10138 const char *
10139 output_v8plus_shift (rtx_insn *insn, rtx *operands, const char *opcode)
10141 static char asm_code[60];
10143 /* The scratch register is only required when the destination
10144 register is not a 64-bit global or out register. */
10145 if (which_alternative != 2)
10146 operands[3] = operands[0];
10148 /* We can only shift by constants <= 63. */
10149 if (GET_CODE (operands[2]) == CONST_INT)
10150 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
10152 if (GET_CODE (operands[1]) == CONST_INT)
10154 output_asm_insn ("mov\t%1, %3", operands);
10156 else
10158 output_asm_insn ("sllx\t%H1, 32, %3", operands);
10159 if (sparc_check_64 (operands[1], insn) <= 0)
10160 output_asm_insn ("srl\t%L1, 0, %L1", operands);
10161 output_asm_insn ("or\t%L1, %3, %3", operands);
10164 strcpy (asm_code, opcode);
10166 if (which_alternative != 2)
10167 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
10168 else
10169 return
10170 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
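/* Editorial sketch of the emitted sequence for, say, a left shift
   (OPCODE == "sllx") when the scratch %3 aliases %0:

     sllx  %H1, 32, %0    ! high word into the upper half
     srl   %L1, 0, %L1    ! zero-extend the low word unless known clean
     or    %L1, %0, %0    ! combine into one 64-bit value
     sllx  %0, %2, %L0    ! the wide shift itself
     srlx  %L0, 32, %H0   ! split the result back into the pair  */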
10173 /* Output rtl to increment the profiler label LABELNO
10174 for profiling a function entry. */
10176 void
10177 sparc_profile_hook (int labelno)
10179 char buf[32];
10180 rtx lab, fun;
10182 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
10183 if (NO_PROFILE_COUNTERS)
10185 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
10187 else
10189 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10190 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
10191 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
10195 #ifdef TARGET_SOLARIS
10196 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
10198 static void
10199 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
10200 tree decl ATTRIBUTE_UNUSED)
10202 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
10204 solaris_elf_asm_comdat_section (name, flags, decl);
10205 return;
10208 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
10210 if (!(flags & SECTION_DEBUG))
10211 fputs (",#alloc", asm_out_file);
10212 if (flags & SECTION_WRITE)
10213 fputs (",#write", asm_out_file);
10214 if (flags & SECTION_TLS)
10215 fputs (",#tls", asm_out_file);
10216 if (flags & SECTION_CODE)
10217 fputs (",#execinstr", asm_out_file);
10219 if (flags & SECTION_NOTYPE)
10220 ;
10221 else if (flags & SECTION_BSS)
10222 fputs (",#nobits", asm_out_file);
10223 else
10224 fputs (",#progbits", asm_out_file);
10226 fputc ('\n', asm_out_file);
10228 #endif /* TARGET_SOLARIS */
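/* Editorial example of the directive emitted above for a writable,
   allocated data section (".mydata" is made up):

     .section ".mydata",#alloc,#write,#progbits

   with each #attr corresponding to one SECTION_* flag test.  */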
10230 /* We do not allow indirect calls to be optimized into sibling calls.
10232 We cannot use sibling calls when delayed branches are disabled
10233 because they will likely require the call delay slot to be filled.
10235 Also, on SPARC 32-bit we cannot emit a sibling call when the
10236 current function returns a structure. This is because the "unimp
10237 after call" convention would cause the callee to return to the
10238 wrong place. The generic code already disallows cases where the
10239 function being called returns a structure.
10241 It may seem strange how this last case could occur. Usually there
10242 is code after the call which jumps to epilogue code which dumps the
10243 return value into the struct return area. That ought to invalidate
10244 the sibling call right? Well, in the C++ case we can end up passing
10245 the pointer to the struct return area to a constructor (which returns
10246 void) and then nothing else happens. Such a sibling call would look
10247 valid without the added check here.
10249 VxWorks PIC PLT entries require the global pointer to be initialized
10250 on entry. We therefore can't emit sibling calls to them. */
10251 static bool
10252 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10254 return (decl
10255 && flag_delayed_branch
10256 && (TARGET_ARCH64 || ! cfun->returns_struct)
10257 && !(TARGET_VXWORKS_RTP
10258 && flag_pic
10259 && !targetm.binds_local_p (decl)));
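/* Editorial example of the 32-bit restriction described above: the
   tail call in g cannot become a sibling call under TARGET_ARCH32
   because the current function returns a structure:

     struct S { int a[4]; };
     struct S f (void);
     struct S g (void) { return f (); }  */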
10262 /* libfunc renaming. */
10264 static void
10265 sparc_init_libfuncs (void)
10267 if (TARGET_ARCH32)
10269 /* Use the subroutines that Sun's library provides for integer
10270 multiply and divide. The `*' prevents an underscore from
10271 being prepended by the compiler. .umul is a little faster
10272 than .mul. */
10273 set_optab_libfunc (smul_optab, SImode, "*.umul");
10274 set_optab_libfunc (sdiv_optab, SImode, "*.div");
10275 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
10276 set_optab_libfunc (smod_optab, SImode, "*.rem");
10277 set_optab_libfunc (umod_optab, SImode, "*.urem");
10279 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
10280 set_optab_libfunc (add_optab, TFmode, "_Q_add");
10281 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
10282 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
10283 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
10284 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
10286 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10287 is because with soft-float, the SFmode and DFmode sqrt
10288 instructions will be absent, and the compiler will notice and
10289 try to use the TFmode sqrt instruction for calls to the
10290 builtin function sqrt, but this fails. */
10291 if (TARGET_FPU)
10292 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
10294 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
10295 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
10296 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
10297 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
10298 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
10299 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
10301 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
10302 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
10303 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
10304 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
10306 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
10307 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
10308 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
10309 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
10311 if (DITF_CONVERSION_LIBFUNCS)
10313 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
10314 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
10315 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
10316 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
10319 if (SUN_CONVERSION_LIBFUNCS)
10321 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
10322 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
10323 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
10324 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
10327 if (TARGET_ARCH64)
10329 /* In the SPARC 64bit ABI, SImode multiply and divide functions
10330 do not exist in the library. Make sure the compiler does not
10331 emit calls to them by accident. (It should always use the
10332 hardware instructions.) */
10333 set_optab_libfunc (smul_optab, SImode, 0);
10334 set_optab_libfunc (sdiv_optab, SImode, 0);
10335 set_optab_libfunc (udiv_optab, SImode, 0);
10336 set_optab_libfunc (smod_optab, SImode, 0);
10337 set_optab_libfunc (umod_optab, SImode, 0);
10339 if (SUN_INTEGER_MULTIPLY_64)
10341 set_optab_libfunc (smul_optab, DImode, "__mul64");
10342 set_optab_libfunc (sdiv_optab, DImode, "__div64");
10343 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
10344 set_optab_libfunc (smod_optab, DImode, "__rem64");
10345 set_optab_libfunc (umod_optab, DImode, "__urem64");
10348 if (SUN_CONVERSION_LIBFUNCS)
10350 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
10351 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
10352 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
10353 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
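/* Editorial example: with the TFmode optabs above in place, compiling

     long double mul (long double x, long double y) { return x * y; }

   for 32-bit SPARC emits a call to _Q_mul rather than open-coding the
   quad-precision multiplication.  */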
10358 /* SPARC builtins. */
10359 enum sparc_builtins
10361 /* FPU builtins. */
10362 SPARC_BUILTIN_LDFSR,
10363 SPARC_BUILTIN_STFSR,
10365 /* VIS 1.0 builtins. */
10366 SPARC_BUILTIN_FPACK16,
10367 SPARC_BUILTIN_FPACK32,
10368 SPARC_BUILTIN_FPACKFIX,
10369 SPARC_BUILTIN_FEXPAND,
10370 SPARC_BUILTIN_FPMERGE,
10371 SPARC_BUILTIN_FMUL8X16,
10372 SPARC_BUILTIN_FMUL8X16AU,
10373 SPARC_BUILTIN_FMUL8X16AL,
10374 SPARC_BUILTIN_FMUL8SUX16,
10375 SPARC_BUILTIN_FMUL8ULX16,
10376 SPARC_BUILTIN_FMULD8SUX16,
10377 SPARC_BUILTIN_FMULD8ULX16,
10378 SPARC_BUILTIN_FALIGNDATAV4HI,
10379 SPARC_BUILTIN_FALIGNDATAV8QI,
10380 SPARC_BUILTIN_FALIGNDATAV2SI,
10381 SPARC_BUILTIN_FALIGNDATADI,
10382 SPARC_BUILTIN_WRGSR,
10383 SPARC_BUILTIN_RDGSR,
10384 SPARC_BUILTIN_ALIGNADDR,
10385 SPARC_BUILTIN_ALIGNADDRL,
10386 SPARC_BUILTIN_PDIST,
10387 SPARC_BUILTIN_EDGE8,
10388 SPARC_BUILTIN_EDGE8L,
10389 SPARC_BUILTIN_EDGE16,
10390 SPARC_BUILTIN_EDGE16L,
10391 SPARC_BUILTIN_EDGE32,
10392 SPARC_BUILTIN_EDGE32L,
10393 SPARC_BUILTIN_FCMPLE16,
10394 SPARC_BUILTIN_FCMPLE32,
10395 SPARC_BUILTIN_FCMPNE16,
10396 SPARC_BUILTIN_FCMPNE32,
10397 SPARC_BUILTIN_FCMPGT16,
10398 SPARC_BUILTIN_FCMPGT32,
10399 SPARC_BUILTIN_FCMPEQ16,
10400 SPARC_BUILTIN_FCMPEQ32,
10401 SPARC_BUILTIN_FPADD16,
10402 SPARC_BUILTIN_FPADD16S,
10403 SPARC_BUILTIN_FPADD32,
10404 SPARC_BUILTIN_FPADD32S,
10405 SPARC_BUILTIN_FPSUB16,
10406 SPARC_BUILTIN_FPSUB16S,
10407 SPARC_BUILTIN_FPSUB32,
10408 SPARC_BUILTIN_FPSUB32S,
10409 SPARC_BUILTIN_ARRAY8,
10410 SPARC_BUILTIN_ARRAY16,
10411 SPARC_BUILTIN_ARRAY32,
10413 /* VIS 2.0 builtins. */
10414 SPARC_BUILTIN_EDGE8N,
10415 SPARC_BUILTIN_EDGE8LN,
10416 SPARC_BUILTIN_EDGE16N,
10417 SPARC_BUILTIN_EDGE16LN,
10418 SPARC_BUILTIN_EDGE32N,
10419 SPARC_BUILTIN_EDGE32LN,
10420 SPARC_BUILTIN_BMASK,
10421 SPARC_BUILTIN_BSHUFFLEV4HI,
10422 SPARC_BUILTIN_BSHUFFLEV8QI,
10423 SPARC_BUILTIN_BSHUFFLEV2SI,
10424 SPARC_BUILTIN_BSHUFFLEDI,
10426 /* VIS 3.0 builtins. */
10427 SPARC_BUILTIN_CMASK8,
10428 SPARC_BUILTIN_CMASK16,
10429 SPARC_BUILTIN_CMASK32,
10430 SPARC_BUILTIN_FCHKSM16,
10431 SPARC_BUILTIN_FSLL16,
10432 SPARC_BUILTIN_FSLAS16,
10433 SPARC_BUILTIN_FSRL16,
10434 SPARC_BUILTIN_FSRA16,
10435 SPARC_BUILTIN_FSLL32,
10436 SPARC_BUILTIN_FSLAS32,
10437 SPARC_BUILTIN_FSRL32,
10438 SPARC_BUILTIN_FSRA32,
10439 SPARC_BUILTIN_PDISTN,
10440 SPARC_BUILTIN_FMEAN16,
10441 SPARC_BUILTIN_FPADD64,
10442 SPARC_BUILTIN_FPSUB64,
10443 SPARC_BUILTIN_FPADDS16,
10444 SPARC_BUILTIN_FPADDS16S,
10445 SPARC_BUILTIN_FPSUBS16,
10446 SPARC_BUILTIN_FPSUBS16S,
10447 SPARC_BUILTIN_FPADDS32,
10448 SPARC_BUILTIN_FPADDS32S,
10449 SPARC_BUILTIN_FPSUBS32,
10450 SPARC_BUILTIN_FPSUBS32S,
10451 SPARC_BUILTIN_FUCMPLE8,
10452 SPARC_BUILTIN_FUCMPNE8,
10453 SPARC_BUILTIN_FUCMPGT8,
10454 SPARC_BUILTIN_FUCMPEQ8,
10455 SPARC_BUILTIN_FHADDS,
10456 SPARC_BUILTIN_FHADDD,
10457 SPARC_BUILTIN_FHSUBS,
10458 SPARC_BUILTIN_FHSUBD,
10459 SPARC_BUILTIN_FNHADDS,
10460 SPARC_BUILTIN_FNHADDD,
10461 SPARC_BUILTIN_UMULXHI,
10462 SPARC_BUILTIN_XMULX,
10463 SPARC_BUILTIN_XMULXHI,
10465 /* VIS 4.0 builtins. */
10466 SPARC_BUILTIN_FPADD8,
10467 SPARC_BUILTIN_FPADDS8,
10468 SPARC_BUILTIN_FPADDUS8,
10469 SPARC_BUILTIN_FPADDUS16,
10470 SPARC_BUILTIN_FPCMPLE8,
10471 SPARC_BUILTIN_FPCMPGT8,
10472 SPARC_BUILTIN_FPCMPULE16,
10473 SPARC_BUILTIN_FPCMPUGT16,
10474 SPARC_BUILTIN_FPCMPULE32,
10475 SPARC_BUILTIN_FPCMPUGT32,
10476 SPARC_BUILTIN_FPMAX8,
10477 SPARC_BUILTIN_FPMAX16,
10478 SPARC_BUILTIN_FPMAX32,
10479 SPARC_BUILTIN_FPMAXU8,
10480 SPARC_BUILTIN_FPMAXU16,
10481 SPARC_BUILTIN_FPMAXU32,
10482 SPARC_BUILTIN_FPMIN8,
10483 SPARC_BUILTIN_FPMIN16,
10484 SPARC_BUILTIN_FPMIN32,
10485 SPARC_BUILTIN_FPMINU8,
10486 SPARC_BUILTIN_FPMINU16,
10487 SPARC_BUILTIN_FPMINU32,
10488 SPARC_BUILTIN_FPSUB8,
10489 SPARC_BUILTIN_FPSUBS8,
10490 SPARC_BUILTIN_FPSUBUS8,
10491 SPARC_BUILTIN_FPSUBUS16,
10493 /* VIS 4.0B builtins. */
10495 /* Note that all the DICTUNPACK* entries should be kept
10496 contiguous. */
10497 SPARC_BUILTIN_FIRST_DICTUNPACK,
10498 SPARC_BUILTIN_DICTUNPACK8 = SPARC_BUILTIN_FIRST_DICTUNPACK,
10499 SPARC_BUILTIN_DICTUNPACK16,
10500 SPARC_BUILTIN_DICTUNPACK32,
10501 SPARC_BUILTIN_LAST_DICTUNPACK = SPARC_BUILTIN_DICTUNPACK32,
10503 /* Note that all the FPCMP*SHL entries should be kept
10504 contiguous. */
10505 SPARC_BUILTIN_FIRST_FPCMPSHL,
10506 SPARC_BUILTIN_FPCMPLE8SHL = SPARC_BUILTIN_FIRST_FPCMPSHL,
10507 SPARC_BUILTIN_FPCMPGT8SHL,
10508 SPARC_BUILTIN_FPCMPEQ8SHL,
10509 SPARC_BUILTIN_FPCMPNE8SHL,
10510 SPARC_BUILTIN_FPCMPLE16SHL,
10511 SPARC_BUILTIN_FPCMPGT16SHL,
10512 SPARC_BUILTIN_FPCMPEQ16SHL,
10513 SPARC_BUILTIN_FPCMPNE16SHL,
10514 SPARC_BUILTIN_FPCMPLE32SHL,
10515 SPARC_BUILTIN_FPCMPGT32SHL,
10516 SPARC_BUILTIN_FPCMPEQ32SHL,
10517 SPARC_BUILTIN_FPCMPNE32SHL,
10518 SPARC_BUILTIN_FPCMPULE8SHL,
10519 SPARC_BUILTIN_FPCMPUGT8SHL,
10520 SPARC_BUILTIN_FPCMPULE16SHL,
10521 SPARC_BUILTIN_FPCMPUGT16SHL,
10522 SPARC_BUILTIN_FPCMPULE32SHL,
10523 SPARC_BUILTIN_FPCMPUGT32SHL,
10524 SPARC_BUILTIN_FPCMPDE8SHL,
10525 SPARC_BUILTIN_FPCMPDE16SHL,
10526 SPARC_BUILTIN_FPCMPDE32SHL,
10527 SPARC_BUILTIN_FPCMPUR8SHL,
10528 SPARC_BUILTIN_FPCMPUR16SHL,
10529 SPARC_BUILTIN_FPCMPUR32SHL,
10530 SPARC_BUILTIN_LAST_FPCMPSHL = SPARC_BUILTIN_FPCMPUR32SHL,
10532 SPARC_BUILTIN_MAX
10535 static GTY (()) tree sparc_builtins[(int) SPARC_BUILTIN_MAX];
10536 static enum insn_code sparc_builtins_icode[(int) SPARC_BUILTIN_MAX];
10538 /* Return true if OPVAL can be used for operand OPNUM of instruction ICODE.
10539 The instruction should require a constant operand of some sort. The
10540 function prints an error if OPVAL is not valid. */
10542 static int
10543 check_constant_argument (enum insn_code icode, int opnum, rtx opval)
10545 if (GET_CODE (opval) != CONST_INT)
10547 error ("%qs expects a constant argument", insn_data[icode].name);
10548 return false;
10551 if (!(*insn_data[icode].operand[opnum].predicate) (opval, VOIDmode))
10553 error ("constant argument out of range for %qs", insn_data[icode].name);
10554 return false;
10556 return true;
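/* Editorial usage sketch: the expander below calls this for builtins
   whose trailing operand must be a literal, so passing a variable, as
   in

     __builtin_vis_dictunpack8 (d, n);   -- n not a compile-time constant

   is diagnosed with "expects a constant argument" instead of silently
   producing an unmatchable insn.  */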
10559 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10560 function decl or NULL_TREE if the builtin was not added. */
10562 static tree
10563 def_builtin (const char *name, enum insn_code icode, enum sparc_builtins code,
10564 tree type)
10566 tree t
10567 = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);
10569 if (t)
10571 sparc_builtins[code] = t;
10572 sparc_builtins_icode[code] = icode;
10575 return t;
10578 /* Likewise, but also marks the function as "const". */
10580 static tree
10581 def_builtin_const (const char *name, enum insn_code icode,
10582 enum sparc_builtins code, tree type)
10584 tree t = def_builtin (name, icode, code, type);
10586 if (t)
10587 TREE_READONLY (t) = 1;
10589 return t;
10592 /* Implement the TARGET_INIT_BUILTINS target hook.
10593 Create builtin functions for special SPARC instructions. */
10595 static void
10596 sparc_init_builtins (void)
10598 if (TARGET_FPU)
10599 sparc_fpu_init_builtins ();
10601 if (TARGET_VIS)
10602 sparc_vis_init_builtins ();
10605 /* Create builtin functions for FPU instructions. */
10607 static void
10608 sparc_fpu_init_builtins (void)
10610 tree ftype
10611 = build_function_type_list (void_type_node,
10612 build_pointer_type (unsigned_type_node), 0);
10613 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr,
10614 SPARC_BUILTIN_LDFSR, ftype);
10615 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr,
10616 SPARC_BUILTIN_STFSR, ftype);
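/* Editorial usage sketch for the two builtins just defined (stfsr
   stores %fsr to memory, ldfsr loads it back):

     unsigned int fsr;
     __builtin_store_fsr (&fsr);   -- read the current %fsr
     fsr &= ~0x1fu;                -- e.g. clear the cexc field
     __builtin_load_fsr (&fsr);    -- write it back  */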
10619 /* Create builtin functions for VIS instructions. */
10621 static void
10622 sparc_vis_init_builtins (void)
10624 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
10625 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
10626 tree v4hi = build_vector_type (intHI_type_node, 4);
10627 tree v2hi = build_vector_type (intHI_type_node, 2);
10628 tree v2si = build_vector_type (intSI_type_node, 2);
10629 tree v1si = build_vector_type (intSI_type_node, 1);
10631 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
10632 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
10633 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
10634 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
10635 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
10636 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
10637 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
10638 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
10639 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
10640 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
10641 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
10642 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
10643 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
10644 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
10645 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
10646 v8qi, v8qi,
10647 intDI_type_node, 0);
10648 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
10649 v8qi, v8qi, 0);
10650 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
10651 v8qi, v8qi, 0);
10652 tree v8qi_ftype_df_si = build_function_type_list (v8qi, double_type_node,
10653 intSI_type_node, 0);
10654 tree v4hi_ftype_df_si = build_function_type_list (v4hi, double_type_node,
10655 intSI_type_node, 0);
10656 tree v2si_ftype_df_si = build_function_type_list (v2si, double_type_node,
10657 intSI_type_node, 0);
10658 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
10659 intDI_type_node,
10660 intDI_type_node, 0);
10661 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
10662 intSI_type_node,
10663 intSI_type_node, 0);
10664 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
10665 ptr_type_node,
10666 intSI_type_node, 0);
10667 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
10668 ptr_type_node,
10669 intDI_type_node, 0);
10670 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
10671 ptr_type_node,
10672 ptr_type_node, 0);
10673 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
10674 ptr_type_node,
10675 ptr_type_node, 0);
10676 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
10677 v4hi, v4hi, 0);
10678 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
10679 v2si, v2si, 0);
10680 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
10681 v4hi, v4hi, 0);
10682 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
10683 v2si, v2si, 0);
10684 tree void_ftype_di = build_function_type_list (void_type_node,
10685 intDI_type_node, 0);
10686 tree di_ftype_void = build_function_type_list (intDI_type_node,
10687 void_type_node, 0);
10688 tree void_ftype_si = build_function_type_list (void_type_node,
10689 intSI_type_node, 0);
10690 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
10691 float_type_node,
10692 float_type_node, 0);
10693 tree df_ftype_df_df = build_function_type_list (double_type_node,
10694 double_type_node,
10695 double_type_node, 0);
10697 /* Packing and expanding vectors. */
10698 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
10699 SPARC_BUILTIN_FPACK16, v4qi_ftype_v4hi);
10700 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
10701 SPARC_BUILTIN_FPACK32, v8qi_ftype_v2si_v8qi);
10702 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
10703 SPARC_BUILTIN_FPACKFIX, v2hi_ftype_v2si);
10704 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
10705 SPARC_BUILTIN_FEXPAND, v4hi_ftype_v4qi);
10706 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
10707 SPARC_BUILTIN_FPMERGE, v8qi_ftype_v4qi_v4qi);
10709 /* Multiplications. */
10710 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
10711 SPARC_BUILTIN_FMUL8X16, v4hi_ftype_v4qi_v4hi);
10712 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
10713 SPARC_BUILTIN_FMUL8X16AU, v4hi_ftype_v4qi_v2hi);
10714 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
10715 SPARC_BUILTIN_FMUL8X16AL, v4hi_ftype_v4qi_v2hi);
10716 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
10717 SPARC_BUILTIN_FMUL8SUX16, v4hi_ftype_v8qi_v4hi);
10718 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
10719 SPARC_BUILTIN_FMUL8ULX16, v4hi_ftype_v8qi_v4hi);
10720 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
10721 SPARC_BUILTIN_FMULD8SUX16, v2si_ftype_v4qi_v2hi);
10722 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
10723 SPARC_BUILTIN_FMULD8ULX16, v2si_ftype_v4qi_v2hi);
10725 /* Data aligning. */
10726 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
10727 SPARC_BUILTIN_FALIGNDATAV4HI, v4hi_ftype_v4hi_v4hi);
10728 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
10729 SPARC_BUILTIN_FALIGNDATAV8QI, v8qi_ftype_v8qi_v8qi);
10730 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
10731 SPARC_BUILTIN_FALIGNDATAV2SI, v2si_ftype_v2si_v2si);
10732 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
10733 SPARC_BUILTIN_FALIGNDATADI, di_ftype_di_di);
10735 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
10736 SPARC_BUILTIN_WRGSR, void_ftype_di);
10737 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
10738 SPARC_BUILTIN_RDGSR, di_ftype_void);
10740 if (TARGET_ARCH64)
10742 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
10743 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_di);
10744 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
10745 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_di);
10747 else
10749 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
10750 SPARC_BUILTIN_ALIGNADDR, ptr_ftype_ptr_si);
10751 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
10752 SPARC_BUILTIN_ALIGNADDRL, ptr_ftype_ptr_si);
10755 /* Pixel distance. */
10756 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
10757 SPARC_BUILTIN_PDIST, di_ftype_v8qi_v8qi_di);
10759 /* Edge handling. */
10760 if (TARGET_ARCH64)
10762 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
10763 SPARC_BUILTIN_EDGE8, di_ftype_ptr_ptr);
10764 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
10765 SPARC_BUILTIN_EDGE8L, di_ftype_ptr_ptr);
10766 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
10767 SPARC_BUILTIN_EDGE16, di_ftype_ptr_ptr);
10768 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
10769 SPARC_BUILTIN_EDGE16L, di_ftype_ptr_ptr);
10770 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
10771 SPARC_BUILTIN_EDGE32, di_ftype_ptr_ptr);
10772 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
10773 SPARC_BUILTIN_EDGE32L, di_ftype_ptr_ptr);
10775 else
10777 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
10778 SPARC_BUILTIN_EDGE8, si_ftype_ptr_ptr);
10779 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
10780 SPARC_BUILTIN_EDGE8L, si_ftype_ptr_ptr);
10781 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
10782 SPARC_BUILTIN_EDGE16, si_ftype_ptr_ptr);
10783 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
10784 SPARC_BUILTIN_EDGE16L, si_ftype_ptr_ptr);
10785 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
10786 SPARC_BUILTIN_EDGE32, si_ftype_ptr_ptr);
10787 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
10788 SPARC_BUILTIN_EDGE32L, si_ftype_ptr_ptr);
10791 /* Pixel compare. */
10792 if (TARGET_ARCH64)
10794 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
10795 SPARC_BUILTIN_FCMPLE16, di_ftype_v4hi_v4hi);
10796 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
10797 SPARC_BUILTIN_FCMPLE32, di_ftype_v2si_v2si);
10798 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
10799 SPARC_BUILTIN_FCMPNE16, di_ftype_v4hi_v4hi);
10800 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
10801 SPARC_BUILTIN_FCMPNE32, di_ftype_v2si_v2si);
10802 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
10803 SPARC_BUILTIN_FCMPGT16, di_ftype_v4hi_v4hi);
10804 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
10805 SPARC_BUILTIN_FCMPGT32, di_ftype_v2si_v2si);
10806 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
10807 SPARC_BUILTIN_FCMPEQ16, di_ftype_v4hi_v4hi);
10808 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
10809 SPARC_BUILTIN_FCMPEQ32, di_ftype_v2si_v2si);
10811 else
10813 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
10814 SPARC_BUILTIN_FCMPLE16, si_ftype_v4hi_v4hi);
10815 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
10816 SPARC_BUILTIN_FCMPLE32, si_ftype_v2si_v2si);
10817 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
10818 SPARC_BUILTIN_FCMPNE16, si_ftype_v4hi_v4hi);
10819 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
10820 SPARC_BUILTIN_FCMPNE32, si_ftype_v2si_v2si);
10821 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
10822 SPARC_BUILTIN_FCMPGT16, si_ftype_v4hi_v4hi);
10823 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
10824 SPARC_BUILTIN_FCMPGT32, si_ftype_v2si_v2si);
10825 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
10826 SPARC_BUILTIN_FCMPEQ16, si_ftype_v4hi_v4hi);
10827 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
10828 SPARC_BUILTIN_FCMPEQ32, si_ftype_v2si_v2si);
10831 /* Addition and subtraction. */
10832 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
10833 SPARC_BUILTIN_FPADD16, v4hi_ftype_v4hi_v4hi);
10834 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
10835 SPARC_BUILTIN_FPADD16S, v2hi_ftype_v2hi_v2hi);
10836 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
10837 SPARC_BUILTIN_FPADD32, v2si_ftype_v2si_v2si);
10838 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
10839 SPARC_BUILTIN_FPADD32S, v1si_ftype_v1si_v1si);
10840 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
10841 SPARC_BUILTIN_FPSUB16, v4hi_ftype_v4hi_v4hi);
10842 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
10843 SPARC_BUILTIN_FPSUB16S, v2hi_ftype_v2hi_v2hi);
10844 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
10845 SPARC_BUILTIN_FPSUB32, v2si_ftype_v2si_v2si);
10846 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
10847 SPARC_BUILTIN_FPSUB32S, v1si_ftype_v1si_v1si);
10849 /* Three-dimensional array addressing. */
10850 if (TARGET_ARCH64)
10852 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
10853 SPARC_BUILTIN_ARRAY8, di_ftype_di_di);
10854 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
10855 SPARC_BUILTIN_ARRAY16, di_ftype_di_di);
10856 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
10857 SPARC_BUILTIN_ARRAY32, di_ftype_di_di);
10859 else
10861 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
10862 SPARC_BUILTIN_ARRAY8, si_ftype_si_si);
10863 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
10864 SPARC_BUILTIN_ARRAY16, si_ftype_si_si);
10865 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
10866 SPARC_BUILTIN_ARRAY32, si_ftype_si_si);
10869 if (TARGET_VIS2)
10871 /* Edge handling. */
10872 if (TARGET_ARCH64)
10874 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
10875 SPARC_BUILTIN_EDGE8N, di_ftype_ptr_ptr);
10876 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
10877 SPARC_BUILTIN_EDGE8LN, di_ftype_ptr_ptr);
10878 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
10879 SPARC_BUILTIN_EDGE16N, di_ftype_ptr_ptr);
10880 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
10881 SPARC_BUILTIN_EDGE16LN, di_ftype_ptr_ptr);
10882 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
10883 SPARC_BUILTIN_EDGE32N, di_ftype_ptr_ptr);
10884 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
10885 SPARC_BUILTIN_EDGE32LN, di_ftype_ptr_ptr);
10887 else
10889 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
10890 SPARC_BUILTIN_EDGE8N, si_ftype_ptr_ptr);
10891 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
10892 SPARC_BUILTIN_EDGE8LN, si_ftype_ptr_ptr);
10893 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
10894 SPARC_BUILTIN_EDGE16N, si_ftype_ptr_ptr);
10895 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
10896 SPARC_BUILTIN_EDGE16LN, si_ftype_ptr_ptr);
10897 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
10898 SPARC_BUILTIN_EDGE32N, si_ftype_ptr_ptr);
10899 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
10900 SPARC_BUILTIN_EDGE32LN, si_ftype_ptr_ptr);
10903 /* Byte mask and shuffle. */
10904 if (TARGET_ARCH64)
10905 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
10906 SPARC_BUILTIN_BMASK, di_ftype_di_di);
10907 else
10908 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
10909 SPARC_BUILTIN_BMASK, si_ftype_si_si);
10910 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
10911 SPARC_BUILTIN_BSHUFFLEV4HI, v4hi_ftype_v4hi_v4hi);
10912 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
10913 SPARC_BUILTIN_BSHUFFLEV8QI, v8qi_ftype_v8qi_v8qi);
10914 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
10915 SPARC_BUILTIN_BSHUFFLEV2SI, v2si_ftype_v2si_v2si);
10916 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
10917 SPARC_BUILTIN_BSHUFFLEDI, di_ftype_di_di);
10920 if (TARGET_VIS3)
10922 if (TARGET_ARCH64)
10924 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
10925 SPARC_BUILTIN_CMASK8, void_ftype_di);
10926 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
10927 SPARC_BUILTIN_CMASK16, void_ftype_di);
10928 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
10929 SPARC_BUILTIN_CMASK32, void_ftype_di);
10931 else
10933 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
10934 SPARC_BUILTIN_CMASK8, void_ftype_si);
10935 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
10936 SPARC_BUILTIN_CMASK16, void_ftype_si);
10937 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
10938 SPARC_BUILTIN_CMASK32, void_ftype_si);
10941 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
10942 SPARC_BUILTIN_FCHKSM16, v4hi_ftype_v4hi_v4hi);
10944 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
10945 SPARC_BUILTIN_FSLL16, v4hi_ftype_v4hi_v4hi);
10946 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
10947 SPARC_BUILTIN_FSLAS16, v4hi_ftype_v4hi_v4hi);
10948 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
10949 SPARC_BUILTIN_FSRL16, v4hi_ftype_v4hi_v4hi);
10950 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
10951 SPARC_BUILTIN_FSRA16, v4hi_ftype_v4hi_v4hi);
10952 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
10953 SPARC_BUILTIN_FSLL32, v2si_ftype_v2si_v2si);
10954 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
10955 SPARC_BUILTIN_FSLAS32, v2si_ftype_v2si_v2si);
10956 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
10957 SPARC_BUILTIN_FSRL32, v2si_ftype_v2si_v2si);
10958 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
10959 SPARC_BUILTIN_FSRA32, v2si_ftype_v2si_v2si);
10961 if (TARGET_ARCH64)
10962 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
10963 SPARC_BUILTIN_PDISTN, di_ftype_v8qi_v8qi);
10964 else
10965 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
10966 SPARC_BUILTIN_PDISTN, si_ftype_v8qi_v8qi);
10968 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
10969 SPARC_BUILTIN_FMEAN16, v4hi_ftype_v4hi_v4hi);
10970 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
10971 SPARC_BUILTIN_FPADD64, di_ftype_di_di);
10972 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
10973 SPARC_BUILTIN_FPSUB64, di_ftype_di_di);
10975 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
10976 SPARC_BUILTIN_FPADDS16, v4hi_ftype_v4hi_v4hi);
10977 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
10978 SPARC_BUILTIN_FPADDS16S, v2hi_ftype_v2hi_v2hi);
10979 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
10980 SPARC_BUILTIN_FPSUBS16, v4hi_ftype_v4hi_v4hi);
10981 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
10982 SPARC_BUILTIN_FPSUBS16S, v2hi_ftype_v2hi_v2hi);
10983 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
10984 SPARC_BUILTIN_FPADDS32, v2si_ftype_v2si_v2si);
10985 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
10986 SPARC_BUILTIN_FPADDS32S, v1si_ftype_v1si_v1si);
10987 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
10988 SPARC_BUILTIN_FPSUBS32, v2si_ftype_v2si_v2si);
10989 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
10990 SPARC_BUILTIN_FPSUBS32S, v1si_ftype_v1si_v1si);
10992 if (TARGET_ARCH64)
10994 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
10995 SPARC_BUILTIN_FUCMPLE8, di_ftype_v8qi_v8qi);
10996 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
10997 SPARC_BUILTIN_FUCMPNE8, di_ftype_v8qi_v8qi);
10998 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
10999 SPARC_BUILTIN_FUCMPGT8, di_ftype_v8qi_v8qi);
11000 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
11001 SPARC_BUILTIN_FUCMPEQ8, di_ftype_v8qi_v8qi);
11003 else
11005 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
11006 SPARC_BUILTIN_FUCMPLE8, si_ftype_v8qi_v8qi);
11007 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
11008 SPARC_BUILTIN_FUCMPNE8, si_ftype_v8qi_v8qi);
11009 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
11010 SPARC_BUILTIN_FUCMPGT8, si_ftype_v8qi_v8qi);
11011 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
11012 SPARC_BUILTIN_FUCMPEQ8, si_ftype_v8qi_v8qi);
11015 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
11016 SPARC_BUILTIN_FHADDS, sf_ftype_sf_sf);
11017 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
11018 SPARC_BUILTIN_FHADDD, df_ftype_df_df);
11019 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
11020 SPARC_BUILTIN_FHSUBS, sf_ftype_sf_sf);
11021 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
11022 SPARC_BUILTIN_FHSUBD, df_ftype_df_df);
11023 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
11024 SPARC_BUILTIN_FNHADDS, sf_ftype_sf_sf);
11025 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
11026 SPARC_BUILTIN_FNHADDD, df_ftype_df_df);
11028 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
11029 SPARC_BUILTIN_UMULXHI, di_ftype_di_di);
11030 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
11031 SPARC_BUILTIN_XMULX, di_ftype_di_di);
11032 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
11033 SPARC_BUILTIN_XMULXHI, di_ftype_di_di);
11036 if (TARGET_VIS4)
11038 def_builtin_const ("__builtin_vis_fpadd8", CODE_FOR_addv8qi3,
11039 SPARC_BUILTIN_FPADD8, v8qi_ftype_v8qi_v8qi);
11040 def_builtin_const ("__builtin_vis_fpadds8", CODE_FOR_ssaddv8qi3,
11041 SPARC_BUILTIN_FPADDS8, v8qi_ftype_v8qi_v8qi);
11042 def_builtin_const ("__builtin_vis_fpaddus8", CODE_FOR_usaddv8qi3,
11043 SPARC_BUILTIN_FPADDUS8, v8qi_ftype_v8qi_v8qi);
11044 def_builtin_const ("__builtin_vis_fpaddus16", CODE_FOR_usaddv4hi3,
11045 SPARC_BUILTIN_FPADDUS16, v4hi_ftype_v4hi_v4hi);
11048 if (TARGET_ARCH64)
11050 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8di_vis,
11051 SPARC_BUILTIN_FPCMPLE8, di_ftype_v8qi_v8qi);
11052 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8di_vis,
11053 SPARC_BUILTIN_FPCMPGT8, di_ftype_v8qi_v8qi);
11054 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16di_vis,
11055 SPARC_BUILTIN_FPCMPULE16, di_ftype_v4hi_v4hi);
11056 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16di_vis,
11057 SPARC_BUILTIN_FPCMPUGT16, di_ftype_v4hi_v4hi);
11058 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32di_vis,
11059 SPARC_BUILTIN_FPCMPULE32, di_ftype_v2si_v2si);
11060 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32di_vis,
11061 SPARC_BUILTIN_FPCMPUGT32, di_ftype_v2si_v2si);
11063 else
11065 def_builtin_const ("__builtin_vis_fpcmple8", CODE_FOR_fpcmple8si_vis,
11066 SPARC_BUILTIN_FPCMPLE8, si_ftype_v8qi_v8qi);
11067 def_builtin_const ("__builtin_vis_fpcmpgt8", CODE_FOR_fpcmpgt8si_vis,
11068 SPARC_BUILTIN_FPCMPGT8, si_ftype_v8qi_v8qi);
11069 def_builtin_const ("__builtin_vis_fpcmpule16", CODE_FOR_fpcmpule16si_vis,
11070 SPARC_BUILTIN_FPCMPULE16, si_ftype_v4hi_v4hi);
11071 def_builtin_const ("__builtin_vis_fpcmpugt16", CODE_FOR_fpcmpugt16si_vis,
11072 SPARC_BUILTIN_FPCMPUGT16, si_ftype_v4hi_v4hi);
11073 def_builtin_const ("__builtin_vis_fpcmpule32", CODE_FOR_fpcmpule32si_vis,
11074 SPARC_BUILTIN_FPCMPULE32, si_ftype_v2si_v2si);
11075 def_builtin_const ("__builtin_vis_fpcmpugt32", CODE_FOR_fpcmpugt32si_vis,
11076 SPARC_BUILTIN_FPCMPUGT32, si_ftype_v2si_v2si);
11079 def_builtin_const ("__builtin_vis_fpmax8", CODE_FOR_maxv8qi3,
11080 SPARC_BUILTIN_FPMAX8, v8qi_ftype_v8qi_v8qi);
11081 def_builtin_const ("__builtin_vis_fpmax16", CODE_FOR_maxv4hi3,
11082 SPARC_BUILTIN_FPMAX16, v4hi_ftype_v4hi_v4hi);
11083 def_builtin_const ("__builtin_vis_fpmax32", CODE_FOR_maxv2si3,
11084 SPARC_BUILTIN_FPMAX32, v2si_ftype_v2si_v2si);
11085 def_builtin_const ("__builtin_vis_fpmaxu8", CODE_FOR_maxuv8qi3,
11086 SPARC_BUILTIN_FPMAXU8, v8qi_ftype_v8qi_v8qi);
11087 def_builtin_const ("__builtin_vis_fpmaxu16", CODE_FOR_maxuv4hi3,
11088 SPARC_BUILTIN_FPMAXU16, v4hi_ftype_v4hi_v4hi);
11089 def_builtin_const ("__builtin_vis_fpmaxu32", CODE_FOR_maxuv2si3,
11090 SPARC_BUILTIN_FPMAXU32, v2si_ftype_v2si_v2si);
11091 def_builtin_const ("__builtin_vis_fpmin8", CODE_FOR_minv8qi3,
11092 SPARC_BUILTIN_FPMIN8, v8qi_ftype_v8qi_v8qi);
11093 def_builtin_const ("__builtin_vis_fpmin16", CODE_FOR_minv4hi3,
11094 SPARC_BUILTIN_FPMIN16, v4hi_ftype_v4hi_v4hi);
11095 def_builtin_const ("__builtin_vis_fpmin32", CODE_FOR_minv2si3,
11096 SPARC_BUILTIN_FPMIN32, v2si_ftype_v2si_v2si);
11097 def_builtin_const ("__builtin_vis_fpminu8", CODE_FOR_minuv8qi3,
11098 SPARC_BUILTIN_FPMINU8, v8qi_ftype_v8qi_v8qi);
11099 def_builtin_const ("__builtin_vis_fpminu16", CODE_FOR_minuv4hi3,
11100 SPARC_BUILTIN_FPMINU16, v4hi_ftype_v4hi_v4hi);
11101 def_builtin_const ("__builtin_vis_fpminu32", CODE_FOR_minuv2si3,
11102 SPARC_BUILTIN_FPMINU32, v2si_ftype_v2si_v2si);
11103 def_builtin_const ("__builtin_vis_fpsub8", CODE_FOR_subv8qi3,
11104 SPARC_BUILTIN_FPSUB8, v8qi_ftype_v8qi_v8qi);
11105 def_builtin_const ("__builtin_vis_fpsubs8", CODE_FOR_sssubv8qi3,
11106 SPARC_BUILTIN_FPSUBS8, v8qi_ftype_v8qi_v8qi);
11107 def_builtin_const ("__builtin_vis_fpsubus8", CODE_FOR_ussubv8qi3,
11108 SPARC_BUILTIN_FPSUBUS8, v8qi_ftype_v8qi_v8qi);
11109 def_builtin_const ("__builtin_vis_fpsubus16", CODE_FOR_ussubv4hi3,
11110 SPARC_BUILTIN_FPSUBUS16, v4hi_ftype_v4hi_v4hi);
11113 if (TARGET_VIS4B)
11115 def_builtin_const ("__builtin_vis_dictunpack8", CODE_FOR_dictunpack8,
11116 SPARC_BUILTIN_DICTUNPACK8, v8qi_ftype_df_si);
11117 def_builtin_const ("__builtin_vis_dictunpack16", CODE_FOR_dictunpack16,
11118 SPARC_BUILTIN_DICTUNPACK16, v4hi_ftype_df_si);
11119 def_builtin_const ("__builtin_vis_dictunpack32", CODE_FOR_dictunpack32,
11120 SPARC_BUILTIN_DICTUNPACK32, v2si_ftype_df_si);
11122 if (TARGET_ARCH64)
11124 tree di_ftype_v8qi_v8qi_si = build_function_type_list (intDI_type_node,
11125 v8qi, v8qi,
11126 intSI_type_node, 0);
11127 tree di_ftype_v4hi_v4hi_si = build_function_type_list (intDI_type_node,
11128 v4hi, v4hi,
11129 intSI_type_node, 0);
11130 tree di_ftype_v2si_v2si_si = build_function_type_list (intDI_type_node,
11131 v2si, v2si,
11132 intSI_type_node, 0);
11134 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8dishl,
11135 SPARC_BUILTIN_FPCMPLE8SHL, di_ftype_v8qi_v8qi_si);
11136 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8dishl,
11137 SPARC_BUILTIN_FPCMPGT8SHL, di_ftype_v8qi_v8qi_si);
11138 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8dishl,
11139 SPARC_BUILTIN_FPCMPEQ8SHL, di_ftype_v8qi_v8qi_si);
11140 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8dishl,
11141 SPARC_BUILTIN_FPCMPNE8SHL, di_ftype_v8qi_v8qi_si);
11143 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16dishl,
11144 SPARC_BUILTIN_FPCMPLE16SHL, di_ftype_v4hi_v4hi_si);
11145 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16dishl,
11146 SPARC_BUILTIN_FPCMPGT16SHL, di_ftype_v4hi_v4hi_si);
11147 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16dishl,
11148 SPARC_BUILTIN_FPCMPEQ16SHL, di_ftype_v4hi_v4hi_si);
11149 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16dishl,
11150 SPARC_BUILTIN_FPCMPNE16SHL, di_ftype_v4hi_v4hi_si);
11152 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32dishl,
11153 SPARC_BUILTIN_FPCMPLE32SHL, di_ftype_v2si_v2si_si);
11154 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32dishl,
11155 SPARC_BUILTIN_FPCMPGT32SHL, di_ftype_v2si_v2si_si);
11156 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32dishl,
11157 SPARC_BUILTIN_FPCMPEQ32SHL, di_ftype_v2si_v2si_si);
11158 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32dishl,
11159 SPARC_BUILTIN_FPCMPNE32SHL, di_ftype_v2si_v2si_si);
11162 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8dishl,
11163 SPARC_BUILTIN_FPCMPULE8SHL, di_ftype_v8qi_v8qi_si);
11164 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8dishl,
11165 SPARC_BUILTIN_FPCMPUGT8SHL, di_ftype_v8qi_v8qi_si);
11167 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16dishl,
11168 SPARC_BUILTIN_FPCMPULE16SHL, di_ftype_v4hi_v4hi_si);
11169 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16dishl,
11170 SPARC_BUILTIN_FPCMPUGT16SHL, di_ftype_v4hi_v4hi_si);
11172 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32dishl,
11173 SPARC_BUILTIN_FPCMPULE32SHL, di_ftype_v2si_v2si_si);
11174 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32dishl,
11175 SPARC_BUILTIN_FPCMPUGT32SHL, di_ftype_v2si_v2si_si);
11177 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8dishl,
11178 SPARC_BUILTIN_FPCMPDE8SHL, di_ftype_v8qi_v8qi_si);
11179 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16dishl,
11180 SPARC_BUILTIN_FPCMPDE16SHL, di_ftype_v4hi_v4hi_si);
11181 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32dishl,
11182 SPARC_BUILTIN_FPCMPDE32SHL, di_ftype_v2si_v2si_si);
11184 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8dishl,
11185 SPARC_BUILTIN_FPCMPUR8SHL, di_ftype_v8qi_v8qi_si);
11186 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16dishl,
11187 SPARC_BUILTIN_FPCMPUR16SHL, di_ftype_v4hi_v4hi_si);
11188 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32dishl,
11189 SPARC_BUILTIN_FPCMPUR32SHL, di_ftype_v2si_v2si_si);
11192 else
11194 tree si_ftype_v8qi_v8qi_si = build_function_type_list (intSI_type_node,
11195 v8qi, v8qi,
11196 intSI_type_node, 0);
11197 tree si_ftype_v4hi_v4hi_si = build_function_type_list (intSI_type_node,
11198 v4hi, v4hi,
11199 intSI_type_node, 0);
11200 tree si_ftype_v2si_v2si_si = build_function_type_list (intSI_type_node,
11201 v2si, v2si,
11202 intSI_type_node, 0);
11204 def_builtin_const ("__builtin_vis_fpcmple8shl", CODE_FOR_fpcmple8sishl,
11205 SPARC_BUILTIN_FPCMPLE8SHL, si_ftype_v8qi_v8qi_si);
11206 def_builtin_const ("__builtin_vis_fpcmpgt8shl", CODE_FOR_fpcmpgt8sishl,
11207 SPARC_BUILTIN_FPCMPGT8SHL, si_ftype_v8qi_v8qi_si);
11208 def_builtin_const ("__builtin_vis_fpcmpeq8shl", CODE_FOR_fpcmpeq8sishl,
11209 SPARC_BUILTIN_FPCMPEQ8SHL, si_ftype_v8qi_v8qi_si);
11210 def_builtin_const ("__builtin_vis_fpcmpne8shl", CODE_FOR_fpcmpne8sishl,
11211 SPARC_BUILTIN_FPCMPNE8SHL, si_ftype_v8qi_v8qi_si);
11213 def_builtin_const ("__builtin_vis_fpcmple16shl", CODE_FOR_fpcmple16sishl,
11214 SPARC_BUILTIN_FPCMPLE16SHL, si_ftype_v4hi_v4hi_si);
11215 def_builtin_const ("__builtin_vis_fpcmpgt16shl", CODE_FOR_fpcmpgt16sishl,
11216 SPARC_BUILTIN_FPCMPGT16SHL, si_ftype_v4hi_v4hi_si);
11217 def_builtin_const ("__builtin_vis_fpcmpeq16shl", CODE_FOR_fpcmpeq16sishl,
11218 SPARC_BUILTIN_FPCMPEQ16SHL, si_ftype_v4hi_v4hi_si);
11219 def_builtin_const ("__builtin_vis_fpcmpne16shl", CODE_FOR_fpcmpne16sishl,
11220 SPARC_BUILTIN_FPCMPNE16SHL, si_ftype_v4hi_v4hi_si);
11222 def_builtin_const ("__builtin_vis_fpcmple32shl", CODE_FOR_fpcmple32sishl,
11223 SPARC_BUILTIN_FPCMPLE32SHL, si_ftype_v2si_v2si_si);
11224 def_builtin_const ("__builtin_vis_fpcmpgt32shl", CODE_FOR_fpcmpgt32sishl,
11225 SPARC_BUILTIN_FPCMPGT32SHL, si_ftype_v2si_v2si_si);
11226 def_builtin_const ("__builtin_vis_fpcmpeq32shl", CODE_FOR_fpcmpeq32sishl,
11227 SPARC_BUILTIN_FPCMPEQ32SHL, si_ftype_v2si_v2si_si);
11228 def_builtin_const ("__builtin_vis_fpcmpne32shl", CODE_FOR_fpcmpne32sishl,
11229 SPARC_BUILTIN_FPCMPNE32SHL, si_ftype_v2si_v2si_si);
11232 def_builtin_const ("__builtin_vis_fpcmpule8shl", CODE_FOR_fpcmpule8sishl,
11233 SPARC_BUILTIN_FPCMPULE8SHL, si_ftype_v8qi_v8qi_si);
11234 def_builtin_const ("__builtin_vis_fpcmpugt8shl", CODE_FOR_fpcmpugt8sishl,
11235 SPARC_BUILTIN_FPCMPUGT8SHL, si_ftype_v8qi_v8qi_si);
11237 def_builtin_const ("__builtin_vis_fpcmpule16shl", CODE_FOR_fpcmpule16sishl,
11238 SPARC_BUILTIN_FPCMPULE16SHL, si_ftype_v4hi_v4hi_si);
11239 def_builtin_const ("__builtin_vis_fpcmpugt16shl", CODE_FOR_fpcmpugt16sishl,
11240 SPARC_BUILTIN_FPCMPUGT16SHL, si_ftype_v4hi_v4hi_si);
11242 def_builtin_const ("__builtin_vis_fpcmpule32shl", CODE_FOR_fpcmpule32sishl,
11243 SPARC_BUILTIN_FPCMPULE32SHL, si_ftype_v2si_v2si_si);
11244 def_builtin_const ("__builtin_vis_fpcmpugt32shl", CODE_FOR_fpcmpugt32sishl,
11245 SPARC_BUILTIN_FPCMPUGT32SHL, si_ftype_v2si_v2si_si);
11247 def_builtin_const ("__builtin_vis_fpcmpde8shl", CODE_FOR_fpcmpde8sishl,
11248 SPARC_BUILTIN_FPCMPDE8SHL, si_ftype_v8qi_v8qi_si);
11249 def_builtin_const ("__builtin_vis_fpcmpde16shl", CODE_FOR_fpcmpde16sishl,
11250 SPARC_BUILTIN_FPCMPDE16SHL, si_ftype_v4hi_v4hi_si);
11251 def_builtin_const ("__builtin_vis_fpcmpde32shl", CODE_FOR_fpcmpde32sishl,
11252 SPARC_BUILTIN_FPCMPDE32SHL, si_ftype_v2si_v2si_si);
11254 def_builtin_const ("__builtin_vis_fpcmpur8shl", CODE_FOR_fpcmpur8sishl,
11255 SPARC_BUILTIN_FPCMPUR8SHL, si_ftype_v8qi_v8qi_si);
11256 def_builtin_const ("__builtin_vis_fpcmpur16shl", CODE_FOR_fpcmpur16sishl,
11257 SPARC_BUILTIN_FPCMPUR16SHL, si_ftype_v4hi_v4hi_si);
11258 def_builtin_const ("__builtin_vis_fpcmpur32shl", CODE_FOR_fpcmpur32sishl,
11259 SPARC_BUILTIN_FPCMPUR32SHL, si_ftype_v2si_v2si_si);
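/* Editorial usage sketch: with -mvis, the builtins registered above
   are reachable from C through the vector modes given to them, e.g.

     typedef short v4hi __attribute__ ((vector_size (8)));
     typedef unsigned char v4qi __attribute__ ((vector_size (4)));

     v4qi pack (v4hi x) { return __builtin_vis_fpack16 (x); }

   matching the v4qi_ftype_v4hi signature of SPARC_BUILTIN_FPACK16.  */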
11264 /* Implement TARGET_BUILTIN_DECL hook. */
11266 static tree
11267 sparc_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
11269 if (code >= SPARC_BUILTIN_MAX)
11270 return error_mark_node;
11272 return sparc_builtins[code];
11275 /* Implement TARGET_EXPAND_BUILTIN hook. */
11277 static rtx
11278 sparc_expand_builtin (tree exp, rtx target,
11279 rtx subtarget ATTRIBUTE_UNUSED,
11280 machine_mode tmode ATTRIBUTE_UNUSED,
11281 int ignore ATTRIBUTE_UNUSED)
11283 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11284 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11285 enum insn_code icode = sparc_builtins_icode[code];
11286 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
11287 call_expr_arg_iterator iter;
11288 int arg_count = 0;
11289 rtx pat, op[4];
11290 tree arg;
11292 if (nonvoid)
11294 machine_mode tmode = insn_data[icode].operand[0].mode;
11295 if (!target
11296 || GET_MODE (target) != tmode
11297 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11298 op[0] = gen_reg_rtx (tmode);
11299 else
11300 op[0] = target;
11303 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
11305 const struct insn_operand_data *insn_op;
11306 int idx;
11308 if (arg == error_mark_node)
11309 return NULL_RTX;
11311 arg_count++;
11312 idx = arg_count - !nonvoid;
11313 insn_op = &insn_data[icode].operand[idx];
11314 op[arg_count] = expand_normal (arg);
11316 /* Some of the builtins require constant arguments. We check
11317 for this here. */
11318 if ((code >= SPARC_BUILTIN_FIRST_FPCMPSHL
11319 && code <= SPARC_BUILTIN_LAST_FPCMPSHL
11320 && arg_count == 3)
11321 || (code >= SPARC_BUILTIN_FIRST_DICTUNPACK
11322 && code <= SPARC_BUILTIN_LAST_DICTUNPACK
11323 && arg_count == 2))
11325 if (!check_constant_argument (icode, idx, op[arg_count]))
11326 return const0_rtx;
11329 if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
11331 if (!address_operand (op[arg_count], SImode))
11333 op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
11334 op[arg_count] = copy_addr_to_reg (op[arg_count]);
11336 op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
11339 else if (insn_op->mode == V1DImode
11340 && GET_MODE (op[arg_count]) == DImode)
11341 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
11343 else if (insn_op->mode == V1SImode
11344 && GET_MODE (op[arg_count]) == SImode)
11345 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
11347 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
11348 insn_op->mode))
11349 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
11352 switch (arg_count)
11354 case 0:
11355 pat = GEN_FCN (icode) (op[0]);
11356 break;
11357 case 1:
11358 if (nonvoid)
11359 pat = GEN_FCN (icode) (op[0], op[1]);
11360 else
11361 pat = GEN_FCN (icode) (op[1]);
11362 break;
11363 case 2:
11364 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
11365 break;
11366 case 3:
11367 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
11368 break;
11369 default:
11370 gcc_unreachable ();
11373 if (!pat)
11374 return NULL_RTX;
11376 emit_insn (pat);
11378 return (nonvoid ? op[0] : const0_rtx);
11381 /* Return the upper 16 bits of the 8x16 multiplication. */
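/* A worked example: sparc_vis_mul8x16 (128, 100) = (12800 + 128) / 256 = 50.
   In effect the 8-bit operand acts as an unsigned fraction of 256, so 128
   stands for 0.5, and the +128 rounds the truncated product to nearest.  */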
11383 static int
11384 sparc_vis_mul8x16 (int e8, int e16)
11386 return (e8 * e16 + 128) / 256;
11389 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
11390 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
11392 static void
11393 sparc_handle_vis_mul8x16 (tree *n_elts, enum sparc_builtins fncode,
11394 tree inner_type, tree cst0, tree cst1)
11396 unsigned i, num = VECTOR_CST_NELTS (cst0);
11397 int scale;
11399 switch (fncode)
11401 case SPARC_BUILTIN_FMUL8X16:
11402 for (i = 0; i < num; ++i)
11404 int val
11405 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11406 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
11407 n_elts[i] = build_int_cst (inner_type, val);
11409 break;
11411 case SPARC_BUILTIN_FMUL8X16AU:
11412 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
11414 for (i = 0; i < num; ++i)
11416 int val
11417 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11418 scale);
11419 n_elts[i] = build_int_cst (inner_type, val);
11421 break;
11423 case SPARC_BUILTIN_FMUL8X16AL:
11424 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
11426 for (i = 0; i < num; ++i)
11428 int val
11429 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
11430 scale);
11431 n_elts[i] = build_int_cst (inner_type, val);
11433 break;
11435 default:
11436 gcc_unreachable ();
11440 /* Implement TARGET_FOLD_BUILTIN hook.
11442 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
11443 result of the function call is ignored. NULL_TREE is returned if the
11444 function could not be folded. */
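/* For example, folding SPARC_BUILTIN_FEXPAND on a constant vector yields
   the constant vector with each element shifted left by 4, mirroring the
   element-wise code below.  */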
11446 static tree
11447 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
11448 tree *args, bool ignore)
11450 enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
11451 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
11452 tree arg0, arg1, arg2;
11454 if (ignore)
11455 switch (code)
11457 case SPARC_BUILTIN_LDFSR:
11458 case SPARC_BUILTIN_STFSR:
11459 case SPARC_BUILTIN_ALIGNADDR:
11460 case SPARC_BUILTIN_WRGSR:
11461 case SPARC_BUILTIN_BMASK:
11462 case SPARC_BUILTIN_CMASK8:
11463 case SPARC_BUILTIN_CMASK16:
11464 case SPARC_BUILTIN_CMASK32:
11465 break;
11467 default:
11468 return build_zero_cst (rtype);
11471 switch (code)
11473 case SPARC_BUILTIN_FEXPAND:
11474 arg0 = args[0];
11475 STRIP_NOPS (arg0);
11477 if (TREE_CODE (arg0) == VECTOR_CST)
11479 tree inner_type = TREE_TYPE (rtype);
11480 tree *n_elts;
11481 unsigned i;
11483 n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
11484 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11485 n_elts[i] = build_int_cst (inner_type,
11486 TREE_INT_CST_LOW
11487 (VECTOR_CST_ELT (arg0, i)) << 4);
11488 return build_vector (rtype, n_elts);
11490 break;
11492 case SPARC_BUILTIN_FMUL8X16:
11493 case SPARC_BUILTIN_FMUL8X16AU:
11494 case SPARC_BUILTIN_FMUL8X16AL:
11495 arg0 = args[0];
11496 arg1 = args[1];
11497 STRIP_NOPS (arg0);
11498 STRIP_NOPS (arg1);
11500 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11502 tree inner_type = TREE_TYPE (rtype);
11503 tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
11504 sparc_handle_vis_mul8x16 (n_elts, code, inner_type, arg0, arg1);
11505 return build_vector (rtype, n_elts);
11507 break;
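/* Folding FPMERGE on constant vectors interleaves the elements, e.g.
   fpmerge ({a0,a1,a2,a3}, {b0,b1,b2,b3}) = {a0,b0,a1,b1,a2,b2,a3,b3}.  */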
11509 case SPARC_BUILTIN_FPMERGE:
11510 arg0 = args[0];
11511 arg1 = args[1];
11512 STRIP_NOPS (arg0);
11513 STRIP_NOPS (arg1);
11515 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
11517 tree *n_elts = XALLOCAVEC (tree, 2 * VECTOR_CST_NELTS (arg0));
11518 unsigned i;
11519 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11521 n_elts[2*i] = VECTOR_CST_ELT (arg0, i);
11522 n_elts[2*i+1] = VECTOR_CST_ELT (arg1, i);
11525 return build_vector (rtype, n_elts);
11527 break;
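/* PDIST/PDISTN sum the absolute differences of the byte elements; for
   SPARC_BUILTIN_PDIST the sum is accumulated into the third argument:
   result = arg2 + sum |arg0[i] - arg1[i]|.  */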
11529 case SPARC_BUILTIN_PDIST:
11530 case SPARC_BUILTIN_PDISTN:
11531 arg0 = args[0];
11532 arg1 = args[1];
11533 STRIP_NOPS (arg0);
11534 STRIP_NOPS (arg1);
11535 if (code == SPARC_BUILTIN_PDIST)
11537 arg2 = args[2];
11538 STRIP_NOPS (arg2);
11540 else
11541 arg2 = integer_zero_node;
11543 if (TREE_CODE (arg0) == VECTOR_CST
11544 && TREE_CODE (arg1) == VECTOR_CST
11545 && TREE_CODE (arg2) == INTEGER_CST)
11547 bool overflow = false;
11548 widest_int result = wi::to_widest (arg2);
11549 widest_int tmp;
11550 unsigned i;
11552 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
11554 tree e0 = VECTOR_CST_ELT (arg0, i);
11555 tree e1 = VECTOR_CST_ELT (arg1, i);
11557 bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
11559 tmp = wi::neg (wi::to_widest (e1), &neg1_ovf);
11560 tmp = wi::add (wi::to_widest (e0), tmp, SIGNED, &add1_ovf);
11561 if (wi::neg_p (tmp))
11562 tmp = wi::neg (tmp, &neg2_ovf);
11563 else
11564 neg2_ovf = false;
11565 result = wi::add (result, tmp, SIGNED, &add2_ovf);
11566 overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
11569 gcc_assert (!overflow);
11571 return wide_int_to_tree (rtype, result);
11574 default:
11575 break;
11578 return NULL_TREE;
11581 /* ??? This duplicates information provided to the compiler by the
11582 ??? scheduler description. Some day, teach genautomata to output
11583 ??? the latencies and then CSE will just use that. */
11585 static bool
11586 sparc_rtx_costs (rtx x, machine_mode mode, int outer_code,
11587 int opno ATTRIBUTE_UNUSED,
11588 int *total, bool speed ATTRIBUTE_UNUSED)
11590 int code = GET_CODE (x);
11591 bool float_mode_p = FLOAT_MODE_P (mode);
11593 switch (code)
11595 case CONST_INT:
11596 if (SMALL_INT (x))
11597 *total = 0;
11598 else
11599 *total = 2;
11600 return true;
11602 case CONST_WIDE_INT:
11603 *total = 0;
11604 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 0)))
11605 *total += 2;
11606 if (!SPARC_SIMM13_P (CONST_WIDE_INT_ELT (x, 1)))
11607 *total += 2;
11608 return true;
11610 case HIGH:
11611 *total = 2;
11612 return true;
11614 case CONST:
11615 case LABEL_REF:
11616 case SYMBOL_REF:
11617 *total = 4;
11618 return true;
11620 case CONST_DOUBLE:
11621 *total = 8;
11622 return true;
11624 case MEM:
11625 /* If outer-code was a sign or zero extension, a cost
11626 of COSTS_N_INSNS (1) was already added in. This is
11627 why we are subtracting it back out. */
11628 if (outer_code == ZERO_EXTEND)
11630 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
11632 else if (outer_code == SIGN_EXTEND)
11634 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
11636 else if (float_mode_p)
11638 *total = sparc_costs->float_load;
11640 else
11642 *total = sparc_costs->int_load;
11645 return true;
11647 case PLUS:
11648 case MINUS:
11649 if (float_mode_p)
11650 *total = sparc_costs->float_plusminus;
11651 else
11652 *total = COSTS_N_INSNS (1);
11653 return false;
11655 case FMA:
11657 rtx sub;
11659 gcc_assert (float_mode_p);
11660 *total = sparc_costs->float_mul;
11662 sub = XEXP (x, 0);
11663 if (GET_CODE (sub) == NEG)
11664 sub = XEXP (sub, 0);
11665 *total += rtx_cost (sub, mode, FMA, 0, speed);
11667 sub = XEXP (x, 2);
11668 if (GET_CODE (sub) == NEG)
11669 sub = XEXP (sub, 0);
11670 *total += rtx_cost (sub, mode, FMA, 2, speed);
11671 return true;
11674 case MULT:
11675 if (float_mode_p)
11676 *total = sparc_costs->float_mul;
11677 else if (TARGET_ARCH32 && !TARGET_HARD_MUL)
11678 *total = COSTS_N_INSNS (25);
11679 else
11681 int bit_cost;
11683 bit_cost = 0;
11684 if (sparc_costs->int_mul_bit_factor)
11686 int nbits;
11688 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
11690 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
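/* value &= value - 1 clears the lowest set bit, so this loop counts
   the set bits of the multiplier (its population count).  */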
11691 for (nbits = 0; value != 0; value &= value - 1)
11692 nbits++;
11694 else
11695 nbits = 7;
11697 if (nbits < 3)
11698 nbits = 3;
11699 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
11700 bit_cost = COSTS_N_INSNS (bit_cost);
11703 if (mode == DImode || !TARGET_HARD_MUL)
11704 *total = sparc_costs->int_mulX + bit_cost;
11705 else
11706 *total = sparc_costs->int_mul + bit_cost;
11708 return false;
11710 case ASHIFT:
11711 case ASHIFTRT:
11712 case LSHIFTRT:
11713 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
11714 return false;
11716 case DIV:
11717 case UDIV:
11718 case MOD:
11719 case UMOD:
11720 if (float_mode_p)
11722 if (mode == DFmode)
11723 *total = sparc_costs->float_div_df;
11724 else
11725 *total = sparc_costs->float_div_sf;
11727 else
11729 if (mode == DImode)
11730 *total = sparc_costs->int_divX;
11731 else
11732 *total = sparc_costs->int_div;
11734 return false;
11736 case NEG:
11737 if (! float_mode_p)
11739 *total = COSTS_N_INSNS (1);
11740 return false;
11742 /* FALLTHRU */
11744 case ABS:
11745 case FLOAT:
11746 case UNSIGNED_FLOAT:
11747 case FIX:
11748 case UNSIGNED_FIX:
11749 case FLOAT_EXTEND:
11750 case FLOAT_TRUNCATE:
11751 *total = sparc_costs->float_move;
11752 return false;
11754 case SQRT:
11755 if (mode == DFmode)
11756 *total = sparc_costs->float_sqrt_df;
11757 else
11758 *total = sparc_costs->float_sqrt_sf;
11759 return false;
11761 case COMPARE:
11762 if (float_mode_p)
11763 *total = sparc_costs->float_cmp;
11764 else
11765 *total = COSTS_N_INSNS (1);
11766 return false;
11768 case IF_THEN_ELSE:
11769 if (float_mode_p)
11770 *total = sparc_costs->float_cmove;
11771 else
11772 *total = sparc_costs->int_cmove;
11773 return false;
11775 case IOR:
11776 /* Handle the NAND vector patterns. */
11777 if (sparc_vector_mode_supported_p (mode)
11778 && GET_CODE (XEXP (x, 0)) == NOT
11779 && GET_CODE (XEXP (x, 1)) == NOT)
11781 *total = COSTS_N_INSNS (1);
11782 return true;
11784 else
11785 return false;
11787 default:
11788 return false;
11792 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
11794 static inline bool
11795 general_or_i64_p (reg_class_t rclass)
11797 return (rclass == GENERAL_REGS || rclass == I64_REGS);
11800 /* Implement TARGET_REGISTER_MOVE_COST. */
11802 static int
11803 sparc_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
11804 reg_class_t from, reg_class_t to)
11806 bool need_memory = false;
11808 /* This helps postreload CSE to eliminate redundant comparisons. */
11809 if (from == NO_REGS || to == NO_REGS)
11810 return 100;
11812 if (from == FPCC_REGS || to == FPCC_REGS)
11813 need_memory = true;
11814 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
11815 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
11817 if (TARGET_VIS3)
11819 int size = GET_MODE_SIZE (mode);
11820 if (size == 8 || size == 4)
11822 if (! TARGET_ARCH32 || size == 4)
11823 return 4;
11824 else
11825 return 6;
11828 need_memory = true;
11831 if (need_memory)
11833 if (sparc_cpu == PROCESSOR_ULTRASPARC
11834 || sparc_cpu == PROCESSOR_ULTRASPARC3
11835 || sparc_cpu == PROCESSOR_NIAGARA
11836 || sparc_cpu == PROCESSOR_NIAGARA2
11837 || sparc_cpu == PROCESSOR_NIAGARA3
11838 || sparc_cpu == PROCESSOR_NIAGARA4
11839 || sparc_cpu == PROCESSOR_NIAGARA7
11840 || sparc_cpu == PROCESSOR_M8)
11841 return 12;
11843 return 6;
11846 return 2;
11849 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
11850 This is achieved by means of a manual dynamic stack space allocation in
11851 the current frame. We make the assumption that SEQ doesn't contain any
11852 function calls, with the possible exception of calls to the GOT helper. */
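/* In outline, the emitted sequence is:

     decrement %sp by SIZE        (allocate fresh stack space)
     store REG (and REG2) in the slot above the register save area
     emit SEQ
     reload REG2 and REG from the slot
     increment %sp by SIZE        (release the stack space)  */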
11854 static void
11855 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
11857 /* We must preserve the lowest 16 words for the register save area. */
11858 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
11859 /* We really need only 2 words of fresh stack space. */
11860 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
11862 rtx slot
11863 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
11864 SPARC_STACK_BIAS + offset));
11866 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
11867 emit_insn (gen_rtx_SET (slot, reg));
11868 if (reg2)
11869 emit_insn (gen_rtx_SET (adjust_address (slot, word_mode, UNITS_PER_WORD),
11870 reg2));
11871 emit_insn (seq);
11872 if (reg2)
11873 emit_insn (gen_rtx_SET (reg2,
11874 adjust_address (slot, word_mode, UNITS_PER_WORD)));
11875 emit_insn (gen_rtx_SET (reg, slot));
11876 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
11879 /* Output the assembler code for a thunk function. THUNK_DECL is the
11880 declaration for the thunk function itself, FUNCTION is the decl for
11881 the target function. DELTA is an immediate constant offset to be
11882 added to THIS. If VCALL_OFFSET is nonzero, the word at address
11883 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
11885 static void
11886 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11887 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11888 tree function)
11890 rtx this_rtx, funexp;
11891 rtx_insn *insn;
11892 unsigned int int_arg_first;
11894 reload_completed = 1;
11895 epilogue_completed = 1;
11897 emit_note (NOTE_INSN_PROLOGUE_END);
11899 if (TARGET_FLAT)
11901 sparc_leaf_function_p = 1;
11903 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
11905 else if (flag_delayed_branch)
11907 /* We will emit a regular sibcall below, so we need to instruct
11908 output_sibcall that we are in a leaf function. */
11909 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
11911 /* This will cause final.c to invoke leaf_renumber_regs so we
11912 must behave as if we were in a not-yet-leafified function. */
11913 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
11915 else
11917 /* We will emit the sibcall manually below, so we will need to
11918 manually spill non-leaf registers. */
11919 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
11921 /* We really are in a leaf function. */
11922 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
11925 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
11926 returns a structure, the structure return pointer is there instead. */
11927 if (TARGET_ARCH64
11928 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11929 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
11930 else
11931 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
11933 /* Add DELTA. When possible use a plain add, otherwise load it into
11934 a register first. */
11935 if (delta)
11937 rtx delta_rtx = GEN_INT (delta);
11939 if (! SPARC_SIMM13_P (delta))
11941 rtx scratch = gen_rtx_REG (Pmode, 1);
11942 emit_move_insn (scratch, delta_rtx);
11943 delta_rtx = scratch;
11946 /* THIS_RTX += DELTA. */
11947 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
11950 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
11951 if (vcall_offset)
11953 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
11954 rtx scratch = gen_rtx_REG (Pmode, 1);
11956 gcc_assert (vcall_offset < 0);
11958 /* SCRATCH = *THIS_RTX. */
11959 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
11961 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
11962 may not have any available scratch register at this point. */
11963 if (SPARC_SIMM13_P (vcall_offset))
11965 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
11966 else if (! fixed_regs[5]
11967 /* The below sequence is made up of at least 2 insns,
11968 while the default method may need only one. */
11969 && vcall_offset < -8192)
11971 rtx scratch2 = gen_rtx_REG (Pmode, 5);
11972 emit_move_insn (scratch2, vcall_offset_rtx);
11973 vcall_offset_rtx = scratch2;
11975 else
11977 rtx increment = GEN_INT (-4096);
11979 /* VCALL_OFFSET is a negative number whose typical range can be
11980 estimated as -32768..0 in 32-bit mode. In almost all cases
11981 it is therefore cheaper to emit multiple add insns than
11982 spilling and loading the constant into a register (at least
11983 6 insns). */
11984 while (! SPARC_SIMM13_P (vcall_offset))
11986 emit_insn (gen_add2_insn (scratch, increment));
11987 vcall_offset += 4096;
11989 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
11992 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
11993 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
11994 gen_rtx_PLUS (Pmode,
11995 scratch,
11996 vcall_offset_rtx)));
11998 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
11999 emit_insn (gen_add2_insn (this_rtx, scratch));
12002 /* Generate a tail call to the target function. */
12003 if (! TREE_USED (function))
12005 assemble_external (function);
12006 TREE_USED (function) = 1;
12008 funexp = XEXP (DECL_RTL (function), 0);
12010 if (flag_delayed_branch)
12012 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
12013 insn = emit_call_insn (gen_sibcall (funexp));
12014 SIBLING_CALL_P (insn) = 1;
12016 else
12018 /* The hoops we have to jump through in order to generate a sibcall
12019 without using delay slots... */
12020 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
12022 if (flag_pic)
12024 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
12025 start_sequence ();
12026 load_got_register (); /* clobbers %o7 */
12027 scratch = sparc_legitimize_pic_address (funexp, scratch);
12028 seq = get_insns ();
12029 end_sequence ();
12030 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
12032 else if (TARGET_ARCH32)
12034 emit_insn (gen_rtx_SET (scratch,
12035 gen_rtx_HIGH (SImode, funexp)));
12036 emit_insn (gen_rtx_SET (scratch,
12037 gen_rtx_LO_SUM (SImode, scratch, funexp)));
12039 else /* TARGET_ARCH64 */
12041 switch (sparc_cmodel)
12043 case CM_MEDLOW:
12044 case CM_MEDMID:
12045 /* The destination can serve as a temporary. */
12046 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
12047 break;
12049 case CM_MEDANY:
12050 case CM_EMBMEDANY:
12051 /* The destination cannot serve as a temporary. */
12052 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
12053 start_sequence ();
12054 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
12055 seq = get_insns ();
12056 end_sequence ();
12057 emit_and_preserve (seq, spill_reg, 0);
12058 break;
12060 default:
12061 gcc_unreachable ();
12065 emit_jump_insn (gen_indirect_jump (scratch));
12068 emit_barrier ();
12070 /* Run just enough of rest_of_compilation to get the insns emitted.
12071 There's not really enough bulk here to make other passes such as
12072 instruction scheduling worthwhile. Note that use_thunk calls
12073 assemble_start_function and assemble_end_function. */
12074 insn = get_insns ();
12075 shorten_branches (insn);
12076 final_start_function (insn, file, 1);
12077 final (insn, file, 1);
12078 final_end_function ();
12080 reload_completed = 0;
12081 epilogue_completed = 0;
12084 /* Return true if sparc_output_mi_thunk would be able to output the
12085 assembler code for the thunk function specified by the arguments
12086 it is passed, and false otherwise. */
12087 static bool
12088 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
12089 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
12090 HOST_WIDE_INT vcall_offset,
12091 const_tree function ATTRIBUTE_UNUSED)
12093 /* Bound the loop used in the default method above. */
12094 return (vcall_offset >= -32768 || ! fixed_regs[5]);
12097 /* How to allocate a 'struct machine_function'. */
12099 static struct machine_function *
12100 sparc_init_machine_status (void)
12102 return ggc_cleared_alloc<machine_function> ();
12105 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
12106 We need to emit DTP-relative relocations. */
12108 static void
12109 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
12111 switch (size)
12113 case 4:
12114 fputs ("\t.word\t%r_tls_dtpoff32(", file);
12115 break;
12116 case 8:
12117 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
12118 break;
12119 default:
12120 gcc_unreachable ();
12122 output_addr_const (file, x);
12123 fputs (")", file);
12126 /* Do whatever processing is required at the end of a file. */
12128 static void
12129 sparc_file_end (void)
12131 /* If we need to emit the special GOT helper function, do so now. */
12132 if (got_helper_rtx)
12134 const char *name = XSTR (got_helper_rtx, 0);
12135 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
12136 #ifdef DWARF2_UNWIND_INFO
12137 bool do_cfi;
12138 #endif
12140 if (USE_HIDDEN_LINKONCE)
12142 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
12143 get_identifier (name),
12144 build_function_type_list (void_type_node,
12145 NULL_TREE));
12146 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
12147 NULL_TREE, void_type_node);
12148 TREE_PUBLIC (decl) = 1;
12149 TREE_STATIC (decl) = 1;
12150 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
12151 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
12152 DECL_VISIBILITY_SPECIFIED (decl) = 1;
12153 resolve_unique_section (decl, 0, flag_function_sections);
12154 allocate_struct_function (decl, true);
12155 cfun->is_thunk = 1;
12156 current_function_decl = decl;
12157 init_varasm_status ();
12158 assemble_start_function (decl, name);
12160 else
12162 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
12163 switch_to_section (text_section);
12164 if (align > 0)
12165 ASM_OUTPUT_ALIGN (asm_out_file, align);
12166 ASM_OUTPUT_LABEL (asm_out_file, name);
12169 #ifdef DWARF2_UNWIND_INFO
12170 do_cfi = dwarf2out_do_cfi_asm ();
12171 if (do_cfi)
12172 fprintf (asm_out_file, "\t.cfi_startproc\n");
12173 #endif
12174 if (flag_delayed_branch)
12175 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
12176 reg_name, reg_name);
12177 else
12178 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
12179 reg_name, reg_name);
12180 #ifdef DWARF2_UNWIND_INFO
12181 if (do_cfi)
12182 fprintf (asm_out_file, "\t.cfi_endproc\n");
12183 #endif
12186 if (NEED_INDICATE_EXEC_STACK)
12187 file_end_indicate_exec_stack ();
12189 #ifdef TARGET_SOLARIS
12190 solaris_file_end ();
12191 #endif
12194 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12195 /* Implement TARGET_MANGLE_TYPE. */
12197 static const char *
12198 sparc_mangle_type (const_tree type)
12200 if (TARGET_ARCH32
12201 && TYPE_MAIN_VARIANT (type) == long_double_type_node
12202 && TARGET_LONG_DOUBLE_128)
12203 return "g";
12205 /* For all other types, use normal C++ mangling. */
12206 return NULL;
12208 #endif
12210 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
12211 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
12212 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
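/* For example, sparc_expand_compare_and_swap below wraps the actual
   operation in sparc_emit_membar_for_model (model, 3, 1) and (model, 3, 2):
   LOAD_STORE 3 says the operation is both a load and a store, while
   BEFORE_AFTER 1 or 2 requests the barrier before or after it.  */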
12214 void
12215 sparc_emit_membar_for_model (enum memmodel model,
12216 int load_store, int before_after)
12218 /* Bits for the MEMBAR mmask field. */
12219 const int LoadLoad = 1;
12220 const int StoreLoad = 2;
12221 const int LoadStore = 4;
12222 const int StoreStore = 8;
12224 int mm = 0, implied = 0;
12226 switch (sparc_memory_model)
12228 case SMM_SC:
12229 /* Sequential Consistency. All memory transactions are immediately
12230 visible in sequential execution order. No barriers needed. */
12231 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
12232 break;
12234 case SMM_TSO:
12235 /* Total Store Ordering: all memory transactions with store semantics
12236 are followed by an implied StoreStore. */
12237 implied |= StoreStore;
12239 /* If we're not looking for a raw barrier (before+after), then atomic
12240 operations get the benefit of being both load and store. */
12241 if (load_store == 3 && before_after == 1)
12242 implied |= StoreLoad;
12243 /* FALLTHRU */
12245 case SMM_PSO:
12246 /* Partial Store Ordering: all memory transactions with load semantics
12247 are followed by an implied LoadLoad | LoadStore. */
12248 implied |= LoadLoad | LoadStore;
12250 /* If we're not looking for a raw barrier (before+after), then atomic
12251 operations get the benefit of being both load and store. */
12252 if (load_store == 3 && before_after == 2)
12253 implied |= StoreLoad | StoreStore;
12254 /* FALLTHRU */
12256 case SMM_RMO:
12257 /* Relaxed Memory Ordering: no implicit bits. */
12258 break;
12260 default:
12261 gcc_unreachable ();
12264 if (before_after & 1)
12266 if (is_mm_release (model) || is_mm_acq_rel (model)
12267 || is_mm_seq_cst (model))
12269 if (load_store & 1)
12270 mm |= LoadLoad | StoreLoad;
12271 if (load_store & 2)
12272 mm |= LoadStore | StoreStore;
12275 if (before_after & 2)
12277 if (is_mm_acquire (model) || is_mm_acq_rel (model)
12278 || is_mm_seq_cst (model))
12280 if (load_store & 1)
12281 mm |= LoadLoad | LoadStore;
12282 if (load_store & 2)
12283 mm |= StoreLoad | StoreStore;
12287 /* Remove the bits implied by the system memory model. */
12288 mm &= ~implied;
12290 /* For raw barriers (before+after), always emit a barrier.
12291 This will become a compile-time barrier if needed. */
12292 if (mm || before_after == 3)
12293 emit_insn (gen_membar (GEN_INT (mm)));
12296 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing a
12297 32-bit compare and swap on the word containing the byte or half-word. */
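/* In outline, with MASK covering the byte or half-word at its shifted
   position in the aligned word and VAL holding the other bytes:

     loop:
       expected = (oldval << off) | val;
       desired  = (newval << off) | val;
       observed = CAS (word, expected, desired);
       if (observed == expected)
         goto done;                               success
       if ((observed & ~mask) != val)             other bytes changed,
         { val = observed & ~mask; goto loop; }   so retry
     done:
       result = (observed & mask) >> off;  */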
12299 static void
12300 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
12301 rtx oldval, rtx newval)
12303 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
12304 rtx addr = gen_reg_rtx (Pmode);
12305 rtx off = gen_reg_rtx (SImode);
12306 rtx oldv = gen_reg_rtx (SImode);
12307 rtx newv = gen_reg_rtx (SImode);
12308 rtx oldvalue = gen_reg_rtx (SImode);
12309 rtx newvalue = gen_reg_rtx (SImode);
12310 rtx res = gen_reg_rtx (SImode);
12311 rtx resv = gen_reg_rtx (SImode);
12312 rtx memsi, val, mask, cc;
12314 emit_insn (gen_rtx_SET (addr, gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
12316 if (Pmode != SImode)
12317 addr1 = gen_lowpart (SImode, addr1);
12318 emit_insn (gen_rtx_SET (off, gen_rtx_AND (SImode, addr1, GEN_INT (3))));
12320 memsi = gen_rtx_MEM (SImode, addr);
12321 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
12322 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
12324 val = copy_to_reg (memsi);
12326 emit_insn (gen_rtx_SET (off,
12327 gen_rtx_XOR (SImode, off,
12328 GEN_INT (GET_MODE (mem) == QImode
12329 ? 3 : 2))));
12331 emit_insn (gen_rtx_SET (off, gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
12333 if (GET_MODE (mem) == QImode)
12334 mask = force_reg (SImode, GEN_INT (0xff));
12335 else
12336 mask = force_reg (SImode, GEN_INT (0xffff));
12338 emit_insn (gen_rtx_SET (mask, gen_rtx_ASHIFT (SImode, mask, off)));
12340 emit_insn (gen_rtx_SET (val,
12341 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12342 val)));
12344 oldval = gen_lowpart (SImode, oldval);
12345 emit_insn (gen_rtx_SET (oldv, gen_rtx_ASHIFT (SImode, oldval, off)));
12347 newval = gen_lowpart_common (SImode, newval);
12348 emit_insn (gen_rtx_SET (newv, gen_rtx_ASHIFT (SImode, newval, off)));
12350 emit_insn (gen_rtx_SET (oldv, gen_rtx_AND (SImode, oldv, mask)));
12352 emit_insn (gen_rtx_SET (newv, gen_rtx_AND (SImode, newv, mask)));
12354 rtx_code_label *end_label = gen_label_rtx ();
12355 rtx_code_label *loop_label = gen_label_rtx ();
12356 emit_label (loop_label);
12358 emit_insn (gen_rtx_SET (oldvalue, gen_rtx_IOR (SImode, oldv, val)));
12360 emit_insn (gen_rtx_SET (newvalue, gen_rtx_IOR (SImode, newv, val)));
12362 emit_move_insn (bool_result, const1_rtx);
12364 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
12366 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
12368 emit_insn (gen_rtx_SET (resv,
12369 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
12370 res)));
12372 emit_move_insn (bool_result, const0_rtx);
12374 cc = gen_compare_reg_1 (NE, resv, val);
12375 emit_insn (gen_rtx_SET (val, resv));
12377 /* Use cbranchcc4 to separate the compare and branch! */
12378 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
12379 cc, const0_rtx, loop_label));
12381 emit_label (end_label);
12383 emit_insn (gen_rtx_SET (res, gen_rtx_AND (SImode, res, mask)));
12385 emit_insn (gen_rtx_SET (res, gen_rtx_LSHIFTRT (SImode, res, off)));
12387 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
12390 /* Expand code to perform a compare-and-swap. */
12392 void
12393 sparc_expand_compare_and_swap (rtx operands[])
12395 rtx bval, retval, mem, oldval, newval;
12396 machine_mode mode;
12397 enum memmodel model;
12399 bval = operands[0];
12400 retval = operands[1];
12401 mem = operands[2];
12402 oldval = operands[3];
12403 newval = operands[4];
12404 model = (enum memmodel) INTVAL (operands[6]);
12405 mode = GET_MODE (mem);
12407 sparc_emit_membar_for_model (model, 3, 1);
12409 if (reg_overlap_mentioned_p (retval, oldval))
12410 oldval = copy_to_reg (oldval);
12412 if (mode == QImode || mode == HImode)
12413 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
12414 else
12416 rtx (*gen) (rtx, rtx, rtx, rtx);
12417 rtx x;
12419 if (mode == SImode)
12420 gen = gen_atomic_compare_and_swapsi_1;
12421 else
12422 gen = gen_atomic_compare_and_swapdi_1;
12423 emit_insn (gen (retval, mem, oldval, newval));
12425 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
12426 if (x != bval)
12427 convert_move (bval, x, 1);
12430 sparc_emit_membar_for_model (model, 3, 2);
12433 void
12434 sparc_expand_vec_perm_bmask (machine_mode vmode, rtx sel)
12436 rtx t_1, t_2, t_3;
12438 sel = gen_lowpart (DImode, sel);
12439 switch (vmode)
12441 case V2SImode:
12442 /* inp = xxxxxxxAxxxxxxxB */
12443 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12444 NULL_RTX, 1, OPTAB_DIRECT);
12445 /* t_1 = ....xxxxxxxAxxx. */
12446 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12447 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
12448 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12449 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
12450 /* sel = .......B */
12451 /* t_1 = ...A.... */
12452 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12453 /* sel = ...A...B */
12454 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
12455 /* sel = AAAABBBB * 4 */
12456 t_1 = force_reg (SImode, GEN_INT (0x01230123));
12457 /* sel = { A*4, A*4+1, A*4+2, ... } */
12458 break;
12460 case V4HImode:
12461 /* inp = xxxAxxxBxxxCxxxD */
12462 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12463 NULL_RTX, 1, OPTAB_DIRECT);
12464 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12465 NULL_RTX, 1, OPTAB_DIRECT);
12466 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
12467 NULL_RTX, 1, OPTAB_DIRECT);
12468 /* t_1 = ..xxxAxxxBxxxCxx */
12469 /* t_2 = ....xxxAxxxBxxxC */
12470 /* t_3 = ......xxxAxxxBxx */
12471 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
12472 GEN_INT (0x07),
12473 NULL_RTX, 1, OPTAB_DIRECT);
12474 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
12475 GEN_INT (0x0700),
12476 NULL_RTX, 1, OPTAB_DIRECT);
12477 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
12478 GEN_INT (0x070000),
12479 NULL_RTX, 1, OPTAB_DIRECT);
12480 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
12481 GEN_INT (0x07000000),
12482 NULL_RTX, 1, OPTAB_DIRECT);
12483 /* sel = .......D */
12484 /* t_1 = .....C.. */
12485 /* t_2 = ...B.... */
12486 /* t_3 = .A...... */
12487 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
12488 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
12489 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
12490 /* sel = .A.B.C.D */
12491 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
12492 /* sel = AABBCCDD * 2 */
12493 t_1 = force_reg (SImode, GEN_INT (0x01010101));
12494 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
12495 break;
12497 case V8QImode:
12498 /* input = xAxBxCxDxExFxGxH */
12499 sel = expand_simple_binop (DImode, AND, sel,
12500 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
12501 | 0x0f0f0f0f),
12502 NULL_RTX, 1, OPTAB_DIRECT);
12503 /* sel = .A.B.C.D.E.F.G.H */
12504 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
12505 NULL_RTX, 1, OPTAB_DIRECT);
12506 /* t_1 = ..A.B.C.D.E.F.G. */
12507 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12508 NULL_RTX, 1, OPTAB_DIRECT);
12509 /* sel = .AABBCCDDEEFFGGH */
12510 sel = expand_simple_binop (DImode, AND, sel,
12511 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
12512 | 0xff00ff),
12513 NULL_RTX, 1, OPTAB_DIRECT);
12514 /* sel = ..AB..CD..EF..GH */
12515 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
12516 NULL_RTX, 1, OPTAB_DIRECT);
12517 /* t_1 = ....AB..CD..EF.. */
12518 sel = expand_simple_binop (DImode, IOR, sel, t_1,
12519 NULL_RTX, 1, OPTAB_DIRECT);
12520 /* sel = ..ABABCDCDEFEFGH */
12521 sel = expand_simple_binop (DImode, AND, sel,
12522 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
12523 NULL_RTX, 1, OPTAB_DIRECT);
12524 /* sel = ....ABCD....EFGH */
12525 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
12526 NULL_RTX, 1, OPTAB_DIRECT);
12527 /* t_1 = ........ABCD.... */
12528 sel = gen_lowpart (SImode, sel);
12529 t_1 = gen_lowpart (SImode, t_1);
12530 break;
12532 default:
12533 gcc_unreachable ();
12536 /* Always perform the final addition/merge within the bmask insn. */
12537 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
12540 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
12542 static bool
12543 sparc_frame_pointer_required (void)
12545 /* If the stack pointer is dynamically modified in the function, it cannot
12546 serve as the frame pointer. */
12547 if (cfun->calls_alloca)
12548 return true;
12550 /* If the function receives nonlocal gotos, it needs to save the frame
12551 pointer in the nonlocal_goto_save_area object. */
12552 if (cfun->has_nonlocal_label)
12553 return true;
12555 /* In flat mode, that's it. */
12556 if (TARGET_FLAT)
12557 return false;
12559 /* Otherwise, the frame pointer is required if the function isn't leaf, but
12560 we cannot use sparc_leaf_function_p since it hasn't been computed yet. */
12561 return !(optimize > 0 && crtl->is_leaf && only_leaf_regs_used ());
12564 /* The way this is structured, we can't eliminate SFP in favor of SP
12565 if the frame pointer is required: we want to use the SFP->HFP elimination
12566 in that case. But the test in update_eliminables doesn't know about the
12567 assumption, made below, that we only do the former elimination.
12569 static bool
12570 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
12572 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
12575 /* Return the hard frame pointer directly to bypass the stack bias. */
12577 static rtx
12578 sparc_builtin_setjmp_frame_value (void)
12580 return hard_frame_pointer_rtx;
12583 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
12584 they won't be allocated. */
12586 static void
12587 sparc_conditional_register_usage (void)
12589 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
12591 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12592 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12594 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
12595 then honor it. */
12596 if (TARGET_ARCH32 && fixed_regs[5])
12597 fixed_regs[5] = 1;
12598 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
12599 fixed_regs[5] = 0;
12600 if (! TARGET_V9)
12602 int regno;
12603 for (regno = SPARC_FIRST_V9_FP_REG;
12604 regno <= SPARC_LAST_V9_FP_REG;
12605 regno++)
12606 fixed_regs[regno] = 1;
12607 /* %fcc0 is used by v8 and v9. */
12608 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
12609 regno <= SPARC_LAST_V9_FCC_REG;
12610 regno++)
12611 fixed_regs[regno] = 1;
12613 if (! TARGET_FPU)
12615 int regno;
12616 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
12617 fixed_regs[regno] = 1;
12619 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
12620 then honor it. Likewise with g3 and g4. */
12621 if (fixed_regs[2] == 2)
12622 fixed_regs[2] = ! TARGET_APP_REGS;
12623 if (fixed_regs[3] == 2)
12624 fixed_regs[3] = ! TARGET_APP_REGS;
12625 if (TARGET_ARCH32 && fixed_regs[4] == 2)
12626 fixed_regs[4] = ! TARGET_APP_REGS;
12627 else if (TARGET_CM_EMBMEDANY)
12628 fixed_regs[4] = 1;
12629 else if (fixed_regs[4] == 2)
12630 fixed_regs[4] = 0;
12631 if (TARGET_FLAT)
12633 int regno;
12634 /* Disable leaf functions. */
12635 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
12636 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12637 leaf_reg_remap [regno] = regno;
12639 if (TARGET_VIS)
12640 global_regs[SPARC_GSR_REG] = 1;
12643 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
12645 - We can't load constants into FP registers.
12646 - We can't load FP constants into integer registers when soft-float,
12647 because there is no soft-float pattern with a r/F constraint.
12648 - We can't load FP constants into integer registers for TFmode unless
12649 it is 0.0L, because there is no movtf pattern with a r/F constraint.
12650 - Try to reload integer constants (symbolic or otherwise) back into
12651 registers directly, rather than having them dumped to memory. */
12653 static reg_class_t
12654 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
12656 machine_mode mode = GET_MODE (x);
12657 if (CONSTANT_P (x))
12659 if (FP_REG_CLASS_P (rclass)
12660 || rclass == GENERAL_OR_FP_REGS
12661 || rclass == GENERAL_OR_EXTRA_FP_REGS
12662 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
12663 || (mode == TFmode && ! const_zero_operand (x, mode)))
12664 return NO_REGS;
12666 if (GET_MODE_CLASS (mode) == MODE_INT)
12667 return GENERAL_REGS;
12669 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12671 if (! FP_REG_CLASS_P (rclass)
12672 || !(const_zero_operand (x, mode)
12673 || const_all_ones_operand (x, mode)))
12674 return NO_REGS;
12678 if (TARGET_VIS3
12679 && ! TARGET_ARCH64
12680 && (rclass == EXTRA_FP_REGS
12681 || rclass == GENERAL_OR_EXTRA_FP_REGS))
12683 int regno = true_regnum (x);
12685 if (SPARC_INT_REG_P (regno))
12686 return (rclass == EXTRA_FP_REGS
12687 ? FP_REGS : GENERAL_OR_FP_REGS);
12690 return rclass;
12693 /* Return true if we use LRA instead of the reload pass. */
12695 static bool
12696 sparc_lra_p (void)
12698 return TARGET_LRA;
12701 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
12702 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
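/* The strategy below: reassemble each 64-bit input from its 32-bit halves
   with sllx/or into one 64-bit register, issue a single 64-bit multiply,
   then split the product back into a register pair with srlx and mov.  */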
12704 const char *
12705 output_v8plus_mult (rtx_insn *insn, rtx *operands, const char *opcode)
12707 char mulstr[32];
12709 gcc_assert (! TARGET_ARCH64);
12711 if (sparc_check_64 (operands[1], insn) <= 0)
12712 output_asm_insn ("srl\t%L1, 0, %L1", operands);
12713 if (which_alternative == 1)
12714 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
12715 if (GET_CODE (operands[2]) == CONST_INT)
12717 if (which_alternative == 1)
12719 output_asm_insn ("or\t%L1, %H1, %H1", operands);
12720 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
12721 output_asm_insn (mulstr, operands);
12722 return "srlx\t%L0, 32, %H0";
12724 else
12726 output_asm_insn ("sllx\t%H1, 32, %3", operands);
12727 output_asm_insn ("or\t%L1, %3, %3", operands);
12728 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
12729 output_asm_insn (mulstr, operands);
12730 output_asm_insn ("srlx\t%3, 32, %H0", operands);
12731 return "mov\t%3, %L0";
12734 else if (rtx_equal_p (operands[1], operands[2]))
12736 if (which_alternative == 1)
12738 output_asm_insn ("or\t%L1, %H1, %H1", operands);
12739 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
12740 output_asm_insn (mulstr, operands);
12741 return "srlx\t%L0, 32, %H0";
12743 else
12745 output_asm_insn ("sllx\t%H1, 32, %3", operands);
12746 output_asm_insn ("or\t%L1, %3, %3", operands);
12747 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
12748 output_asm_insn (mulstr, operands);
12749 output_asm_insn ("srlx\t%3, 32, %H0", operands);
12750 return "mov\t%3, %L0";
12753 if (sparc_check_64 (operands[2], insn) <= 0)
12754 output_asm_insn ("srl\t%L2, 0, %L2", operands);
12755 if (which_alternative == 1)
12757 output_asm_insn ("or\t%L1, %H1, %H1", operands);
12758 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
12759 output_asm_insn ("or\t%L2, %L1, %L1", operands);
12760 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
12761 output_asm_insn (mulstr, operands);
12762 return "srlx\t%L0, 32, %H0";
12764 else
12766 output_asm_insn ("sllx\t%H1, 32, %3", operands);
12767 output_asm_insn ("sllx\t%H2, 32, %4", operands);
12768 output_asm_insn ("or\t%L1, %3, %3", operands);
12769 output_asm_insn ("or\t%L2, %4, %4", operands);
12770 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
12771 output_asm_insn (mulstr, operands);
12772 output_asm_insn ("srlx\t%3, 32, %H0", operands);
12773 return "mov\t%3, %L0";
12777 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
12778 all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn. MODE
12779 and INNER_MODE are the modes describing TARGET. */
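/* Each 4-bit field of the bmask picks one byte of the two concatenated
   BSHUFFLE sources (here T1 twice).  The constants below replicate the
   low bytes of T1, where ELT was deposited: 0x45674567 repeats bytes 4-7,
   0x67676767 bytes 6-7 and 0x77777777 byte 7.  */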
12781 static void
12782 vector_init_bshuffle (rtx target, rtx elt, machine_mode mode,
12783 machine_mode inner_mode)
12785 rtx t1, final_insn, sel;
12786 int bmask;
12788 t1 = gen_reg_rtx (mode);
12790 elt = convert_modes (SImode, inner_mode, elt, true);
12791 emit_move_insn (gen_lowpart(SImode, t1), elt);
12793 switch (mode)
12795 case V2SImode:
12796 final_insn = gen_bshufflev2si_vis (target, t1, t1);
12797 bmask = 0x45674567;
12798 break;
12799 case V4HImode:
12800 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
12801 bmask = 0x67676767;
12802 break;
12803 case V8QImode:
12804 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
12805 bmask = 0x77777777;
12806 break;
12807 default:
12808 gcc_unreachable ();
12811 sel = force_reg (SImode, GEN_INT (bmask));
12812 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
12813 emit_insn (final_insn);
12816 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
12817 all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn. */
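/* FPMERGE interleaves the bytes of its two 4-byte inputs, so merging a
   value with itself doubles the number of adjacent copies of ELT: three
   merges turn one copy into eight.  */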
12819 static void
12820 vector_init_fpmerge (rtx target, rtx elt)
12822 rtx t1, t2, t2_low, t3, t3_low;
12824 t1 = gen_reg_rtx (V4QImode);
12825 elt = convert_modes (SImode, QImode, elt, true);
12826 emit_move_insn (gen_lowpart (SImode, t1), elt);
12828 t2 = gen_reg_rtx (V8QImode);
12829 t2_low = gen_lowpart (V4QImode, t2);
12830 emit_insn (gen_fpmerge_vis (t2, t1, t1));
12832 t3 = gen_reg_rtx (V8QImode);
12833 t3_low = gen_lowpart (V4QImode, t3);
12834 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
12836 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
12839 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
12840 all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn. */
12842 static void
12843 vector_init_faligndata (rtx target, rtx elt)
12845 rtx t1 = gen_reg_rtx (V4HImode);
12846 int i;
12848 elt = convert_modes (SImode, HImode, elt, true);
12849 emit_move_insn (gen_lowpart (SImode, t1), elt);
12851 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
12852 force_reg (SImode, GEN_INT (6)),
12853 const0_rtx));
12855 for (i = 0; i < 4; i++)
12856 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
12859 /* Emit code to initialize TARGET to values for individual fields VALS. */
12861 void
12862 sparc_expand_vector_init (rtx target, rtx vals)
12864 const machine_mode mode = GET_MODE (target);
12865 const machine_mode inner_mode = GET_MODE_INNER (mode);
12866 const int n_elts = GET_MODE_NUNITS (mode);
12867 int i, n_var = 0;
12868 bool all_same = true;
12869 rtx mem;
12871 for (i = 0; i < n_elts; i++)
12873 rtx x = XVECEXP (vals, 0, i);
12874 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
12875 n_var++;
12877 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
12878 all_same = false;
12881 if (n_var == 0)
12883 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
12884 return;
12887 if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
12889 if (GET_MODE_SIZE (inner_mode) == 4)
12891 emit_move_insn (gen_lowpart (SImode, target),
12892 gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
12893 return;
12895 else if (GET_MODE_SIZE (inner_mode) == 8)
12897 emit_move_insn (gen_lowpart (DImode, target),
12898 gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
12899 return;
12902 else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
12903 && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
12905 emit_move_insn (gen_highpart (word_mode, target),
12906 gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
12907 emit_move_insn (gen_lowpart (word_mode, target),
12908 gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
12909 return;
12912 if (all_same && GET_MODE_SIZE (mode) == 8)
12914 if (TARGET_VIS2)
12916 vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
12917 return;
12919 if (mode == V8QImode)
12921 vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
12922 return;
12924 if (mode == V4HImode)
12926 vector_init_faligndata (target, XVECEXP (vals, 0, 0));
12927 return;
12931 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
12932 for (i = 0; i < n_elts; i++)
12933 emit_move_insn (adjust_address_nv (mem, inner_mode,
12934 i * GET_MODE_SIZE (inner_mode)),
12935 XVECEXP (vals, 0, i));
12936 emit_move_insn (target, mem);
12939 /* Implement TARGET_SECONDARY_RELOAD. */
12941 static reg_class_t
12942 sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
12943 machine_mode mode, secondary_reload_info *sri)
12945 enum reg_class rclass = (enum reg_class) rclass_i;
12947 sri->icode = CODE_FOR_nothing;
12948 sri->extra_cost = 0;
12950 /* We need a temporary when loading/storing a HImode/QImode value
12951 between memory and the FPU registers. This can happen when combine puts
12952 a paradoxical subreg in a float/fix conversion insn. */
12953 if (FP_REG_CLASS_P (rclass)
12954 && (mode == HImode || mode == QImode)
12955 && (GET_CODE (x) == MEM
12956 || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
12957 && true_regnum (x) == -1)))
12958 return GENERAL_REGS;
12960 /* On 32-bit we need a temporary when loading/storing a DFmode value
12961 between unaligned memory and the upper FPU registers. */
12962 if (TARGET_ARCH32
12963 && rclass == EXTRA_FP_REGS
12964 && mode == DFmode
12965 && GET_CODE (x) == MEM
12966 && ! mem_min_alignment (x, 8))
12967 return FP_REGS;
12969 if (((TARGET_CM_MEDANY
12970 && symbolic_operand (x, mode))
12971 || (TARGET_CM_EMBMEDANY
12972 && text_segment_operand (x, mode)))
12973 && ! flag_pic)
12975 if (in_p)
12976 sri->icode = direct_optab_handler (reload_in_optab, mode);
12977 else
12978 sri->icode = direct_optab_handler (reload_out_optab, mode);
12979 return NO_REGS;
12982 if (TARGET_VIS3 && TARGET_ARCH32)
12984 int regno = true_regnum (x);
12986 /* When using VIS3 fp<-->int register moves, on 32-bit we have
12987 to move 8-byte values in 4-byte pieces. This only works via
12988 FP_REGS, and not via EXTRA_FP_REGS. Therefore if we try to
12989 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
12990 an FP_REGS intermediate move. */
12991 if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
12992 || ((general_or_i64_p (rclass)
12993 || rclass == GENERAL_OR_FP_REGS)
12994 && SPARC_FP_REG_P (regno)))
12996 sri->extra_cost = 2;
12997 return FP_REGS;
13001 return NO_REGS;
13004 /* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
13005 OPERANDS[0] in MODE. OPERANDS[1] is the operator of the condition. */
13007 bool
13008 sparc_expand_conditional_move (machine_mode mode, rtx *operands)
13010 enum rtx_code rc = GET_CODE (operands[1]);
13011 machine_mode cmp_mode;
13012 rtx cc_reg, dst, cmp;
13014 cmp = operands[1];
13015 if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
13016 return false;
13018 if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
13019 cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);
13021 cmp_mode = GET_MODE (XEXP (cmp, 0));
13022 rc = GET_CODE (cmp);
13024 dst = operands[0];
13025 if (! rtx_equal_p (operands[2], dst)
13026 && ! rtx_equal_p (operands[3], dst))
13028 if (reg_overlap_mentioned_p (dst, cmp))
13029 dst = gen_reg_rtx (mode);
13031 emit_move_insn (dst, operands[3]);
13033 else if (operands[2] == dst)
13035 operands[2] = operands[3];
13037 if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
13038 rc = reverse_condition_maybe_unordered (rc);
13039 else
13040 rc = reverse_condition (rc);
13043 if (XEXP (cmp, 1) == const0_rtx
13044 && GET_CODE (XEXP (cmp, 0)) == REG
13045 && cmp_mode == DImode
13046 && v9_regcmp_p (rc))
13047 cc_reg = XEXP (cmp, 0);
13048 else
13049 cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));
13051 cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);
13053 emit_insn (gen_rtx_SET (dst,
13054 gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));
13056 if (dst != operands[0])
13057 emit_move_insn (operands[0], dst);
13059 return true;
13062 /* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
13063 into OPERANDS[0] in MODE, depending on the outcome of the comparison of
13064 OPERANDS[4] and OPERANDS[5]. OPERANDS[3] is the operator of the condition.
13065 FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
13066 code to be used for the condition mask. */
13068 void
13069 sparc_expand_vcond (machine_mode mode, rtx *operands, int ccode, int fcode)
13071 rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
13072 enum rtx_code code = GET_CODE (operands[3]);
13074 mask = gen_reg_rtx (Pmode);
13075 cop0 = operands[4];
13076 cop1 = operands[5];
13077 if (code == LT || code == GE)
13079 rtx t;
13081 code = swap_condition (code);
13082 t = cop0; cop0 = cop1; cop1 = t;
13085 gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);
13087 fcmp = gen_rtx_UNSPEC (Pmode,
13088 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
13089 fcode);
13091 cmask = gen_rtx_UNSPEC (DImode,
13092 gen_rtvec (2, mask, gsr),
13093 ccode);
13095 bshuf = gen_rtx_UNSPEC (mode,
13096 gen_rtvec (3, operands[1], operands[2], gsr),
13097 UNSPEC_BSHUFFLE);
13099 emit_insn (gen_rtx_SET (mask, fcmp));
13100 emit_insn (gen_rtx_SET (gsr, cmask));
13102 emit_insn (gen_rtx_SET (operands[0], bshuf));
13105 /* On SPARC, this should return 4 for any mode that naturally
13106 allocates into the float registers. */
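/* Hence a DFmode value has a natural register size of 4 even on 64-bit
   SPARC: in the lower half of the register file it occupies a pair of
   4-byte float registers that remain separately addressable.  */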
13108 unsigned int
13109 sparc_regmode_natural_size (machine_mode mode)
13111 int size = UNITS_PER_WORD;
13113 if (TARGET_ARCH64)
13115 enum mode_class mclass = GET_MODE_CLASS (mode);
13117 if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
13118 size = 4;
13121 return size;
13124 /* Return TRUE if it is a good idea to tie two pseudo registers
13125 when one has mode MODE1 and one has mode MODE2.
13126 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
13127 for any hard reg, then this must be FALSE for correct output.
13129 For V9 we have to deal with the fact that only the lower 32 floating
13130 point registers are 32-bit addressable. */
13132 bool
13133 sparc_modes_tieable_p (machine_mode mode1, machine_mode mode2)
13135 enum mode_class mclass1, mclass2;
13136 unsigned short size1, size2;
13138 if (mode1 == mode2)
13139 return true;
13141 mclass1 = GET_MODE_CLASS (mode1);
13142 mclass2 = GET_MODE_CLASS (mode2);
13143 if (mclass1 != mclass2)
13144 return false;
13146 if (! TARGET_V9)
13147 return true;
13149 /* Classes are the same and we are V9 so we have to deal with upper
13150 vs. lower floating point registers. If one of the modes is a
13151 4-byte mode, and the other is not, we have to mark them as not
13152 tieable because only the lower 32 floating point registers are
13153 addressable 32 bits at a time.
13155 We can't just test explicitly for SFmode, otherwise we won't
13156 cover the vector mode cases properly. */
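/* For example, SFmode and DFmode must not be tied on V9: a DFmode value
   can land in the upper register file, where no 4-byte access exists for
   an SFmode view of it.  */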
13158 if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
13159 return true;
13161 size1 = GET_MODE_SIZE (mode1);
13162 size2 = GET_MODE_SIZE (mode2);
13163 if ((size1 > 4 && size2 == 4)
13164 || (size2 > 4 && size1 == 4))
13165 return false;
13167 return true;
13170 /* Implement TARGET_CSTORE_MODE. */
13172 static machine_mode
13173 sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
13175 return (TARGET_ARCH64 ? DImode : SImode);
13178 /* Return the compound expression made of T1 and T2. */
13180 static inline tree
13181 compound_expr (tree t1, tree t2)
13183 return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
13186 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
13188 static void
13189 sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
13191 if (!TARGET_FPU)
13192 return;
13194 const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
13195 const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;
13197 /* We generate the equivalent of feholdexcept (&fenv_var):
13199 unsigned int fenv_var;
13200 __builtin_store_fsr (&fenv_var);
13202 unsigned int tmp1_var;
13203 tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);
13205 __builtin_load_fsr (&tmp1_var); */
13207 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
13208 TREE_ADDRESSABLE (fenv_var) = 1;
13209 tree fenv_addr = build_fold_addr_expr (fenv_var);
13210 tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
13211 tree hold_stfsr
13212 = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
13213 build_call_expr (stfsr, 1, fenv_addr), NULL_TREE, NULL_TREE);
13215 tree tmp1_var = create_tmp_var_raw (unsigned_type_node);
13216 TREE_ADDRESSABLE (tmp1_var) = 1;
13217 tree masked_fenv_var
13218 = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
13219 build_int_cst (unsigned_type_node,
13220 ~(accrued_exception_mask | trap_enable_mask)));
13221 tree hold_mask
13222 = build4 (TARGET_EXPR, unsigned_type_node, tmp1_var, masked_fenv_var,
13223 NULL_TREE, NULL_TREE);
13225 tree tmp1_addr = build_fold_addr_expr (tmp1_var);
13226 tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
13227 tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);
13229 *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);
13231 /* We reload the value of tmp1_var to clear the exceptions:
13233 __builtin_load_fsr (&tmp1_var); */
13235 *clear = build_call_expr (ldfsr, 1, tmp1_addr);
13237 /* We generate the equivalent of feupdateenv (&fenv_var):
13239 unsigned int tmp2_var;
13240 __builtin_store_fsr (&tmp2_var);
13242 __builtin_load_fsr (&fenv_var);
13244 if (SPARC_LOW_FE_EXCEPT_VALUES)
13245 tmp2_var >>= 5;
13246 __atomic_feraiseexcept ((int) tmp2_var); */
13248 tree tmp2_var = create_tmp_var_raw (unsigned_type_node);
13249 TREE_ADDRESSABLE (tmp2_var) = 1;
13250 tree tmp2_addr = build_fold_addr_expr (tmp2_var);
13251 tree update_stfsr
13252 = build4 (TARGET_EXPR, unsigned_type_node, tmp2_var,
13253 build_call_expr (stfsr, 1, tmp2_addr), NULL_TREE, NULL_TREE);
13255 tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);
13257 tree atomic_feraiseexcept
13258 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
13259 tree update_call
13260 = build_call_expr (atomic_feraiseexcept, 1,
13261 fold_convert (integer_type_node, tmp2_var));
13263 if (SPARC_LOW_FE_EXCEPT_VALUES)
13265 tree shifted_tmp2_var
13266 = build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
13267 build_int_cst (unsigned_type_node, 5));
13268 tree update_shift
13269 = build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
13270 update_call = compound_expr (update_shift, update_call);
13273 *update
13274 = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
13277 #include "gt-sparc.h"