/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "pointer-set.h"
#include "hash-table.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "langhooks.h"
#include "tree-pass.h"
struct processor_costs {
  const int int_load;		/* Integer load */
  const int int_sload;		/* Integer signed load */
  const int int_zload;		/* Integer zeroed load */
  const int float_load;		/* Float load */
  const int float_move;		/* fmov, fneg, fabs */
  const int float_plusminus;	/* fadd, fsub */
  const int float_cmp;		/* fcmp */
  const int float_cmove;	/* fmov, fmovr */
  const int float_mul;		/* fmul */
  const int float_div_sf;	/* fdivs */
  const int float_div_df;	/* fdivd */
  const int float_sqrt_sf;	/* fsqrts */
  const int float_sqrt_df;	/* fsqrtd */
  const int int_mul;		/* umul/smul */
  const int int_mulX;		/* mulX */

  /* Integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     and not variable.  */
  const int int_mul_bit_factor;

  const int int_div;		/* udiv/sdiv */
  const int int_divX;		/* divX */
  const int int_cmove;		/* movcc, movr */

  /* Penalty for shifts, due to scheduling rules etc.  */
  const int shift_penalty;
};
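
/* Purely illustrative sketch, not part of the original source: how a cost
   routine might apply INT_MUL_BIT_FACTOR from the structure above to a
   constant multiplier RS1.  The function name is hypothetical; compare the
   MULT handling in sparc_rtx_costs later in this file.  */
#if 0
static int
example_int_mul_cost (const struct processor_costs *costs,
		      HOST_WIDE_INT rs1, bool use_mulx)
{
  int cost = use_mulx ? costs->int_mulX : costs->int_mul;
  int highest_bit;

  /* A zero factor means the multiply latency is fixed.  */
  if (costs->int_mul_bit_factor == 0)
    return cost;

  /* highest_clear_bit for negative values, highest_set_bit otherwise.  */
  if (rs1 < 0)
    highest_bit = floor_log2 (~(unsigned HOST_WIDE_INT) rs1);
  else
    highest_bit = floor_log2 (rs1);
  if (highest_bit < 3)
    highest_bit = 3;

  return cost + (highest_bit - 3) / costs->int_mul_bit_factor;
}
#endif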
static const
struct processor_costs cypress_costs = {
151 COSTS_N_INSNS (2), /* int load */
152 COSTS_N_INSNS (2), /* int signed load */
153 COSTS_N_INSNS (2), /* int zeroed load */
154 COSTS_N_INSNS (2), /* float load */
155 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
156 COSTS_N_INSNS (5), /* fadd, fsub */
157 COSTS_N_INSNS (1), /* fcmp */
158 COSTS_N_INSNS (1), /* fmov, fmovr */
159 COSTS_N_INSNS (7), /* fmul */
160 COSTS_N_INSNS (37), /* fdivs */
161 COSTS_N_INSNS (37), /* fdivd */
162 COSTS_N_INSNS (63), /* fsqrts */
163 COSTS_N_INSNS (63), /* fsqrtd */
164 COSTS_N_INSNS (1), /* imul */
165 COSTS_N_INSNS (1), /* imulX */
166 0, /* imul bit factor */
167 COSTS_N_INSNS (1), /* idiv */
168 COSTS_N_INSNS (1), /* idivX */
169 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs supersparc_costs = {
175 COSTS_N_INSNS (1), /* int load */
176 COSTS_N_INSNS (1), /* int signed load */
177 COSTS_N_INSNS (1), /* int zeroed load */
178 COSTS_N_INSNS (0), /* float load */
179 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
180 COSTS_N_INSNS (3), /* fadd, fsub */
181 COSTS_N_INSNS (3), /* fcmp */
182 COSTS_N_INSNS (1), /* fmov, fmovr */
183 COSTS_N_INSNS (3), /* fmul */
184 COSTS_N_INSNS (6), /* fdivs */
185 COSTS_N_INSNS (9), /* fdivd */
186 COSTS_N_INSNS (12), /* fsqrts */
187 COSTS_N_INSNS (12), /* fsqrtd */
188 COSTS_N_INSNS (4), /* imul */
189 COSTS_N_INSNS (4), /* imulX */
190 0, /* imul bit factor */
191 COSTS_N_INSNS (4), /* idiv */
192 COSTS_N_INSNS (4), /* idivX */
193 COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
199 COSTS_N_INSNS (1), /* int load */
200 COSTS_N_INSNS (1), /* int signed load */
201 COSTS_N_INSNS (1), /* int zeroed load */
202 COSTS_N_INSNS (1), /* float load */
203 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
204 COSTS_N_INSNS (1), /* fadd, fsub */
205 COSTS_N_INSNS (1), /* fcmp */
206 COSTS_N_INSNS (1), /* fmov, fmovr */
207 COSTS_N_INSNS (1), /* fmul */
208 COSTS_N_INSNS (8), /* fdivs */
209 COSTS_N_INSNS (12), /* fdivd */
210 COSTS_N_INSNS (17), /* fsqrts */
211 COSTS_N_INSNS (17), /* fsqrtd */
212 COSTS_N_INSNS (17), /* imul */
213 COSTS_N_INSNS (17), /* imulX */
214 0, /* imul bit factor */
215 COSTS_N_INSNS (17), /* idiv */
216 COSTS_N_INSNS (17), /* idivX */
217 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs leon_costs = {
223 COSTS_N_INSNS (1), /* int load */
224 COSTS_N_INSNS (1), /* int signed load */
225 COSTS_N_INSNS (1), /* int zeroed load */
226 COSTS_N_INSNS (1), /* float load */
227 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
228 COSTS_N_INSNS (1), /* fadd, fsub */
229 COSTS_N_INSNS (1), /* fcmp */
230 COSTS_N_INSNS (1), /* fmov, fmovr */
231 COSTS_N_INSNS (1), /* fmul */
232 COSTS_N_INSNS (15), /* fdivs */
233 COSTS_N_INSNS (15), /* fdivd */
234 COSTS_N_INSNS (23), /* fsqrts */
235 COSTS_N_INSNS (23), /* fsqrtd */
236 COSTS_N_INSNS (5), /* imul */
237 COSTS_N_INSNS (5), /* imulX */
238 0, /* imul bit factor */
239 COSTS_N_INSNS (5), /* idiv */
240 COSTS_N_INSNS (5), /* idivX */
241 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs leon3_costs = {
247 COSTS_N_INSNS (1), /* int load */
248 COSTS_N_INSNS (1), /* int signed load */
249 COSTS_N_INSNS (1), /* int zeroed load */
250 COSTS_N_INSNS (1), /* float load */
251 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
252 COSTS_N_INSNS (1), /* fadd, fsub */
253 COSTS_N_INSNS (1), /* fcmp */
254 COSTS_N_INSNS (1), /* fmov, fmovr */
255 COSTS_N_INSNS (1), /* fmul */
256 COSTS_N_INSNS (14), /* fdivs */
257 COSTS_N_INSNS (15), /* fdivd */
258 COSTS_N_INSNS (22), /* fsqrts */
259 COSTS_N_INSNS (23), /* fsqrtd */
260 COSTS_N_INSNS (5), /* imul */
261 COSTS_N_INSNS (5), /* imulX */
262 0, /* imul bit factor */
263 COSTS_N_INSNS (35), /* idiv */
264 COSTS_N_INSNS (35), /* idivX */
265 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
271 COSTS_N_INSNS (3), /* int load */
272 COSTS_N_INSNS (3), /* int signed load */
273 COSTS_N_INSNS (1), /* int zeroed load */
274 COSTS_N_INSNS (1), /* float load */
275 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
276 COSTS_N_INSNS (1), /* fadd, fsub */
277 COSTS_N_INSNS (1), /* fcmp */
278 COSTS_N_INSNS (1), /* fmov, fmovr */
279 COSTS_N_INSNS (1), /* fmul */
280 COSTS_N_INSNS (1), /* fdivs */
281 COSTS_N_INSNS (1), /* fdivd */
282 COSTS_N_INSNS (1), /* fsqrts */
283 COSTS_N_INSNS (1), /* fsqrtd */
284 COSTS_N_INSNS (5), /* imul */
285 COSTS_N_INSNS (5), /* imulX */
286 0, /* imul bit factor */
287 COSTS_N_INSNS (5), /* idiv */
288 COSTS_N_INSNS (5), /* idivX */
289 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
295 COSTS_N_INSNS (2), /* int load */
296 COSTS_N_INSNS (3), /* int signed load */
297 COSTS_N_INSNS (2), /* int zeroed load */
298 COSTS_N_INSNS (2), /* float load */
299 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
300 COSTS_N_INSNS (4), /* fadd, fsub */
301 COSTS_N_INSNS (1), /* fcmp */
302 COSTS_N_INSNS (2), /* fmov, fmovr */
303 COSTS_N_INSNS (4), /* fmul */
304 COSTS_N_INSNS (13), /* fdivs */
305 COSTS_N_INSNS (23), /* fdivd */
306 COSTS_N_INSNS (13), /* fsqrts */
307 COSTS_N_INSNS (23), /* fsqrtd */
308 COSTS_N_INSNS (4), /* imul */
309 COSTS_N_INSNS (4), /* imulX */
310 2, /* imul bit factor */
311 COSTS_N_INSNS (37), /* idiv */
312 COSTS_N_INSNS (68), /* idivX */
313 COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
319 COSTS_N_INSNS (2), /* int load */
320 COSTS_N_INSNS (3), /* int signed load */
321 COSTS_N_INSNS (3), /* int zeroed load */
322 COSTS_N_INSNS (2), /* float load */
323 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
324 COSTS_N_INSNS (4), /* fadd, fsub */
325 COSTS_N_INSNS (5), /* fcmp */
326 COSTS_N_INSNS (3), /* fmov, fmovr */
327 COSTS_N_INSNS (4), /* fmul */
328 COSTS_N_INSNS (17), /* fdivs */
329 COSTS_N_INSNS (20), /* fdivd */
330 COSTS_N_INSNS (20), /* fsqrts */
331 COSTS_N_INSNS (29), /* fsqrtd */
332 COSTS_N_INSNS (6), /* imul */
333 COSTS_N_INSNS (6), /* imulX */
334 0, /* imul bit factor */
335 COSTS_N_INSNS (40), /* idiv */
336 COSTS_N_INSNS (71), /* idivX */
337 COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
343 COSTS_N_INSNS (3), /* int load */
344 COSTS_N_INSNS (3), /* int signed load */
345 COSTS_N_INSNS (3), /* int zeroed load */
346 COSTS_N_INSNS (9), /* float load */
347 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
348 COSTS_N_INSNS (8), /* fadd, fsub */
349 COSTS_N_INSNS (26), /* fcmp */
350 COSTS_N_INSNS (8), /* fmov, fmovr */
351 COSTS_N_INSNS (29), /* fmul */
352 COSTS_N_INSNS (54), /* fdivs */
353 COSTS_N_INSNS (83), /* fdivd */
354 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
355 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
356 COSTS_N_INSNS (11), /* imul */
357 COSTS_N_INSNS (11), /* imulX */
358 0, /* imul bit factor */
359 COSTS_N_INSNS (72), /* idiv */
360 COSTS_N_INSNS (72), /* idivX */
361 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara2_costs = {
367 COSTS_N_INSNS (3), /* int load */
368 COSTS_N_INSNS (3), /* int signed load */
369 COSTS_N_INSNS (3), /* int zeroed load */
370 COSTS_N_INSNS (3), /* float load */
371 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
372 COSTS_N_INSNS (6), /* fadd, fsub */
373 COSTS_N_INSNS (6), /* fcmp */
374 COSTS_N_INSNS (6), /* fmov, fmovr */
375 COSTS_N_INSNS (6), /* fmul */
376 COSTS_N_INSNS (19), /* fdivs */
377 COSTS_N_INSNS (33), /* fdivd */
378 COSTS_N_INSNS (19), /* fsqrts */
379 COSTS_N_INSNS (33), /* fsqrtd */
380 COSTS_N_INSNS (5), /* imul */
381 COSTS_N_INSNS (5), /* imulX */
382 0, /* imul bit factor */
383 COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
384 COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
385 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara3_costs = {
391 COSTS_N_INSNS (3), /* int load */
392 COSTS_N_INSNS (3), /* int signed load */
393 COSTS_N_INSNS (3), /* int zeroed load */
394 COSTS_N_INSNS (3), /* float load */
395 COSTS_N_INSNS (9), /* fmov, fneg, fabs */
396 COSTS_N_INSNS (9), /* fadd, fsub */
397 COSTS_N_INSNS (9), /* fcmp */
398 COSTS_N_INSNS (9), /* fmov, fmovr */
399 COSTS_N_INSNS (9), /* fmul */
400 COSTS_N_INSNS (23), /* fdivs */
401 COSTS_N_INSNS (37), /* fdivd */
402 COSTS_N_INSNS (23), /* fsqrts */
403 COSTS_N_INSNS (37), /* fsqrtd */
404 COSTS_N_INSNS (9), /* imul */
405 COSTS_N_INSNS (9), /* imulX */
406 0, /* imul bit factor */
407 COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
408 COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
409 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara4_costs = {
415 COSTS_N_INSNS (5), /* int load */
416 COSTS_N_INSNS (5), /* int signed load */
417 COSTS_N_INSNS (5), /* int zeroed load */
418 COSTS_N_INSNS (5), /* float load */
419 COSTS_N_INSNS (11), /* fmov, fneg, fabs */
420 COSTS_N_INSNS (11), /* fadd, fsub */
421 COSTS_N_INSNS (11), /* fcmp */
422 COSTS_N_INSNS (11), /* fmov, fmovr */
423 COSTS_N_INSNS (11), /* fmul */
424 COSTS_N_INSNS (24), /* fdivs */
425 COSTS_N_INSNS (37), /* fdivd */
426 COSTS_N_INSNS (24), /* fsqrts */
427 COSTS_N_INSNS (37), /* fsqrtd */
428 COSTS_N_INSNS (12), /* imul */
429 COSTS_N_INSNS (12), /* imulX */
430 0, /* imul bit factor */
431 COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
432 COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
433 COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way how to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap [] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
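
/* Descriptive note on the two tables above: a leaf function may use the
   global registers, %sp, the incoming registers %i0-%i5 and %i7 (which
   leaf_reg_remap rewrites to the corresponding %o registers, since the
   register window is not pushed), and the floating-point registers; the
   %o and %l registers and %fp are excluded.  A -1 entry in leaf_reg_remap
   marks a register with no remapping.  */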
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};
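
/* Shorthand accessors for the fields of the current function's
   machine_function structure.  */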
521 #define sparc_frame_size cfun->machine->frame_size
522 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
523 #define sparc_frame_base_reg cfun->machine->frame_base_reg
524 #define sparc_frame_base_offset cfun->machine->frame_base_offset
525 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
526 #define sparc_leaf_function_p cfun->machine->leaf_function_p
527 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
528 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_fpu_init_builtins (void);
static void sparc_vis_init_builtins (void);
static tree sparc_builtin_decl (unsigned, bool);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree,
						      enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   enum machine_mode,
					   secondary_reload_info *);
static enum machine_mode sparc_cstore_mode (enum insn_code icode);
static void sparc_atomic_assign_expand_fenv (tree *, tree *, tree *);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif

/* Option handling.  */

enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];
661 /* Initialize the GCC target structure. */
663 /* The default is to use .half rather than .short for aligned HI objects. */
664 #undef TARGET_ASM_ALIGNED_HI_OP
665 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
667 #undef TARGET_ASM_UNALIGNED_HI_OP
668 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
669 #undef TARGET_ASM_UNALIGNED_SI_OP
670 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
671 #undef TARGET_ASM_UNALIGNED_DI_OP
672 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
674 /* The target hook has to handle DI-mode values. */
675 #undef TARGET_ASM_INTEGER
676 #define TARGET_ASM_INTEGER sparc_assemble_integer
678 #undef TARGET_ASM_FUNCTION_PROLOGUE
679 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
680 #undef TARGET_ASM_FUNCTION_EPILOGUE
681 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
683 #undef TARGET_SCHED_ADJUST_COST
684 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
685 #undef TARGET_SCHED_ISSUE_RATE
686 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
687 #undef TARGET_SCHED_INIT
688 #define TARGET_SCHED_INIT sparc_sched_init
689 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
690 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
692 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
693 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
695 #undef TARGET_INIT_LIBFUNCS
696 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
698 #undef TARGET_LEGITIMIZE_ADDRESS
699 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
700 #undef TARGET_DELEGITIMIZE_ADDRESS
701 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
702 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
703 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
705 #undef TARGET_INIT_BUILTINS
706 #define TARGET_INIT_BUILTINS sparc_init_builtins
707 #undef TARGET_BUILTIN_DECL
708 #define TARGET_BUILTIN_DECL sparc_builtin_decl
709 #undef TARGET_EXPAND_BUILTIN
710 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
711 #undef TARGET_FOLD_BUILTIN
712 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
719 #undef TARGET_CANNOT_FORCE_CONST_MEM
720 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
722 #undef TARGET_ASM_OUTPUT_MI_THUNK
723 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
724 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
725 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
727 #undef TARGET_RTX_COSTS
728 #define TARGET_RTX_COSTS sparc_rtx_costs
729 #undef TARGET_ADDRESS_COST
730 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
731 #undef TARGET_REGISTER_MOVE_COST
732 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
734 #undef TARGET_PROMOTE_FUNCTION_MODE
735 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
737 #undef TARGET_FUNCTION_VALUE
738 #define TARGET_FUNCTION_VALUE sparc_function_value
739 #undef TARGET_LIBCALL_VALUE
740 #define TARGET_LIBCALL_VALUE sparc_libcall_value
741 #undef TARGET_FUNCTION_VALUE_REGNO_P
742 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
744 #undef TARGET_STRUCT_VALUE_RTX
745 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
746 #undef TARGET_RETURN_IN_MEMORY
747 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
748 #undef TARGET_MUST_PASS_IN_STACK
749 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
750 #undef TARGET_PASS_BY_REFERENCE
751 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
752 #undef TARGET_ARG_PARTIAL_BYTES
753 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
754 #undef TARGET_FUNCTION_ARG_ADVANCE
755 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
756 #undef TARGET_FUNCTION_ARG
757 #define TARGET_FUNCTION_ARG sparc_function_arg
758 #undef TARGET_FUNCTION_INCOMING_ARG
759 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
760 #undef TARGET_FUNCTION_ARG_BOUNDARY
761 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
763 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
764 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
765 #undef TARGET_STRICT_ARGUMENT_NAMING
766 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
768 #undef TARGET_EXPAND_BUILTIN_VA_START
769 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
770 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
771 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
773 #undef TARGET_VECTOR_MODE_SUPPORTED_P
774 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
776 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
777 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
779 #ifdef SUBTARGET_INSERT_ATTRIBUTES
780 #undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif
784 #ifdef SUBTARGET_ATTRIBUTE_TABLE
785 #undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif
789 #undef TARGET_RELAXED_ORDERING
790 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
792 #undef TARGET_OPTION_OVERRIDE
793 #define TARGET_OPTION_OVERRIDE sparc_option_override
795 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
796 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif
800 #undef TARGET_ASM_FILE_END
801 #define TARGET_ASM_FILE_END sparc_file_end
803 #undef TARGET_FRAME_POINTER_REQUIRED
804 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
806 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
807 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value
809 #undef TARGET_CAN_ELIMINATE
810 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
812 #undef TARGET_PREFERRED_RELOAD_CLASS
813 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
815 #undef TARGET_SECONDARY_RELOAD
816 #define TARGET_SECONDARY_RELOAD sparc_secondary_reload
818 #undef TARGET_CONDITIONAL_REGISTER_USAGE
819 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
821 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
822 #undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
826 #undef TARGET_LEGITIMATE_ADDRESS_P
827 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
829 #undef TARGET_LEGITIMATE_CONSTANT_P
830 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
832 #undef TARGET_TRAMPOLINE_INIT
833 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
835 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
836 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
837 #undef TARGET_PRINT_OPERAND
838 #define TARGET_PRINT_OPERAND sparc_print_operand
839 #undef TARGET_PRINT_OPERAND_ADDRESS
840 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
842 /* The value stored by LDSTUB. */
843 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
844 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff
846 #undef TARGET_CSTORE_MODE
847 #define TARGET_CSTORE_MODE sparc_cstore_mode
849 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
850 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sparc_atomic_assign_expand_fenv
struct gcc_target targetm = TARGET_INITIALIZER;
/* Return the memory reference contained in X if any, zero otherwise.  */

static rtx
mem_ref (rtx x)
{
  if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
    x = XEXP (x, 0);
  if (MEM_P (x))
    return x;
  return NULL_RTX;
}
868 /* We use a machine specific pass to enable workarounds for errata.
869 We need to have the (essentially) final form of the insn stream in order
870 to properly detect the various hazards. Therefore, this machine specific
871 pass runs as late as possible. The pass is inserted in the pass pipeline
872 at the end of sparc_option_override. */
static bool
sparc_gate_work_around_errata (void)
{
  /* The only errata we handle are those of the AT697F and UT699.  */
  return sparc_fix_at697f != 0 || sparc_fix_ut699 != 0;
}
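
/* Note: sparc_fix_at697f and sparc_fix_ut699 are presumably the flags set
   by the -mfix-at697f and -mfix-ut699 command-line options (sparc.opt).  */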
static unsigned int
sparc_do_work_around_errata (void)
{
  rtx insn, next;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();
  /* Now look for specific patterns in the insn stream.  */
  for (insn = get_insns (); insn; insn = next)
    {
      bool insert_nop = false;
      rtx set;
895 /* Look into the instruction in a delay slot. */
896 if (NONJUMP_INSN_P (insn
) && GET_CODE (PATTERN (insn
)) == SEQUENCE
)
897 insn
= XVECEXP (PATTERN (insn
), 0, 1);
899 /* Look for a single-word load into an odd-numbered FP register. */
901 && NONJUMP_INSN_P (insn
)
902 && (set
= single_set (insn
)) != NULL_RTX
903 && GET_MODE_SIZE (GET_MODE (SET_SRC (set
))) == 4
904 && MEM_P (SET_SRC (set
))
905 && REG_P (SET_DEST (set
))
906 && REGNO (SET_DEST (set
)) > 31
907 && REGNO (SET_DEST (set
)) % 2 != 0)
909 /* The wrong dependency is on the enclosing double register. */
910 const unsigned int x
= REGNO (SET_DEST (set
)) - 1;
911 unsigned int src1
, src2
, dest
;
914 next
= next_active_insn (insn
);
917 /* If the insn is a branch, then it cannot be problematic. */
918 if (!NONJUMP_INSN_P (next
) || GET_CODE (PATTERN (next
)) == SEQUENCE
)
922 code
= INSN_CODE (next
);
926 case CODE_FOR_adddf3
:
927 case CODE_FOR_subdf3
:
928 case CODE_FOR_muldf3
:
929 case CODE_FOR_divdf3
:
930 dest
= REGNO (recog_data
.operand
[0]);
931 src1
= REGNO (recog_data
.operand
[1]);
932 src2
= REGNO (recog_data
.operand
[2]);
937 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
938 if ((src1
== x
|| src2
== x
)
939 && (dest
== src1
|| dest
== src2
))
946 FPOPd %fx, %fx, %fx */
949 && (code
== CODE_FOR_adddf3
|| code
== CODE_FOR_muldf3
))
954 case CODE_FOR_sqrtdf2
:
955 dest
= REGNO (recog_data
.operand
[0]);
956 src1
= REGNO (recog_data
.operand
[1]);
960 if (src1
== x
&& dest
== src1
)
969 /* Look for a single-word load into an integer register. */
970 else if (sparc_fix_ut699
971 && NONJUMP_INSN_P (insn
)
972 && (set
= single_set (insn
)) != NULL_RTX
973 && GET_MODE_SIZE (GET_MODE (SET_SRC (set
))) <= 4
974 && mem_ref (SET_SRC (set
)) != NULL_RTX
975 && REG_P (SET_DEST (set
))
976 && REGNO (SET_DEST (set
)) < 32)
978 /* There is no problem if the second memory access has a data
979 dependency on the first single-cycle load. */
980 rtx x
= SET_DEST (set
);
982 next
= next_active_insn (insn
);
985 /* If the insn is a branch, then it cannot be problematic. */
986 if (!NONJUMP_INSN_P (next
) || GET_CODE (PATTERN (next
)) == SEQUENCE
)
989 /* Look for a second memory access to/from an integer register. */
990 if ((set
= single_set (next
)) != NULL_RTX
)
992 rtx src
= SET_SRC (set
);
993 rtx dest
= SET_DEST (set
);
996 /* LDD is affected. */
997 if ((mem
= mem_ref (src
)) != NULL_RTX
1000 && !reg_mentioned_p (x
, XEXP (mem
, 0)))
1003 /* STD is *not* affected. */
1004 else if (MEM_P (dest
)
1005 && GET_MODE_SIZE (GET_MODE (dest
)) <= 4
1006 && (src
== CONST0_RTX (GET_MODE (dest
))
1009 && REGNO (src
) != REGNO (x
)))
1010 && !reg_mentioned_p (x
, XEXP (dest
, 0)))
1015 /* Look for a single-word load/operation into an FP register. */
1016 else if (sparc_fix_ut699
1017 && NONJUMP_INSN_P (insn
)
1018 && (set
= single_set (insn
)) != NULL_RTX
1019 && GET_MODE_SIZE (GET_MODE (SET_SRC (set
))) == 4
1020 && REG_P (SET_DEST (set
))
1021 && REGNO (SET_DEST (set
)) > 31)
1023 /* Number of instructions in the problematic window. */
1024 const int n_insns
= 4;
1025 /* The problematic combination is with the sibling FP register. */
1026 const unsigned int x
= REGNO (SET_DEST (set
));
1027 const unsigned int y
= x
^ 1;
1031 next
= next_active_insn (insn
);
1034 /* If the insn is a branch, then it cannot be problematic. */
1035 if (!NONJUMP_INSN_P (next
) || GET_CODE (PATTERN (next
)) == SEQUENCE
)
1038 /* Look for a second load/operation into the sibling FP register. */
1039 if (!((set
= single_set (next
)) != NULL_RTX
1040 && GET_MODE_SIZE (GET_MODE (SET_SRC (set
))) == 4
1041 && REG_P (SET_DEST (set
))
1042 && REGNO (SET_DEST (set
)) == y
))
1045 /* Look for a (possible) store from the FP register in the next N
1046 instructions, but bail out if it is again modified or if there
1047 is a store from the sibling FP register before this store. */
1048 for (after
= next
, i
= 0; i
< n_insns
; i
++)
1052 after
= next_active_insn (after
);
1056 /* This is a branch with an empty delay slot. */
1057 if (!NONJUMP_INSN_P (after
))
1064 /* This is a branch with a filled delay slot. */
1065 else if (GET_CODE (PATTERN (after
)) == SEQUENCE
)
1070 after
= XVECEXP (PATTERN (after
), 0, 1);
1072 /* This is a regular instruction. */
1076 if (after
&& (set
= single_set (after
)) != NULL_RTX
)
1078 const rtx src
= SET_SRC (set
);
1079 const rtx dest
= SET_DEST (set
);
1080 const unsigned int size
= GET_MODE_SIZE (GET_MODE (dest
));
1082 /* If the FP register is again modified before the store,
1083 then the store isn't affected. */
1085 && (REGNO (dest
) == x
1086 || (REGNO (dest
) == y
&& size
== 8)))
1089 if (MEM_P (dest
) && REG_P (src
))
1091 /* If there is a store from the sibling FP register
1092 before the store, then the store is not affected. */
1093 if (REGNO (src
) == y
|| (REGNO (src
) == x
&& size
== 8))
1096 /* Otherwise, the store is affected. */
1097 if (REGNO (src
) == x
&& size
== 4)
1105 /* If we have a branch in the first M instructions, then we
1106 cannot see the (M+2)th instruction so we play safe. */
1107 if (branch_p
&& i
<= (n_insns
- 2))
1116 next
= NEXT_INSN (insn
);
1119 emit_insn_before (gen_nop (), next
);
namespace {

const pass_data pass_data_work_around_errata =
{
  RTL_PASS, /* type */
  "errata", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_rtl_sharing, /* todo_flags_finish */
};

class pass_work_around_errata : public rtl_opt_pass
{
public:
  pass_work_around_errata(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_work_around_errata, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return sparc_gate_work_around_errata (); }
  unsigned int execute () { return sparc_do_work_around_errata (); }

}; // class pass_work_around_errata

} // anon namespace

rtl_opt_pass *
make_pass_work_around_errata (gcc::context *ctxt)
{
  return new pass_work_around_errata (ctxt);
}
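
/* The pass created above is not registered here; sparc_option_override
   registers it via a register_pass_info structure that inserts it after
   the "dbr" pass (see insert_pass_work_around_errata below).  */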
/* Helpers for TARGET_DEBUG_OPTIONS.  */
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_CBCOND)
    fprintf (stderr, "CBCOND ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
1226 const char *const name
;
1227 const enum cmodel value
;
1228 } const cmodels
[] = {
1230 { "medlow", CM_MEDLOW
},
1231 { "medmid", CM_MEDMID
},
1232 { "medany", CM_MEDANY
},
1233 { "embmedany", CM_EMBMEDANY
},
    { NULL, (enum cmodel) 0 }
  };
1236 const struct code_model
*cmodel
;
1237 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
1238 static struct cpu_default
{
1240 const enum processor_type processor
;
1241 } const cpu_default
[] = {
1242 /* There must be one entry here for each TARGET_CPU value. */
1243 { TARGET_CPU_sparc
, PROCESSOR_CYPRESS
},
1244 { TARGET_CPU_v8
, PROCESSOR_V8
},
1245 { TARGET_CPU_supersparc
, PROCESSOR_SUPERSPARC
},
1246 { TARGET_CPU_hypersparc
, PROCESSOR_HYPERSPARC
},
1247 { TARGET_CPU_leon
, PROCESSOR_LEON
},
1248 { TARGET_CPU_leon3
, PROCESSOR_LEON3
},
1249 { TARGET_CPU_sparclite
, PROCESSOR_F930
},
1250 { TARGET_CPU_sparclite86x
, PROCESSOR_SPARCLITE86X
},
1251 { TARGET_CPU_sparclet
, PROCESSOR_TSC701
},
1252 { TARGET_CPU_v9
, PROCESSOR_V9
},
1253 { TARGET_CPU_ultrasparc
, PROCESSOR_ULTRASPARC
},
1254 { TARGET_CPU_ultrasparc3
, PROCESSOR_ULTRASPARC3
},
1255 { TARGET_CPU_niagara
, PROCESSOR_NIAGARA
},
1256 { TARGET_CPU_niagara2
, PROCESSOR_NIAGARA2
},
1257 { TARGET_CPU_niagara3
, PROCESSOR_NIAGARA3
},
1258 { TARGET_CPU_niagara4
, PROCESSOR_NIAGARA4
},
    { -1, PROCESSOR_V7 }
  };
1261 const struct cpu_default
*def
;
1262 /* Table of values for -m{cpu,tune}=. This must match the order of
1263 the enum processor_type in sparc-opts.h. */
1264 static struct cpu_table
{
1265 const char *const name
;
1268 } const cpu_table
[] = {
1269 { "v7", MASK_ISA
, 0 },
1270 { "cypress", MASK_ISA
, 0 },
1271 { "v8", MASK_ISA
, MASK_V8
},
1272 /* TI TMS390Z55 supersparc */
1273 { "supersparc", MASK_ISA
, MASK_V8
},
1274 { "hypersparc", MASK_ISA
, MASK_V8
|MASK_FPU
},
1275 { "leon", MASK_ISA
, MASK_V8
|MASK_LEON
|MASK_FPU
},
1276 { "leon3", MASK_ISA
, MASK_V8
|MASK_LEON3
|MASK_FPU
},
1277 { "sparclite", MASK_ISA
, MASK_SPARCLITE
},
1278 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
1279 { "f930", MASK_ISA
|MASK_FPU
, MASK_SPARCLITE
},
1280 /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU. */
1281 { "f934", MASK_ISA
, MASK_SPARCLITE
|MASK_FPU
},
1282 { "sparclite86x", MASK_ISA
|MASK_FPU
, MASK_SPARCLITE
},
1283 { "sparclet", MASK_ISA
, MASK_SPARCLET
},
1284 /* TEMIC sparclet */
1285 { "tsc701", MASK_ISA
, MASK_SPARCLET
},
1286 { "v9", MASK_ISA
, MASK_V9
},
1287 /* UltraSPARC I, II, IIi */
1288 { "ultrasparc", MASK_ISA
,
1289 /* Although insns using %y are deprecated, it is a clear win. */
1290 MASK_V9
|MASK_DEPRECATED_V8_INSNS
},
1291 /* UltraSPARC III */
1292 /* ??? Check if %y issue still holds true. */
1293 { "ultrasparc3", MASK_ISA
,
1294 MASK_V9
|MASK_DEPRECATED_V8_INSNS
|MASK_VIS2
},
1296 { "niagara", MASK_ISA
,
1297 MASK_V9
|MASK_DEPRECATED_V8_INSNS
},
1299 { "niagara2", MASK_ISA
,
1300 MASK_V9
|MASK_POPC
|MASK_VIS2
},
1302 { "niagara3", MASK_ISA
,
1303 MASK_V9
|MASK_POPC
|MASK_VIS2
|MASK_VIS3
|MASK_FMAF
},
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
  };
1308 const struct cpu_table
*cpu
;
1312 if (sparc_debug_string
!= NULL
)
1317 p
= ASTRDUP (sparc_debug_string
);
1318 while ((q
= strtok (p
, ",")) != NULL
)
1332 if (! strcmp (q
, "all"))
1333 mask
= MASK_DEBUG_ALL
;
1334 else if (! strcmp (q
, "options"))
1335 mask
= MASK_DEBUG_OPTIONS
;
1337 error ("unknown -mdebug-%s switch", q
);
1340 sparc_debug
&= ~mask
;
1342 sparc_debug
|= mask
;
1346 if (TARGET_DEBUG_OPTIONS
)
1348 dump_target_flags("Initial target_flags", target_flags
);
1349 dump_target_flags("target_flags_explicit", target_flags_explicit
);
1352 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1353 SUBTARGET_OVERRIDE_OPTIONS
;
1356 #ifndef SPARC_BI_ARCH
1357 /* Check for unsupported architecture size. */
1358 if (! TARGET_64BIT
!= DEFAULT_ARCH32_P
)
1359 error ("%s is not supported by this configuration",
1360 DEFAULT_ARCH32_P
? "-m64" : "-m32");
1363 /* We force all 64bit archs to use 128 bit long double */
1364 if (TARGET_64BIT
&& ! TARGET_LONG_DOUBLE_128
)
1366 error ("-mlong-double-64 not allowed with -m64");
1367 target_flags
|= MASK_LONG_DOUBLE_128
;
1370 /* Code model selection. */
1371 sparc_cmodel
= SPARC_DEFAULT_CMODEL
;
1373 #ifdef SPARC_BI_ARCH
1375 sparc_cmodel
= CM_32
;
1378 if (sparc_cmodel_string
!= NULL
)
1382 for (cmodel
= &cmodels
[0]; cmodel
->name
; cmodel
++)
1383 if (strcmp (sparc_cmodel_string
, cmodel
->name
) == 0)
1385 if (cmodel
->name
== NULL
)
1386 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string
);
1388 sparc_cmodel
= cmodel
->value
;
1391 error ("-mcmodel= is not supported on 32 bit systems");
1394 /* Check that -fcall-saved-REG wasn't specified for out registers. */
1395 for (i
= 8; i
< 16; i
++)
1396 if (!call_used_regs
[i
])
1398 error ("-fcall-saved-REG is not supported for out registers");
1399 call_used_regs
[i
] = 1;
1402 fpu
= target_flags
& MASK_FPU
; /* save current -mfpu status */
1404 /* Set the default CPU. */
1405 if (!global_options_set
.x_sparc_cpu_and_features
)
1407 for (def
= &cpu_default
[0]; def
->cpu
!= -1; ++def
)
1408 if (def
->cpu
== TARGET_CPU_DEFAULT
)
1410 gcc_assert (def
->cpu
!= -1);
1411 sparc_cpu_and_features
= def
->processor
;
1414 if (!global_options_set
.x_sparc_cpu
)
1415 sparc_cpu
= sparc_cpu_and_features
;
1417 cpu
= &cpu_table
[(int) sparc_cpu_and_features
];
1419 if (TARGET_DEBUG_OPTIONS
)
1421 fprintf (stderr
, "sparc_cpu_and_features: %s\n", cpu
->name
);
1422 fprintf (stderr
, "sparc_cpu: %s\n",
1423 cpu_table
[(int) sparc_cpu
].name
);
1424 dump_target_flags ("cpu->disable", cpu
->disable
);
1425 dump_target_flags ("cpu->enable", cpu
->enable
);
1428 target_flags
&= ~cpu
->disable
;
1429 target_flags
|= (cpu
->enable
1430 #ifndef HAVE_AS_FMAF_HPC_VIS3
1431 & ~(MASK_FMAF
| MASK_VIS3
)
1433 #ifndef HAVE_AS_SPARC4
1436 #ifndef HAVE_AS_LEON
1437 & ~(MASK_LEON
| MASK_LEON3
)
1441 /* If -mfpu or -mno-fpu was explicitly used, don't override with
1442 the processor default. */
1443 if (target_flags_explicit
& MASK_FPU
)
1444 target_flags
= (target_flags
& ~MASK_FPU
) | fpu
;
1446 /* -mvis2 implies -mvis */
1448 target_flags
|= MASK_VIS
;
1450 /* -mvis3 implies -mvis2 and -mvis */
1452 target_flags
|= MASK_VIS2
| MASK_VIS
;
1454 /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is
1457 target_flags
&= ~(MASK_VIS
| MASK_VIS2
| MASK_VIS3
| MASK_FMAF
);
1459 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
1461 -m64 also implies v9. */
1462 if (TARGET_VIS
|| TARGET_ARCH64
)
1464 target_flags
|= MASK_V9
;
1465 target_flags
&= ~(MASK_V8
| MASK_SPARCLET
| MASK_SPARCLITE
);
1468 /* -mvis also implies -mv8plus on 32-bit */
1469 if (TARGET_VIS
&& ! TARGET_ARCH64
)
1470 target_flags
|= MASK_V8PLUS
;
1472 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
1473 if (TARGET_V9
&& TARGET_ARCH32
)
1474 target_flags
|= MASK_DEPRECATED_V8_INSNS
;
1476 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
1477 if (! TARGET_V9
|| TARGET_ARCH64
)
1478 target_flags
&= ~MASK_V8PLUS
;
1480 /* Don't use stack biasing in 32 bit mode. */
1482 target_flags
&= ~MASK_STACK_BIAS
;
1484 /* Supply a default value for align_functions. */
1485 if (align_functions
== 0
1486 && (sparc_cpu
== PROCESSOR_ULTRASPARC
1487 || sparc_cpu
== PROCESSOR_ULTRASPARC3
1488 || sparc_cpu
== PROCESSOR_NIAGARA
1489 || sparc_cpu
== PROCESSOR_NIAGARA2
1490 || sparc_cpu
== PROCESSOR_NIAGARA3
1491 || sparc_cpu
== PROCESSOR_NIAGARA4
))
1492 align_functions
= 32;
1494 /* Validate PCC_STRUCT_RETURN. */
1495 if (flag_pcc_struct_return
== DEFAULT_PCC_STRUCT_RETURN
)
1496 flag_pcc_struct_return
= (TARGET_ARCH64
? 0 : 1);
1498 /* Only use .uaxword when compiling for a 64-bit target. */
1500 targetm
.asm_out
.unaligned_op
.di
= NULL
;
1502 /* Do various machine dependent initializations. */
1503 sparc_init_modes ();
1505 /* Set up function hooks. */
1506 init_machine_status
= sparc_init_machine_status
;
1511 case PROCESSOR_CYPRESS
:
1512 sparc_costs
= &cypress_costs
;
1515 case PROCESSOR_SPARCLITE
:
1516 case PROCESSOR_SUPERSPARC
:
1517 sparc_costs
= &supersparc_costs
;
1519 case PROCESSOR_F930
:
1520 case PROCESSOR_F934
:
1521 case PROCESSOR_HYPERSPARC
:
1522 case PROCESSOR_SPARCLITE86X
:
1523 sparc_costs
= &hypersparc_costs
;
1525 case PROCESSOR_LEON
:
1526 sparc_costs
= &leon_costs
;
1528 case PROCESSOR_LEON3
:
1529 sparc_costs
= &leon3_costs
;
1531 case PROCESSOR_SPARCLET
:
1532 case PROCESSOR_TSC701
:
1533 sparc_costs
= &sparclet_costs
;
1536 case PROCESSOR_ULTRASPARC
:
1537 sparc_costs
= &ultrasparc_costs
;
1539 case PROCESSOR_ULTRASPARC3
:
1540 sparc_costs
= &ultrasparc3_costs
;
1542 case PROCESSOR_NIAGARA
:
1543 sparc_costs
= &niagara_costs
;
1545 case PROCESSOR_NIAGARA2
:
1546 sparc_costs
= &niagara2_costs
;
1548 case PROCESSOR_NIAGARA3
:
1549 sparc_costs
= &niagara3_costs
;
1551 case PROCESSOR_NIAGARA4
:
1552 sparc_costs
= &niagara4_costs
;
1554 case PROCESSOR_NATIVE
:
1558 if (sparc_memory_model
== SMM_DEFAULT
)
1560 /* Choose the memory model for the operating system. */
1561 enum sparc_memory_model_type os_default
= SUBTARGET_DEFAULT_MEMORY_MODEL
;
1562 if (os_default
!= SMM_DEFAULT
)
1563 sparc_memory_model
= os_default
;
1564 /* Choose the most relaxed model for the processor. */
1566 sparc_memory_model
= SMM_RMO
;
1567 else if (TARGET_LEON3
)
1568 sparc_memory_model
= SMM_TSO
;
1569 else if (TARGET_LEON
)
1570 sparc_memory_model
= SMM_SC
;
1572 sparc_memory_model
= SMM_PSO
;
1574 sparc_memory_model
= SMM_SC
;
1577 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1578 if (!(target_flags_explicit
& MASK_LONG_DOUBLE_128
))
1579 target_flags
|= MASK_LONG_DOUBLE_128
;
1582 if (TARGET_DEBUG_OPTIONS
)
1583 dump_target_flags ("Final target_flags", target_flags
);
1585 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES
,
1586 ((sparc_cpu
== PROCESSOR_ULTRASPARC
1587 || sparc_cpu
== PROCESSOR_NIAGARA
1588 || sparc_cpu
== PROCESSOR_NIAGARA2
1589 || sparc_cpu
== PROCESSOR_NIAGARA3
1590 || sparc_cpu
== PROCESSOR_NIAGARA4
)
1592 : (sparc_cpu
== PROCESSOR_ULTRASPARC3
1594 global_options
.x_param_values
,
1595 global_options_set
.x_param_values
);
1596 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE
,
1597 ((sparc_cpu
== PROCESSOR_ULTRASPARC
1598 || sparc_cpu
== PROCESSOR_ULTRASPARC3
1599 || sparc_cpu
== PROCESSOR_NIAGARA
1600 || sparc_cpu
== PROCESSOR_NIAGARA2
1601 || sparc_cpu
== PROCESSOR_NIAGARA3
1602 || sparc_cpu
== PROCESSOR_NIAGARA4
)
1604 global_options
.x_param_values
,
1605 global_options_set
.x_param_values
);
1607 /* Disable save slot sharing for call-clobbered registers by default.
1608 The IRA sharing algorithm works on single registers only and this
1609 pessimizes for double floating-point registers. */
1610 if (!global_options_set
.x_flag_ira_share_save_slots
)
1611 flag_ira_share_save_slots
= 0;
1613 /* We register a machine specific pass to work around errata, if any.
1614 The pass mut be scheduled as late as possible so that we have the
1615 (essentially) final form of the insn stream to work on.
1616 Registering the pass must be done at start up. It's convenient to
1618 opt_pass
*errata_pass
= make_pass_work_around_errata (g
);
1619 struct register_pass_info insert_pass_work_around_errata
=
1621 errata_pass
, /* pass */
1622 "dbr", /* reference_pass_name */
1623 1, /* ref_pass_instance_number */
1624 PASS_POS_INSERT_AFTER
/* po_op */
  register_pass (&insert_pass_work_around_errata);
}
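
/* Note: the errata pass is anchored after the delayed-branch scheduling
   pass ("dbr") above, which is presumably why the worker can assume that
   delay slots have already been filled when it scans the insn stream.  */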
1629 /* Miscellaneous utilities. */
/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
1641 /* Nonzero if OP is a floating point constant which can
1642 be loaded into an integer register using a single
1643 sethi instruction. */
1648 if (GET_CODE (op
) == CONST_DOUBLE
)
1653 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
1654 REAL_VALUE_TO_TARGET_SINGLE (r
, i
);
1655 return !SPARC_SIMM13_P (i
) && SPARC_SETHI_P (i
);
1661 /* Nonzero if OP is a floating point constant which can
1662 be loaded into an integer register using a single
1668 if (GET_CODE (op
) == CONST_DOUBLE
)
1673 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
1674 REAL_VALUE_TO_TARGET_SINGLE (r
, i
);
1675 return SPARC_SIMM13_P (i
);
1681 /* Nonzero if OP is a floating point constant which can
1682 be loaded into an integer register using a high/losum
1683 instruction sequence. */
1686 fp_high_losum_p (rtx op
)
1688 /* The constraints calling this should only be in
1689 SFmode move insns, so any constant which cannot
1690 be moved using a single insn will do. */
1691 if (GET_CODE (op
) == CONST_DOUBLE
)
1696 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
1697 REAL_VALUE_TO_TARGET_SINGLE (r
, i
);
1698 return !SPARC_SIMM13_P (i
) && !SPARC_SETHI_P (i
);
1704 /* Return true if the address of LABEL can be loaded by means of the
1705 mov{si,di}_pic_label_ref patterns in PIC mode. */
static bool
can_use_mov_pic_label_ref (rtx label)
{
1710 /* VxWorks does not impose a fixed gap between segments; the run-time
1711 gap can be different from the object-file gap. We therefore can't
1712 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1713 are absolutely sure that X is in the same segment as the GOT.
1714 Unfortunately, the flexibility of linker scripts means that we
1715 can't be sure of that in general, so assume that GOT-relative
1716 accesses are never valid on VxWorks. */
1717 if (TARGET_VXWORKS_RTP
)
1720 /* Similarly, if the label is non-local, it might end up being placed
1721 in a different section than the current one; now mov_pic_label_ref
1722 requires the label and the code to be in the same section. */
1723 if (LABEL_REF_NONLOCAL_P (label
))
1726 /* Finally, if we are reordering basic blocks and partition into hot
1727 and cold sections, this might happen for any label. */
1728 if (flag_reorder_blocks_and_partition
)
1734 /* Expand a move instruction. Return true if all work is done. */
bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
1739 /* Handle sets of MEM first. */
1740 if (GET_CODE (operands
[0]) == MEM
)
1742 /* 0 is a register (or a pair of registers) on SPARC. */
1743 if (register_or_zero_operand (operands
[1], mode
))
1746 if (!reload_in_progress
)
1748 operands
[0] = validize_mem (operands
[0]);
1749 operands
[1] = force_reg (mode
, operands
[1]);
1753 /* Fixup TLS cases. */
1755 && CONSTANT_P (operands
[1])
1756 && sparc_tls_referenced_p (operands
[1]))
1758 operands
[1] = sparc_legitimize_tls_address (operands
[1]);
1762 /* Fixup PIC cases. */
1763 if (flag_pic
&& CONSTANT_P (operands
[1]))
1765 if (pic_address_needs_scratch (operands
[1]))
1766 operands
[1] = sparc_legitimize_pic_address (operands
[1], NULL_RTX
);
1768 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
1769 if (GET_CODE (operands
[1]) == LABEL_REF
1770 && can_use_mov_pic_label_ref (operands
[1]))
1774 emit_insn (gen_movsi_pic_label_ref (operands
[0], operands
[1]));
1780 gcc_assert (TARGET_ARCH64
);
1781 emit_insn (gen_movdi_pic_label_ref (operands
[0], operands
[1]));
1786 if (symbolic_operand (operands
[1], mode
))
1789 = sparc_legitimize_pic_address (operands
[1],
1791 ? operands
[0] : NULL_RTX
);
1796 /* If we are trying to toss an integer constant into FP registers,
1797 or loading a FP or vector constant, force it into memory. */
1798 if (CONSTANT_P (operands
[1])
1799 && REG_P (operands
[0])
1800 && (SPARC_FP_REG_P (REGNO (operands
[0]))
1801 || SCALAR_FLOAT_MODE_P (mode
)
1802 || VECTOR_MODE_P (mode
)))
1804 /* emit_group_store will send such bogosity to us when it is
1805 not storing directly into memory. So fix this up to avoid
1806 crashes in output_constant_pool. */
1807 if (operands
[1] == const0_rtx
)
1808 operands
[1] = CONST0_RTX (mode
);
1810 /* We can clear or set to all-ones FP registers if TARGET_VIS, and
1811 always other regs. */
1812 if ((TARGET_VIS
|| REGNO (operands
[0]) < SPARC_FIRST_FP_REG
)
1813 && (const_zero_operand (operands
[1], mode
)
1814 || const_all_ones_operand (operands
[1], mode
)))
1817 if (REGNO (operands
[0]) < SPARC_FIRST_FP_REG
1818 /* We are able to build any SF constant in integer registers
1819 with at most 2 instructions. */
1821 /* And any DF constant in integer registers. */
1823 && ! can_create_pseudo_p ())))
1826 operands
[1] = force_const_mem (mode
, operands
[1]);
1827 if (!reload_in_progress
)
1828 operands
[1] = validize_mem (operands
[1]);
1832 /* Accept non-constants and valid constants unmodified. */
1833 if (!CONSTANT_P (operands
[1])
1834 || GET_CODE (operands
[1]) == HIGH
1835 || input_operand (operands
[1], mode
))
1841 /* All QImode constants require only one insn, so proceed. */
1846 sparc_emit_set_const32 (operands
[0], operands
[1]);
1850 /* input_operand should have filtered out 32-bit mode. */
1851 sparc_emit_set_const64 (operands
[0], operands
[1]);
1857 /* TImode isn't available in 32-bit mode. */
1858 split_double (operands
[1], &high
, &low
);
1859 emit_insn (gen_movdi (operand_subword (operands
[0], 0, 0, TImode
),
1861 emit_insn (gen_movdi (operand_subword (operands
[0], 1, 0, TImode
),
1873 /* Load OP1, a 32-bit constant, into OP0, a register.
1874 We know it can't be done in one insn when we get
1875 here, the move expander guarantees this. */
static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
1880 enum machine_mode mode
= GET_MODE (op0
);
1883 if (can_create_pseudo_p ())
1884 temp
= gen_reg_rtx (mode
);
1886 if (GET_CODE (op1
) == CONST_INT
)
1888 gcc_assert (!small_int_operand (op1
, mode
)
1889 && !const_high_operand (op1
, mode
));
1891 /* Emit them as real moves instead of a HIGH/LO_SUM,
1892 this way CSE can see everything and reuse intermediate
1893 values if it wants. */
1894 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
1895 GEN_INT (INTVAL (op1
)
1896 & ~(HOST_WIDE_INT
)0x3ff)));
1898 emit_insn (gen_rtx_SET (VOIDmode
,
1900 gen_rtx_IOR (mode
, temp
,
1901 GEN_INT (INTVAL (op1
) & 0x3ff))));
1905 /* A symbol, emit in the traditional way. */
1906 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
1907 gen_rtx_HIGH (mode
, op1
)));
1908 emit_insn (gen_rtx_SET (VOIDmode
,
1909 op0
, gen_rtx_LO_SUM (mode
, temp
, op1
)));
1913 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1914 If TEMP is nonzero, we are forbidden to use any other scratch
1915 registers. Otherwise, we are allowed to generate them as needed.
1917 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1918 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
static void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
1923 rtx temp1
, temp2
, temp3
, temp4
, temp5
;
1926 if (temp
&& GET_MODE (temp
) == TImode
)
1929 temp
= gen_rtx_REG (DImode
, REGNO (temp
));
1932 /* SPARC-V9 code-model support. */
1933 switch (sparc_cmodel
)
1936 /* The range spanned by all instructions in the object is less
1937 than 2^31 bytes (2GB) and the distance from any instruction
1938 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1939 than 2^31 bytes (2GB).
1941 The executable must be in the low 4TB of the virtual address
1944 sethi %hi(symbol), %temp1
1945 or %temp1, %lo(symbol), %reg */
1947 temp1
= temp
; /* op0 is allowed. */
1949 temp1
= gen_reg_rtx (DImode
);
1951 emit_insn (gen_rtx_SET (VOIDmode
, temp1
, gen_rtx_HIGH (DImode
, op1
)));
1952 emit_insn (gen_rtx_SET (VOIDmode
, op0
, gen_rtx_LO_SUM (DImode
, temp1
, op1
)));
1956 /* The range spanned by all instructions in the object is less
1957 than 2^31 bytes (2GB) and the distance from any instruction
1958 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1959 than 2^31 bytes (2GB).
1961 The executable must be in the low 16TB of the virtual address
1964 sethi %h44(symbol), %temp1
1965 or %temp1, %m44(symbol), %temp2
1966 sllx %temp2, 12, %temp3
1967 or %temp3, %l44(symbol), %reg */
1972 temp3
= temp
; /* op0 is allowed. */
1976 temp1
= gen_reg_rtx (DImode
);
1977 temp2
= gen_reg_rtx (DImode
);
1978 temp3
= gen_reg_rtx (DImode
);
1981 emit_insn (gen_seth44 (temp1
, op1
));
1982 emit_insn (gen_setm44 (temp2
, temp1
, op1
));
1983 emit_insn (gen_rtx_SET (VOIDmode
, temp3
,
1984 gen_rtx_ASHIFT (DImode
, temp2
, GEN_INT (12))));
1985 emit_insn (gen_setl44 (op0
, temp3
, op1
));
1989 /* The range spanned by all instructions in the object is less
1990 than 2^31 bytes (2GB) and the distance from any instruction
1991 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1992 than 2^31 bytes (2GB).
1994 The executable can be placed anywhere in the virtual address
1997 sethi %hh(symbol), %temp1
1998 sethi %lm(symbol), %temp2
1999 or %temp1, %hm(symbol), %temp3
2000 sllx %temp3, 32, %temp4
2001 or %temp4, %temp2, %temp5
2002 or %temp5, %lo(symbol), %reg */
2005 /* It is possible that one of the registers we got for operands[2]
2006 might coincide with that of operands[0] (which is why we made
2007 it TImode). Pick the other one to use as our scratch. */
2008 if (rtx_equal_p (temp
, op0
))
2010 gcc_assert (ti_temp
);
2011 temp
= gen_rtx_REG (DImode
, REGNO (temp
) + 1);
2014 temp2
= temp
; /* op0 is _not_ allowed, see above. */
2021 temp1
= gen_reg_rtx (DImode
);
2022 temp2
= gen_reg_rtx (DImode
);
2023 temp3
= gen_reg_rtx (DImode
);
2024 temp4
= gen_reg_rtx (DImode
);
2025 temp5
= gen_reg_rtx (DImode
);
2028 emit_insn (gen_sethh (temp1
, op1
));
2029 emit_insn (gen_setlm (temp2
, op1
));
2030 emit_insn (gen_sethm (temp3
, temp1
, op1
));
2031 emit_insn (gen_rtx_SET (VOIDmode
, temp4
,
2032 gen_rtx_ASHIFT (DImode
, temp3
, GEN_INT (32))));
2033 emit_insn (gen_rtx_SET (VOIDmode
, temp5
,
2034 gen_rtx_PLUS (DImode
, temp4
, temp2
)));
2035 emit_insn (gen_setlo (op0
, temp5
, op1
));
2039 /* Old old old backwards compatibility kruft here.
2040 Essentially it is MEDLOW with a fixed 64-bit
2041 virtual base added to all data segment addresses.
2042 Text-segment stuff is computed like MEDANY, we can't
2043 reuse the code above because the relocation knobs
2046 Data segment: sethi %hi(symbol), %temp1
2047 add %temp1, EMBMEDANY_BASE_REG, %temp2
2048 or %temp2, %lo(symbol), %reg */
2049 if (data_segment_operand (op1
, GET_MODE (op1
)))
2053 temp1
= temp
; /* op0 is allowed. */
2058 temp1
= gen_reg_rtx (DImode
);
2059 temp2
= gen_reg_rtx (DImode
);
2062 emit_insn (gen_embmedany_sethi (temp1
, op1
));
2063 emit_insn (gen_embmedany_brsum (temp2
, temp1
));
2064 emit_insn (gen_embmedany_losum (op0
, temp2
, op1
));
2067 /* Text segment: sethi %uhi(symbol), %temp1
2068 sethi %hi(symbol), %temp2
2069 or %temp1, %ulo(symbol), %temp3
2070 sllx %temp3, 32, %temp4
2071 or %temp4, %temp2, %temp5
2072 or %temp5, %lo(symbol), %reg */
2077 /* It is possible that one of the registers we got for operands[2]
2078 might coincide with that of operands[0] (which is why we made
2079 it TImode). Pick the other one to use as our scratch. */
2080 if (rtx_equal_p (temp
, op0
))
2082 gcc_assert (ti_temp
);
2083 temp
= gen_rtx_REG (DImode
, REGNO (temp
) + 1);
2086 temp2
= temp
; /* op0 is _not_ allowed, see above. */
2093 temp1
= gen_reg_rtx (DImode
);
2094 temp2
= gen_reg_rtx (DImode
);
2095 temp3
= gen_reg_rtx (DImode
);
2096 temp4
= gen_reg_rtx (DImode
);
2097 temp5
= gen_reg_rtx (DImode
);
2100 emit_insn (gen_embmedany_textuhi (temp1
, op1
));
2101 emit_insn (gen_embmedany_texthi (temp2
, op1
));
2102 emit_insn (gen_embmedany_textulo (temp3
, temp1
, op1
));
2103 emit_insn (gen_rtx_SET (VOIDmode
, temp4
,
2104 gen_rtx_ASHIFT (DImode
, temp3
, GEN_INT (32))));
2105 emit_insn (gen_rtx_SET (VOIDmode
, temp5
,
2106 gen_rtx_PLUS (DImode
, temp4
, temp2
)));
2107 emit_insn (gen_embmedany_textlo (op0
, temp5
, op1
));
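
/* Added summary for clarity: counting the sequences documented above,
   a symbolic 64-bit constant costs 2 instructions under CM_MEDLOW,
   4 under CM_MEDMID, 6 under CM_MEDANY, and 3 (data) or 6 (text)
   under CM_EMBMEDANY, which is why the smaller code models are
   preferred whenever the link-time layout allows them.  */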
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else

/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not allowed to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */

static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
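
/* Illustrative example (added, not from the original sources):
   gen_safe_HIGH64 (dest, 0xdeadbeef) builds
   (set dest (const_int 0xdeadbc00)), i.e. only the bits above the low
   ten survive the masking, mirroring what a real sethi would deposit.  */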
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to do in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					     unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp = op0;

  if (can_create_pseudo_p ())
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (can_create_pseudo_p ())
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }

  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);

  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
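
/* Worked example (added for illustration): for the constant
   0x00000003fffffffc, high_bits is 0x00000003 and low_bits is
   0xfffffffc, so the scans above report lowest_bit_set == 2,
   highest_bit_set == 33 and all_bits_between_are_set == 1.  */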
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
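
/* Illustrative cases (added): 0x000fffff00000000 only has bits 32-51
   set, a span below 21 positions, so it is classified as a 2-insn
   constant; so is any value whose upper 32 bits are all zeros or all
   ones, per the early test above.  */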
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							 unsigned HOST_WIDE_INT,
							 int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
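
/* In other words (comment added for clarity): the "focus bits" are the
   window of set bits re-based to bit SHIFT, so callers can feed them
   either to a sethi (SHIFT == 10) or to a simm13 move (SHIFT == 0) and
   then shift the register back into place by lowest_bit_set.  */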
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
static void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (! can_create_pseudo_p ())
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *    xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *    sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *    or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P (low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
2702 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2703 return the mode to be used for the comparison. For floating-point,
2704 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2705 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2706 processing is needed. */
2709 select_cc_mode (enum rtx_code op
, rtx x
, rtx y ATTRIBUTE_UNUSED
)
2711 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2737 else if (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
2738 || GET_CODE (x
) == NEG
|| GET_CODE (x
) == ASHIFT
)
2740 if (TARGET_ARCH64
&& GET_MODE (x
) == DImode
)
2741 return CCX_NOOVmode
;
2747 if (TARGET_ARCH64
&& GET_MODE (x
) == DImode
)
2754 /* Emit the compare insn and return the CC reg for a CODE comparison
2755 with operands X and Y. */
2758 gen_compare_reg_1 (enum rtx_code code
, rtx x
, rtx y
)
2760 enum machine_mode mode
;
2763 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_CC
)
2766 mode
= SELECT_CC_MODE (code
, x
, y
);
2768 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2769 fcc regs (cse can't tell they're really call clobbered regs and will
2770 remove a duplicate comparison even if there is an intervening function
2771 call - it will then try to reload the cc reg via an int reg which is why
2772 we need the movcc patterns). It is possible to provide the movcc
2773 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2774 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2775 to tell cse that CCFPE mode registers (even pseudos) are call
2778 /* ??? This is an experiment. Rather than making changes to cse which may
2779 or may not be easy/clean, we do our own cse. This is possible because
2780 we will generate hard registers. Cse knows they're call clobbered (it
2781 doesn't know the same thing about pseudos). If we guess wrong, no big
2782 deal, but if we win, great! */
2784 if (TARGET_V9
&& GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2785 #if 1 /* experiment */
2788 /* We cycle through the registers to ensure they're all exercised. */
2789 static int next_fcc_reg
= 0;
2790 /* Previous x,y for each fcc reg. */
2791 static rtx prev_args
[4][2];
2793 /* Scan prev_args for x,y. */
2794 for (reg
= 0; reg
< 4; reg
++)
2795 if (prev_args
[reg
][0] == x
&& prev_args
[reg
][1] == y
)
2800 prev_args
[reg
][0] = x
;
2801 prev_args
[reg
][1] = y
;
2802 next_fcc_reg
= (next_fcc_reg
+ 1) & 3;
2804 cc_reg
= gen_rtx_REG (mode
, reg
+ SPARC_FIRST_V9_FCC_REG
);
2807 cc_reg
= gen_reg_rtx (mode
);
2808 #endif /* ! experiment */
2809 else if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2810 cc_reg
= gen_rtx_REG (mode
, SPARC_FCC_REG
);
2812 cc_reg
= gen_rtx_REG (mode
, SPARC_ICC_REG
);
2814 /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD. If we do, this
2815 will only result in an unrecognizable insn so no point in asserting. */
2816 emit_insn (gen_rtx_SET (VOIDmode
, cc_reg
, gen_rtx_COMPARE (mode
, x
, y
)));
2822 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2825 gen_compare_reg (rtx cmp
)
2827 return gen_compare_reg_1 (GET_CODE (cmp
), XEXP (cmp
, 0), XEXP (cmp
, 1));
2830 /* This function is used for v9 only.
2831 DEST is the target of the Scc insn.
2832 CODE is the code for an Scc's comparison.
2833 X and Y are the values we compare.
2835 This function is needed to turn
2838 (gt (reg:CCX 100 %icc)
2842 (gt:DI (reg:CCX 100 %icc)
2845 IE: The instruction recognizer needs to see the mode of the comparison to
2846 find the right instruction. We could use "gt:DI" right in the
2847 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2850 gen_v9_scc (rtx dest
, enum rtx_code compare_code
, rtx x
, rtx y
)
2853 && (GET_MODE (x
) == DImode
2854 || GET_MODE (dest
) == DImode
))
2857 /* Try to use the movrCC insns. */
2859 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
2861 && v9_regcmp_p (compare_code
))
2866 /* Special case for op0 != 0. This can be done with one instruction if
2869 if (compare_code
== NE
2870 && GET_MODE (dest
) == DImode
2871 && rtx_equal_p (op0
, dest
))
2873 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
2874 gen_rtx_IF_THEN_ELSE (DImode
,
2875 gen_rtx_fmt_ee (compare_code
, DImode
,
2882 if (reg_overlap_mentioned_p (dest
, op0
))
2884 /* Handle the case where dest == x.
2885 We "early clobber" the result. */
2886 op0
= gen_reg_rtx (GET_MODE (x
));
2887 emit_move_insn (op0
, x
);
2890 emit_insn (gen_rtx_SET (VOIDmode
, dest
, const0_rtx
));
2891 if (GET_MODE (op0
) != DImode
)
2893 temp
= gen_reg_rtx (DImode
);
2894 convert_move (temp
, op0
, 0);
2898 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
2899 gen_rtx_IF_THEN_ELSE (GET_MODE (dest
),
2900 gen_rtx_fmt_ee (compare_code
, DImode
,
2908 x
= gen_compare_reg_1 (compare_code
, x
, y
);
2911 gcc_assert (GET_MODE (x
) != CC_NOOVmode
2912 && GET_MODE (x
) != CCX_NOOVmode
);
2914 emit_insn (gen_rtx_SET (VOIDmode
, dest
, const0_rtx
));
2915 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
2916 gen_rtx_IF_THEN_ELSE (GET_MODE (dest
),
2917 gen_rtx_fmt_ee (compare_code
,
2918 GET_MODE (x
), x
, y
),
2919 const1_rtx
, dest
)));
2925 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2926 without jumps using the addx/subx instructions. */
2929 emit_scc_insn (rtx operands
[])
2936 /* The quad-word fp compare library routines all return nonzero to indicate
2937 true, which is different from the equivalent libgcc routines, so we must
2938 handle them specially here. */
2939 if (GET_MODE (operands
[2]) == TFmode
&& ! TARGET_HARD_QUAD
)
2941 operands
[1] = sparc_emit_float_lib_cmp (operands
[2], operands
[3],
2942 GET_CODE (operands
[1]));
2943 operands
[2] = XEXP (operands
[1], 0);
2944 operands
[3] = XEXP (operands
[1], 1);
2947 code
= GET_CODE (operands
[1]);
2951 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2952 more applications). The exception to this is "reg != 0" which can
2953 be done in one instruction on v9 (so we do it). */
2956 if (GET_MODE (x
) == SImode
)
2960 pat
= gen_seqsidi_special (operands
[0], x
, y
);
2962 pat
= gen_seqsisi_special (operands
[0], x
, y
);
2966 else if (GET_MODE (x
) == DImode
)
2968 rtx pat
= gen_seqdi_special (operands
[0], x
, y
);
2976 if (GET_MODE (x
) == SImode
)
2980 pat
= gen_snesidi_special (operands
[0], x
, y
);
2982 pat
= gen_snesisi_special (operands
[0], x
, y
);
2986 else if (GET_MODE (x
) == DImode
)
2990 pat
= gen_snedi_special_vis3 (operands
[0], x
, y
);
2992 pat
= gen_snedi_special (operands
[0], x
, y
);
3000 && GET_MODE (x
) == DImode
3002 && (code
== GTU
|| code
== LTU
))
3003 && gen_v9_scc (operands
[0], code
, x
, y
))
3006 /* We can do LTU and GEU using the addx/subx instructions too. And
3007 for GTU/LEU, if both operands are registers swap them and fall
3008 back to the easy case. */
3009 if (code
== GTU
|| code
== LEU
)
3011 if ((GET_CODE (x
) == REG
|| GET_CODE (x
) == SUBREG
)
3012 && (GET_CODE (y
) == REG
|| GET_CODE (y
) == SUBREG
))
3017 code
= swap_condition (code
);
3022 || (!TARGET_VIS3
&& code
== GEU
))
3024 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0],
3025 gen_rtx_fmt_ee (code
, GET_MODE (operands
[0]),
3026 gen_compare_reg_1 (code
, x
, y
),
3031 /* All the posibilities to use addx/subx based sequences has been
3032 exhausted, try for a 3 instruction sequence using v9 conditional
3034 if (TARGET_V9
&& gen_v9_scc (operands
[0], code
, x
, y
))
3037 /* Nope, do branches. */
3041 /* Emit a conditional jump insn for the v9 architecture using comparison code
3042 CODE and jump target LABEL.
3043 This function exists to take advantage of the v9 brxx insns. */
3046 emit_v9_brxx_insn (enum rtx_code code
, rtx op0
, rtx label
)
3048 emit_jump_insn (gen_rtx_SET (VOIDmode
,
3050 gen_rtx_IF_THEN_ELSE (VOIDmode
,
3051 gen_rtx_fmt_ee (code
, GET_MODE (op0
),
3053 gen_rtx_LABEL_REF (VOIDmode
, label
),
3057 /* Emit a conditional jump insn for the UA2011 architecture using
3058 comparison code CODE and jump target LABEL. This function exists
3059 to take advantage of the UA2011 Compare and Branch insns. */
3062 emit_cbcond_insn (enum rtx_code code
, rtx op0
, rtx op1
, rtx label
)
3066 if_then_else
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
3067 gen_rtx_fmt_ee(code
, GET_MODE(op0
),
3069 gen_rtx_LABEL_REF (VOIDmode
, label
),
3072 emit_jump_insn (gen_rtx_SET (VOIDmode
, pc_rtx
, if_then_else
));
3076 emit_conditional_branch_insn (rtx operands
[])
3078 /* The quad-word fp compare library routines all return nonzero to indicate
3079 true, which is different from the equivalent libgcc routines, so we must
3080 handle them specially here. */
3081 if (GET_MODE (operands
[1]) == TFmode
&& ! TARGET_HARD_QUAD
)
3083 operands
[0] = sparc_emit_float_lib_cmp (operands
[1], operands
[2],
3084 GET_CODE (operands
[0]));
3085 operands
[1] = XEXP (operands
[0], 0);
3086 operands
[2] = XEXP (operands
[0], 1);
3089 /* If we can tell early on that the comparison is against a constant
3090 that won't fit in the 5-bit signed immediate field of a cbcond,
3091 use one of the other v9 conditional branch sequences. */
3093 && GET_CODE (operands
[1]) == REG
3094 && (GET_MODE (operands
[1]) == SImode
3095 || (TARGET_ARCH64
&& GET_MODE (operands
[1]) == DImode
))
3096 && (GET_CODE (operands
[2]) != CONST_INT
3097 || SPARC_SIMM5_P (INTVAL (operands
[2]))))
3099 emit_cbcond_insn (GET_CODE (operands
[0]), operands
[1], operands
[2], operands
[3]);
3103 if (TARGET_ARCH64
&& operands
[2] == const0_rtx
3104 && GET_CODE (operands
[1]) == REG
3105 && GET_MODE (operands
[1]) == DImode
)
3107 emit_v9_brxx_insn (GET_CODE (operands
[0]), operands
[1], operands
[3]);
3111 operands
[1] = gen_compare_reg (operands
[0]);
3112 operands
[2] = const0_rtx
;
3113 operands
[0] = gen_rtx_fmt_ee (GET_CODE (operands
[0]), VOIDmode
,
3114 operands
[1], operands
[2]);
3115 emit_jump_insn (gen_cbranchcc4 (operands
[0], operands
[1], operands
[2],
/* Generate a DFmode part of a hard TFmode register.
   REG is the TFmode hard register, LOW is 1 for the
   low 64bit of the register and 0 otherwise.  */

rtx
gen_df_reg (rtx reg, int low)
{
  int regno = REGNO (reg);

  if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
    regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;

  return gen_rtx_REG (DFmode, regno);
}
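
/* Usage note (added; assumes WORDS_BIG_ENDIAN is set, as on SPARC):
   gen_df_reg (reg, 0) then returns a DFmode register at REGNO (reg)
   itself, the high half, while gen_df_reg (reg, 1) returns the register
   two FP regs (or one integer reg on 64-bit) above it as the low half.  */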
3134 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3135 Unlike normal calls, TFmode operands are passed by reference. It is
3136 assumed that no more than 3 operands are required. */
3139 emit_soft_tfmode_libcall (const char *func_name
, int nargs
, rtx
*operands
)
3141 rtx ret_slot
= NULL
, arg
[3], func_sym
;
3144 /* We only expect to be called for conversions, unary, and binary ops. */
3145 gcc_assert (nargs
== 2 || nargs
== 3);
3147 for (i
= 0; i
< nargs
; ++i
)
3149 rtx this_arg
= operands
[i
];
3152 /* TFmode arguments and return values are passed by reference. */
3153 if (GET_MODE (this_arg
) == TFmode
)
3155 int force_stack_temp
;
3157 force_stack_temp
= 0;
3158 if (TARGET_BUGGY_QP_LIB
&& i
== 0)
3159 force_stack_temp
= 1;
3161 if (GET_CODE (this_arg
) == MEM
3162 && ! force_stack_temp
)
3164 tree expr
= MEM_EXPR (this_arg
);
3166 mark_addressable (expr
);
3167 this_arg
= XEXP (this_arg
, 0);
3169 else if (CONSTANT_P (this_arg
)
3170 && ! force_stack_temp
)
3172 this_slot
= force_const_mem (TFmode
, this_arg
);
3173 this_arg
= XEXP (this_slot
, 0);
3177 this_slot
= assign_stack_temp (TFmode
, GET_MODE_SIZE (TFmode
));
3179 /* Operand 0 is the return value. We'll copy it out later. */
3181 emit_move_insn (this_slot
, this_arg
);
3183 ret_slot
= this_slot
;
3185 this_arg
= XEXP (this_slot
, 0);
3192 func_sym
= gen_rtx_SYMBOL_REF (Pmode
, func_name
);
3194 if (GET_MODE (operands
[0]) == TFmode
)
3197 emit_library_call (func_sym
, LCT_NORMAL
, VOIDmode
, 2,
3198 arg
[0], GET_MODE (arg
[0]),
3199 arg
[1], GET_MODE (arg
[1]));
3201 emit_library_call (func_sym
, LCT_NORMAL
, VOIDmode
, 3,
3202 arg
[0], GET_MODE (arg
[0]),
3203 arg
[1], GET_MODE (arg
[1]),
3204 arg
[2], GET_MODE (arg
[2]));
3207 emit_move_insn (operands
[0], ret_slot
);
3213 gcc_assert (nargs
== 2);
3215 ret
= emit_library_call_value (func_sym
, operands
[0], LCT_NORMAL
,
3216 GET_MODE (operands
[0]), 1,
3217 arg
[1], GET_MODE (arg
[1]));
3219 if (ret
!= operands
[0])
3220 emit_move_insn (operands
[0], ret
);
3224 /* Expand soft-float TFmode calls to sparc abi routines. */
3227 emit_soft_tfmode_binop (enum rtx_code code
, rtx
*operands
)
3249 emit_soft_tfmode_libcall (func
, 3, operands
);
3253 emit_soft_tfmode_unop (enum rtx_code code
, rtx
*operands
)
3257 gcc_assert (code
== SQRT
);
3260 emit_soft_tfmode_libcall (func
, 2, operands
);
3264 emit_soft_tfmode_cvt (enum rtx_code code
, rtx
*operands
)
3271 switch (GET_MODE (operands
[1]))
3284 case FLOAT_TRUNCATE
:
3285 switch (GET_MODE (operands
[0]))
3299 switch (GET_MODE (operands
[1]))
3304 operands
[1] = gen_rtx_SIGN_EXTEND (DImode
, operands
[1]);
3314 case UNSIGNED_FLOAT
:
3315 switch (GET_MODE (operands
[1]))
3320 operands
[1] = gen_rtx_ZERO_EXTEND (DImode
, operands
[1]);
3331 switch (GET_MODE (operands
[0]))
3345 switch (GET_MODE (operands
[0]))
3362 emit_soft_tfmode_libcall (func
, 2, operands
);
3365 /* Expand a hard-float tfmode operation. All arguments must be in
3369 emit_hard_tfmode_operation (enum rtx_code code
, rtx
*operands
)
3373 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
3375 operands
[1] = force_reg (GET_MODE (operands
[1]), operands
[1]);
3376 op
= gen_rtx_fmt_e (code
, GET_MODE (operands
[0]), operands
[1]);
3380 operands
[1] = force_reg (GET_MODE (operands
[1]), operands
[1]);
3381 operands
[2] = force_reg (GET_MODE (operands
[2]), operands
[2]);
3382 op
= gen_rtx_fmt_ee (code
, GET_MODE (operands
[0]),
3383 operands
[1], operands
[2]);
3386 if (register_operand (operands
[0], VOIDmode
))
3389 dest
= gen_reg_rtx (GET_MODE (operands
[0]));
3391 emit_insn (gen_rtx_SET (VOIDmode
, dest
, op
));
3393 if (dest
!= operands
[0])
3394 emit_move_insn (operands
[0], dest
);
3398 emit_tfmode_binop (enum rtx_code code
, rtx
*operands
)
3400 if (TARGET_HARD_QUAD
)
3401 emit_hard_tfmode_operation (code
, operands
);
3403 emit_soft_tfmode_binop (code
, operands
);
3407 emit_tfmode_unop (enum rtx_code code
, rtx
*operands
)
3409 if (TARGET_HARD_QUAD
)
3410 emit_hard_tfmode_operation (code
, operands
);
3412 emit_soft_tfmode_unop (code
, operands
);
3416 emit_tfmode_cvt (enum rtx_code code
, rtx
*operands
)
3418 if (TARGET_HARD_QUAD
)
3419 emit_hard_tfmode_operation (code
, operands
);
3421 emit_soft_tfmode_cvt (code
, operands
);
3424 /* Return nonzero if a branch/jump/call instruction will be emitting
3425 nop into its delay slot. */
3428 empty_delay_slot (rtx insn
)
3432 /* If no previous instruction (should not happen), return true. */
3433 if (PREV_INSN (insn
) == NULL
)
3436 seq
= NEXT_INSN (PREV_INSN (insn
));
3437 if (GET_CODE (PATTERN (seq
)) == SEQUENCE
)
3443 /* Return nonzero if we should emit a nop after a cbcond instruction.
3444 The cbcond instruction does not have a delay slot, however there is
3445 a severe performance penalty if a control transfer appears right
3446 after a cbcond. Therefore we emit a nop when we detect this
3450 emit_cbcond_nop (rtx insn
)
3452 rtx next
= next_active_insn (insn
);
3457 if (NONJUMP_INSN_P (next
)
3458 && GET_CODE (PATTERN (next
)) == SEQUENCE
)
3459 next
= XVECEXP (PATTERN (next
), 0, 0);
3460 else if (CALL_P (next
)
3461 && GET_CODE (PATTERN (next
)) == PARALLEL
)
3463 rtx delay
= XVECEXP (PATTERN (next
), 0, 1);
3465 if (GET_CODE (delay
) == RETURN
)
3467 /* It's a sibling call. Do not emit the nop if we're going
3468 to emit something other than the jump itself as the first
3469 instruction of the sibcall sequence. */
3470 if (sparc_leaf_function_p
|| TARGET_FLAT
)
3475 if (NONJUMP_INSN_P (next
))
3481 /* Return nonzero if TRIAL can go into the call delay slot. */
3484 eligible_for_call_delay (rtx trial
)
3488 if (get_attr_in_branch_delay (trial
) == IN_BRANCH_DELAY_FALSE
)
3492 call __tls_get_addr, %tgd_call (foo)
3493 add %l7, %o0, %o0, %tgd_add (foo)
3494 while Sun as/ld does not. */
3495 if (TARGET_GNU_TLS
|| !TARGET_TLS
)
3498 pat
= PATTERN (trial
);
3500 /* We must reject tgd_add{32|64}, i.e.
3501 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3502 and tldm_add{32|64}, i.e.
3503 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3505 if (GET_CODE (pat
) == SET
3506 && GET_CODE (SET_SRC (pat
)) == PLUS
)
3508 rtx unspec
= XEXP (SET_SRC (pat
), 1);
3510 if (GET_CODE (unspec
) == UNSPEC
3511 && (XINT (unspec
, 1) == UNSPEC_TLSGD
3512 || XINT (unspec
, 1) == UNSPEC_TLSLDM
))
3519 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3520 instruction. RETURN_P is true if the v9 variant 'return' is to be
3521 considered in the test too.
3523 TRIAL must be a SET whose destination is a REG appropriate for the
3524 'restore' instruction or, if RETURN_P is true, for the 'return'
3528 eligible_for_restore_insn (rtx trial
, bool return_p
)
3530 rtx pat
= PATTERN (trial
);
3531 rtx src
= SET_SRC (pat
);
3532 bool src_is_freg
= false;
3535 /* Since we now can do moves between float and integer registers when
3536 VIS3 is enabled, we have to catch this case. We can allow such
3537 moves when doing a 'return' however. */
3539 if (GET_CODE (src_reg
) == SUBREG
)
3540 src_reg
= SUBREG_REG (src_reg
);
3541 if (GET_CODE (src_reg
) == REG
3542 && SPARC_FP_REG_P (REGNO (src_reg
)))
3545 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3546 if (GET_MODE_CLASS (GET_MODE (src
)) != MODE_FLOAT
3547 && arith_operand (src
, GET_MODE (src
))
3551 return GET_MODE_SIZE (GET_MODE (src
)) <= GET_MODE_SIZE (DImode
);
3553 return GET_MODE_SIZE (GET_MODE (src
)) <= GET_MODE_SIZE (SImode
);
3556 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3557 else if (GET_MODE_CLASS (GET_MODE (src
)) != MODE_FLOAT
3558 && arith_double_operand (src
, GET_MODE (src
))
3560 return GET_MODE_SIZE (GET_MODE (src
)) <= GET_MODE_SIZE (DImode
);
3562 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3563 else if (! TARGET_FPU
&& register_operand (src
, SFmode
))
3566 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3567 else if (! TARGET_FPU
&& TARGET_ARCH64
&& register_operand (src
, DFmode
))
3570 /* If we have the 'return' instruction, anything that does not use
3571 local or output registers and can go into a delay slot wins. */
3572 else if (return_p
&& TARGET_V9
&& !epilogue_renumber (&pat
, 1))
3575 /* The 'restore src1,src2,dest' pattern for SImode. */
3576 else if (GET_CODE (src
) == PLUS
3577 && register_operand (XEXP (src
, 0), SImode
)
3578 && arith_operand (XEXP (src
, 1), SImode
))
3581 /* The 'restore src1,src2,dest' pattern for DImode. */
3582 else if (GET_CODE (src
) == PLUS
3583 && register_operand (XEXP (src
, 0), DImode
)
3584 && arith_double_operand (XEXP (src
, 1), DImode
))
3587 /* The 'restore src1,%lo(src2),dest' pattern. */
3588 else if (GET_CODE (src
) == LO_SUM
3589 && ! TARGET_CM_MEDMID
3590 && ((register_operand (XEXP (src
, 0), SImode
)
3591 && immediate_operand (XEXP (src
, 1), SImode
))
3593 && register_operand (XEXP (src
, 0), DImode
)
3594 && immediate_operand (XEXP (src
, 1), DImode
))))
3597 /* The 'restore src,src,dest' pattern. */
3598 else if (GET_CODE (src
) == ASHIFT
3599 && (register_operand (XEXP (src
, 0), SImode
)
3600 || register_operand (XEXP (src
, 0), DImode
))
3601 && XEXP (src
, 1) == const1_rtx
)
3607 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3610 eligible_for_return_delay (rtx trial
)
3615 /* If the function uses __builtin_eh_return, the eh_return machinery
3616 occupies the delay slot. */
3617 if (crtl
->calls_eh_return
)
3620 if (get_attr_in_branch_delay (trial
) == IN_BRANCH_DELAY_FALSE
)
3623 /* In the case of a leaf or flat function, anything can go into the slot. */
3624 if (sparc_leaf_function_p
|| TARGET_FLAT
)
3627 if (!NONJUMP_INSN_P (trial
))
3630 pat
= PATTERN (trial
);
3631 if (GET_CODE (pat
) == PARALLEL
)
3637 for (i
= XVECLEN (pat
, 0) - 1; i
>= 0; i
--)
3639 rtx expr
= XVECEXP (pat
, 0, i
);
3640 if (GET_CODE (expr
) != SET
)
3642 if (GET_CODE (SET_DEST (expr
)) != REG
)
3644 regno
= REGNO (SET_DEST (expr
));
3645 if (regno
>= 8 && regno
< 24)
3648 return !epilogue_renumber (&pat
, 1);
3651 if (GET_CODE (pat
) != SET
)
3654 if (GET_CODE (SET_DEST (pat
)) != REG
)
3657 regno
= REGNO (SET_DEST (pat
));
3659 /* Otherwise, only operations which can be done in tandem with
3660 a `restore' or `return' insn can go into the delay slot. */
3661 if (regno
>= 8 && regno
< 24)
3664 /* If this instruction sets up floating point register and we have a return
3665 instruction, it can probably go in. But restore will not work
3667 if (! SPARC_INT_REG_P (regno
))
3668 return TARGET_V9
&& !epilogue_renumber (&pat
, 1);
3670 return eligible_for_restore_insn (trial
, true);
3673 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3676 eligible_for_sibcall_delay (rtx trial
)
3680 if (get_attr_in_branch_delay (trial
) == IN_BRANCH_DELAY_FALSE
)
3683 if (!NONJUMP_INSN_P (trial
))
3686 pat
= PATTERN (trial
);
3688 if (sparc_leaf_function_p
|| TARGET_FLAT
)
3690 /* If the tail call is done using the call instruction,
3691 we have to restore %o7 in the delay slot. */
3692 if (LEAF_SIBCALL_SLOT_RESERVED_P
)
3695 /* %g1 is used to build the function address */
3696 if (reg_mentioned_p (gen_rtx_REG (Pmode
, 1), pat
))
3702 if (GET_CODE (pat
) != SET
)
3705 /* Otherwise, only operations which can be done in tandem with
3706 a `restore' insn can go into the delay slot. */
3707 if (GET_CODE (SET_DEST (pat
)) != REG
3708 || (REGNO (SET_DEST (pat
)) >= 8 && REGNO (SET_DEST (pat
)) < 24)
3709 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat
))))
3712 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3714 if (reg_mentioned_p (gen_rtx_REG (Pmode
, 15), pat
))
3717 return eligible_for_restore_insn (trial
, false);
3720 /* Determine if it's legal to put X into the constant pool. This
3721 is not possible if X contains the address of a symbol that is
3722 not constant (TLS) or not known at final link time (PIC). */
3725 sparc_cannot_force_const_mem (enum machine_mode mode
, rtx x
)
3727 switch (GET_CODE (x
))
3732 /* Accept all non-symbolic constants. */
3736 /* Labels are OK iff we are non-PIC. */
3737 return flag_pic
!= 0;
3740 /* 'Naked' TLS symbol references are never OK,
3741 non-TLS symbols are OK iff we are non-PIC. */
3742 if (SYMBOL_REF_TLS_MODEL (x
))
3745 return flag_pic
!= 0;
3748 return sparc_cannot_force_const_mem (mode
, XEXP (x
, 0));
3751 return sparc_cannot_force_const_mem (mode
, XEXP (x
, 0))
3752 || sparc_cannot_force_const_mem (mode
, XEXP (x
, 1));
3760 /* Global Offset Table support. */
3761 static GTY(()) rtx got_helper_rtx
= NULL_RTX
;
3762 static GTY(()) rtx global_offset_table_rtx
= NULL_RTX
;
3764 /* Return the SYMBOL_REF for the Global Offset Table. */
3766 static GTY(()) rtx sparc_got_symbol
= NULL_RTX
;
3771 if (!sparc_got_symbol
)
3772 sparc_got_symbol
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
3774 return sparc_got_symbol
;
3777 /* Ensure that we are not using patterns that are not OK with PIC. */
3787 op
= recog_data
.operand
[i
];
3788 gcc_assert (GET_CODE (op
) != SYMBOL_REF
3789 && (GET_CODE (op
) != CONST
3790 || (GET_CODE (XEXP (op
, 0)) == MINUS
3791 && XEXP (XEXP (op
, 0), 0) == sparc_got ()
3792 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == CONST
)));
3799 /* Return true if X is an address which needs a temporary register when
3800 reloaded while generating PIC code. */
3803 pic_address_needs_scratch (rtx x
)
3805 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3806 if (GET_CODE (x
) == CONST
&& GET_CODE (XEXP (x
, 0)) == PLUS
3807 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
3808 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
3809 && ! SMALL_INT (XEXP (XEXP (x
, 0), 1)))
3815 /* Determine if a given RTX is a valid constant. We already know this
3816 satisfies CONSTANT_P. */
3819 sparc_legitimate_constant_p (enum machine_mode mode
, rtx x
)
3821 switch (GET_CODE (x
))
3825 if (sparc_tls_referenced_p (x
))
3830 if (GET_MODE (x
) == VOIDmode
)
3833 /* Floating point constants are generally not ok.
3834 The only exception is 0.0 and all-ones in VIS. */
3836 && SCALAR_FLOAT_MODE_P (mode
)
3837 && (const_zero_operand (x
, mode
)
3838 || const_all_ones_operand (x
, mode
)))
3844 /* Vector constants are generally not ok.
3845 The only exception is 0 or -1 in VIS. */
3847 && (const_zero_operand (x
, mode
)
3848 || const_all_ones_operand (x
, mode
)))
3860 /* Determine if a given RTX is a valid constant address. */
3863 constant_address_p (rtx x
)
3865 switch (GET_CODE (x
))
3873 if (flag_pic
&& pic_address_needs_scratch (x
))
3875 return sparc_legitimate_constant_p (Pmode
, x
);
3878 return !flag_pic
&& sparc_legitimate_constant_p (Pmode
, x
);
3885 /* Nonzero if the constant value X is a legitimate general operand
3886 when generating PIC code. It is given that flag_pic is on and
3887 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3890 legitimate_pic_operand_p (rtx x
)
3892 if (pic_address_needs_scratch (x
))
3894 if (sparc_tls_referenced_p (x
))
#define RTX_OK_FOR_OFFSET_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))

#define RTX_OK_FOR_OLO10_P(X, MODE)			\
  (CONST_INT_P (X)					\
   && INTVAL (X) >= -0x1000				\
   && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
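
/* Example (added for clarity): with DImode, whose size is 8, the plain
   offset form accepts constants from -0x1000 up to 0xff7, while the
   %lo()+offset form stops at 0xbf7 so that adding the offset cannot
   carry out of the 10-bit %lo part of the address.  */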
3909 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3911 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3912 ordinarily. This changes a bit when generating PIC. */
3915 sparc_legitimate_address_p (enum machine_mode mode
, rtx addr
, bool strict
)
3917 rtx rs1
= NULL
, rs2
= NULL
, imm1
= NULL
;
3919 if (REG_P (addr
) || GET_CODE (addr
) == SUBREG
)
3921 else if (GET_CODE (addr
) == PLUS
)
3923 rs1
= XEXP (addr
, 0);
3924 rs2
= XEXP (addr
, 1);
3926 /* Canonicalize. REG comes first, if there are no regs,
3927 LO_SUM comes first. */
3929 && GET_CODE (rs1
) != SUBREG
3931 || GET_CODE (rs2
) == SUBREG
3932 || (GET_CODE (rs2
) == LO_SUM
&& GET_CODE (rs1
) != LO_SUM
)))
3934 rs1
= XEXP (addr
, 1);
3935 rs2
= XEXP (addr
, 0);
3939 && rs1
== pic_offset_table_rtx
3941 && GET_CODE (rs2
) != SUBREG
3942 && GET_CODE (rs2
) != LO_SUM
3943 && GET_CODE (rs2
) != MEM
3944 && !(GET_CODE (rs2
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (rs2
))
3945 && (! symbolic_operand (rs2
, VOIDmode
) || mode
== Pmode
)
3946 && (GET_CODE (rs2
) != CONST_INT
|| SMALL_INT (rs2
)))
3948 || GET_CODE (rs1
) == SUBREG
)
3949 && RTX_OK_FOR_OFFSET_P (rs2
, mode
)))
3954 else if ((REG_P (rs1
) || GET_CODE (rs1
) == SUBREG
)
3955 && (REG_P (rs2
) || GET_CODE (rs2
) == SUBREG
))
3957 /* We prohibit REG + REG for TFmode when there are no quad move insns
3958 and we consequently need to split. We do this because REG+REG
3959 is not an offsettable address. If we get the situation in reload
3960 where source and destination of a movtf pattern are both MEMs with
3961 REG+REG address, then only one of them gets converted to an
3962 offsettable address. */
3964 && ! (TARGET_ARCH64
&& TARGET_HARD_QUAD
))
3967 /* Likewise for TImode, but in all cases. */
3971 /* We prohibit REG + REG on ARCH32 if not optimizing for
3972 DFmode/DImode because then mem_min_alignment is likely to be zero
3973 after reload and the forced split would lack a matching splitter
3975 if (TARGET_ARCH32
&& !optimize
3976 && (mode
== DFmode
|| mode
== DImode
))
3979 else if (USE_AS_OFFSETABLE_LO10
3980 && GET_CODE (rs1
) == LO_SUM
3982 && ! TARGET_CM_MEDMID
3983 && RTX_OK_FOR_OLO10_P (rs2
, mode
))
3986 imm1
= XEXP (rs1
, 1);
3987 rs1
= XEXP (rs1
, 0);
3988 if (!CONSTANT_P (imm1
)
3989 || (GET_CODE (rs1
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (rs1
)))
3993 else if (GET_CODE (addr
) == LO_SUM
)
3995 rs1
= XEXP (addr
, 0);
3996 imm1
= XEXP (addr
, 1);
3998 if (!CONSTANT_P (imm1
)
3999 || (GET_CODE (rs1
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (rs1
)))
4002 /* We can't allow TFmode in 32-bit mode, because an offset greater
4003 than the alignment (8) may cause the LO_SUM to overflow. */
4004 if (mode
== TFmode
&& TARGET_ARCH32
)
4007 else if (GET_CODE (addr
) == CONST_INT
&& SMALL_INT (addr
))
4012 if (GET_CODE (rs1
) == SUBREG
)
4013 rs1
= SUBREG_REG (rs1
);
4019 if (GET_CODE (rs2
) == SUBREG
)
4020 rs2
= SUBREG_REG (rs2
);
4027 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1
))
4028 || (rs2
&& !REGNO_OK_FOR_BASE_P (REGNO (rs2
))))
4033 if ((! SPARC_INT_REG_P (REGNO (rs1
))
4034 && REGNO (rs1
) != FRAME_POINTER_REGNUM
4035 && REGNO (rs1
) < FIRST_PSEUDO_REGISTER
)
4037 && (! SPARC_INT_REG_P (REGNO (rs2
))
4038 && REGNO (rs2
) != FRAME_POINTER_REGNUM
4039 && REGNO (rs2
) < FIRST_PSEUDO_REGISTER
)))
/* Return the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx sparc_tls_symbol = NULL_RTX;

static rtx
sparc_tls_get_addr (void)
{
  if (!sparc_tls_symbol)
    sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");

  return sparc_tls_symbol;
}
4058 /* Return the Global Offset Table to be used in TLS mode. */
4061 sparc_tls_got (void)
4063 /* In PIC mode, this is just the PIC offset table. */
4066 crtl
->uses_pic_offset_table
= 1;
4067 return pic_offset_table_rtx
;
4070 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
4071 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
4072 if (TARGET_SUN_TLS
&& TARGET_ARCH32
)
4074 load_got_register ();
4075 return global_offset_table_rtx
;
4078 /* In all other cases, we load a new pseudo with the GOT symbol. */
4079 return copy_to_reg (sparc_got ());
4082 /* Return true if X contains a thread-local symbol. */
4085 sparc_tls_referenced_p (rtx x
)
4087 if (!TARGET_HAVE_TLS
)
4090 if (GET_CODE (x
) == CONST
&& GET_CODE (XEXP (x
, 0)) == PLUS
)
4091 x
= XEXP (XEXP (x
, 0), 0);
4093 if (GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
))
4096 /* That's all we handle in sparc_legitimize_tls_address for now. */
4100 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4101 this (thread-local) address. */
4104 sparc_legitimize_tls_address (rtx addr
)
4106 rtx temp1
, temp2
, temp3
, ret
, o0
, got
, insn
;
4108 gcc_assert (can_create_pseudo_p ());
4110 if (GET_CODE (addr
) == SYMBOL_REF
)
4111 switch (SYMBOL_REF_TLS_MODEL (addr
))
4113 case TLS_MODEL_GLOBAL_DYNAMIC
:
4115 temp1
= gen_reg_rtx (SImode
);
4116 temp2
= gen_reg_rtx (SImode
);
4117 ret
= gen_reg_rtx (Pmode
);
4118 o0
= gen_rtx_REG (Pmode
, 8);
4119 got
= sparc_tls_got ();
4120 emit_insn (gen_tgd_hi22 (temp1
, addr
));
4121 emit_insn (gen_tgd_lo10 (temp2
, temp1
, addr
));
4124 emit_insn (gen_tgd_add32 (o0
, got
, temp2
, addr
));
4125 insn
= emit_call_insn (gen_tgd_call32 (o0
, sparc_tls_get_addr (),
4130 emit_insn (gen_tgd_add64 (o0
, got
, temp2
, addr
));
4131 insn
= emit_call_insn (gen_tgd_call64 (o0
, sparc_tls_get_addr (),
4134 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), o0
);
4135 insn
= get_insns ();
4137 emit_libcall_block (insn
, ret
, o0
, addr
);
4140 case TLS_MODEL_LOCAL_DYNAMIC
:
4142 temp1
= gen_reg_rtx (SImode
);
4143 temp2
= gen_reg_rtx (SImode
);
4144 temp3
= gen_reg_rtx (Pmode
);
4145 ret
= gen_reg_rtx (Pmode
);
4146 o0
= gen_rtx_REG (Pmode
, 8);
4147 got
= sparc_tls_got ();
4148 emit_insn (gen_tldm_hi22 (temp1
));
4149 emit_insn (gen_tldm_lo10 (temp2
, temp1
));
4152 emit_insn (gen_tldm_add32 (o0
, got
, temp2
));
4153 insn
= emit_call_insn (gen_tldm_call32 (o0
, sparc_tls_get_addr (),
4158 emit_insn (gen_tldm_add64 (o0
, got
, temp2
));
4159 insn
= emit_call_insn (gen_tldm_call64 (o0
, sparc_tls_get_addr (),
4162 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), o0
);
4163 insn
= get_insns ();
4165 emit_libcall_block (insn
, temp3
, o0
,
4166 gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
4167 UNSPEC_TLSLD_BASE
));
4168 temp1
= gen_reg_rtx (SImode
);
4169 temp2
= gen_reg_rtx (SImode
);
4170 emit_insn (gen_tldo_hix22 (temp1
, addr
));
4171 emit_insn (gen_tldo_lox10 (temp2
, temp1
, addr
));
4173 emit_insn (gen_tldo_add32 (ret
, temp3
, temp2
, addr
));
4175 emit_insn (gen_tldo_add64 (ret
, temp3
, temp2
, addr
));
4178 case TLS_MODEL_INITIAL_EXEC
:
4179 temp1
= gen_reg_rtx (SImode
);
4180 temp2
= gen_reg_rtx (SImode
);
4181 temp3
= gen_reg_rtx (Pmode
);
4182 got
= sparc_tls_got ();
4183 emit_insn (gen_tie_hi22 (temp1
, addr
));
4184 emit_insn (gen_tie_lo10 (temp2
, temp1
, addr
));
4186 emit_insn (gen_tie_ld32 (temp3
, got
, temp2
, addr
));
4188 emit_insn (gen_tie_ld64 (temp3
, got
, temp2
, addr
));
4191 ret
= gen_reg_rtx (Pmode
);
4193 emit_insn (gen_tie_add32 (ret
, gen_rtx_REG (Pmode
, 7),
4196 emit_insn (gen_tie_add64 (ret
, gen_rtx_REG (Pmode
, 7),
4200 ret
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, 7), temp3
);
4203 case TLS_MODEL_LOCAL_EXEC
:
4204 temp1
= gen_reg_rtx (Pmode
);
4205 temp2
= gen_reg_rtx (Pmode
);
4208 emit_insn (gen_tle_hix22_sp32 (temp1
, addr
));
4209 emit_insn (gen_tle_lox10_sp32 (temp2
, temp1
, addr
));
4213 emit_insn (gen_tle_hix22_sp64 (temp1
, addr
));
4214 emit_insn (gen_tle_lox10_sp64 (temp2
, temp1
, addr
));
4216 ret
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, 7), temp2
);
4223 else if (GET_CODE (addr
) == CONST
)
4227 gcc_assert (GET_CODE (XEXP (addr
, 0)) == PLUS
);
4229 base
= sparc_legitimize_tls_address (XEXP (XEXP (addr
, 0), 0));
4230 offset
= XEXP (XEXP (addr
, 0), 1);
4232 base
= force_operand (base
, NULL_RTX
);
4233 if (!(GET_CODE (offset
) == CONST_INT
&& SMALL_INT (offset
)))
4234 offset
= force_reg (Pmode
, offset
);
4235 ret
= gen_rtx_PLUS (Pmode
, base
, offset
);
4239 gcc_unreachable (); /* for now ... */
4244 /* Legitimize PIC addresses. If the address is already position-independent,
4245 we return ORIG. Newly generated position-independent addresses go into a
4246 reg. This is REG if nonzero, otherwise we allocate register(s) as
4250 sparc_legitimize_pic_address (rtx orig
, rtx reg
)
4252 bool gotdata_op
= false;
4254 if (GET_CODE (orig
) == SYMBOL_REF
4255 /* See the comment in sparc_expand_move. */
4256 || (GET_CODE (orig
) == LABEL_REF
&& !can_use_mov_pic_label_ref (orig
)))
4258 rtx pic_ref
, address
;
4263 gcc_assert (can_create_pseudo_p ());
4264 reg
= gen_reg_rtx (Pmode
);
4269 /* If not during reload, allocate another temp reg here for loading
4270 in the address, so that these instructions can be optimized
4272 rtx temp_reg
= (! can_create_pseudo_p ()
4273 ? reg
: gen_reg_rtx (Pmode
));
4275 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4276 won't get confused into thinking that these two instructions
4277 are loading in the true address of the symbol. If in the
4278 future a PIC rtx exists, that should be used instead. */
4281 emit_insn (gen_movdi_high_pic (temp_reg
, orig
));
4282 emit_insn (gen_movdi_lo_sum_pic (temp_reg
, temp_reg
, orig
));
4286 emit_insn (gen_movsi_high_pic (temp_reg
, orig
));
4287 emit_insn (gen_movsi_lo_sum_pic (temp_reg
, temp_reg
, orig
));
4295 crtl
->uses_pic_offset_table
= 1;
4299 insn
= emit_insn (gen_movdi_pic_gotdata_op (reg
,
4300 pic_offset_table_rtx
,
4303 insn
= emit_insn (gen_movsi_pic_gotdata_op (reg
,
4304 pic_offset_table_rtx
,
4310 = gen_const_mem (Pmode
,
4311 gen_rtx_PLUS (Pmode
,
4312 pic_offset_table_rtx
, address
));
4313 insn
= emit_move_insn (reg
, pic_ref
);
4316 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4318 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
4321 else if (GET_CODE (orig
) == CONST
)
4325 if (GET_CODE (XEXP (orig
, 0)) == PLUS
4326 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
4331 gcc_assert (can_create_pseudo_p ());
4332 reg
= gen_reg_rtx (Pmode
);
4335 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
4336 base
= sparc_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), reg
);
4337 offset
= sparc_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
4338 base
== reg
? NULL_RTX
: reg
);
4340 if (GET_CODE (offset
) == CONST_INT
)
4342 if (SMALL_INT (offset
))
4343 return plus_constant (Pmode
, base
, INTVAL (offset
));
4344 else if (can_create_pseudo_p ())
4345 offset
= force_reg (Pmode
, offset
);
4347 /* If we reach here, then something is seriously wrong. */
4350 return gen_rtx_PLUS (Pmode
, base
, offset
);
4352 else if (GET_CODE (orig
) == LABEL_REF
)
4353 /* ??? We ought to be checking that the register is live instead, in case
4354 it is eliminated. */
4355 crtl
->uses_pic_offset_table
= 1;
4360 /* Try machine-dependent ways of modifying an illegitimate address X
4361 to be legitimate. If we find one, return the new, valid address.
4363 OLDX is the address as it was before break_out_memory_refs was called.
4364 In some cases it is useful to look at this to decide what needs to be done.
4366 MODE is the mode of the operand pointed to by X.
4368 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
static rtx
sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  rtx orig_x = x;

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
		      force_operand (XEXP (x, 0), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      force_operand (XEXP (x, 1), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
    x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
		      XEXP (x, 1));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      force_operand (XEXP (x, 1), NULL_RTX));

  if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
    return x;

  if (sparc_tls_referenced_p (x))
    x = sparc_legitimize_tls_address (x);
  else if (flag_pic)
    x = sparc_legitimize_pic_address (x, NULL_RTX);
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
		      copy_to_mode_reg (Pmode, XEXP (x, 1)));
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
		      copy_to_mode_reg (Pmode, XEXP (x, 0)));
  else if (GET_CODE (x) == SYMBOL_REF
	   || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    x = copy_to_suggested_reg (x, NULL_RTX, Pmode);

  return x;
}
/* Delegitimize an address that was legitimized by the above function.  */

static rtx
sparc_delegitimize_address (rtx x)
{
  x = delegitimize_mem_from_attrs (x);

  if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
    switch (XINT (XEXP (x, 1), 1))
      {
      case UNSPEC_MOVE_PIC:
	x = XVECEXP (XEXP (x, 1), 0, 0);
	gcc_assert (GET_CODE (x) == SYMBOL_REF);
	break;
      }

  /* This is generated by mov{si,di}_pic_label_ref in PIC mode.  */
  if (GET_CODE (x) == MINUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
      && GET_CODE (XEXP (x, 1)) == LO_SUM
      && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
      && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
    {
      x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
      gcc_assert (GET_CODE (x) == LABEL_REF);
    }

  return x;
}
/* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For SPARC, we wish to handle addresses by splitting them into
   HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
   This cuts the number of extra insns by one.

   Do nothing when generating PIC code and the address is a symbolic
   operand or requires a scratch register.  */
rtx
sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  /* Decompose SImode constants into HIGH+LO_SUM.  */
  if (CONSTANT_P (x)
      && (mode != TFmode || TARGET_ARCH64)
      && GET_MODE (x) == SImode
      && GET_CODE (x) != LO_SUM
      && GET_CODE (x) != HIGH
      && sparc_cmodel <= CM_MEDLOW
      && !(flag_pic
	   && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
    {
      x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type)type);
      *win = 1;
      return x;
    }

  /* We have to recognize what we have already generated above.  */
  if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type)type);
      *win = 1;
      return x;
    }

  *win = 0;
  return x;
}
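/* Illustrative note (added for exposition, not part of the original
   sources): for an SImode access to a symbol `foo', the HIGH+LO_SUM
   decomposition above corresponds to assembly of roughly the form

	sethi	%hi(foo), %g1
	ld	[%g1 + %lo(foo)], %o0

   (register choices here are only examples).  The LO_SUM part is kept
   inside the memory reference, so only the HIGH part needs a reload,
   which is the one-insn saving mentioned in the comment above.  */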
/* Return true if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   In PIC mode,

      (mem:HI [%l7+a])

   is not equivalent to

      (mem:QI [%l7+a]) (mem:QI [%l7+a+1])

   because [%l7+a+1] is interpreted as the address of (a+1).  */

static bool
sparc_mode_dependent_address_p (const_rtx addr,
				addr_space_t as ATTRIBUTE_UNUSED)
{
  if (flag_pic && GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      if (op0 == pic_offset_table_rtx
	  && symbolic_operand (op1, VOIDmode))
	return true;
    }

  return false;
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  const char *reg_name = reg_names[regno];

  /* Skip the leading '%' as that cannot be used in a
     symbol name.  */
  reg_name += 1;

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
}
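/* Illustrative note (added for exposition, not part of the original
   sources): for the GOT register %l7 this produces the linkonce symbol
   "__sparc_get_pc_thunk.l7" when USE_HIDDEN_LINKONCE is set; otherwise
   an internal label derived from the "LADDPC" prefix and the register
   number is used instead.  */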
/* Wrapper around the load_pcrel_sym{si,di} patterns.  */

static rtx
gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
{
  int orig_flag_pic = flag_pic;
  rtx insn;

  /* The load_pcrel_sym{si,di} patterns require absolute addressing.  */
  flag_pic = 0;
  if (TARGET_ARCH64)
    insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
  else
    insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
  flag_pic = orig_flag_pic;

  return insn;
}
/* Emit code to load the GOT register.  */

void
load_got_register (void)
{
  /* In PIC mode, this will retrieve pic_offset_table_rtx.  */
  if (!global_offset_table_rtx)
    global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);

  if (TARGET_VXWORKS_RTP)
    emit_insn (gen_vxworks_load_got ());
  else
    {
      /* The GOT symbol is subject to a PC-relative relocation so we need a
	 helper function to add the PC value and thus get the final value.  */
      if (!got_helper_rtx)
	{
	  char name[32];
	  get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
	  got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
	}

      emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
				     got_helper_rtx,
				     GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
    }

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.
     ??? In the case where we don't obey regdecls, this is not sufficient
     since we may not fall out the bottom.  */
  emit_use (global_offset_table_rtx);
}
/* Emit a call instruction with the pattern given by PAT.  ADDR is the
   address of the call target.  */

void
sparc_emit_call_insn (rtx pat, rtx addr)
{
  rtx insn;

  insn = emit_call_insn (pat);

  /* The PIC register is live on entry to VxWorks PIC PLT entries.  */
  if (TARGET_VXWORKS_RTP
      && flag_pic
      && GET_CODE (addr) == SYMBOL_REF
      && (SYMBOL_REF_DECL (addr)
	  ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
	  : !SYMBOL_REF_LOCAL_P (addr)))
    {
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
      crtl->uses_pic_offset_table = 1;
    }
}
/* Return 1 if RTX is a MEM which is known to be aligned to at
   least a DESIRED byte boundary.  */

int
mem_min_alignment (rtx mem, int desired)
{
  rtx addr, base, offset;

  /* If it's not a MEM we can't accept it.  */
  if (GET_CODE (mem) != MEM)
    return 0;

  if (!TARGET_UNALIGNED_DOUBLES
      && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
    return 1;

  /* ??? The rest of the function predates MEM_ALIGN so
     there is probably a bit of redundancy.  */
  addr = XEXP (mem, 0);
  base = offset = NULL_RTX;
  if (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	{
	  base = XEXP (addr, 0);

	  /* What we are saying here is that if the base
	     REG is aligned properly, the compiler will make
	     sure any REG based index upon it will be so
	     as well.  */
	  if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	    offset = XEXP (addr, 1);
	  else
	    offset = const0_rtx;
	}
    }
  else if (GET_CODE (addr) == REG)
    {
      base = addr;
      offset = const0_rtx;
    }

  if (base != NULL_RTX)
    {
      int regno = REGNO (base);

      if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
	{
	  /* Check if the compiler has recorded some information
	     about the alignment of the base REG.  If reload has
	     completed, we already matched with proper alignments.
	     If not running global_alloc, reload might give us
	     unaligned pointer to local stack though.  */
	  if (((cfun != 0
		&& REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
	       || (optimize && reload_completed))
	      && (INTVAL (offset) & (desired - 1)) == 0)
	    return 1;
	}
      else
	{
	  if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
	    return 1;
	}
    }
  else if (! TARGET_UNALIGNED_DOUBLES
	   || CONSTANT_P (addr)
	   || GET_CODE (addr) == LO_SUM)
    {
      /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
	 is true, in which case we can only assume that an access is aligned if
	 it is to a constant address, or the address involves a LO_SUM.  */
      return 1;
    }

  /* An obviously unaligned address.  */
  return 0;
}
/* Vectors to keep interesting information about registers where it can easily
   be got.  We used to use the actual mode value as the bit number, but there
   are more than 32 modes now.  Instead we use two tables: one indexed by
   hard register number, and one indexed by mode.  */

/* The purpose of sparc_mode_class is to shrink the range of modes so that
   they all fit (as bit numbers) in a 32-bit word (again).  Each real mode is
   mapped into one sparc_mode_class mode.  */

enum sparc_mode_class {
  H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
  SF_MODE, DF_MODE, TF_MODE, OF_MODE,
  CC_MODE, CCFP_MODE
};

/* Modes for single-word and smaller quantities.  */
#define S_MODES \
  ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities.  */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-word and smaller quantities.  */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Modes for 8-word and smaller quantities.  */
#define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))

/* Modes for single-float quantities.  */
#define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-float and smaller quantities.  */
#define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-float and smaller quantities.  */
#define TF_MODES (DF_MODES | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and smaller quantities.  */
#define OF_MODES (TF_MODES | (1 << (int) OF_MODE))

/* Modes for double-float only quantities.  */
#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-float and double-float only quantities.  */
#define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and double-float only quantities.  */
#define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))

/* Modes for condition codes.  */
#define CC_MODES (1 << (int) CC_MODE)
#define CCFP_MODES (1 << (int) CCFP_MODE)
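/* Illustrative sketch (added for exposition, not part of the original
   sources): with the two tables declared below, asking whether hard
   register REGNO can hold a value of mode MODE reduces to a single
   bit-membership test, roughly

     (hard_regno_mode_classes[REGNO] & sparc_mode_class[(int) MODE]) != 0

   which is, modulo the exact macro spelling in the target headers, how
   the register/mode validity check is expected to consult these data.  */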
/* Value is 1 if register/mode pair is acceptable on sparc.
   The funny mixture of D and T modes is because integer operations
   do not specially operate on tetra quantities, so non-quad-aligned
   registers can hold quadword quantities (except %o4 and %i4 because
   they cross fixed registers).  */

/* This points to either the 32 bit or the 64 bit version.  */
const int *hard_regno_mode_classes;

static const int hard_32bit_mode_classes[] = {
  S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0-3] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc, %sfp, %gsr */
  CC_MODES, 0, D_MODES
};

static const int hard_64bit_mode_classes[] = {
  D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0-3] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc, %sfp, %gsr */
  CC_MODES, 0, D_MODES
};

int sparc_mode_class [NUM_MACHINE_MODES];

enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4818 sparc_init_modes (void)
4822 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4824 switch (GET_MODE_CLASS (i
))
4827 case MODE_PARTIAL_INT
:
4828 case MODE_COMPLEX_INT
:
4829 if (GET_MODE_SIZE (i
) < 4)
4830 sparc_mode_class
[i
] = 1 << (int) H_MODE
;
4831 else if (GET_MODE_SIZE (i
) == 4)
4832 sparc_mode_class
[i
] = 1 << (int) S_MODE
;
4833 else if (GET_MODE_SIZE (i
) == 8)
4834 sparc_mode_class
[i
] = 1 << (int) D_MODE
;
4835 else if (GET_MODE_SIZE (i
) == 16)
4836 sparc_mode_class
[i
] = 1 << (int) T_MODE
;
4837 else if (GET_MODE_SIZE (i
) == 32)
4838 sparc_mode_class
[i
] = 1 << (int) O_MODE
;
4840 sparc_mode_class
[i
] = 0;
4842 case MODE_VECTOR_INT
:
4843 if (GET_MODE_SIZE (i
) == 4)
4844 sparc_mode_class
[i
] = 1 << (int) SF_MODE
;
4845 else if (GET_MODE_SIZE (i
) == 8)
4846 sparc_mode_class
[i
] = 1 << (int) DF_MODE
;
4848 sparc_mode_class
[i
] = 0;
4851 case MODE_COMPLEX_FLOAT
:
4852 if (GET_MODE_SIZE (i
) == 4)
4853 sparc_mode_class
[i
] = 1 << (int) SF_MODE
;
4854 else if (GET_MODE_SIZE (i
) == 8)
4855 sparc_mode_class
[i
] = 1 << (int) DF_MODE
;
4856 else if (GET_MODE_SIZE (i
) == 16)
4857 sparc_mode_class
[i
] = 1 << (int) TF_MODE
;
4858 else if (GET_MODE_SIZE (i
) == 32)
4859 sparc_mode_class
[i
] = 1 << (int) OF_MODE
;
4861 sparc_mode_class
[i
] = 0;
4864 if (i
== (int) CCFPmode
|| i
== (int) CCFPEmode
)
4865 sparc_mode_class
[i
] = 1 << (int) CCFP_MODE
;
4867 sparc_mode_class
[i
] = 1 << (int) CC_MODE
;
4870 sparc_mode_class
[i
] = 0;
4876 hard_regno_mode_classes
= hard_64bit_mode_classes
;
4878 hard_regno_mode_classes
= hard_32bit_mode_classes
;
4880 /* Initialize the array used by REGNO_REG_CLASS. */
4881 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
4883 if (i
< 16 && TARGET_V8PLUS
)
4884 sparc_regno_reg_class
[i
] = I64_REGS
;
4885 else if (i
< 32 || i
== FRAME_POINTER_REGNUM
)
4886 sparc_regno_reg_class
[i
] = GENERAL_REGS
;
4888 sparc_regno_reg_class
[i
] = FP_REGS
;
4890 sparc_regno_reg_class
[i
] = EXTRA_FP_REGS
;
4892 sparc_regno_reg_class
[i
] = FPCC_REGS
;
4894 sparc_regno_reg_class
[i
] = NO_REGS
;
4898 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4901 save_global_or_fp_reg_p (unsigned int regno
,
4902 int leaf_function ATTRIBUTE_UNUSED
)
4904 return !call_used_regs
[regno
] && df_regs_ever_live_p (regno
);
4907 /* Return whether the return address register (%i7) is needed. */
4910 return_addr_reg_needed_p (int leaf_function
)
4912 /* If it is live, for example because of __builtin_return_address (0). */
4913 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM
))
4916 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4918 /* Loading the GOT register clobbers %o7. */
4919 || crtl
->uses_pic_offset_table
4920 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM
))
4926 /* Return whether REGNO, a local or in register, must be saved/restored. */
4929 save_local_or_in_reg_p (unsigned int regno
, int leaf_function
)
4931 /* General case: call-saved registers live at some point. */
4932 if (!call_used_regs
[regno
] && df_regs_ever_live_p (regno
))
4935 /* Frame pointer register (%fp) if needed. */
4936 if (regno
== HARD_FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
4939 /* Return address register (%i7) if needed. */
4940 if (regno
== RETURN_ADDR_REGNUM
&& return_addr_reg_needed_p (leaf_function
))
4943 /* GOT register (%l7) if needed. */
4944 if (regno
== PIC_OFFSET_TABLE_REGNUM
&& crtl
->uses_pic_offset_table
)
4947 /* If the function accesses prior frames, the frame pointer and the return
4948 address of the previous frame must be saved on the stack. */
4949 if (crtl
->accesses_prior_frames
4950 && (regno
== HARD_FRAME_POINTER_REGNUM
|| regno
== RETURN_ADDR_REGNUM
))
4956 /* Compute the frame size required by the function. This function is called
4957 during the reload pass and also by sparc_expand_prologue. */
4960 sparc_compute_frame_size (HOST_WIDE_INT size
, int leaf_function
)
4962 HOST_WIDE_INT frame_size
, apparent_frame_size
;
4963 int args_size
, n_global_fp_regs
= 0;
4964 bool save_local_in_regs_p
= false;
4967 /* If the function allocates dynamic stack space, the dynamic offset is
4968 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4969 if (leaf_function
&& !cfun
->calls_alloca
)
4972 args_size
= crtl
->outgoing_args_size
+ REG_PARM_STACK_SPACE (cfun
->decl
);
4974 /* Calculate space needed for global registers. */
4976 for (i
= 0; i
< 8; i
++)
4977 if (save_global_or_fp_reg_p (i
, 0))
4978 n_global_fp_regs
+= 2;
4980 for (i
= 0; i
< 8; i
+= 2)
4981 if (save_global_or_fp_reg_p (i
, 0) || save_global_or_fp_reg_p (i
+ 1, 0))
4982 n_global_fp_regs
+= 2;
4984 /* In the flat window model, find out which local and in registers need to
4985 be saved. We don't reserve space in the current frame for them as they
4986 will be spilled into the register window save area of the caller's frame.
4987 However, as soon as we use this register window save area, we must create
4988 that of the current frame to make it the live one. */
4990 for (i
= 16; i
< 32; i
++)
4991 if (save_local_or_in_reg_p (i
, leaf_function
))
4993 save_local_in_regs_p
= true;
4997 /* Calculate space needed for FP registers. */
4998 for (i
= 32; i
< (TARGET_V9
? 96 : 64); i
+= 2)
4999 if (save_global_or_fp_reg_p (i
, 0) || save_global_or_fp_reg_p (i
+ 1, 0))
5000 n_global_fp_regs
+= 2;
5003 && n_global_fp_regs
== 0
5005 && !save_local_in_regs_p
)
5006 frame_size
= apparent_frame_size
= 0;
5009 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
5010 apparent_frame_size
= (size
- STARTING_FRAME_OFFSET
+ 7) & -8;
5011 apparent_frame_size
+= n_global_fp_regs
* 4;
5013 /* We need to add the size of the outgoing argument area. */
5014 frame_size
= apparent_frame_size
+ ((args_size
+ 7) & -8);
5016 /* And that of the register window save area. */
5017 frame_size
+= FIRST_PARM_OFFSET (cfun
->decl
);
5019 /* Finally, bump to the appropriate alignment. */
5020 frame_size
= SPARC_STACK_ALIGN (frame_size
);
5023 /* Set up values for use in prologue and epilogue. */
5024 sparc_frame_size
= frame_size
;
5025 sparc_apparent_frame_size
= apparent_frame_size
;
5026 sparc_n_global_fp_regs
= n_global_fp_regs
;
5027 sparc_save_local_in_regs_p
= save_local_in_regs_p
;
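/* Illustrative arithmetic (added for exposition, not part of the original
   sources), taking STARTING_FRAME_OFFSET as 0 for simplicity: 61 bytes of
   local variables round up to (61 + 7) & -8 = 64 bytes of apparent frame;
   20 bytes of outgoing arguments round up to (20 + 7) & -8 = 24; the
   register window save area given by FIRST_PARM_OFFSET and the final
   SPARC_STACK_ALIGN rounding are then added on top, so every component of
   the frame stays 8-byte aligned.  */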
5032 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
5035 sparc_initial_elimination_offset (int to
)
5039 if (to
== STACK_POINTER_REGNUM
)
5040 offset
= sparc_compute_frame_size (get_frame_size (), crtl
->is_leaf
);
5044 offset
+= SPARC_STACK_BIAS
;
5048 /* Output any necessary .register pseudo-ops. */
5051 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED
)
5053 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
5059 /* Check if %g[2367] were used without
5060 .register being printed for them already. */
5061 for (i
= 2; i
< 8; i
++)
5063 if (df_regs_ever_live_p (i
)
5064 && ! sparc_hard_reg_printed
[i
])
5066 sparc_hard_reg_printed
[i
] = 1;
5067 /* %g7 is used as TLS base register, use #ignore
5068 for it instead of #scratch. */
5069 fprintf (file
, "\t.register\t%%g%d, #%s\n", i
,
5070 i
== 7 ? "ignore" : "scratch");
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 4096
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.

   Note that we don't use the REG+REG addressing mode for the probes because
   of the stack bias in 64-bit mode.  And it doesn't really buy us anything
   so the advantages of having a single code win here.  */
5091 sparc_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
5093 rtx g1
= gen_rtx_REG (Pmode
, 1);
5095 /* See if we have a constant small number of probes to generate. If so,
5096 that's the easy case. */
5097 if (size
<= PROBE_INTERVAL
)
5099 emit_move_insn (g1
, GEN_INT (first
));
5100 emit_insn (gen_rtx_SET (VOIDmode
, g1
,
5101 gen_rtx_MINUS (Pmode
, stack_pointer_rtx
, g1
)));
5102 emit_stack_probe (plus_constant (Pmode
, g1
, -size
));
5105 /* The run-time loop is made up of 10 insns in the generic case while the
5106 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
5107 else if (size
<= 5 * PROBE_INTERVAL
)
5111 emit_move_insn (g1
, GEN_INT (first
+ PROBE_INTERVAL
));
5112 emit_insn (gen_rtx_SET (VOIDmode
, g1
,
5113 gen_rtx_MINUS (Pmode
, stack_pointer_rtx
, g1
)));
5114 emit_stack_probe (g1
);
5116 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5117 it exceeds SIZE. If only two probes are needed, this will not
5118 generate any code. Then probe at FIRST + SIZE. */
5119 for (i
= 2 * PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
5121 emit_insn (gen_rtx_SET (VOIDmode
, g1
,
5122 plus_constant (Pmode
, g1
, -PROBE_INTERVAL
)));
5123 emit_stack_probe (g1
);
5126 emit_stack_probe (plus_constant (Pmode
, g1
,
5127 (i
- PROBE_INTERVAL
) - size
));
5130 /* Otherwise, do the same as above, but in a loop. Note that we must be
5131 extra careful with variables wrapping around because we might be at
5132 the very top (or the very bottom) of the address space and we have
5133 to be able to handle this case properly; in particular, we use an
5134 equality test for the loop condition. */
5137 HOST_WIDE_INT rounded_size
;
5138 rtx g4
= gen_rtx_REG (Pmode
, 4);
5140 emit_move_insn (g1
, GEN_INT (first
));
5143 /* Step 1: round SIZE to the previous multiple of the interval. */
5145 rounded_size
= size
& -PROBE_INTERVAL
;
5146 emit_move_insn (g4
, GEN_INT (rounded_size
));
5149 /* Step 2: compute initial and final value of the loop counter. */
5151 /* TEST_ADDR = SP + FIRST. */
5152 emit_insn (gen_rtx_SET (VOIDmode
, g1
,
5153 gen_rtx_MINUS (Pmode
, stack_pointer_rtx
, g1
)));
5155 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5156 emit_insn (gen_rtx_SET (VOIDmode
, g4
, gen_rtx_MINUS (Pmode
, g1
, g4
)));
5161 while (TEST_ADDR != LAST_ADDR)
5163 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5167 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5168 until it is equal to ROUNDED_SIZE. */
5171 emit_insn (gen_probe_stack_rangedi (g1
, g1
, g4
));
5173 emit_insn (gen_probe_stack_rangesi (g1
, g1
, g4
));
5176 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5177 that SIZE is equal to ROUNDED_SIZE. */
5179 if (size
!= rounded_size
)
5180 emit_stack_probe (plus_constant (Pmode
, g4
, rounded_size
- size
));
5183 /* Make sure nothing is scheduled before we are done. */
5184 emit_insn (gen_blockage ());
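/* Illustrative note (added for exposition, not part of the original
   sources): with the usual 4096-byte PROBE_INTERVAL, a call such as
   sparc_emit_probe_stack_range (16384, 10000) takes the small-constant
   path above and probes the words at 16384 + 4096 and 16384 + 8192 bytes
   below the incoming stack pointer, followed by a final probe at
   16384 + 10000; larger sizes fall back to the probe_stack_range loop.  */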
5187 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5188 absolute addresses. */
5191 output_probe_stack_range (rtx reg1
, rtx reg2
)
5193 static int labelno
= 0;
5194 char loop_lab
[32], end_lab
[32];
5197 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
);
5198 ASM_GENERATE_INTERNAL_LABEL (end_lab
, "LPSRE", labelno
++);
5200 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
5202 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
5205 output_asm_insn ("cmp\t%0, %1", xops
);
5207 fputs ("\tbe,pn\t%xcc,", asm_out_file
);
5209 fputs ("\tbe\t", asm_out_file
);
5210 assemble_name_raw (asm_out_file
, end_lab
);
5211 fputc ('\n', asm_out_file
);
5213 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5214 xops
[1] = GEN_INT (-PROBE_INTERVAL
);
5215 output_asm_insn (" add\t%0, %1, %0", xops
);
5217 /* Probe at TEST_ADDR and branch. */
5219 fputs ("\tba,pt\t%xcc,", asm_out_file
);
5221 fputs ("\tba\t", asm_out_file
);
5222 assemble_name_raw (asm_out_file
, loop_lab
);
5223 fputc ('\n', asm_out_file
);
5224 xops
[1] = GEN_INT (SPARC_STACK_BIAS
);
5225 output_asm_insn (" st\t%%g0, [%0+%1]", xops
);
5227 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, end_lab
);
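/* Illustrative note (added for exposition, not part of the original
   sources): the loop emitted by the function above is, schematically and
   with example register/label choices,

	.LPSRL0:
		cmp	%g1, %g4
		be	.LPSRE0		! be,pn %xcc, on V9
		 add	%g1, -4096, %g1
		ba	.LPSRL0		! ba,pt %xcc, on V9
		 st	%g0, [%g1+0]	! +SPARC_STACK_BIAS in 64-bit mode
	.LPSRE0:

   i.e. TEST_ADDR is stepped down by PROBE_INTERVAL and a zero is stored at
   each probed address until TEST_ADDR equals LAST_ADDR.  */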
/* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
   needed.  LOW is supposed to be double-word aligned for 32-bit registers.
   SAVE_P decides whether a register must be saved/restored.  ACTION_TRUE
   is the action to be performed if SAVE_P returns true and ACTION_FALSE
   the action to be performed if it returns false.  Return the new offset.  */

typedef bool (*sorr_pred_t) (unsigned int, int);
typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5242 emit_save_or_restore_regs (unsigned int low
, unsigned int high
, rtx base
,
5243 int offset
, int leaf_function
, sorr_pred_t save_p
,
5244 sorr_act_t action_true
, sorr_act_t action_false
)
5249 if (TARGET_ARCH64
&& high
<= 32)
5253 for (i
= low
; i
< high
; i
++)
5255 if (save_p (i
, leaf_function
))
5257 mem
= gen_frame_mem (DImode
, plus_constant (Pmode
,
5259 if (action_true
== SORR_SAVE
)
5261 insn
= emit_move_insn (mem
, gen_rtx_REG (DImode
, i
));
5262 RTX_FRAME_RELATED_P (insn
) = 1;
5264 else /* action_true == SORR_RESTORE */
5266 /* The frame pointer must be restored last since its old
5267 value may be used as base address for the frame. This
5268 is problematic in 64-bit mode only because of the lack
5269 of double-word load instruction. */
5270 if (i
== HARD_FRAME_POINTER_REGNUM
)
5273 emit_move_insn (gen_rtx_REG (DImode
, i
), mem
);
5277 else if (action_false
== SORR_ADVANCE
)
5283 mem
= gen_frame_mem (DImode
, plus_constant (Pmode
, base
, fp_offset
));
5284 emit_move_insn (hard_frame_pointer_rtx
, mem
);
5289 for (i
= low
; i
< high
; i
+= 2)
5291 bool reg0
= save_p (i
, leaf_function
);
5292 bool reg1
= save_p (i
+ 1, leaf_function
);
5293 enum machine_mode mode
;
5298 mode
= SPARC_INT_REG_P (i
) ? DImode
: DFmode
;
5303 mode
= SPARC_INT_REG_P (i
) ? SImode
: SFmode
;
5308 mode
= SPARC_INT_REG_P (i
) ? SImode
: SFmode
;
5314 if (action_false
== SORR_ADVANCE
)
5319 mem
= gen_frame_mem (mode
, plus_constant (Pmode
, base
, offset
));
5320 if (action_true
== SORR_SAVE
)
5322 insn
= emit_move_insn (mem
, gen_rtx_REG (mode
, regno
));
5323 RTX_FRAME_RELATED_P (insn
) = 1;
5327 mem
= gen_frame_mem (SImode
, plus_constant (Pmode
, base
,
5329 set1
= gen_rtx_SET (VOIDmode
, mem
,
5330 gen_rtx_REG (SImode
, regno
));
5331 RTX_FRAME_RELATED_P (set1
) = 1;
5333 = gen_frame_mem (SImode
, plus_constant (Pmode
, base
,
5335 set2
= gen_rtx_SET (VOIDmode
, mem
,
5336 gen_rtx_REG (SImode
, regno
+ 1));
5337 RTX_FRAME_RELATED_P (set2
) = 1;
5338 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
5339 gen_rtx_PARALLEL (VOIDmode
,
5340 gen_rtvec (2, set1
, set2
)));
5343 else /* action_true == SORR_RESTORE */
5344 emit_move_insn (gen_rtx_REG (mode
, regno
), mem
);
5346 /* Always preserve double-word alignment. */
5347 offset
= (offset
+ 8) & -8;
5354 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5357 emit_adjust_base_to_offset (rtx base
, int offset
)
5359 /* ??? This might be optimized a little as %g1 might already have a
5360 value close enough that a single add insn will do. */
5361 /* ??? Although, all of this is probably only a temporary fix because
5362 if %g1 can hold a function result, then sparc_expand_epilogue will
5363 lose (the result will be clobbered). */
5364 rtx new_base
= gen_rtx_REG (Pmode
, 1);
5365 emit_move_insn (new_base
, GEN_INT (offset
));
5366 emit_insn (gen_rtx_SET (VOIDmode
,
5367 new_base
, gen_rtx_PLUS (Pmode
, base
, new_base
)));
5371 /* Emit code to save/restore call-saved global and FP registers. */
5374 emit_save_or_restore_global_fp_regs (rtx base
, int offset
, sorr_act_t action
)
5376 if (offset
< -4096 || offset
+ sparc_n_global_fp_regs
* 4 > 4095)
5378 base
= emit_adjust_base_to_offset (base
, offset
);
5383 = emit_save_or_restore_regs (0, 8, base
, offset
, 0,
5384 save_global_or_fp_reg_p
, action
, SORR_NONE
);
5385 emit_save_or_restore_regs (32, TARGET_V9
? 96 : 64, base
, offset
, 0,
5386 save_global_or_fp_reg_p
, action
, SORR_NONE
);
5389 /* Emit code to save/restore call-saved local and in registers. */
5392 emit_save_or_restore_local_in_regs (rtx base
, int offset
, sorr_act_t action
)
5394 if (offset
< -4096 || offset
+ 16 * UNITS_PER_WORD
> 4095)
5396 base
= emit_adjust_base_to_offset (base
, offset
);
5400 emit_save_or_restore_regs (16, 32, base
, offset
, sparc_leaf_function_p
,
5401 save_local_or_in_reg_p
, action
, SORR_ADVANCE
);
5404 /* Emit a window_save insn. */
5407 emit_window_save (rtx increment
)
5409 rtx insn
= emit_insn (gen_window_save (increment
));
5410 RTX_FRAME_RELATED_P (insn
) = 1;
5412 /* The incoming return address (%o7) is saved in %i7. */
5413 add_reg_note (insn
, REG_CFA_REGISTER
,
5414 gen_rtx_SET (VOIDmode
,
5415 gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
),
5417 INCOMING_RETURN_ADDR_REGNUM
)));
5419 /* The window save event. */
5420 add_reg_note (insn
, REG_CFA_WINDOW_SAVE
, const0_rtx
);
5422 /* The CFA is %fp, the hard frame pointer. */
5423 add_reg_note (insn
, REG_CFA_DEF_CFA
,
5424 plus_constant (Pmode
, hard_frame_pointer_rtx
,
5425 INCOMING_FRAME_SP_OFFSET
));
/* Generate an increment for the stack pointer.  */

static rtx
gen_stack_pointer_inc (rtx increment)
{
  return gen_rtx_SET (VOIDmode,
		      stack_pointer_rtx,
		      gen_rtx_PLUS (Pmode,
				    stack_pointer_rtx,
				    increment));
}

/* Expand the function prologue.  The prologue is responsible for reserving
   storage for the frame, saving the call-saved registers and loading the
   GOT register if needed.  */

static void
sparc_expand_prologue (void)
{
  HOST_WIDE_INT size;
  rtx insn;
  /* Compute a snapshot of crtl->uses_only_leaf_regs.  Relying
     on the final value of the flag means deferring the prologue/epilogue
     expansion until just before the second scheduling pass, which is too
     late to emit multiple epilogues or return insns.

     Of course we are making the assumption that the value of the flag
     will not change between now and its final value.  Of the three parts
     of the formula, only the last one can reasonably vary.  Let's take a
     closer look, after assuming that the first two ones are set to true
     (otherwise the last value is effectively silenced).

     If only_leaf_regs_used returns false, the global predicate will also
     be false so the actual frame size calculated below will be positive.
     As a consequence, the save_register_window insn will be emitted in
     the instruction stream; now this insn explicitly references %fp
     which is not a leaf register so only_leaf_regs_used will always
     return false subsequently.

     If only_leaf_regs_used returns true, we hope that the subsequent
     optimization passes won't cause non-leaf registers to pop up.  For
     example, the regrename pass has special provisions to not rename to
     non-leaf registers in a leaf function.  */
  sparc_leaf_function_p
    = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();

  size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5479 if (flag_stack_usage_info
)
5480 current_function_static_stack_size
= size
;
5482 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
5484 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
5486 if (size
> PROBE_INTERVAL
&& size
> STACK_CHECK_PROTECT
)
5487 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT
,
5488 size
- STACK_CHECK_PROTECT
);
5491 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
5496 else if (sparc_leaf_function_p
)
5498 rtx size_int_rtx
= GEN_INT (-size
);
5501 insn
= emit_insn (gen_stack_pointer_inc (size_int_rtx
));
5502 else if (size
<= 8192)
5504 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5505 RTX_FRAME_RELATED_P (insn
) = 1;
5507 /* %sp is still the CFA register. */
5508 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size
)));
5512 rtx size_rtx
= gen_rtx_REG (Pmode
, 1);
5513 emit_move_insn (size_rtx
, size_int_rtx
);
5514 insn
= emit_insn (gen_stack_pointer_inc (size_rtx
));
5515 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
5516 gen_stack_pointer_inc (size_int_rtx
));
5519 RTX_FRAME_RELATED_P (insn
) = 1;
5523 rtx size_int_rtx
= GEN_INT (-size
);
5526 emit_window_save (size_int_rtx
);
5527 else if (size
<= 8192)
5529 emit_window_save (GEN_INT (-4096));
5531 /* %sp is not the CFA register anymore. */
5532 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size
)));
5534 /* Make sure no %fp-based store is issued until after the frame is
5535 established. The offset between the frame pointer and the stack
5536 pointer is calculated relative to the value of the stack pointer
5537 at the end of the function prologue, and moving instructions that
5538 access the stack via the frame pointer between the instructions
5539 that decrement the stack pointer could result in accessing the
5540 register window save area, which is volatile. */
5541 emit_insn (gen_frame_blockage ());
5545 rtx size_rtx
= gen_rtx_REG (Pmode
, 1);
5546 emit_move_insn (size_rtx
, size_int_rtx
);
5547 emit_window_save (size_rtx
);
5551 if (sparc_leaf_function_p
)
5553 sparc_frame_base_reg
= stack_pointer_rtx
;
5554 sparc_frame_base_offset
= size
+ SPARC_STACK_BIAS
;
5558 sparc_frame_base_reg
= hard_frame_pointer_rtx
;
5559 sparc_frame_base_offset
= SPARC_STACK_BIAS
;
5562 if (sparc_n_global_fp_regs
> 0)
5563 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
5564 sparc_frame_base_offset
5565 - sparc_apparent_frame_size
,
5568 /* Load the GOT register if needed. */
5569 if (crtl
->uses_pic_offset_table
)
5570 load_got_register ();
5572 /* Advertise that the data calculated just above are now valid. */
5573 sparc_prologue_data_valid_p
= true;
5576 /* Expand the function prologue. The prologue is responsible for reserving
5577 storage for the frame, saving the call-saved registers and loading the
5578 GOT register if needed. */
5581 sparc_flat_expand_prologue (void)
5586 sparc_leaf_function_p
= optimize
> 0 && crtl
->is_leaf
;
5588 size
= sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p
);
5590 if (flag_stack_usage_info
)
5591 current_function_static_stack_size
= size
;
5593 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
5595 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
5597 if (size
> PROBE_INTERVAL
&& size
> STACK_CHECK_PROTECT
)
5598 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT
,
5599 size
- STACK_CHECK_PROTECT
);
5602 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
5605 if (sparc_save_local_in_regs_p
)
5606 emit_save_or_restore_local_in_regs (stack_pointer_rtx
, SPARC_STACK_BIAS
,
5613 rtx size_int_rtx
, size_rtx
;
5615 size_rtx
= size_int_rtx
= GEN_INT (-size
);
5617 /* We establish the frame (i.e. decrement the stack pointer) first, even
5618 if we use a frame pointer, because we cannot clobber any call-saved
5619 registers, including the frame pointer, if we haven't created a new
5620 register save area, for the sake of compatibility with the ABI. */
5622 insn
= emit_insn (gen_stack_pointer_inc (size_int_rtx
));
5623 else if (size
<= 8192 && !frame_pointer_needed
)
5625 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5626 RTX_FRAME_RELATED_P (insn
) = 1;
5627 insn
= emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size
)));
5631 size_rtx
= gen_rtx_REG (Pmode
, 1);
5632 emit_move_insn (size_rtx
, size_int_rtx
);
5633 insn
= emit_insn (gen_stack_pointer_inc (size_rtx
));
5634 add_reg_note (insn
, REG_CFA_ADJUST_CFA
,
5635 gen_stack_pointer_inc (size_int_rtx
));
5637 RTX_FRAME_RELATED_P (insn
) = 1;
5639 /* Ensure nothing is scheduled until after the frame is established. */
5640 emit_insn (gen_blockage ());
5642 if (frame_pointer_needed
)
5644 insn
= emit_insn (gen_rtx_SET (VOIDmode
, hard_frame_pointer_rtx
,
5645 gen_rtx_MINUS (Pmode
,
5648 RTX_FRAME_RELATED_P (insn
) = 1;
5650 add_reg_note (insn
, REG_CFA_ADJUST_CFA
,
5651 gen_rtx_SET (VOIDmode
, hard_frame_pointer_rtx
,
5652 plus_constant (Pmode
, stack_pointer_rtx
,
5656 if (return_addr_reg_needed_p (sparc_leaf_function_p
))
5658 rtx o7
= gen_rtx_REG (Pmode
, INCOMING_RETURN_ADDR_REGNUM
);
5659 rtx i7
= gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
);
5661 insn
= emit_move_insn (i7
, o7
);
5662 RTX_FRAME_RELATED_P (insn
) = 1;
5664 add_reg_note (insn
, REG_CFA_REGISTER
,
5665 gen_rtx_SET (VOIDmode
, i7
, o7
));
5667 /* Prevent this instruction from ever being considered dead,
5668 even if this function has no epilogue. */
5673 if (frame_pointer_needed
)
5675 sparc_frame_base_reg
= hard_frame_pointer_rtx
;
5676 sparc_frame_base_offset
= SPARC_STACK_BIAS
;
5680 sparc_frame_base_reg
= stack_pointer_rtx
;
5681 sparc_frame_base_offset
= size
+ SPARC_STACK_BIAS
;
5684 if (sparc_n_global_fp_regs
> 0)
5685 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
5686 sparc_frame_base_offset
5687 - sparc_apparent_frame_size
,
5690 /* Load the GOT register if needed. */
5691 if (crtl
->uses_pic_offset_table
)
5692 load_got_register ();
5694 /* Advertise that the data calculated just above are now valid. */
5695 sparc_prologue_data_valid_p
= true;
5698 /* This function generates the assembly code for function entry, which boils
5699 down to emitting the necessary .register directives. */
5702 sparc_asm_function_prologue (FILE *file
, HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
5704 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5706 gcc_assert (sparc_leaf_function_p
== crtl
->uses_only_leaf_regs
);
5708 sparc_output_scratch_registers (file
);
5711 /* Expand the function epilogue, either normal or part of a sibcall.
5712 We emit all the instructions except the return or the call. */
5715 sparc_expand_epilogue (bool for_eh
)
5717 HOST_WIDE_INT size
= sparc_frame_size
;
5719 if (sparc_n_global_fp_regs
> 0)
5720 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
5721 sparc_frame_base_offset
5722 - sparc_apparent_frame_size
,
5725 if (size
== 0 || for_eh
)
5727 else if (sparc_leaf_function_p
)
5730 emit_insn (gen_stack_pointer_inc (GEN_INT (size
)));
5731 else if (size
<= 8192)
5733 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5734 emit_insn (gen_stack_pointer_inc (GEN_INT (size
- 4096)));
5738 rtx reg
= gen_rtx_REG (Pmode
, 1);
5739 emit_move_insn (reg
, GEN_INT (size
));
5740 emit_insn (gen_stack_pointer_inc (reg
));
5745 /* Expand the function epilogue, either normal or part of a sibcall.
5746 We emit all the instructions except the return or the call. */
5749 sparc_flat_expand_epilogue (bool for_eh
)
5751 HOST_WIDE_INT size
= sparc_frame_size
;
5753 if (sparc_n_global_fp_regs
> 0)
5754 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg
,
5755 sparc_frame_base_offset
5756 - sparc_apparent_frame_size
,
5759 /* If we have a frame pointer, we'll need both to restore it before the
5760 frame is destroyed and use its current value in destroying the frame.
5761 Since we don't have an atomic way to do that in the flat window model,
5762 we save the current value into a temporary register (%g1). */
5763 if (frame_pointer_needed
&& !for_eh
)
5764 emit_move_insn (gen_rtx_REG (Pmode
, 1), hard_frame_pointer_rtx
);
5766 if (return_addr_reg_needed_p (sparc_leaf_function_p
))
5767 emit_move_insn (gen_rtx_REG (Pmode
, INCOMING_RETURN_ADDR_REGNUM
),
5768 gen_rtx_REG (Pmode
, RETURN_ADDR_REGNUM
));
5770 if (sparc_save_local_in_regs_p
)
5771 emit_save_or_restore_local_in_regs (sparc_frame_base_reg
,
5772 sparc_frame_base_offset
,
5775 if (size
== 0 || for_eh
)
5777 else if (frame_pointer_needed
)
5779 /* Make sure the frame is destroyed after everything else is done. */
5780 emit_insn (gen_blockage ());
5782 emit_move_insn (stack_pointer_rtx
, gen_rtx_REG (Pmode
, 1));
5787 emit_insn (gen_blockage ());
5790 emit_insn (gen_stack_pointer_inc (GEN_INT (size
)));
5791 else if (size
<= 8192)
5793 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5794 emit_insn (gen_stack_pointer_inc (GEN_INT (size
- 4096)));
5798 rtx reg
= gen_rtx_REG (Pmode
, 1);
5799 emit_move_insn (reg
, GEN_INT (size
));
5800 emit_insn (gen_stack_pointer_inc (reg
));
5805 /* Return true if it is appropriate to emit `return' instructions in the
5806 body of a function. */
5809 sparc_can_use_return_insn_p (void)
5811 return sparc_prologue_data_valid_p
5812 && sparc_n_global_fp_regs
== 0
5814 ? (sparc_frame_size
== 0 && !sparc_save_local_in_regs_p
)
5815 : (sparc_frame_size
== 0 || !sparc_leaf_function_p
);
5818 /* This function generates the assembly code for function exit. */
5821 sparc_asm_function_epilogue (FILE *file
, HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
5823 /* If the last two instructions of a function are "call foo; dslot;"
5824 the return address might point to the first instruction in the next
5825 function and we have to output a dummy nop for the sake of sane
5826 backtraces in such cases. This is pointless for sibling calls since
5827 the return address is explicitly adjusted. */
5829 rtx insn
, last_real_insn
;
5831 insn
= get_last_insn ();
5833 last_real_insn
= prev_real_insn (insn
);
5835 && NONJUMP_INSN_P (last_real_insn
)
5836 && GET_CODE (PATTERN (last_real_insn
)) == SEQUENCE
)
5837 last_real_insn
= XVECEXP (PATTERN (last_real_insn
), 0, 0);
5840 && CALL_P (last_real_insn
)
5841 && !SIBLING_CALL_P (last_real_insn
))
5842 fputs("\tnop\n", file
);
5844 sparc_output_deferred_case_vectors ();
5847 /* Output a 'restore' instruction. */
5850 output_restore (rtx pat
)
5856 fputs ("\t restore\n", asm_out_file
);
5860 gcc_assert (GET_CODE (pat
) == SET
);
5862 operands
[0] = SET_DEST (pat
);
5863 pat
= SET_SRC (pat
);
5865 switch (GET_CODE (pat
))
5868 operands
[1] = XEXP (pat
, 0);
5869 operands
[2] = XEXP (pat
, 1);
5870 output_asm_insn (" restore %r1, %2, %Y0", operands
);
5873 operands
[1] = XEXP (pat
, 0);
5874 operands
[2] = XEXP (pat
, 1);
5875 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands
);
5878 operands
[1] = XEXP (pat
, 0);
5879 gcc_assert (XEXP (pat
, 1) == const1_rtx
);
5880 output_asm_insn (" restore %r1, %r1, %Y0", operands
);
5884 output_asm_insn (" restore %%g0, %1, %Y0", operands
);
5889 /* Output a return. */
5892 output_return (rtx insn
)
5894 if (crtl
->calls_eh_return
)
5896 /* If the function uses __builtin_eh_return, the eh_return
5897 machinery occupies the delay slot. */
5898 gcc_assert (!final_sequence
);
5900 if (flag_delayed_branch
)
5902 if (!TARGET_FLAT
&& TARGET_V9
)
5903 fputs ("\treturn\t%i7+8\n", asm_out_file
);
5907 fputs ("\trestore\n", asm_out_file
);
5909 fputs ("\tjmp\t%o7+8\n", asm_out_file
);
5912 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file
);
5917 fputs ("\trestore\n", asm_out_file
);
5919 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file
);
5920 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file
);
5923 else if (sparc_leaf_function_p
|| TARGET_FLAT
)
5925 /* This is a leaf or flat function so we don't have to bother restoring
5926 the register window, which frees us from dealing with the convoluted
5927 semantics of restore/return. We simply output the jump to the
5928 return address and the insn in the delay slot (if any). */
5930 return "jmp\t%%o7+%)%#";
5934 /* This is a regular function so we have to restore the register window.
5935 We may have a pending insn for the delay slot, which will be either
5936 combined with the 'restore' instruction or put in the delay slot of
5937 the 'return' instruction. */
5943 delay
= NEXT_INSN (insn
);
5946 pat
= PATTERN (delay
);
5948 if (TARGET_V9
&& ! epilogue_renumber (&pat
, 1))
5950 epilogue_renumber (&pat
, 0);
5951 return "return\t%%i7+%)%#";
5955 output_asm_insn ("jmp\t%%i7+%)", NULL
);
5956 output_restore (pat
);
5957 PATTERN (delay
) = gen_blockage ();
5958 INSN_CODE (delay
) = -1;
5963 /* The delay slot is empty. */
5965 return "return\t%%i7+%)\n\t nop";
5966 else if (flag_delayed_branch
)
5967 return "jmp\t%%i7+%)\n\t restore";
5969 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5976 /* Output a sibling call. */
5979 output_sibcall (rtx insn
, rtx call_operand
)
5983 gcc_assert (flag_delayed_branch
);
5985 operands
[0] = call_operand
;
5987 if (sparc_leaf_function_p
|| TARGET_FLAT
)
5989 /* This is a leaf or flat function so we don't have to bother restoring
5990 the register window. We simply output the jump to the function and
5991 the insn in the delay slot (if any). */
5993 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P
&& final_sequence
));
5996 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5999 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
6000 it into branch if possible. */
6001 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
6006 /* This is a regular function so we have to restore the register window.
6007 We may have a pending insn for the delay slot, which will be combined
6008 with the 'restore' instruction. */
6010 output_asm_insn ("call\t%a0, 0", operands
);
6014 rtx delay
= NEXT_INSN (insn
);
6017 output_restore (PATTERN (delay
));
6019 PATTERN (delay
) = gen_blockage ();
6020 INSN_CODE (delay
) = -1;
6023 output_restore (NULL_RTX
);
/* Functions for handling argument passing.

   For 32-bit, the first 6 args are normally in registers and the rest are
   pushed.  Any arg that starts within the first 6 words is at least
   partially passed in a register unless its data type forbids.

   For 64-bit, the argument registers are laid out as an array of 16 elements
   and arguments are added sequentially.  The first 6 int args and up to the
   first 16 fp args (depending on size) are passed in regs.

   Slot    Stack   Integral   Float   Float in structure   Double   Long Double
   ----    -----   --------   -----   ------------------   ------   -----------
    15   [SP+248]              %f31       %f30,%f31         %d30
    14   [SP+240]              %f29       %f28,%f29         %d28       %q28
    13   [SP+232]              %f27       %f26,%f27         %d26
    12   [SP+224]              %f25       %f24,%f25         %d24       %q24
    11   [SP+216]              %f23       %f22,%f23         %d22
    10   [SP+208]              %f21       %f20,%f21         %d20       %q20
     9   [SP+200]              %f19       %f18,%f19         %d18
     8   [SP+192]              %f17       %f16,%f17         %d16       %q16
     7   [SP+184]              %f15       %f14,%f15         %d14
     6   [SP+176]              %f13       %f12,%f13         %d12       %q12
     5   [SP+168]     %o5      %f11       %f10,%f11         %d10
     4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
     3   [SP+152]     %o3       %f7        %f6,%f7           %d6
     2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
     1   [SP+136]     %o1       %f3        %f2,%f3           %d2
     0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0

   Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.

   Integral arguments are always passed as 64-bit quantities appropriately
   extended.

   Passing of floating point values is handled as follows.
   If a prototype is in scope:
     If the value is in a named argument (i.e. not a stdarg function or a
     value not part of the `...') then the value is passed in the appropriate
     fp reg.
     If the value is part of the `...' and is passed in one of the first 6
     slots then the value is passed in the appropriate int reg.
     If the value is part of the `...' and is not passed in one of the first 6
     slots then the value is passed in memory.
   If a prototype is not in scope:
     If the value is one of the first 6 arguments the value is passed in the
     appropriate integer reg and the appropriate fp reg.
     If the value is not one of the first 6 arguments the value is passed in
     the appropriate fp reg and in memory.


   Summary of the calling conventions implemented by GCC on the SPARC:

   32-bit ABI:
                                size      argument     return value

      small integer              <4       int. reg.      int. reg.
      word                        4       int. reg.      int. reg.
      double word                 8       int. reg.      int. reg.

      _Complex small integer     <8       int. reg.      int. reg.
      _Complex word               8       int. reg.      int. reg.
      _Complex double word       16        memory        int. reg.

      vector integer            <=8       int. reg.       FP reg.
      vector integer             >8        memory         memory

      float                       4       int. reg.       FP reg.
      double                      8       int. reg.       FP reg.
      long double                16        memory          memory

      _Complex float              8        memory         FP reg.
      _Complex double            16        memory         FP reg.
      _Complex long double       32        memory         FP reg.

      vector float              any        memory         memory

      aggregate                 any        memory         memory


   64-bit ABI:
                                size      argument     return value

      small integer              <8       int. reg.      int. reg.
      word                        8       int. reg.      int. reg.
      double word                16       int. reg.      int. reg.

      _Complex small integer    <16       int. reg.      int. reg.
      _Complex word              16       int. reg.      int. reg.
      _Complex double word       32        memory        int. reg.

      vector integer           <=16        FP reg.        FP reg.
      vector integer       16<s<=32        memory         FP reg.
      vector integer            >32        memory         memory

      float                       4        FP reg.        FP reg.
      double                      8        FP reg.        FP reg.
      long double                16        FP reg.        FP reg.

      _Complex float              8        FP reg.        FP reg.
      _Complex double            16        FP reg.        FP reg.
      _Complex long double       32        memory         FP reg.

      vector float             <=16        FP reg.        FP reg.
      vector float         16<s<=32        memory         FP reg.
      vector float              >32        memory         memory

      aggregate                <=16         reg.           reg.
      aggregate            16<s<=32        memory          reg.
      aggregate                 >32        memory         memory


   Note #1: complex floating-point types follow the extended SPARC ABIs as
   implemented by the Sun compiler.

   Note #2: integral vector types follow the scalar floating-point types
   conventions to match what is implemented by the Sun VIS SDK.

   Note #3: floating-point vector types follow the aggregate types
   conventions.  */


/* Maximum number of int regs for args.  */
#define SPARC_INT_ARG_MAX 6
/* Maximum number of fp regs for args.  */
#define SPARC_FP_ARG_MAX 16

#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
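/* Illustrative arithmetic (added for exposition, not part of the original
   sources): with 4-byte words (32-bit), ROUND_ADVANCE (10) is
   (10 + 3) / 4 = 3 argument slots; with 8-byte words (64-bit) the same
   10-byte object occupies ROUND_ADVANCE (10) = (10 + 7) / 8 = 2 slots.  */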
/* Handle the INIT_CUMULATIVE_ARGS macro.
   Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (struct sparc_args *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED,
		      tree fndecl ATTRIBUTE_UNUSED)
{
  cum->words = 0;
  cum->prototype_p = fntype && prototype_p (fntype);
  cum->libcall_p = fntype == 0;
}
/* Handle promotion of pointer and integer arguments.  */

static enum machine_mode
sparc_promote_function_mode (const_tree type, enum machine_mode mode,
			     int *punsignedp,
			     const_tree fntype ATTRIBUTE_UNUSED,
			     int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  /* Integral arguments are passed as full words, as per the ABI.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    return word_mode;

  return mode;
}
/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook.  */

static bool
sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH64 ? true : false;
}
/* Scan the record type TYPE and return the following predicates:
    - INTREGS_P: the record contains at least one field or sub-field
      that is eligible for promotion in integer registers.
    - FP_REGS_P: the record contains at least one field or sub-field
      that is eligible for promotion in floating-point registers.
    - PACKED_P: the record contains at least one field that is packed.

   Sub-fields are not taken into account for the PACKED_P predicate.  */
6215 scan_record_type (const_tree type
, int *intregs_p
, int *fpregs_p
,
6220 for (field
= TYPE_FIELDS (type
); field
; field
= DECL_CHAIN (field
))
6222 if (TREE_CODE (field
) == FIELD_DECL
)
6224 if (TREE_CODE (TREE_TYPE (field
)) == RECORD_TYPE
)
6225 scan_record_type (TREE_TYPE (field
), intregs_p
, fpregs_p
, 0);
6226 else if ((FLOAT_TYPE_P (TREE_TYPE (field
))
6227 || TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
)
6233 if (packed_p
&& DECL_PACKED (field
))
/* Compute the slot number to pass an argument in.
   Return the slot number or -1 if passing on the stack.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
   *PREGNO records the register number to use if scalar type.
   *PPADDING records the amount of padding needed in words.  */
6255 function_arg_slotno (const struct sparc_args
*cum
, enum machine_mode mode
,
6256 const_tree type
, bool named
, bool incoming_p
,
6257 int *pregno
, int *ppadding
)
6259 int regbase
= (incoming_p
6260 ? SPARC_INCOMING_INT_ARG_FIRST
6261 : SPARC_OUTGOING_INT_ARG_FIRST
);
6262 int slotno
= cum
->words
;
6263 enum mode_class mclass
;
6268 if (type
&& TREE_ADDRESSABLE (type
))
6274 && TYPE_ALIGN (type
) % PARM_BOUNDARY
!= 0)
6277 /* For SPARC64, objects requiring 16-byte alignment get it. */
6279 && (type
? TYPE_ALIGN (type
) : GET_MODE_ALIGNMENT (mode
)) >= 128
6280 && (slotno
& 1) != 0)
6281 slotno
++, *ppadding
= 1;
6283 mclass
= GET_MODE_CLASS (mode
);
6284 if (type
&& TREE_CODE (type
) == VECTOR_TYPE
)
6286 /* Vector types deserve special treatment because they are
6287 polymorphic wrt their mode, depending upon whether VIS
6288 instructions are enabled. */
6289 if (TREE_CODE (TREE_TYPE (type
)) == REAL_TYPE
)
6291 /* The SPARC port defines no floating-point vector modes. */
6292 gcc_assert (mode
== BLKmode
);
6296 /* Integral vector types should either have a vector
6297 mode or an integral mode, because we are guaranteed
6298 by pass_by_reference that their size is not greater
6299 than 16 bytes and TImode is 16-byte wide. */
6300 gcc_assert (mode
!= BLKmode
);
6302 /* Vector integers are handled like floats according to
6304 mclass
= MODE_FLOAT
;
6311 case MODE_COMPLEX_FLOAT
:
6312 case MODE_VECTOR_INT
:
6313 if (TARGET_ARCH64
&& TARGET_FPU
&& named
)
6315 if (slotno
>= SPARC_FP_ARG_MAX
)
6317 regno
= SPARC_FP_ARG_FIRST
+ slotno
* 2;
6318 /* Arguments filling only one single FP register are
6319 right-justified in the outer double FP register. */
6320 if (GET_MODE_SIZE (mode
) <= 4)
6327 case MODE_COMPLEX_INT
:
6328 if (slotno
>= SPARC_INT_ARG_MAX
)
6330 regno
= regbase
+ slotno
;
6334 if (mode
== VOIDmode
)
6335 /* MODE is VOIDmode when generating the actual call. */
6338 gcc_assert (mode
== BLKmode
);
6342 || (TREE_CODE (type
) != VECTOR_TYPE
6343 && TREE_CODE (type
) != RECORD_TYPE
))
6345 if (slotno
>= SPARC_INT_ARG_MAX
)
6347 regno
= regbase
+ slotno
;
6349 else /* TARGET_ARCH64 && type */
6351 int intregs_p
= 0, fpregs_p
= 0, packed_p
= 0;
6353 /* First see what kinds of registers we would need. */
6354 if (TREE_CODE (type
) == VECTOR_TYPE
)
6357 scan_record_type (type
, &intregs_p
, &fpregs_p
, &packed_p
);
6359 /* The ABI obviously doesn't specify how packed structures
6360 are passed. These are defined to be passed in int regs
6361 if possible, otherwise memory. */
6362 if (packed_p
|| !named
)
6363 fpregs_p
= 0, intregs_p
= 1;
6365 /* If all arg slots are filled, then must pass on stack. */
6366 if (fpregs_p
&& slotno
>= SPARC_FP_ARG_MAX
)
6369 /* If there are only int args and all int arg slots are filled,
6370 then must pass on stack. */
6371 if (!fpregs_p
&& intregs_p
&& slotno
>= SPARC_INT_ARG_MAX
)
6374 /* Note that even if all int arg slots are filled, fp members may
6375 still be passed in regs if such regs are available.
6376 *PREGNO isn't set because there may be more than one, it's up
6377 to the caller to compute them. */
6390 /* Handle recursive register counting for structure field layout. */
6392 struct function_arg_record_value_parms
6394 rtx ret
; /* return expression being built. */
6395 int slotno
; /* slot number of the argument. */
6396 int named
; /* whether the argument is named. */
6397 int regbase
; /* regno of the base register. */
6398 int stack
; /* 1 if part of the argument is on the stack. */
6399 int intoffset
; /* offset of the first pending integer field. */
6400 unsigned int nregs
; /* number of words passed in registers. */
6403 static void function_arg_record_value_3
6404 (HOST_WIDE_INT
, struct function_arg_record_value_parms
*);
6405 static void function_arg_record_value_2
6406 (const_tree
, HOST_WIDE_INT
, struct function_arg_record_value_parms
*, bool);
6407 static void function_arg_record_value_1
6408 (const_tree
, HOST_WIDE_INT
, struct function_arg_record_value_parms
*, bool);
6409 static rtx
function_arg_record_value (const_tree
, enum machine_mode
, int, int, int);
6410 static rtx
function_arg_union_value (int, enum machine_mode
, int, int);
/* A subroutine of function_arg_record_value.  Traverse the structure
   recursively and determine how many registers will be required.  */

static void
function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
			     struct function_arg_record_value_parms *parms,
			     bool packed_p)
{
  /* We need to compute how many registers are needed so we can
     allocate the PARALLEL but before we can do that we need to know
     whether there are any packed fields.  The ABI obviously doesn't
     specify how structures are passed in this case, so they are
     defined to be passed in int regs if possible, otherwise memory,
     regardless of whether there are fp values present.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
      packed_p = true;

  /* Compute how many registers we need.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    if (TREE_CODE (field) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;

	if (DECL_SIZE (field) != 0)
	  {
	    if (integer_zerop (DECL_SIZE (field)))
	      continue;

	    if (tree_fits_uhwi_p (bit_position (field)))
	      bitpos += int_bit_position (field);
	  }
6455 /* ??? FIXME: else assume zero offset. */
6457 if (TREE_CODE (TREE_TYPE (field
)) == RECORD_TYPE
)
6458 function_arg_record_value_1 (TREE_TYPE (field
),
6462 else if ((FLOAT_TYPE_P (TREE_TYPE (field
))
6463 || TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
)
6468 if (parms
->intoffset
!= -1)
6470 unsigned int startbit
, endbit
;
6471 int intslots
, this_slotno
;
6473 startbit
= parms
->intoffset
& -BITS_PER_WORD
;
6474 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
6476 intslots
= (endbit
- startbit
) / BITS_PER_WORD
;
6477 this_slotno
= parms
->slotno
+ parms
->intoffset
6480 if (intslots
> 0 && intslots
> SPARC_INT_ARG_MAX
- this_slotno
)
6482 intslots
= MAX (0, SPARC_INT_ARG_MAX
- this_slotno
);
6483 /* We need to pass this field on the stack. */
6487 parms
->nregs
+= intslots
;
6488 parms
->intoffset
= -1;
6491 /* There's no need to check this_slotno < SPARC_FP_ARG MAX.
6492 If it wasn't true we wouldn't be here. */
6493 if (TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
6494 && DECL_MODE (field
) == BLKmode
)
6495 parms
->nregs
+= TYPE_VECTOR_SUBPARTS (TREE_TYPE (field
));
6496 else if (TREE_CODE (TREE_TYPE (field
)) == COMPLEX_TYPE
)
6503 if (parms
->intoffset
== -1)
6504 parms
->intoffset
= bitpos
;
/* A subroutine of function_arg_record_value.  Assign the bits of the
   structure between parms->intoffset and bitpos to integer registers.  */

static void
function_arg_record_value_3 (HOST_WIDE_INT bitpos,
			     struct function_arg_record_value_parms *parms)
{
  enum machine_mode mode;
  unsigned int startbit, endbit;
  int this_slotno, intslots, intoffset;
  if (parms->intoffset == -1)
    return;

  intoffset = parms->intoffset;
  parms->intoffset = -1;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intslots = (endbit - startbit) / BITS_PER_WORD;
  this_slotno = parms->slotno + intoffset / BITS_PER_WORD;

  intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);

  /* If this is the trailing part of a word, only load that much into
     the register.  Otherwise load the whole register.  Note that in
     the latter case we may pick up unwanted bits.  It's not a problem
     at the moment but may wish to revisit.  */
6543 if (intoffset
% BITS_PER_WORD
!= 0)
6544 mode
= smallest_mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
6549 intoffset
/= BITS_PER_UNIT
;
6552 regno
= parms
->regbase
+ this_slotno
;
6553 reg
= gen_rtx_REG (mode
, regno
);
6554 XVECEXP (parms
->ret
, 0, parms
->stack
+ parms
->nregs
)
6555 = gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
6558 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
6563 while (intslots
> 0);
/* A subroutine of function_arg_record_value.  Traverse the structure
   recursively and assign bits to floating point registers.  Track which
   bits in between need integer registers; invoke function_arg_record_value_3
   to make that happen.  */

static void
function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
			     struct function_arg_record_value_parms *parms,
			     bool packed_p)
{
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
      packed_p = true;

  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    if (TREE_CODE (field) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;

	if (DECL_SIZE (field) != 0)
	  {
	    if (integer_zerop (DECL_SIZE (field)))
	      continue;

	    if (tree_fits_uhwi_p (bit_position (field)))
	      bitpos += int_bit_position (field);
	  }

	/* ??? FIXME: else assume zero offset.  */
6605 if (TREE_CODE (TREE_TYPE (field
)) == RECORD_TYPE
)
6606 function_arg_record_value_2 (TREE_TYPE (field
),
6610 else if ((FLOAT_TYPE_P (TREE_TYPE (field
))
6611 || TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
)
6616 int this_slotno
= parms
->slotno
+ bitpos
/ BITS_PER_WORD
;
6617 int regno
, nregs
, pos
;
6618 enum machine_mode mode
= DECL_MODE (field
);
6621 function_arg_record_value_3 (bitpos
, parms
);
6623 if (TREE_CODE (TREE_TYPE (field
)) == VECTOR_TYPE
6626 mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (field
)));
6627 nregs
= TYPE_VECTOR_SUBPARTS (TREE_TYPE (field
));
6629 else if (TREE_CODE (TREE_TYPE (field
)) == COMPLEX_TYPE
)
6631 mode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (field
)));
6637 regno
= SPARC_FP_ARG_FIRST
+ this_slotno
* 2;
6638 if (GET_MODE_SIZE (mode
) <= 4 && (bitpos
& 32) != 0)
6640 reg
= gen_rtx_REG (mode
, regno
);
6641 pos
= bitpos
/ BITS_PER_UNIT
;
6642 XVECEXP (parms
->ret
, 0, parms
->stack
+ parms
->nregs
)
6643 = gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (pos
));
6647 regno
+= GET_MODE_SIZE (mode
) / 4;
6648 reg
= gen_rtx_REG (mode
, regno
);
6649 pos
+= GET_MODE_SIZE (mode
);
6650 XVECEXP (parms
->ret
, 0, parms
->stack
+ parms
->nregs
)
6651 = gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (pos
));
6657 if (parms
->intoffset
== -1)
6658 parms
->intoffset
= bitpos
;
/* Used by function_arg and sparc_function_value_1 to implement the complex
   conventions of the 64-bit ABI for passing and returning structures.
   Return an expression valid as a return value for the FUNCTION_ARG
   and TARGET_FUNCTION_VALUE.

   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   MODE is the argument's machine mode.
   SLOTNO is the index number of the argument's slot in the parameter array.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   REGBASE is the regno of the base register for the parameter array.  */
static rtx
function_arg_record_value (const_tree type, enum machine_mode mode,
			   int slotno, int named, int regbase)
{
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  struct function_arg_record_value_parms parms;
  unsigned int nregs;

  parms.ret = NULL_RTX;
  parms.slotno = slotno;
  parms.named = named;
  parms.regbase = regbase;
  /* Compute how many registers we need.  */
  parms.intoffset = 0;
  function_arg_record_value_1 (type, 0, &parms, false);

  /* Take into account pending integer fields.  */
  if (parms.intoffset != -1)
    {
      unsigned int startbit, endbit;
      int intslots, this_slotno;

      startbit = parms.intoffset & -BITS_PER_WORD;
      endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
      intslots = (endbit - startbit) / BITS_PER_WORD;
      this_slotno = slotno + parms.intoffset / BITS_PER_WORD;

      if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
	{
	  intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
	  /* We need to pass this field on the stack.  */
	  parms.stack = 1;
	}

      parms.nregs += intslots;
    }

  nregs = parms.nregs;
  /* Allocate the vector and handle some annoying special cases.  */
  if (nregs == 0)
    {
      /* ??? Empty structure has no value?  Duh?  */
      if (typesize <= 0)
	{
	  /* Though there's nothing really to store, return a word register
	     anyway so the rest of gcc doesn't go nuts.  Returning a PARALLEL
	     leads to breakage due to the fact that there are zero bytes to
	     store.  */
	  return gen_rtx_REG (mode, regbase);
	}
      else
	{
	  /* ??? C++ has structures with no fields, and yet a size.  Give up
	     for now and pass everything back in integer registers.  */
	  nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
	  if (nregs + slotno > SPARC_INT_ARG_MAX)
	    nregs = SPARC_INT_ARG_MAX - slotno;
	}

      gcc_assert (nregs != 0);
    }
  parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));

  /* If at least one field must be passed on the stack, generate
     (parallel [(expr_list (nil) ...) ...]) so that all fields will
     also be passed on the stack.  We can't do much better because the
     semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
     of structures for which the fields passed exclusively in registers
     are not at the beginning of the structure.  */
  if (parms.stack)
    XVECEXP (parms.ret, 0, 0)
      = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  /* Fill in the entries.  */
  parms.intoffset = 0;
  function_arg_record_value_2 (type, 0, &parms, false);
  function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);

  gcc_assert (parms.nregs == nregs);

  return parms.ret;
}
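/* Illustrative sketch (editor's note, not part of the original port): a
   worked example of the PARALLEL built by function_arg_record_value for a
   hypothetical small record under the 64-bit ABI described above.

     struct s { double d; int i; };   // 16 bytes, two argument slots

   The scanning pass (function_arg_record_value_1) counts one FP value plus
   one pending integer field; the assignment passes then emit roughly:

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the double travels in an FP register at byte offset 0 and the
   trailing int is picked up by the integer register covering the second
   word.  Register names assume the standard outgoing argument registers.  */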
/* Used by function_arg and sparc_function_value_1 to implement the conventions
   of the 64-bit ABI for passing and returning unions.
   Return an expression valid as a return value for the FUNCTION_ARG
   and TARGET_FUNCTION_VALUE.

   SIZE is the size in bytes of the union.
   MODE is the argument's machine mode.
   REGNO is the hard register the union will be passed in.  */

static rtx
function_arg_union_value (int size, enum machine_mode mode, int slotno,
			  int regno)
{
  int nwords = ROUND_ADVANCE (size), i;
  rtx regs;

  /* See comment in previous function for empty structures.  */
  if (size == 0)
    return gen_rtx_REG (mode, regno);
  if (slotno == SPARC_INT_ARG_MAX - 1)
    nwords = 1;

  regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));

  for (i = 0; i < nwords; i++)
    {
      /* Unions are passed left-justified.  */
      XVECEXP (regs, 0, i)
	= gen_rtx_EXPR_LIST (VOIDmode,
			     gen_rtx_REG (word_mode, regno),
			     GEN_INT (UNITS_PER_WORD * i));
      regno++;
    }

  return regs;
}
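/* Illustrative sketch (editor's note, not part of the original port): for a
   union handled by function_arg_union_value, every word is carried by an
   integer register, left-justified.  A hypothetical 16-byte union starting
   in slot 0 would yield something like:

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   assuming word_mode is DImode and REGNO names the first outgoing integer
   argument register.  */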
/* Used by function_arg and sparc_function_value_1 to implement the conventions
   for passing and returning large (BLKmode) vectors.
   Return an expression valid as a return value for the FUNCTION_ARG
   and TARGET_FUNCTION_VALUE.

   SIZE is the size in bytes of the vector (at least 8 bytes).
   REGNO is the FP hard register the vector will be passed in.  */

static rtx
function_arg_vector_value (int size, int regno)
{
  int i, nregs = size / 8;
  rtx regs;
  regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));

  for (i = 0; i < nregs; i++)
    {
      XVECEXP (regs, 0, i)
	= gen_rtx_EXPR_LIST (VOIDmode,
			     gen_rtx_REG (DImode, regno + 2*i),
			     GEN_INT (i*8));
    }

  return regs;
}
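/* Illustrative sketch (editor's note, not part of the original port): a
   BLKmode vector is split by function_arg_vector_value into 8-byte pieces,
   each carried by a DImode FP register selected as regno + 2*i.  A
   hypothetical 16-byte vector starting at the first FP argument register
   would therefore be described as:

     (parallel:BLK [(expr_list (reg:DI %d0) (const_int 0))
                    (expr_list (reg:DI %d2) (const_int 8))])

   where %d0/%d2 denote the even-numbered double-precision registers.  */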
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
   the preceding args and about the function being called.
   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   NAMED is true if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   INCOMING_P is false for TARGET_FUNCTION_ARG, true for
    TARGET_FUNCTION_INCOMING_ARG.  */
static rtx
sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
		      const_tree type, bool named, bool incoming_p)
{
  const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int regbase = (incoming_p
		 ? SPARC_INCOMING_INT_ARG_FIRST
		 : SPARC_OUTGOING_INT_ARG_FIRST);
  int slotno, regno, padding;
  enum mode_class mclass = GET_MODE_CLASS (mode);

  slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
				&regno, &padding);
  /* Vector types deserve special treatment because they are polymorphic wrt
     their mode, depending upon whether VIS instructions are enabled.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert ((TARGET_ARCH32 && size <= 8)
		  || (TARGET_ARCH64 && size <= 16));

      if (mode == BLKmode)
	return function_arg_vector_value (size,
					  SPARC_FP_ARG_FIRST + 2*slotno);
      else
	mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH32 || !type)
    return gen_rtx_REG (mode, regno);
  /* Structures up to 16 bytes in size are passed in arg slots on the stack
     and are promoted to registers if possible.  */
  if (type && TREE_CODE (type) == RECORD_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      return function_arg_record_value (type, mode, slotno, named, regbase);
    }

  /* Unions up to 16 bytes in size are passed in integer registers.  */
  else if (type && TREE_CODE (type) == UNION_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert (size <= 16);

      return function_arg_union_value (size, mode, slotno, regno);
    }
6899 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6900 but also have the slot allocated for them.
6901 If no prototype is in scope fp values in register slots get passed
6902 in two places, either fp regs and int regs or fp regs and memory. */
6903 else if ((mclass
== MODE_FLOAT
|| mclass
== MODE_COMPLEX_FLOAT
)
6904 && SPARC_FP_REG_P (regno
))
6906 rtx reg
= gen_rtx_REG (mode
, regno
);
6907 if (cum
->prototype_p
|| cum
->libcall_p
)
6909 /* "* 2" because fp reg numbers are recorded in 4 byte
6912 /* ??? This will cause the value to be passed in the fp reg and
6913 in the stack. When a prototype exists we want to pass the
6914 value in the reg but reserve space on the stack. That's an
6915 optimization, and is deferred [for a bit]. */
6916 if ((regno
- SPARC_FP_ARG_FIRST
) >= SPARC_INT_ARG_MAX
* 2)
6917 return gen_rtx_PARALLEL (mode
,
6919 gen_rtx_EXPR_LIST (VOIDmode
,
6920 NULL_RTX
, const0_rtx
),
6921 gen_rtx_EXPR_LIST (VOIDmode
,
6925 /* ??? It seems that passing back a register even when past
6926 the area declared by REG_PARM_STACK_SPACE will allocate
6927 space appropriately, and will not copy the data onto the
6928 stack, exactly as we desire.
6930 This is due to locate_and_pad_parm being called in
6931 expand_call whenever reg_parm_stack_space > 0, which
6932 while beneficial to our example here, would seem to be
6933 in error from what had been intended. Ho hum... -- r~ */
6941 if ((regno
- SPARC_FP_ARG_FIRST
) < SPARC_INT_ARG_MAX
* 2)
6945 /* On incoming, we don't need to know that the value
6946 is passed in %f0 and %i0, and it confuses other parts
6947 causing needless spillage even on the simplest cases. */
6951 intreg
= (SPARC_OUTGOING_INT_ARG_FIRST
6952 + (regno
- SPARC_FP_ARG_FIRST
) / 2);
6954 v0
= gen_rtx_EXPR_LIST (VOIDmode
, reg
, const0_rtx
);
6955 v1
= gen_rtx_EXPR_LIST (VOIDmode
, gen_rtx_REG (mode
, intreg
),
6957 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, v0
, v1
));
6961 v0
= gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
6962 v1
= gen_rtx_EXPR_LIST (VOIDmode
, reg
, const0_rtx
);
6963 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, v0
, v1
));
6968 /* All other aggregate types are passed in an integer register in a mode
6969 corresponding to the size of the type. */
6970 else if (type
&& AGGREGATE_TYPE_P (type
))
6972 HOST_WIDE_INT size
= int_size_in_bytes (type
);
6973 gcc_assert (size
<= 16);
6975 mode
= mode_for_size (size
* BITS_PER_UNIT
, MODE_INT
, 0);
6978 return gen_rtx_REG (mode
, regno
);
/* Handle the TARGET_FUNCTION_ARG target hook.  */

static rtx
sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
		    const_tree type, bool named)
{
  return sparc_function_arg_1 (cum, mode, type, named, false);
}

/* Handle the TARGET_FUNCTION_INCOMING_ARG target hook.  */

static rtx
sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
			     const_tree type, bool named)
{
  return sparc_function_arg_1 (cum, mode, type, named, true);
}
/* For sparc64, objects requiring 16 byte alignment are passed that way.  */

static unsigned int
sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
{
  return ((TARGET_ARCH64
	   && (GET_MODE_ALIGNMENT (mode) == 128
	       || (type && TYPE_ALIGN (type) == 128)))
	  ? 128
	  : PARM_BOUNDARY);
}
/* For an arg passed partly in registers and partly in memory,
   this is the number of bytes of registers used.
   For args passed entirely in registers or entirely in memory, zero.

   Any arg that starts in the first 6 regs but won't entirely fit in them
   needs partial registers on v8.  On v9, structures with integer
   values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
   values that begin in the last fp reg [where "last fp reg" varies with the
   mode] will be split between that reg and memory.  */

static int
sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
			 tree type, bool named)
{
  int slotno, regno, padding;

  /* We pass false for incoming_p here, it doesn't matter.  */
  slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
				false, &regno, &padding);

  if ((slotno + (mode == BLKmode
		 ? ROUND_ADVANCE (int_size_in_bytes (type))
		 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
      > SPARC_INT_ARG_MAX)
    return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
7044 /* We are guaranteed by pass_by_reference that the size of the
7045 argument is not greater than 16 bytes, so we only need to return
7046 one word if the argument is partially passed in registers. */
7048 if (type
&& AGGREGATE_TYPE_P (type
))
7050 int size
= int_size_in_bytes (type
);
7052 if (size
> UNITS_PER_WORD
7053 && slotno
== SPARC_INT_ARG_MAX
- 1)
7054 return UNITS_PER_WORD
;
7056 else if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_INT
7057 || (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
7058 && ! (TARGET_FPU
&& named
)))
7060 /* The complex types are passed as packed types. */
7061 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
7062 && slotno
== SPARC_INT_ARG_MAX
- 1)
7063 return UNITS_PER_WORD
;
7065 else if (GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
7067 if ((slotno
+ GET_MODE_SIZE (mode
) / UNITS_PER_WORD
)
7069 return UNITS_PER_WORD
;
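/* Worked example (editor's note, not part of the original port): on a 32-bit
   target with six integer argument slots (SPARC_INT_ARG_MAX == 6), a DImode
   argument starting in the last slot needs two words but only one register
   remains, so sparc_arg_partial_bytes reports

     (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD  ==  (6 - 5) * 4  ==  4

   bytes passed in a register (%o5), with the other half of the value going
   to the stack.  */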
/* Handle the TARGET_PASS_BY_REFERENCE target hook.
   Specify whether to pass the argument by reference.  */

static bool
sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			 enum machine_mode mode, const_tree type,
			 bool named ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
    /* Original SPARC 32-bit ABI says that structures and unions,
       and quad-precision floats are passed by reference.  For Pascal,
       also pass arrays by reference.  All other base types are passed
       in registers.

       Extended ABI (as implemented by the Sun compiler) says that all
       complex floats are passed by reference.  Pass complex integers
       in registers up to 8 bytes.  More generally, enforce the 2-word
       cap for passing arguments in registers.

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are passed like floats of the same size, that is in
       registers up to 8 bytes.  Pass all vector floats by reference
       like structure and unions.  */
    return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
	    /* Catch CDImode, TFmode, DCmode and TCmode.  */
	    || GET_MODE_SIZE (mode) > 8
	    || (type
		&& TREE_CODE (type) == VECTOR_TYPE
		&& (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
       smaller than 16 bytes are passed in registers, as well as
       all other base types.

       Extended ABI (as implemented by the Sun compiler) says that
       complex floats are passed in registers up to 16 bytes.  Pass
       all complex integers in registers up to 16 bytes.  More generally,
       enforce the 2-word cap for passing arguments in registers.

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are passed like floats of the same size, that is in
       registers (up to 16 bytes).  Pass all vector floats like structure
       and unions.  */
    return ((type
	     && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
	     && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
	    /* Catch CTImode and TCmode.  */
	    || GET_MODE_SIZE (mode) > 16);
}
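/* Summary sketch (editor's note, not part of the original port) of the rules
   above: under the 32-bit ABI, aggregates of any size, vector floats, TFmode
   values and anything whose mode is wider than 8 bytes (CDImode, DCmode,
   TCmode) go by reference; under the 64-bit ABI only aggregates and integer
   vectors larger than 16 bytes, plus modes wider than 16 bytes (CTImode,
   TCmode), go by reference.  Everything else is passed in registers or
   argument slots as decided by function_arg_slotno.  */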
/* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
   Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   TYPE is null for libcalls where that information may not be available.  */

static void
sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			    const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int regno, padding;

  /* We pass false for incoming_p here, it doesn't matter.  */
  function_arg_slotno (cum, mode, type, named, false, &regno, &padding);

  /* If argument requires leading padding, add it.  */
  cum->words += padding;

  if (TARGET_ARCH32)
    cum->words += (mode != BLKmode
		   ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
		   : ROUND_ADVANCE (int_size_in_bytes (type)));
7153 if (type
&& AGGREGATE_TYPE_P (type
))
7155 int size
= int_size_in_bytes (type
);
7159 else if (size
<= 16)
7161 else /* passed by reference */
7166 cum
->words
+= (mode
!= BLKmode
7167 ? ROUND_ADVANCE (GET_MODE_SIZE (mode
))
7168 : ROUND_ADVANCE (int_size_in_bytes (type
)));
/* Handle the FUNCTION_ARG_PADDING macro.
   For the 64 bit ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
  if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* Handle the TARGET_RETURN_IN_MEMORY target hook.
   Specify whether to return the return value in memory.  */

static bool
sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  if (TARGET_ARCH32)
    /* Original SPARC 32-bit ABI says that structures and unions,
       and quad-precision floats are returned in memory.  All other
       base types are returned in registers.

       Extended ABI (as implemented by the Sun compiler) says that
       all complex floats are returned in registers (8 FP registers
       at most for '_Complex long double').  Return all complex integers
       in registers (4 at most for '_Complex long long').

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are returned like floats of the same size, that is in
       registers up to 8 bytes and in memory otherwise.  Return all
       vector floats in memory like structure and unions; note that
       they always have BLKmode like the latter.  */
    return (TYPE_MODE (type) == BLKmode
	    || TYPE_MODE (type) == TFmode
	    || (TREE_CODE (type) == VECTOR_TYPE
		&& (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
  else
    /* Original SPARC 64-bit ABI says that structures and unions
       smaller than 32 bytes are returned in registers, as well as
       all other base types.

       Extended ABI (as implemented by the Sun compiler) says that all
       complex floats are returned in registers (8 FP registers at most
       for '_Complex long double').  Return all complex integers in
       registers (4 at most for '_Complex TItype').

       Vector ABI (as implemented by the Sun VIS SDK) says that vector
       integers are returned like floats of the same size, that is in
       registers.  Return all vector floats like structure and unions;
       note that they always have BLKmode like the latter.  */
    return (TYPE_MODE (type) == BLKmode
	    && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
}
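/* Worked example (editor's note, not part of the original port): under the
   rules above, a 24-byte struct is returned in memory on a 32-bit target
   (it is a BLKmode aggregate) but in registers on a 64-bit target (it is
   no larger than 32 bytes), while a 40-byte struct is returned in memory on
   both.  Vector floats, which always have BLKmode, follow the same
   aggregate rule.  */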
7230 /* Handle the TARGET_STRUCT_VALUE target hook.
7231 Return where to find the structure return value address. */
7234 sparc_struct_value_rtx (tree fndecl
, int incoming
)
7243 mem
= gen_frame_mem (Pmode
, plus_constant (Pmode
, frame_pointer_rtx
,
7244 STRUCT_VALUE_OFFSET
));
7246 mem
= gen_frame_mem (Pmode
, plus_constant (Pmode
, stack_pointer_rtx
,
7247 STRUCT_VALUE_OFFSET
));
7249 /* Only follow the SPARC ABI for fixed-size structure returns.
7250 Variable size structure returns are handled per the normal
7251 procedures in GCC. This is enabled by -mstd-struct-return */
7253 && sparc_std_struct_return
7254 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl
))
7255 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl
))) == INTEGER_CST
)
7257 /* We must check and adjust the return address, as it is
7258 optional as to whether the return object is really
7260 rtx ret_reg
= gen_rtx_REG (Pmode
, 31);
7261 rtx scratch
= gen_reg_rtx (SImode
);
7262 rtx endlab
= gen_label_rtx ();
7264 /* Calculate the return object size */
7265 tree size
= TYPE_SIZE_UNIT (TREE_TYPE (fndecl
));
7266 rtx size_rtx
= GEN_INT (TREE_INT_CST_LOW (size
) & 0xfff);
7267 /* Construct a temporary return value */
7269 = assign_stack_local (Pmode
, TREE_INT_CST_LOW (size
), 0);
7271 /* Implement SPARC 32-bit psABI callee return struct checking:
7273 Fetch the instruction where we will return to and see if
7274 it's an unimp instruction (the most significant 10 bits
7276 emit_move_insn (scratch
, gen_rtx_MEM (SImode
,
7277 plus_constant (Pmode
,
7279 /* Assume the size is valid and pre-adjust */
7280 emit_insn (gen_add3_insn (ret_reg
, ret_reg
, GEN_INT (4)));
7281 emit_cmp_and_jump_insns (scratch
, size_rtx
, EQ
, const0_rtx
, SImode
,
7283 emit_insn (gen_sub3_insn (ret_reg
, ret_reg
, GEN_INT (4)));
7284 /* Write the address of the memory pointed to by temp_val into
7285 the memory pointed to by mem */
7286 emit_move_insn (mem
, XEXP (temp_val
, 0));
7287 emit_label (endlab
);
/* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
   For v9, function return values are subject to the same rules as arguments,
   except that up to 32 bytes may be returned in registers.  */

static rtx
sparc_function_value_1 (const_tree type, enum machine_mode mode,
			bool outgoing)
{
  /* Beware that the two values are swapped here wrt function_arg.  */
  int regbase = (outgoing
		 ? SPARC_INCOMING_INT_ARG_FIRST
		 : SPARC_OUTGOING_INT_ARG_FIRST);
  enum mode_class mclass = GET_MODE_CLASS (mode);
  int regno;
  /* Vector types deserve special treatment because they are polymorphic wrt
     their mode, depending upon whether VIS instructions are enabled.  */
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      gcc_assert ((TARGET_ARCH32 && size <= 8)
		  || (TARGET_ARCH64 && size <= 32));

      if (mode == BLKmode)
	return function_arg_vector_value (size, SPARC_FP_ARG_FIRST);

      mclass = MODE_FLOAT;
    }

  if (TARGET_ARCH64 && type)
    {
      /* Structures up to 32 bytes in size are returned in registers.  */
      if (TREE_CODE (type) == RECORD_TYPE)
	{
	  HOST_WIDE_INT size = int_size_in_bytes (type);
	  gcc_assert (size <= 32);

	  return function_arg_record_value (type, mode, 0, 1, regbase);
	}

      /* Unions up to 32 bytes in size are returned in integer registers.  */
      else if (TREE_CODE (type) == UNION_TYPE)
	{
	  HOST_WIDE_INT size = int_size_in_bytes (type);
	  gcc_assert (size <= 32);

	  return function_arg_union_value (size, mode, 0, regbase);
	}
      /* Objects that require it are returned in FP registers.  */
      else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
	;

      /* All other aggregate types are returned in an integer register in a
	 mode corresponding to the size of the type.  */
      else if (AGGREGATE_TYPE_P (type))
	{
	  /* All other aggregate types are passed in an integer register
	     in a mode corresponding to the size of the type.  */
	  HOST_WIDE_INT size = int_size_in_bytes (type);
	  gcc_assert (size <= 32);

	  mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);

	  /* ??? We probably should have made the same ABI change in
	     3.4.0 as the one we made for unions.  The latter was
	     required by the SCD though, while the former is not
	     specified, so we favored compatibility and efficiency.

	     Now we're stuck for aggregates larger than 16 bytes,
	     because OImode vanished in the meantime.  Let's not
	     try to be unduly clever, and simply follow the ABI
	     for unions in that case.  */
	  if (mode == BLKmode)
	    return function_arg_union_value (size, mode, 0, regbase);
	}
      /* We should only have pointer and integer types at this point.  This
	 must match sparc_promote_function_mode.  */
      else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	mode = word_mode;
    }

  /* We should only have pointer and integer types at this point.  This must
     match sparc_promote_function_mode.  */
  else if (TARGET_ARCH32
	   && mclass == MODE_INT
	   && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    mode = word_mode;

  if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
    regno = SPARC_FP_ARG_FIRST;
  else
    regno = regbase;

  return gen_rtx_REG (mode, regno);
}
/* Handle TARGET_FUNCTION_VALUE.
   On the SPARC, the value is found in the first "output" register, but the
   called function leaves it in the first "input" register.  */

static rtx
sparc_function_value (const_tree valtype,
		      const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		      bool outgoing)
{
  return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
}

/* Handle TARGET_LIBCALL_VALUE.  */

static rtx
sparc_libcall_value (enum machine_mode mode,
		     const_rtx fun ATTRIBUTE_UNUSED)
{
  return sparc_function_value_1 (NULL_TREE, mode, false);
}

/* Handle FUNCTION_VALUE_REGNO_P.
   On the SPARC, the first "output" reg is used for integer values, and the
   first floating point register is used for floating point values.  */

static bool
sparc_function_value_regno_p (const unsigned int regno)
{
  return (regno == 8 || regno == 32);
}
7426 /* Do what is necessary for `va_start'. We look at the current function
7427 to determine if stdarg or varargs is used and return the address of
7428 the first unnamed parameter. */
7431 sparc_builtin_saveregs (void)
7433 int first_reg
= crtl
->args
.info
.words
;
7437 for (regno
= first_reg
; regno
< SPARC_INT_ARG_MAX
; regno
++)
7438 emit_move_insn (gen_rtx_MEM (word_mode
,
7439 gen_rtx_PLUS (Pmode
,
7441 GEN_INT (FIRST_PARM_OFFSET (0)
7444 gen_rtx_REG (word_mode
,
7445 SPARC_INCOMING_INT_ARG_FIRST
+ regno
));
7447 address
= gen_rtx_PLUS (Pmode
,
7449 GEN_INT (FIRST_PARM_OFFSET (0)
7450 + UNITS_PER_WORD
* first_reg
));
/* Implement `va_start' for stdarg.  */

static void
sparc_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
7464 /* Implement `va_arg' for stdarg. */
7467 sparc_gimplify_va_arg (tree valist
, tree type
, gimple_seq
*pre_p
,
7470 HOST_WIDE_INT size
, rsize
, align
;
7473 tree ptrtype
= build_pointer_type (type
);
7475 if (pass_by_reference (NULL
, TYPE_MODE (type
), type
, false))
7478 size
= rsize
= UNITS_PER_WORD
;
7484 size
= int_size_in_bytes (type
);
7485 rsize
= (size
+ UNITS_PER_WORD
- 1) & -UNITS_PER_WORD
;
7490 /* For SPARC64, objects requiring 16-byte alignment get it. */
7491 if (TYPE_ALIGN (type
) >= 2 * (unsigned) BITS_PER_WORD
)
7492 align
= 2 * UNITS_PER_WORD
;
7494 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7495 are left-justified in their slots. */
7496 if (AGGREGATE_TYPE_P (type
))
7499 size
= rsize
= UNITS_PER_WORD
;
7509 incr
= fold_build_pointer_plus_hwi (incr
, align
- 1);
7510 incr
= fold_convert (sizetype
, incr
);
7511 incr
= fold_build2 (BIT_AND_EXPR
, sizetype
, incr
,
7513 incr
= fold_convert (ptr_type_node
, incr
);
7516 gimplify_expr (&incr
, pre_p
, post_p
, is_gimple_val
, fb_rvalue
);
7519 if (BYTES_BIG_ENDIAN
&& size
< rsize
)
7520 addr
= fold_build_pointer_plus_hwi (incr
, rsize
- size
);
7524 addr
= fold_convert (build_pointer_type (ptrtype
), addr
);
7525 addr
= build_va_arg_indirect_ref (addr
);
  /* If the address isn't aligned properly for the type, we need a temporary.
     FIXME: This is inefficient, usually we can do this in registers.  */
  else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
    {
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);
      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize));
      TREE_ADDRESSABLE (tmp) = 1;
      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }
  else
    addr = fold_convert (ptrtype, addr);

  incr = fold_build_pointer_plus_hwi (incr, rsize);
  gimplify_assign (valist, incr, post_p);

  return build_va_arg_indirect_ref (addr);
}
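/* Rough C-level sketch (editor's note, not part of the original port) of the
   gimple sequence built by sparc_gimplify_va_arg for a by-value argument;
   names are illustrative only:

     addr = valist;
     if (align)
       addr = (addr + align - 1) & -align;
     if (BYTES_BIG_ENDIAN && size < rsize)
       addr += rsize - size;            // small args are right-justified
     result = *(type *) addr;
     valist = addr + rsize;             // advance to the next slot

   Indirect (pass-by-reference) arguments add one more dereference, and
   over-aligned types are first copied into a temporary via memcpy as done
   above.  */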
/* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
   Specify whether the vector mode is supported by the hardware.  */

static bool
sparc_vector_mode_supported_p (enum machine_mode mode)
{
  return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
}

/* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook.  */

static enum machine_mode
sparc_preferred_simd_mode (enum machine_mode mode)
7580 /* Return the string to output an unconditional branch to LABEL, which is
7581 the operand number of the label.
7583 DEST is the destination insn (i.e. the label), INSN is the source. */
7586 output_ubranch (rtx dest
, rtx insn
)
7588 static char string
[64];
7589 bool v9_form
= false;
7593 /* Even if we are trying to use cbcond for this, evaluate
7594 whether we can use V9 branches as our backup plan. */
7597 if (INSN_ADDRESSES_SET_P ())
7598 delta
= (INSN_ADDRESSES (INSN_UID (dest
))
7599 - INSN_ADDRESSES (INSN_UID (insn
)));
7601 /* Leave some instructions for "slop". */
7602 if (TARGET_V9
&& delta
>= -260000 && delta
< 260000)
7607 bool emit_nop
= emit_cbcond_nop (insn
);
7611 if (delta
< -500 || delta
> 500)
7617 rval
= "ba,a,pt\t%%xcc, %l0";
7624 rval
= "cwbe\t%%g0, %%g0, %l0\n\tnop";
7626 rval
= "cwbe\t%%g0, %%g0, %l0";
7632 strcpy (string
, "ba%*,pt\t%%xcc, ");
7634 strcpy (string
, "b%*\t");
7636 p
= strchr (string
, '\0');
7647 /* Return the string to output a conditional branch to LABEL, which is
7648 the operand number of the label. OP is the conditional expression.
7649 XEXP (OP, 0) is assumed to be a condition code register (integer or
7650 floating point) and its mode specifies what kind of comparison we made.
7652 DEST is the destination insn (i.e. the label), INSN is the source.
7654 REVERSED is nonzero if we should reverse the sense of the comparison.
7656 ANNUL is nonzero if we should generate an annulling branch. */
7659 output_cbranch (rtx op
, rtx dest
, int label
, int reversed
, int annul
,
7662 static char string
[64];
7663 enum rtx_code code
= GET_CODE (op
);
7664 rtx cc_reg
= XEXP (op
, 0);
7665 enum machine_mode mode
= GET_MODE (cc_reg
);
7666 const char *labelno
, *branch
;
7667 int spaces
= 8, far
;
7670 /* v9 branches are limited to +-1MB. If it is too far away,
7683 fbne,a,pn %fcc2, .LC29
7691 far
= TARGET_V9
&& (get_attr_length (insn
) >= 3);
7694 /* Reversal of FP compares takes care -- an ordered compare
7695 becomes an unordered compare and vice versa. */
7696 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
7697 code
= reverse_condition_maybe_unordered (code
);
7699 code
= reverse_condition (code
);
7702 /* Start by writing the branch condition. */
7703 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
7754 /* ??? !v9: FP branches cannot be preceded by another floating point
7755 insn. Because there is currently no concept of pre-delay slots,
7756 we can fix this only by always emitting a nop before a floating
7761 strcpy (string
, "nop\n\t");
7762 strcat (string
, branch
);
7775 if (mode
== CC_NOOVmode
|| mode
== CCX_NOOVmode
)
7787 if (mode
== CC_NOOVmode
|| mode
== CCX_NOOVmode
)
7808 strcpy (string
, branch
);
7810 spaces
-= strlen (branch
);
7811 p
= strchr (string
, '\0');
7813 /* Now add the annulling, the label, and a possible noop. */
7826 if (! far
&& insn
&& INSN_ADDRESSES_SET_P ())
7828 int delta
= (INSN_ADDRESSES (INSN_UID (dest
))
7829 - INSN_ADDRESSES (INSN_UID (insn
)));
7830 /* Leave some instructions for "slop". */
7831 if (delta
< -260000 || delta
>= 260000)
7835 if (mode
== CCFPmode
|| mode
== CCFPEmode
)
7837 static char v9_fcc_labelno
[] = "%%fccX, ";
7838 /* Set the char indicating the number of the fcc reg to use. */
7839 v9_fcc_labelno
[5] = REGNO (cc_reg
) - SPARC_FIRST_V9_FCC_REG
+ '0';
7840 labelno
= v9_fcc_labelno
;
7843 gcc_assert (REGNO (cc_reg
) == SPARC_FCC_REG
);
7847 else if (mode
== CCXmode
|| mode
== CCX_NOOVmode
)
7849 labelno
= "%%xcc, ";
7854 labelno
= "%%icc, ";
7859 if (*labelno
&& insn
&& (note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
)))
7862 ((XINT (note
, 0) >= REG_BR_PROB_BASE
/ 2) ^ far
)
7875 strcpy (p
, labelno
);
7876 p
= strchr (p
, '\0');
7879 strcpy (p
, ".+12\n\t nop\n\tb\t");
7880 /* Skip the next insn if requested or
7881 if we know that it will be a nop. */
7882 if (annul
|| ! final_sequence
)
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the operator to compare with (EQ, NE, GT, etc).
   Return the new operator to be used in the comparison sequence.

   TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
   values as arguments instead of the TFmode registers themselves,
   that's why we cannot call emit_float_lib_cmp.  */

rtx
sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
{
  const char *qpfunc;
  rtx slot0, slot1, result, tem, tem2, libfunc;
  enum machine_mode mode;
  enum rtx_code new_comparison;
7915 qpfunc
= (TARGET_ARCH64
? "_Qp_feq" : "_Q_feq");
7919 qpfunc
= (TARGET_ARCH64
? "_Qp_fne" : "_Q_fne");
7923 qpfunc
= (TARGET_ARCH64
? "_Qp_fgt" : "_Q_fgt");
7927 qpfunc
= (TARGET_ARCH64
? "_Qp_fge" : "_Q_fge");
7931 qpfunc
= (TARGET_ARCH64
? "_Qp_flt" : "_Q_flt");
7935 qpfunc
= (TARGET_ARCH64
? "_Qp_fle" : "_Q_fle");
7946 qpfunc
= (TARGET_ARCH64
? "_Qp_cmp" : "_Q_cmp");
7957 tree expr
= MEM_EXPR (x
);
7959 mark_addressable (expr
);
7964 slot0
= assign_stack_temp (TFmode
, GET_MODE_SIZE(TFmode
));
7965 emit_move_insn (slot0
, x
);
7970 tree expr
= MEM_EXPR (y
);
7972 mark_addressable (expr
);
7977 slot1
= assign_stack_temp (TFmode
, GET_MODE_SIZE(TFmode
));
7978 emit_move_insn (slot1
, y
);
7981 libfunc
= gen_rtx_SYMBOL_REF (Pmode
, qpfunc
);
7982 emit_library_call (libfunc
, LCT_NORMAL
,
7984 XEXP (slot0
, 0), Pmode
,
7985 XEXP (slot1
, 0), Pmode
);
7990 libfunc
= gen_rtx_SYMBOL_REF (Pmode
, qpfunc
);
7991 emit_library_call (libfunc
, LCT_NORMAL
,
7993 x
, TFmode
, y
, TFmode
);
7998 /* Immediately move the result of the libcall into a pseudo
7999 register so reload doesn't clobber the value if it needs
8000 the return register for a spill reg. */
8001 result
= gen_reg_rtx (mode
);
8002 emit_move_insn (result
, hard_libcall_value (mode
, libfunc
));
8007 return gen_rtx_NE (VOIDmode
, result
, const0_rtx
);
8010 new_comparison
= (comparison
== UNORDERED
? EQ
: NE
);
8011 return gen_rtx_fmt_ee (new_comparison
, VOIDmode
, result
, GEN_INT(3));
8014 new_comparison
= (comparison
== UNGT
? GT
: NE
);
8015 return gen_rtx_fmt_ee (new_comparison
, VOIDmode
, result
, const1_rtx
);
8017 return gen_rtx_NE (VOIDmode
, result
, const2_rtx
);
8019 tem
= gen_reg_rtx (mode
);
8021 emit_insn (gen_andsi3 (tem
, result
, const1_rtx
));
8023 emit_insn (gen_anddi3 (tem
, result
, const1_rtx
));
8024 return gen_rtx_NE (VOIDmode
, tem
, const0_rtx
);
8027 tem
= gen_reg_rtx (mode
);
8029 emit_insn (gen_addsi3 (tem
, result
, const1_rtx
));
8031 emit_insn (gen_adddi3 (tem
, result
, const1_rtx
));
8032 tem2
= gen_reg_rtx (mode
);
8034 emit_insn (gen_andsi3 (tem2
, tem
, const2_rtx
));
8036 emit_insn (gen_anddi3 (tem2
, tem
, const2_rtx
));
8037 new_comparison
= (comparison
== UNEQ
? EQ
: NE
);
8038 return gen_rtx_fmt_ee (new_comparison
, VOIDmode
, tem2
, const0_rtx
);
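/* Editor's note (not part of the original port): the decoding above relies on
   the quad-float comparison libcall returning a small integer that encodes
   the relation; judging from the constants tested against (0, 1, 2 and 3),
   equal, less, greater and unordered map onto distinct values, with
   UNORDERED recovered as result == 3 and the UN*/LTGT variants recovered by
   the add/and tricks emitted above.  This is an interpretation of the code,
   not a statement of the libcall ABI.  */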
/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out;

  in = force_reg (DImode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
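/* C-level sketch (editor's note, not part of the original port) of what the
   RTL emitted above computes for an unsigned 64-bit integer IN whose sign
   bit is set (the neglab path):

     uint64_t half = (in >> 1) | (in & 1);    // halve, keep a sticky low bit
     double   f    = (double) (int64_t) half; // now fits a signed conversion
     result        = f + f;                   // scale back up

   The sticky low bit preserves correct rounding; values with the sign bit
   clear take the direct signed-conversion path at the top.  */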
/* Generate an FP to unsigned DImode conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out, limit;

  in = force_reg (mode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  limit = gen_reg_rtx (mode);
  f0 = gen_reg_rtx (mode);

  emit_move_insn (limit,
		  CONST_DOUBLE_FROM_REAL_VALUE (
		    REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode,
			  out,
			  gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  emit_jump_insn (gen_jump (donelab));

  emit_label (neglab);

  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
  emit_insn (gen_rtx_SET (VOIDmode,
			  i0,
			  gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  emit_insn (gen_movdi (i1, const1_rtx));
  emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  emit_insn (gen_xordi3 (out, i0, i1));

  emit_label (donelab);
}
8118 /* Return the string to output a compare and branch instruction to DEST.
8119 DEST is the destination insn (i.e. the label), INSN is the source,
8120 and OP is the conditional expression. */
8123 output_cbcond (rtx op
, rtx dest
, rtx insn
)
8125 enum machine_mode mode
= GET_MODE (XEXP (op
, 0));
8126 enum rtx_code code
= GET_CODE (op
);
8127 const char *cond_str
, *tmpl
;
8128 int far
, emit_nop
, len
;
8129 static char string
[64];
8132 /* Compare and Branch is limited to +-2KB. If it is too far away,
8144 len
= get_attr_length (insn
);
8147 emit_nop
= len
== 2;
8150 code
= reverse_condition (code
);
8152 size_char
= ((mode
== SImode
) ? 'w' : 'x');
8165 if (mode
== CC_NOOVmode
|| mode
== CCX_NOOVmode
)
8180 if (mode
== CC_NOOVmode
|| mode
== CCX_NOOVmode
)
8208 int veryfar
= 1, delta
;
8210 if (INSN_ADDRESSES_SET_P ())
8212 delta
= (INSN_ADDRESSES (INSN_UID (dest
))
8213 - INSN_ADDRESSES (INSN_UID (insn
)));
8214 /* Leave some instructions for "slop". */
8215 if (delta
>= -260000 && delta
< 260000)
8220 tmpl
= "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8222 tmpl
= "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8227 tmpl
= "c%cb%s\t%%1, %%2, %%3\n\tnop";
8229 tmpl
= "c%cb%s\t%%1, %%2, %%3";
8232 snprintf (string
, sizeof(string
), tmpl
, size_char
, cond_str
);
8237 /* Return the string to output a conditional branch to LABEL, testing
8238 register REG. LABEL is the operand number of the label; REG is the
8239 operand number of the reg. OP is the conditional expression. The mode
8240 of REG says what kind of comparison we made.
8242 DEST is the destination insn (i.e. the label), INSN is the source.
8244 REVERSED is nonzero if we should reverse the sense of the comparison.
8246 ANNUL is nonzero if we should generate an annulling branch. */
8249 output_v9branch (rtx op
, rtx dest
, int reg
, int label
, int reversed
,
8250 int annul
, rtx insn
)
8252 static char string
[64];
8253 enum rtx_code code
= GET_CODE (op
);
8254 enum machine_mode mode
= GET_MODE (XEXP (op
, 0));
8259 /* branch on register are limited to +-128KB. If it is too far away,
8272 brgez,a,pn %o1, .LC29
8278 ba,pt %xcc, .LC29 */
8280 far
= get_attr_length (insn
) >= 3;
8282 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8284 code
= reverse_condition (code
);
8286 /* Only 64 bit versions of these instructions exist. */
8287 gcc_assert (mode
== DImode
);
8289 /* Start by writing the branch condition. */
8294 strcpy (string
, "brnz");
8298 strcpy (string
, "brz");
8302 strcpy (string
, "brgez");
8306 strcpy (string
, "brlz");
8310 strcpy (string
, "brlez");
8314 strcpy (string
, "brgz");
8321 p
= strchr (string
, '\0');
8323 /* Now add the annulling, reg, label, and nop. */
8330 if (insn
&& (note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
)))
8333 ((XINT (note
, 0) >= REG_BR_PROB_BASE
/ 2) ^ far
)
8338 *p
= p
< string
+ 8 ? '\t' : ' ';
8346 int veryfar
= 1, delta
;
8348 if (INSN_ADDRESSES_SET_P ())
8350 delta
= (INSN_ADDRESSES (INSN_UID (dest
))
8351 - INSN_ADDRESSES (INSN_UID (insn
)));
8352 /* Leave some instructions for "slop". */
8353 if (delta
>= -260000 && delta
< 260000)
8357 strcpy (p
, ".+12\n\t nop\n\t");
8358 /* Skip the next insn if requested or
8359 if we know that it will be a nop. */
8360 if (annul
|| ! final_sequence
)
8370 strcpy (p
, "ba,pt\t%%xcc, ");
8384 /* Return 1, if any of the registers of the instruction are %l[0-7] or %o[0-7].
8385 Such instructions cannot be used in the delay slot of return insn on v9.
8386 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
8390 epilogue_renumber (register rtx
*where
, int test
)
8392 register const char *fmt
;
8394 register enum rtx_code code
;
8399 code
= GET_CODE (*where
);
8404 if (REGNO (*where
) >= 8 && REGNO (*where
) < 24) /* oX or lX */
8406 if (! test
&& REGNO (*where
) >= 24 && REGNO (*where
) < 32)
8407 *where
= gen_rtx_REG (GET_MODE (*where
), OUTGOING_REGNO (REGNO(*where
)));
8415 /* Do not replace the frame pointer with the stack pointer because
8416 it can cause the delayed instruction to load below the stack.
8417 This occurs when instructions like:
8419 (set (reg/i:SI 24 %i0)
8420 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8421 (const_int -20 [0xffffffec])) 0))
8423 are in the return delayed slot. */
8425 if (GET_CODE (XEXP (*where
, 0)) == REG
8426 && REGNO (XEXP (*where
, 0)) == HARD_FRAME_POINTER_REGNUM
8427 && (GET_CODE (XEXP (*where
, 1)) != CONST_INT
8428 || INTVAL (XEXP (*where
, 1)) < SPARC_STACK_BIAS
))
8433 if (SPARC_STACK_BIAS
8434 && GET_CODE (XEXP (*where
, 0)) == REG
8435 && REGNO (XEXP (*where
, 0)) == HARD_FRAME_POINTER_REGNUM
)
8443 fmt
= GET_RTX_FORMAT (code
);
8445 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
8450 for (j
= XVECLEN (*where
, i
) - 1; j
>= 0; j
--)
8451 if (epilogue_renumber (&(XVECEXP (*where
, i
, j
)), test
))
8454 else if (fmt
[i
] == 'e'
8455 && epilogue_renumber (&(XEXP (*where
, i
)), test
))
/* Leaf functions and non-leaf functions have different needs.  */

static const int
reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;

static const int
reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;

static const int *const reg_alloc_orders[] = {
  reg_leaf_alloc_order,
  reg_nonleaf_alloc_order};

void
order_regs_for_local_alloc (void)
{
  static int last_order_nonleaf = 1;

  if (df_regs_ever_live_p (15) != last_order_nonleaf)
    {
      last_order_nonleaf = !last_order_nonleaf;
      memcpy ((char *) reg_alloc_order,
	      (const char *) reg_alloc_orders[last_order_nonleaf],
	      FIRST_PSEUDO_REGISTER * sizeof (int));
    }
}
8487 /* Return 1 if REG and MEM are legitimate enough to allow the various
8488 mem<-->reg splits to be run. */
8491 sparc_splitdi_legitimate (rtx reg
, rtx mem
)
8493 /* Punt if we are here by mistake. */
8494 gcc_assert (reload_completed
);
8496 /* We must have an offsettable memory reference. */
8497 if (! offsettable_memref_p (mem
))
8500 /* If we have legitimate args for ldd/std, we do not want
8501 the split to happen. */
8502 if ((REGNO (reg
) % 2) == 0
8503 && mem_min_alignment (mem
, 8))
8510 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
8513 sparc_split_regreg_legitimate (rtx reg1
, rtx reg2
)
8517 if (GET_CODE (reg1
) == SUBREG
)
8518 reg1
= SUBREG_REG (reg1
);
8519 if (GET_CODE (reg1
) != REG
)
8521 regno1
= REGNO (reg1
);
8523 if (GET_CODE (reg2
) == SUBREG
)
8524 reg2
= SUBREG_REG (reg2
);
8525 if (GET_CODE (reg2
) != REG
)
8527 regno2
= REGNO (reg2
);
8529 if (SPARC_INT_REG_P (regno1
) && SPARC_INT_REG_P (regno2
))
8534 if ((SPARC_INT_REG_P (regno1
) && SPARC_FP_REG_P (regno2
))
8535 || (SPARC_FP_REG_P (regno1
) && SPARC_INT_REG_P (regno2
)))
8542 /* Return 1 if x and y are some kind of REG and they refer to
8543 different hard registers. This test is guaranteed to be
8544 run after reload. */
8547 sparc_absnegfloat_split_legitimate (rtx x
, rtx y
)
8549 if (GET_CODE (x
) != REG
)
8551 if (GET_CODE (y
) != REG
)
8553 if (REGNO (x
) == REGNO (y
))
/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
   This makes them candidates for using ldd and std insns.

   Note reg1 and reg2 *must* be hard registers.  */

int
registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  if (REGNO (reg1) % 2 != 0)
    return 0;

  /* Integer ldd is deprecated in SPARC V9 */
  if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
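/* Illustrative note (editor's note, not part of the original port): the
   pairing rule above accepts, e.g., %o0/%o1 or %f2/%f3 as ldd/std register
   pairs, but rejects %o1/%o2 (odd first register) and %o0/%o2 (not
   consecutive); on V9 the integer form is rejected outright since integer
   ldd is deprecated there.  The companion check below additionally requires
   the two memory addresses to be adjacent words with the first one 8-byte
   aligned.  */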
8580 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
8583 This can only happen when addr1 and addr2, the addresses in mem1
8584 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
8585 addr1 must also be aligned on a 64-bit boundary.
8587 Also iff dependent_reg_rtx is not null it should not be used to
8588 compute the address for mem1, i.e. we cannot optimize a sequence
8600 But, note that the transformation from:
8605 is perfectly fine. Thus, the peephole2 patterns always pass us
8606 the destination register of the first load, never the second one.
8608 For stores we don't have a similar problem, so dependent_reg_rtx is
8612 mems_ok_for_ldd_peep (rtx mem1
, rtx mem2
, rtx dependent_reg_rtx
)
8616 HOST_WIDE_INT offset1
;
8618 /* The mems cannot be volatile. */
8619 if (MEM_VOLATILE_P (mem1
) || MEM_VOLATILE_P (mem2
))
8622 /* MEM1 should be aligned on a 64-bit boundary. */
8623 if (MEM_ALIGN (mem1
) < 64)
8626 addr1
= XEXP (mem1
, 0);
8627 addr2
= XEXP (mem2
, 0);
8629 /* Extract a register number and offset (if used) from the first addr. */
8630 if (GET_CODE (addr1
) == PLUS
)
8632 /* If not a REG, return zero. */
8633 if (GET_CODE (XEXP (addr1
, 0)) != REG
)
8637 reg1
= REGNO (XEXP (addr1
, 0));
8638 /* The offset must be constant! */
8639 if (GET_CODE (XEXP (addr1
, 1)) != CONST_INT
)
8641 offset1
= INTVAL (XEXP (addr1
, 1));
8644 else if (GET_CODE (addr1
) != REG
)
8648 reg1
= REGNO (addr1
);
8649 /* This was a simple (mem (reg)) expression. Offset is 0. */
8653 /* Make sure the second address is a (mem (plus (reg) (const_int). */
8654 if (GET_CODE (addr2
) != PLUS
)
8657 if (GET_CODE (XEXP (addr2
, 0)) != REG
8658 || GET_CODE (XEXP (addr2
, 1)) != CONST_INT
)
8661 if (reg1
!= REGNO (XEXP (addr2
, 0)))
8664 if (dependent_reg_rtx
!= NULL_RTX
&& reg1
== REGNO (dependent_reg_rtx
))
8667 /* The first offset must be evenly divisible by 8 to ensure the
8668 address is 64 bit aligned. */
8669 if (offset1
% 8 != 0)
8672 /* The offset for the second addr must be 4 more than the first addr. */
8673 if (INTVAL (XEXP (addr2
, 1)) != offset1
+ 4)
8676 /* All the tests passed. addr1 and addr2 are valid for ldd and std
/* Return the widened memory access made of MEM1 and MEM2 in MODE.  */

rtx
widen_mem_for_ldd_peep (rtx mem1, rtx mem2, enum machine_mode mode)
{
  rtx x = widen_memory_access (mem1, mode, 0);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
  return x;
}
8691 /* Return 1 if reg is a pseudo, or is the first register in
8692 a hard register pair. This makes it suitable for use in
8693 ldd and std insns. */
8696 register_ok_for_ldd (rtx reg
)
8698 /* We might have been passed a SUBREG. */
8702 if (REGNO (reg
) < FIRST_PSEUDO_REGISTER
)
8703 return (REGNO (reg
) % 2 == 0);
8708 /* Return 1 if OP, a MEM, has an address which is known to be
8709 aligned to an 8-byte boundary. */
8712 memory_ok_for_ldd (rtx op
)
8714 /* In 64-bit mode, we assume that the address is word-aligned. */
8715 if (TARGET_ARCH32
&& !mem_min_alignment (op
, 8))
8718 if (! can_create_pseudo_p ()
8719 && !strict_memory_address_p (Pmode
, XEXP (op
, 0)))
8725 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8728 sparc_print_operand_punct_valid_p (unsigned char code
)
8741 /* Implement TARGET_PRINT_OPERAND.
8742 Print operand X (an rtx) in assembler syntax to file FILE.
8743 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8744 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8747 sparc_print_operand (FILE *file
, rtx x
, int code
)
8752 /* Output an insn in a delay slot. */
8754 sparc_indent_opcode
= 1;
8756 fputs ("\n\t nop", file
);
8759 /* Output an annul flag if there's nothing for the delay slot and we
8760 are optimizing. This is always used with '(' below.
8761 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8762 this is a dbx bug. So, we only do this when optimizing.
8763 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8764 Always emit a nop in case the next instruction is a branch. */
8765 if (! final_sequence
&& (optimize
&& (int)sparc_cpu
< PROCESSOR_V9
))
8769 /* Output a 'nop' if there's nothing for the delay slot and we are
8770 not optimizing. This is always used with '*' above. */
8771 if (! final_sequence
&& ! (optimize
&& (int)sparc_cpu
< PROCESSOR_V9
))
8772 fputs ("\n\t nop", file
);
8773 else if (final_sequence
)
8774 sparc_indent_opcode
= 1;
8777 /* Output the right displacement from the saved PC on function return.
8778 The caller may have placed an "unimp" insn immediately after the call
8779 so we have to account for it. This insn is used in the 32-bit ABI
8780 when calling a function that returns a non zero-sized structure. The
8781 64-bit ABI doesn't have it. Be careful to have this test be the same
8782 as that for the call. The exception is when sparc_std_struct_return
8783 is enabled, the psABI is followed exactly and the adjustment is made
8784 by the code in sparc_struct_value_rtx. The call emitted is the same
8785 when sparc_std_struct_return is enabled. */
8787 && cfun
->returns_struct
8788 && !sparc_std_struct_return
8789 && DECL_SIZE (DECL_RESULT (current_function_decl
))
8790 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl
)))
8792 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl
))))
8798 /* Output the Embedded Medium/Anywhere code model base register. */
8799 fputs (EMBMEDANY_BASE_REG
, file
);
8802 /* Print some local dynamic TLS name. */
8803 assemble_name (file
, get_some_local_dynamic_name ());
8807 /* Adjust the operand to take into account a RESTORE operation. */
8808 if (GET_CODE (x
) == CONST_INT
)
8810 else if (GET_CODE (x
) != REG
)
8811 output_operand_lossage ("invalid %%Y operand");
8812 else if (REGNO (x
) < 8)
8813 fputs (reg_names
[REGNO (x
)], file
);
8814 else if (REGNO (x
) >= 24 && REGNO (x
) < 32)
8815 fputs (reg_names
[REGNO (x
)-16], file
);
8817 output_operand_lossage ("invalid %%Y operand");
8820 /* Print out the low order register name of a register pair. */
8821 if (WORDS_BIG_ENDIAN
)
8822 fputs (reg_names
[REGNO (x
)+1], file
);
8824 fputs (reg_names
[REGNO (x
)], file
);
8827 /* Print out the high order register name of a register pair. */
8828 if (WORDS_BIG_ENDIAN
)
8829 fputs (reg_names
[REGNO (x
)], file
);
8831 fputs (reg_names
[REGNO (x
)+1], file
);
8834 /* Print out the second register name of a register pair or quad.
8835 I.e., R (%o0) => %o1. */
8836 fputs (reg_names
[REGNO (x
)+1], file
);
8839 /* Print out the third register name of a register quad.
8840 I.e., S (%o0) => %o2. */
8841 fputs (reg_names
[REGNO (x
)+2], file
);
8844 /* Print out the fourth register name of a register quad.
8845 I.e., T (%o0) => %o3. */
8846 fputs (reg_names
[REGNO (x
)+3], file
);
8849 /* Print a condition code register. */
8850 if (REGNO (x
) == SPARC_ICC_REG
)
8852 /* We don't handle CC[X]_NOOVmode because they're not supposed
8854 if (GET_MODE (x
) == CCmode
)
8855 fputs ("%icc", file
);
8856 else if (GET_MODE (x
) == CCXmode
)
8857 fputs ("%xcc", file
);
8862 /* %fccN register */
8863 fputs (reg_names
[REGNO (x
)], file
);
      /* Print the operand's address only.  */
      output_address (XEXP (x, 0));
      return;

      /* In this case we need a register.  Use %g0 if the
	 operand is const0_rtx.  */
      if (x == const0_rtx
	  || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
	{
	  fputs ("%g0", file);
	  return;
	}
      switch (GET_CODE (x))
	{
	case IOR: fputs ("or", file); break;
	case AND: fputs ("and", file); break;
	case XOR: fputs ("xor", file); break;
	default: output_operand_lossage ("invalid %%A operand");
	}
      return;

      switch (GET_CODE (x))
	{
	case IOR: fputs ("orn", file); break;
	case AND: fputs ("andn", file); break;
	case XOR: fputs ("xnor", file); break;
	default: output_operand_lossage ("invalid %%B operand");
	}
      return;
      /* This is used by the conditional move instructions.  */
      {
	enum rtx_code rc = GET_CODE (x);

	switch (rc)
	  {
	  case NE: fputs ("ne", file); break;
	  case EQ: fputs ("e", file); break;
	  case GE: fputs ("ge", file); break;
	  case GT: fputs ("g", file); break;
	  case LE: fputs ("le", file); break;
	  case LT: fputs ("l", file); break;
	  case GEU: fputs ("geu", file); break;
	  case GTU: fputs ("gu", file); break;
	  case LEU: fputs ("leu", file); break;
	  case LTU: fputs ("lu", file); break;
	  case LTGT: fputs ("lg", file); break;
	  case UNORDERED: fputs ("u", file); break;
	  case ORDERED: fputs ("o", file); break;
	  case UNLT: fputs ("ul", file); break;
	  case UNLE: fputs ("ule", file); break;
	  case UNGT: fputs ("ug", file); break;
	  case UNGE: fputs ("uge", file); break;
	  case UNEQ: fputs ("ue", file); break;
	  default: output_operand_lossage ("invalid %%C operand");
	  }
	return;
      }
      /* These are used by the movr instruction pattern.  */
      {
	enum rtx_code rc = GET_CODE (x);

	switch (rc)
	  {
	  case NE: fputs ("ne", file); break;
	  case EQ: fputs ("e", file); break;
	  case GE: fputs ("gez", file); break;
	  case LT: fputs ("lz", file); break;
	  case LE: fputs ("lez", file); break;
	  case GT: fputs ("gz", file); break;
	  default: output_operand_lossage ("invalid %%D operand");
	  }
	return;
      }
      /* Print a sign-extended character.  */
      {
	int i = trunc_int_for_mode (INTVAL (x), QImode);
	fprintf (file, "%d", i);
	return;
      }

      /* Operand must be a MEM; write its address.  */
      if (GET_CODE (x) != MEM)
	output_operand_lossage ("invalid %%f operand");
      output_address (XEXP (x, 0));
      return;
      /* Print a sign-extended 32-bit value.  */
      {
	HOST_WIDE_INT i;

	if (GET_CODE (x) == CONST_INT)
	  i = INTVAL (x);
	else if (GET_CODE (x) == CONST_DOUBLE)
	  i = CONST_DOUBLE_LOW (x);
	else
	  {
	    output_operand_lossage ("invalid %%s operand");
	    return;
	  }
	i = trunc_int_for_mode (i, SImode);
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
	return;
      }
8982 /* Do nothing special. */
8986 /* Undocumented flag. */
8987 output_operand_lossage ("invalid operand output code");
8990 if (GET_CODE (x
) == REG
)
8991 fputs (reg_names
[REGNO (x
)], file
);
8992 else if (GET_CODE (x
) == MEM
)
8995 /* Poor Sun assembler doesn't understand absolute addressing. */
8996 if (CONSTANT_P (XEXP (x
, 0)))
8997 fputs ("%g0+", file
);
8998 output_address (XEXP (x
, 0));
9001 else if (GET_CODE (x
) == HIGH
)
9003 fputs ("%hi(", file
);
9004 output_addr_const (file
, XEXP (x
, 0));
9007 else if (GET_CODE (x
) == LO_SUM
)
9009 sparc_print_operand (file
, XEXP (x
, 0), 0);
9010 if (TARGET_CM_MEDMID
)
9011 fputs ("+%l44(", file
);
9013 fputs ("+%lo(", file
);
9014 output_addr_const (file
, XEXP (x
, 1));
9017 else if (GET_CODE (x
) == CONST_DOUBLE
9018 && (GET_MODE (x
) == VOIDmode
9019 || GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
))
9021 if (CONST_DOUBLE_HIGH (x
) == 0)
9022 fprintf (file
, "%u", (unsigned int) CONST_DOUBLE_LOW (x
));
9023 else if (CONST_DOUBLE_HIGH (x
) == -1
9024 && CONST_DOUBLE_LOW (x
) < 0)
9025 fprintf (file
, "%d", (int) CONST_DOUBLE_LOW (x
));
9027 output_operand_lossage ("long long constant not a valid immediate operand");
9029 else if (GET_CODE (x
) == CONST_DOUBLE
)
9030 output_operand_lossage ("floating point constant not a valid immediate operand");
9031 else { output_addr_const (file
, x
); }
9034 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
9037 sparc_print_operand_address (FILE *file
, rtx x
)
9039 register rtx base
, index
= 0;
9041 register rtx addr
= x
;
9044 fputs (reg_names
[REGNO (addr
)], file
);
9045 else if (GET_CODE (addr
) == PLUS
)
9047 if (CONST_INT_P (XEXP (addr
, 0)))
9048 offset
= INTVAL (XEXP (addr
, 0)), base
= XEXP (addr
, 1);
9049 else if (CONST_INT_P (XEXP (addr
, 1)))
9050 offset
= INTVAL (XEXP (addr
, 1)), base
= XEXP (addr
, 0);
9052 base
= XEXP (addr
, 0), index
= XEXP (addr
, 1);
9053 if (GET_CODE (base
) == LO_SUM
)
9055 gcc_assert (USE_AS_OFFSETABLE_LO10
9057 && ! TARGET_CM_MEDMID
);
9058 output_operand (XEXP (base
, 0), 0);
9059 fputs ("+%lo(", file
);
9060 output_address (XEXP (base
, 1));
9061 fprintf (file
, ")+%d", offset
);
9065 fputs (reg_names
[REGNO (base
)], file
);
9067 fprintf (file
, "%+d", offset
);
9068 else if (REG_P (index
))
9069 fprintf (file
, "+%s", reg_names
[REGNO (index
)]);
9070 else if (GET_CODE (index
) == SYMBOL_REF
9071 || GET_CODE (index
) == LABEL_REF
9072 || GET_CODE (index
) == CONST
)
9073 fputc ('+', file
), output_addr_const (file
, index
);
9074 else gcc_unreachable ();
9077 else if (GET_CODE (addr
) == MINUS
9078 && GET_CODE (XEXP (addr
, 1)) == LABEL_REF
)
9080 output_addr_const (file
, XEXP (addr
, 0));
9082 output_addr_const (file
, XEXP (addr
, 1));
9083 fputs ("-.)", file
);
9085 else if (GET_CODE (addr
) == LO_SUM
)
9087 output_operand (XEXP (addr
, 0), 0);
9088 if (TARGET_CM_MEDMID
)
9089 fputs ("+%l44(", file
);
9091 fputs ("+%lo(", file
);
9092 output_address (XEXP (addr
, 1));
9096 && GET_CODE (addr
) == CONST
9097 && GET_CODE (XEXP (addr
, 0)) == MINUS
9098 && GET_CODE (XEXP (XEXP (addr
, 0), 1)) == CONST
9099 && GET_CODE (XEXP (XEXP (XEXP (addr
, 0), 1), 0)) == MINUS
9100 && XEXP (XEXP (XEXP (XEXP (addr
, 0), 1), 0), 1) == pc_rtx
)
9102 addr
= XEXP (addr
, 0);
9103 output_addr_const (file
, XEXP (addr
, 0));
9104 /* Group the args of the second CONST in parenthesis. */
9106 /* Skip past the second CONST--it does nothing for us. */
9107 output_addr_const (file
, XEXP (XEXP (addr
, 1), 0));
9108 /* Close the parenthesis. */
9113 output_addr_const (file
, addr
);
9117 /* Target hook for assembling integer objects. The sparc version has
9118 special handling for aligned DI-mode objects. */
9121 sparc_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
9123 /* ??? We only output .xword's for symbols and only then in environments
9124 where the assembler can handle them. */
9125 if (aligned_p
&& size
== 8
9126 && (GET_CODE (x
) != CONST_INT
&& GET_CODE (x
) != CONST_DOUBLE
))
9130 assemble_integer_with_op ("\t.xword\t", x
);
9135 assemble_aligned_integer (4, const0_rtx
);
9136 assemble_aligned_integer (4, x
);
9140 return default_assemble_integer (x
, size
, aligned_p
);
9143 /* Return the value of a code used in the .proc pseudo-op that says
9144 what kind of result this function returns. For non-C types, we pick
9145 the closest C type. */
#ifndef SHORT_TYPE_SIZE
#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
#endif

#ifndef INT_TYPE_SIZE
#define INT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_LONG_TYPE_SIZE
#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef FLOAT_TYPE_SIZE
#define FLOAT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef DOUBLE_TYPE_SIZE
#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif
static unsigned long
sparc_type_code (register tree type)
{
  register unsigned long qualifiers = 0;
  register unsigned shift;

  /* Only the first 30 bits of the qualifier are valid.  We must refrain from
     setting more, since some assemblers will give an error for this.  Also,
     we must be careful to avoid shifts of 32 bits or more to avoid getting
     unpredictable results.  */
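  /* Illustrative example, not part of the original source: for a type like
     "unsigned char *", the loop below records one POINTER_TYPE level as
     (1 << 6) and then returns that value ORed with 12 (unsigned char),
     i.e. 0x4c.  Each further level of array, function or pointer nesting
     uses the next 2-bit field, up to bit 30.  */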
9186 for (shift
= 6; shift
< 30; shift
+= 2, type
= TREE_TYPE (type
))
9188 switch (TREE_CODE (type
))
9194 qualifiers
|= (3 << shift
);
9199 qualifiers
|= (2 << shift
);
9203 case REFERENCE_TYPE
:
9205 qualifiers
|= (1 << shift
);
9209 return (qualifiers
| 8);
9212 case QUAL_UNION_TYPE
:
9213 return (qualifiers
| 9);
9216 return (qualifiers
| 10);
9219 return (qualifiers
| 16);
9222 /* If this is a range type, consider it to be the underlying
9224 if (TREE_TYPE (type
) != 0)
9227 /* Carefully distinguish all the standard types of C,
9228 without messing up if the language is not C. We do this by
9229 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9230 look at both the names and the above fields, but that's redundant.
9231 Any type whose size is between two C types will be considered
9232 to be the wider of the two types. Also, we do not have a
9233 special code to use for "long long", so anything wider than
9234 long is treated the same. Note that we can't distinguish
9235 between "int" and "long" in this code if they are the same
9236 size, but that's fine, since neither can the assembler. */
9238 if (TYPE_PRECISION (type
) <= CHAR_TYPE_SIZE
)
9239 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 12 : 2));
9241 else if (TYPE_PRECISION (type
) <= SHORT_TYPE_SIZE
)
9242 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 13 : 3));
9244 else if (TYPE_PRECISION (type
) <= INT_TYPE_SIZE
)
9245 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 14 : 4));
9248 return (qualifiers
| (TYPE_UNSIGNED (type
) ? 15 : 5));
9251 /* If this is a range type, consider it to be the underlying
9253 if (TREE_TYPE (type
) != 0)
9256 /* Carefully distinguish all the standard types of C,
9257 without messing up if the language is not C. */
9259 if (TYPE_PRECISION (type
) == FLOAT_TYPE_SIZE
)
9260 return (qualifiers
| 6);
9263 return (qualifiers
| 7);
9265 case COMPLEX_TYPE
: /* GNU Fortran COMPLEX type. */
9266 /* ??? We need to distinguish between double and float complex types,
9267 but I don't know how yet because I can't reach this code from
9268 existing front-ends. */
9269 return (qualifiers
| 7); /* Who knows? */
9272 case BOOLEAN_TYPE
: /* Boolean truth value type. */
9278 gcc_unreachable (); /* Not a type! */
/* Nested function support.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
   (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
   (to store insns).  This is a bit excessive.  Perhaps a different
   mechanism would be better here.

   Emit enough FLUSH insns to synchronize the data and instruction caches.  */

static void
sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 32-bit trampoline:

	sethi	%hi(fn), %g1
	sethi	%hi(static), %g2
	jmp	%g1+%lo(fn)
	or	%g2, %lo(static), %g2

	SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
	JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
   */
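  /* Worked example (illustrative only): if FNADDR were the constant
     0x12345678, the first trampoline word would be

	 0x03000000 | (0x12345678 >> 10) == 0x03048d15

     i.e. "sethi %hi(0x12345678), %g1", which is what the store of the
     first word below computes at run time from the dynamic address.  */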
9313 (adjust_address (m_tramp
, SImode
, 0),
9314 expand_binop (SImode
, ior_optab
,
9315 expand_shift (RSHIFT_EXPR
, SImode
, fnaddr
, 10, 0, 1),
9316 GEN_INT (trunc_int_for_mode (0x03000000, SImode
)),
9317 NULL_RTX
, 1, OPTAB_DIRECT
));
9320 (adjust_address (m_tramp
, SImode
, 4),
9321 expand_binop (SImode
, ior_optab
,
9322 expand_shift (RSHIFT_EXPR
, SImode
, cxt
, 10, 0, 1),
9323 GEN_INT (trunc_int_for_mode (0x05000000, SImode
)),
9324 NULL_RTX
, 1, OPTAB_DIRECT
));
9327 (adjust_address (m_tramp
, SImode
, 8),
9328 expand_binop (SImode
, ior_optab
,
9329 expand_and (SImode
, fnaddr
, GEN_INT (0x3ff), NULL_RTX
),
9330 GEN_INT (trunc_int_for_mode (0x81c06000, SImode
)),
9331 NULL_RTX
, 1, OPTAB_DIRECT
));
9334 (adjust_address (m_tramp
, SImode
, 12),
9335 expand_binop (SImode
, ior_optab
,
9336 expand_and (SImode
, cxt
, GEN_INT (0x3ff), NULL_RTX
),
9337 GEN_INT (trunc_int_for_mode (0x8410a000, SImode
)),
9338 NULL_RTX
, 1, OPTAB_DIRECT
));
9340 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9341 aligned on a 16 byte boundary so one flush clears it all. */
9342 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp
, SImode
, 0))));
9343 if (sparc_cpu
!= PROCESSOR_ULTRASPARC
9344 && sparc_cpu
!= PROCESSOR_ULTRASPARC3
9345 && sparc_cpu
!= PROCESSOR_NIAGARA
9346 && sparc_cpu
!= PROCESSOR_NIAGARA2
9347 && sparc_cpu
!= PROCESSOR_NIAGARA3
9348 && sparc_cpu
!= PROCESSOR_NIAGARA4
)
9349 emit_insn (gen_flushsi (validize_mem (adjust_address (m_tramp
, SImode
, 8))));
9351 /* Call __enable_execute_stack after writing onto the stack to make sure
9352 the stack address is accessible. */
9353 #ifdef HAVE_ENABLE_EXECUTE_STACK
9354 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__enable_execute_stack"),
9355 LCT_NORMAL
, VOIDmode
, 1, XEXP (m_tramp
, 0), Pmode
);
9360 /* The 64-bit version is simpler because it makes more sense to load the
9361 values as "immediate" data out of the trampoline. It's also easier since
9362 we can read the PC without clobbering a register. */
9365 sparc64_initialize_trampoline (rtx m_tramp
, rtx fnaddr
, rtx cxt
)
9367 /* SPARC 64-bit trampoline:
9376 emit_move_insn (adjust_address (m_tramp
, SImode
, 0),
9377 GEN_INT (trunc_int_for_mode (0x83414000, SImode
)));
9378 emit_move_insn (adjust_address (m_tramp
, SImode
, 4),
9379 GEN_INT (trunc_int_for_mode (0xca586018, SImode
)));
9380 emit_move_insn (adjust_address (m_tramp
, SImode
, 8),
9381 GEN_INT (trunc_int_for_mode (0x81c14000, SImode
)));
9382 emit_move_insn (adjust_address (m_tramp
, SImode
, 12),
9383 GEN_INT (trunc_int_for_mode (0xca586010, SImode
)));
9384 emit_move_insn (adjust_address (m_tramp
, DImode
, 16), cxt
);
9385 emit_move_insn (adjust_address (m_tramp
, DImode
, 24), fnaddr
);
9386 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp
, DImode
, 0))));
9388 if (sparc_cpu
!= PROCESSOR_ULTRASPARC
9389 && sparc_cpu
!= PROCESSOR_ULTRASPARC3
9390 && sparc_cpu
!= PROCESSOR_NIAGARA
9391 && sparc_cpu
!= PROCESSOR_NIAGARA2
9392 && sparc_cpu
!= PROCESSOR_NIAGARA3
9393 && sparc_cpu
!= PROCESSOR_NIAGARA4
)
9394 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp
, DImode
, 8))));
9396 /* Call __enable_execute_stack after writing onto the stack to make sure
9397 the stack address is accessible. */
9398 #ifdef HAVE_ENABLE_EXECUTE_STACK
9399 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__enable_execute_stack"),
9400 LCT_NORMAL
, VOIDmode
, 1, XEXP (m_tramp
, 0), Pmode
);
/* Worker for TARGET_TRAMPOLINE_INIT.  */

static void
sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
  cxt = force_reg (Pmode, cxt);

  if (TARGET_ARCH64)
    sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
  else
    sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
}
9417 /* Adjust the cost of a scheduling dependency. Return the new cost of
9418 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
9421 supersparc_adjust_cost (rtx insn
, rtx link
, rtx dep_insn
, int cost
)
9423 enum attr_type insn_type
;
9425 if (! recog_memoized (insn
))
9428 insn_type
= get_attr_type (insn
);
9430 if (REG_NOTE_KIND (link
) == 0)
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */

      /* if a load, then the dependence must be on the memory address;
	 add an extra "cycle".  Note that the cost could be two cycles
	 if the reg was written late in an instruction group; we cannot tell
	 here.  */
      if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
	return cost + 3;
9442 /* Get the delay only if the address of the store is the dependence. */
9443 if (insn_type
== TYPE_STORE
|| insn_type
== TYPE_FPSTORE
)
9445 rtx pat
= PATTERN(insn
);
9446 rtx dep_pat
= PATTERN (dep_insn
);
9448 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
9449 return cost
; /* This should not happen! */
9451 /* The dependency between the two instructions was on the data that
9452 is being stored. Assume that this implies that the address of the
9453 store is not dependent. */
9454 if (rtx_equal_p (SET_DEST (dep_pat
), SET_SRC (pat
)))
9457 return cost
+ 3; /* An approximation. */
9460 /* A shift instruction cannot receive its data from an instruction
9461 in the same cycle; add a one cycle penalty. */
9462 if (insn_type
== TYPE_SHIFT
)
9463 return cost
+ 3; /* Split before cascade into shift. */
9467 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
9468 INSN writes some cycles later. */
9470 /* These are only significant for the fpu unit; writing a fp reg before
9471 the fpu has finished with it stalls the processor. */
9473 /* Reusing an integer register causes no problems. */
9474 if (insn_type
== TYPE_IALU
|| insn_type
== TYPE_SHIFT
)
9482 hypersparc_adjust_cost (rtx insn
, rtx link
, rtx dep_insn
, int cost
)
9484 enum attr_type insn_type
, dep_type
;
9485 rtx pat
= PATTERN(insn
);
9486 rtx dep_pat
= PATTERN (dep_insn
);
9488 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
9491 insn_type
= get_attr_type (insn
);
9492 dep_type
= get_attr_type (dep_insn
);
9494 switch (REG_NOTE_KIND (link
))
9497 /* Data dependency; DEP_INSN writes a register that INSN reads some
9504 /* Get the delay iff the address of the store is the dependence. */
9505 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
)
9508 if (rtx_equal_p (SET_DEST (dep_pat
), SET_SRC (pat
)))
9515 /* If a load, then the dependence must be on the memory address. If
9516 the addresses aren't equal, then it might be a false dependency */
9517 if (dep_type
== TYPE_STORE
|| dep_type
== TYPE_FPSTORE
)
9519 if (GET_CODE (pat
) != SET
|| GET_CODE (dep_pat
) != SET
9520 || GET_CODE (SET_DEST (dep_pat
)) != MEM
9521 || GET_CODE (SET_SRC (pat
)) != MEM
9522 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat
), 0),
9523 XEXP (SET_SRC (pat
), 0)))
9531 /* Compare to branch latency is 0. There is no benefit from
9532 separating compare and branch. */
9533 if (dep_type
== TYPE_COMPARE
)
9535 /* Floating point compare to branch latency is less than
9536 compare to conditional move. */
9537 if (dep_type
== TYPE_FPCMP
)
9546 /* Anti-dependencies only penalize the fpu unit. */
9547 if (insn_type
== TYPE_IALU
|| insn_type
== TYPE_SHIFT
)
9559 sparc_adjust_cost(rtx insn
, rtx link
, rtx dep
, int cost
)
9563 case PROCESSOR_SUPERSPARC
:
9564 cost
= supersparc_adjust_cost (insn
, link
, dep
, cost
);
9566 case PROCESSOR_HYPERSPARC
:
9567 case PROCESSOR_SPARCLITE86X
:
9568 cost
= hypersparc_adjust_cost (insn
, link
, dep
, cost
);
9577 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED
,
9578 int sched_verbose ATTRIBUTE_UNUSED
,
9579 int max_ready ATTRIBUTE_UNUSED
)
9583 sparc_use_sched_lookahead (void)
9585 if (sparc_cpu
== PROCESSOR_NIAGARA
9586 || sparc_cpu
== PROCESSOR_NIAGARA2
9587 || sparc_cpu
== PROCESSOR_NIAGARA3
)
9589 if (sparc_cpu
== PROCESSOR_NIAGARA4
)
9591 if (sparc_cpu
== PROCESSOR_ULTRASPARC
9592 || sparc_cpu
== PROCESSOR_ULTRASPARC3
)
9594 if ((1 << sparc_cpu
) &
9595 ((1 << PROCESSOR_SUPERSPARC
) | (1 << PROCESSOR_HYPERSPARC
) |
9596 (1 << PROCESSOR_SPARCLITE86X
)))
9602 sparc_issue_rate (void)
9606 case PROCESSOR_NIAGARA
:
9607 case PROCESSOR_NIAGARA2
:
9608 case PROCESSOR_NIAGARA3
:
9611 case PROCESSOR_NIAGARA4
:
9613 /* Assume V9 processors are capable of at least dual-issue. */
9615 case PROCESSOR_SUPERSPARC
:
9617 case PROCESSOR_HYPERSPARC
:
9618 case PROCESSOR_SPARCLITE86X
:
9620 case PROCESSOR_ULTRASPARC
:
9621 case PROCESSOR_ULTRASPARC3
:
9627 set_extends (rtx insn
)
9629 register rtx pat
= PATTERN (insn
);
9631 switch (GET_CODE (SET_SRC (pat
)))
9633 /* Load and some shift instructions zero extend. */
9636 /* sethi clears the high bits */
9638 /* LO_SUM is used with sethi. sethi cleared the high
9639 bits and the values used with lo_sum are positive */
9641 /* Store flag stores 0 or 1 */
9651 rtx op0
= XEXP (SET_SRC (pat
), 0);
9652 rtx op1
= XEXP (SET_SRC (pat
), 1);
9653 if (GET_CODE (op1
) == CONST_INT
)
9654 return INTVAL (op1
) >= 0;
9655 if (GET_CODE (op0
) != REG
)
9657 if (sparc_check_64 (op0
, insn
) == 1)
9659 return (GET_CODE (op1
) == REG
&& sparc_check_64 (op1
, insn
) == 1);
9664 rtx op0
= XEXP (SET_SRC (pat
), 0);
9665 rtx op1
= XEXP (SET_SRC (pat
), 1);
9666 if (GET_CODE (op0
) != REG
|| sparc_check_64 (op0
, insn
) <= 0)
9668 if (GET_CODE (op1
) == CONST_INT
)
9669 return INTVAL (op1
) >= 0;
9670 return (GET_CODE (op1
) == REG
&& sparc_check_64 (op1
, insn
) == 1);
9673 return GET_MODE (SET_SRC (pat
)) == SImode
;
9674 /* Positive integers leave the high bits zero. */
9676 return ! (CONST_DOUBLE_LOW (SET_SRC (pat
)) & 0x80000000);
9678 return ! (INTVAL (SET_SRC (pat
)) & 0x80000000);
9681 return - (GET_MODE (SET_SRC (pat
)) == SImode
);
9683 return sparc_check_64 (SET_SRC (pat
), insn
);
9689 /* We _ought_ to have only one kind per function, but... */
9690 static GTY(()) rtx sparc_addr_diff_list
;
9691 static GTY(()) rtx sparc_addr_list
;
9694 sparc_defer_case_vector (rtx lab
, rtx vec
, int diff
)
9696 vec
= gen_rtx_EXPR_LIST (VOIDmode
, lab
, vec
);
9698 sparc_addr_diff_list
9699 = gen_rtx_EXPR_LIST (VOIDmode
, vec
, sparc_addr_diff_list
);
9701 sparc_addr_list
= gen_rtx_EXPR_LIST (VOIDmode
, vec
, sparc_addr_list
);
9705 sparc_output_addr_vec (rtx vec
)
9707 rtx lab
= XEXP (vec
, 0), body
= XEXP (vec
, 1);
9708 int idx
, vlen
= XVECLEN (body
, 0);
9710 #ifdef ASM_OUTPUT_ADDR_VEC_START
9711 ASM_OUTPUT_ADDR_VEC_START (asm_out_file
);
9714 #ifdef ASM_OUTPUT_CASE_LABEL
9715 ASM_OUTPUT_CASE_LABEL (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
),
9718 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
));
9721 for (idx
= 0; idx
< vlen
; idx
++)
9723 ASM_OUTPUT_ADDR_VEC_ELT
9724 (asm_out_file
, CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 0, idx
), 0)));
9727 #ifdef ASM_OUTPUT_ADDR_VEC_END
9728 ASM_OUTPUT_ADDR_VEC_END (asm_out_file
);
9733 sparc_output_addr_diff_vec (rtx vec
)
9735 rtx lab
= XEXP (vec
, 0), body
= XEXP (vec
, 1);
9736 rtx base
= XEXP (XEXP (body
, 0), 0);
9737 int idx
, vlen
= XVECLEN (body
, 1);
9739 #ifdef ASM_OUTPUT_ADDR_VEC_START
9740 ASM_OUTPUT_ADDR_VEC_START (asm_out_file
);
9743 #ifdef ASM_OUTPUT_CASE_LABEL
9744 ASM_OUTPUT_CASE_LABEL (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
),
9747 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L", CODE_LABEL_NUMBER (lab
));
9750 for (idx
= 0; idx
< vlen
; idx
++)
9752 ASM_OUTPUT_ADDR_DIFF_ELT
9755 CODE_LABEL_NUMBER (XEXP (XVECEXP (body
, 1, idx
), 0)),
9756 CODE_LABEL_NUMBER (base
));
9759 #ifdef ASM_OUTPUT_ADDR_VEC_END
9760 ASM_OUTPUT_ADDR_VEC_END (asm_out_file
);
9765 sparc_output_deferred_case_vectors (void)
9770 if (sparc_addr_list
== NULL_RTX
9771 && sparc_addr_diff_list
== NULL_RTX
)
9774 /* Align to cache line in the function's code section. */
9775 switch_to_section (current_function_section ());
9777 align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
9779 ASM_OUTPUT_ALIGN (asm_out_file
, align
);
9781 for (t
= sparc_addr_list
; t
; t
= XEXP (t
, 1))
9782 sparc_output_addr_vec (XEXP (t
, 0));
9783 for (t
= sparc_addr_diff_list
; t
; t
= XEXP (t
, 1))
9784 sparc_output_addr_diff_vec (XEXP (t
, 0));
9786 sparc_addr_list
= sparc_addr_diff_list
= NULL_RTX
;
/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
   unknown.  Return 1 if the high bits are zero, -1 if the register is
   sign extended.  */

int
sparc_check_64 (rtx x, rtx insn)
{
  /* If a register is set only once it is safe to ignore insns this
     code does not know how to handle.  The loop will either recognize
     the single set and return the correct value or fail to recognize
     it and return 0.  */
  rtx y = x;

  gcc_assert (GET_CODE (x) == REG);
9804 if (GET_MODE (x
) == DImode
)
9805 y
= gen_rtx_REG (SImode
, REGNO (x
) + WORDS_BIG_ENDIAN
);
9807 if (flag_expensive_optimizations
9808 && df
&& DF_REG_DEF_COUNT (REGNO (y
)) == 1)
9814 insn
= get_last_insn_anywhere ();
9819 while ((insn
= PREV_INSN (insn
)))
9821 switch (GET_CODE (insn
))
9834 rtx pat
= PATTERN (insn
);
9835 if (GET_CODE (pat
) != SET
)
9837 if (rtx_equal_p (x
, SET_DEST (pat
)))
9838 return set_extends (insn
);
9839 if (y
&& rtx_equal_p (y
, SET_DEST (pat
)))
9840 return set_extends (insn
);
9841 if (reg_overlap_mentioned_p (SET_DEST (pat
), y
))
/* Output a wide shift instruction in V8+ mode.  INSN is the instruction,
   OPERANDS are its operands and OPCODE is the mnemonic to be used.  */

const char *
output_v8plus_shift (rtx insn, rtx *operands, const char *opcode)
{
  static char asm_code[60];

  /* The scratch register is only required when the destination
     register is not a 64-bit global or out register.  */
  if (which_alternative != 2)
    operands[3] = operands[0];

  /* We can only shift by constants <= 63. */
  if (GET_CODE (operands[2]) == CONST_INT)
    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);

  if (GET_CODE (operands[1]) == CONST_INT)
    output_asm_insn ("mov\t%1, %3", operands);
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      if (sparc_check_64 (operands[1], insn) <= 0)
	output_asm_insn ("srl\t%L1, 0, %L1", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
    }

  strcpy (asm_code, opcode);

  if (which_alternative != 2)
    return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
  else
    return strcat (asm_code,
		   "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
}
9887 /* Output rtl to increment the profiler label LABELNO
9888 for profiling a function entry. */
9891 sparc_profile_hook (int labelno
)
9896 fun
= gen_rtx_SYMBOL_REF (Pmode
, MCOUNT_FUNCTION
);
9897 if (NO_PROFILE_COUNTERS
)
9899 emit_library_call (fun
, LCT_NORMAL
, VOIDmode
, 0);
9903 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
9904 lab
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
9905 emit_library_call (fun
, LCT_NORMAL
, VOIDmode
, 1, lab
, Pmode
);
9909 #ifdef TARGET_SOLARIS
9910 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
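/* For example (illustrative only; "my_sect" is a made-up name), a writable
   data section would be emitted by the routine below as

	.section	"my_sect",#alloc,#write,#progbits

   using the Sun as "#flag" syntax rather than the usual ELF
   ",\"aw\",@progbits" form.  */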
9913 sparc_solaris_elf_asm_named_section (const char *name
, unsigned int flags
,
9914 tree decl ATTRIBUTE_UNUSED
)
9916 if (HAVE_COMDAT_GROUP
&& flags
& SECTION_LINKONCE
)
9918 solaris_elf_asm_comdat_section (name
, flags
, decl
);
9922 fprintf (asm_out_file
, "\t.section\t\"%s\"", name
);
9924 if (!(flags
& SECTION_DEBUG
))
9925 fputs (",#alloc", asm_out_file
);
9926 if (flags
& SECTION_WRITE
)
9927 fputs (",#write", asm_out_file
);
9928 if (flags
& SECTION_TLS
)
9929 fputs (",#tls", asm_out_file
);
9930 if (flags
& SECTION_CODE
)
9931 fputs (",#execinstr", asm_out_file
);
9933 /* Sun as only supports #nobits/#progbits since Solaris 10. */
9934 if (HAVE_AS_SPARC_NOBITS
)
9936 if (flags
& SECTION_BSS
)
9937 fputs (",#nobits", asm_out_file
);
9939 fputs (",#progbits", asm_out_file
);
9942 fputc ('\n', asm_out_file
);
9944 #endif /* TARGET_SOLARIS */
/* We do not allow indirect calls to be optimized into sibling calls.

   We cannot use sibling calls when delayed branches are disabled
   because they will likely require the call delay slot to be filled.

   Also, on SPARC 32-bit we cannot emit a sibling call when the
   current function returns a structure.  This is because the "unimp
   after call" convention would cause the callee to return to the
   wrong place.  The generic code already disallows cases where the
   function being called returns a structure.

   It may seem strange how this last case could occur.  Usually there
   is code after the call which jumps to epilogue code which dumps the
   return value into the struct return area.  That ought to invalidate
   the sibling call right?  Well, in the C++ case we can end up passing
   the pointer to the struct return area to a constructor (which returns
   void) and then nothing else happens.  Such a sibling call would look
   valid without the added check here.

   VxWorks PIC PLT entries require the global pointer to be initialized
   on entry.  We therefore can't emit sibling calls to them.  */
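/* A minimal sketch of the C++ constructor case mentioned above
   (illustrative only; the type S and function f are hypothetical):

     struct S { S (); int x[4]; };

     S f (void)
     {
       return S ();
     }

   Here f's hidden struct-return pointer is handed straight to S::S(),
   which returns void, and nothing happens afterwards, so the constructor
   call sits in tail position and would look like a valid sibling call
   without the returns_struct check below.  */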
static bool
sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
	  && flag_delayed_branch
	  && (TARGET_ARCH64 || ! cfun->returns_struct)
	  && !(TARGET_VXWORKS_RTP
	       && flag_pic
	       && !targetm.binds_local_p (decl)));
}
9978 /* libfunc renaming. */
9981 sparc_init_libfuncs (void)
9985 /* Use the subroutines that Sun's library provides for integer
9986 multiply and divide. The `*' prevents an underscore from
9987 being prepended by the compiler. .umul is a little faster
9989 set_optab_libfunc (smul_optab
, SImode
, "*.umul");
9990 set_optab_libfunc (sdiv_optab
, SImode
, "*.div");
9991 set_optab_libfunc (udiv_optab
, SImode
, "*.udiv");
9992 set_optab_libfunc (smod_optab
, SImode
, "*.rem");
9993 set_optab_libfunc (umod_optab
, SImode
, "*.urem");
9995 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
9996 set_optab_libfunc (add_optab
, TFmode
, "_Q_add");
9997 set_optab_libfunc (sub_optab
, TFmode
, "_Q_sub");
9998 set_optab_libfunc (neg_optab
, TFmode
, "_Q_neg");
9999 set_optab_libfunc (smul_optab
, TFmode
, "_Q_mul");
10000 set_optab_libfunc (sdiv_optab
, TFmode
, "_Q_div");
10002 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
10003 is because with soft-float, the SFmode and DFmode sqrt
10004 instructions will be absent, and the compiler will notice and
10005 try to use the TFmode sqrt instruction for calls to the
10006 builtin function sqrt, but this fails. */
10008 set_optab_libfunc (sqrt_optab
, TFmode
, "_Q_sqrt");
10010 set_optab_libfunc (eq_optab
, TFmode
, "_Q_feq");
10011 set_optab_libfunc (ne_optab
, TFmode
, "_Q_fne");
10012 set_optab_libfunc (gt_optab
, TFmode
, "_Q_fgt");
10013 set_optab_libfunc (ge_optab
, TFmode
, "_Q_fge");
10014 set_optab_libfunc (lt_optab
, TFmode
, "_Q_flt");
10015 set_optab_libfunc (le_optab
, TFmode
, "_Q_fle");
10017 set_conv_libfunc (sext_optab
, TFmode
, SFmode
, "_Q_stoq");
10018 set_conv_libfunc (sext_optab
, TFmode
, DFmode
, "_Q_dtoq");
10019 set_conv_libfunc (trunc_optab
, SFmode
, TFmode
, "_Q_qtos");
10020 set_conv_libfunc (trunc_optab
, DFmode
, TFmode
, "_Q_qtod");
10022 set_conv_libfunc (sfix_optab
, SImode
, TFmode
, "_Q_qtoi");
10023 set_conv_libfunc (ufix_optab
, SImode
, TFmode
, "_Q_qtou");
10024 set_conv_libfunc (sfloat_optab
, TFmode
, SImode
, "_Q_itoq");
10025 set_conv_libfunc (ufloat_optab
, TFmode
, SImode
, "_Q_utoq");
10027 if (DITF_CONVERSION_LIBFUNCS
)
10029 set_conv_libfunc (sfix_optab
, DImode
, TFmode
, "_Q_qtoll");
10030 set_conv_libfunc (ufix_optab
, DImode
, TFmode
, "_Q_qtoull");
10031 set_conv_libfunc (sfloat_optab
, TFmode
, DImode
, "_Q_lltoq");
10032 set_conv_libfunc (ufloat_optab
, TFmode
, DImode
, "_Q_ulltoq");
10035 if (SUN_CONVERSION_LIBFUNCS
)
10037 set_conv_libfunc (sfix_optab
, DImode
, SFmode
, "__ftoll");
10038 set_conv_libfunc (ufix_optab
, DImode
, SFmode
, "__ftoull");
10039 set_conv_libfunc (sfix_optab
, DImode
, DFmode
, "__dtoll");
10040 set_conv_libfunc (ufix_optab
, DImode
, DFmode
, "__dtoull");
10045 /* In the SPARC 64bit ABI, SImode multiply and divide functions
10046 do not exist in the library. Make sure the compiler does not
10047 emit calls to them by accident. (It should always use the
10048 hardware instructions.) */
10049 set_optab_libfunc (smul_optab
, SImode
, 0);
10050 set_optab_libfunc (sdiv_optab
, SImode
, 0);
10051 set_optab_libfunc (udiv_optab
, SImode
, 0);
10052 set_optab_libfunc (smod_optab
, SImode
, 0);
10053 set_optab_libfunc (umod_optab
, SImode
, 0);
10055 if (SUN_INTEGER_MULTIPLY_64
)
10057 set_optab_libfunc (smul_optab
, DImode
, "__mul64");
10058 set_optab_libfunc (sdiv_optab
, DImode
, "__div64");
10059 set_optab_libfunc (udiv_optab
, DImode
, "__udiv64");
10060 set_optab_libfunc (smod_optab
, DImode
, "__rem64");
10061 set_optab_libfunc (umod_optab
, DImode
, "__urem64");
10064 if (SUN_CONVERSION_LIBFUNCS
)
10066 set_conv_libfunc (sfix_optab
, DImode
, SFmode
, "__ftol");
10067 set_conv_libfunc (ufix_optab
, DImode
, SFmode
, "__ftoul");
10068 set_conv_libfunc (sfix_optab
, DImode
, DFmode
, "__dtol");
10069 set_conv_libfunc (ufix_optab
, DImode
, DFmode
, "__dtoul");
10074 /* SPARC builtins. */
10075 enum sparc_builtins
10077 /* FPU builtins. */
10078 SPARC_BUILTIN_LDFSR
,
10079 SPARC_BUILTIN_STFSR
,
10081 /* VIS 1.0 builtins. */
10082 SPARC_BUILTIN_FPACK16
,
10083 SPARC_BUILTIN_FPACK32
,
10084 SPARC_BUILTIN_FPACKFIX
,
10085 SPARC_BUILTIN_FEXPAND
,
10086 SPARC_BUILTIN_FPMERGE
,
10087 SPARC_BUILTIN_FMUL8X16
,
10088 SPARC_BUILTIN_FMUL8X16AU
,
10089 SPARC_BUILTIN_FMUL8X16AL
,
10090 SPARC_BUILTIN_FMUL8SUX16
,
10091 SPARC_BUILTIN_FMUL8ULX16
,
10092 SPARC_BUILTIN_FMULD8SUX16
,
10093 SPARC_BUILTIN_FMULD8ULX16
,
10094 SPARC_BUILTIN_FALIGNDATAV4HI
,
10095 SPARC_BUILTIN_FALIGNDATAV8QI
,
10096 SPARC_BUILTIN_FALIGNDATAV2SI
,
10097 SPARC_BUILTIN_FALIGNDATADI
,
10098 SPARC_BUILTIN_WRGSR
,
10099 SPARC_BUILTIN_RDGSR
,
10100 SPARC_BUILTIN_ALIGNADDR
,
10101 SPARC_BUILTIN_ALIGNADDRL
,
10102 SPARC_BUILTIN_PDIST
,
10103 SPARC_BUILTIN_EDGE8
,
10104 SPARC_BUILTIN_EDGE8L
,
10105 SPARC_BUILTIN_EDGE16
,
10106 SPARC_BUILTIN_EDGE16L
,
10107 SPARC_BUILTIN_EDGE32
,
10108 SPARC_BUILTIN_EDGE32L
,
10109 SPARC_BUILTIN_FCMPLE16
,
10110 SPARC_BUILTIN_FCMPLE32
,
10111 SPARC_BUILTIN_FCMPNE16
,
10112 SPARC_BUILTIN_FCMPNE32
,
10113 SPARC_BUILTIN_FCMPGT16
,
10114 SPARC_BUILTIN_FCMPGT32
,
10115 SPARC_BUILTIN_FCMPEQ16
,
10116 SPARC_BUILTIN_FCMPEQ32
,
10117 SPARC_BUILTIN_FPADD16
,
10118 SPARC_BUILTIN_FPADD16S
,
10119 SPARC_BUILTIN_FPADD32
,
10120 SPARC_BUILTIN_FPADD32S
,
10121 SPARC_BUILTIN_FPSUB16
,
10122 SPARC_BUILTIN_FPSUB16S
,
10123 SPARC_BUILTIN_FPSUB32
,
10124 SPARC_BUILTIN_FPSUB32S
,
10125 SPARC_BUILTIN_ARRAY8
,
10126 SPARC_BUILTIN_ARRAY16
,
10127 SPARC_BUILTIN_ARRAY32
,
10129 /* VIS 2.0 builtins. */
10130 SPARC_BUILTIN_EDGE8N
,
10131 SPARC_BUILTIN_EDGE8LN
,
10132 SPARC_BUILTIN_EDGE16N
,
10133 SPARC_BUILTIN_EDGE16LN
,
10134 SPARC_BUILTIN_EDGE32N
,
10135 SPARC_BUILTIN_EDGE32LN
,
10136 SPARC_BUILTIN_BMASK
,
10137 SPARC_BUILTIN_BSHUFFLEV4HI
,
10138 SPARC_BUILTIN_BSHUFFLEV8QI
,
10139 SPARC_BUILTIN_BSHUFFLEV2SI
,
10140 SPARC_BUILTIN_BSHUFFLEDI
,
10142 /* VIS 3.0 builtins. */
10143 SPARC_BUILTIN_CMASK8
,
10144 SPARC_BUILTIN_CMASK16
,
10145 SPARC_BUILTIN_CMASK32
,
10146 SPARC_BUILTIN_FCHKSM16
,
10147 SPARC_BUILTIN_FSLL16
,
10148 SPARC_BUILTIN_FSLAS16
,
10149 SPARC_BUILTIN_FSRL16
,
10150 SPARC_BUILTIN_FSRA16
,
10151 SPARC_BUILTIN_FSLL32
,
10152 SPARC_BUILTIN_FSLAS32
,
10153 SPARC_BUILTIN_FSRL32
,
10154 SPARC_BUILTIN_FSRA32
,
10155 SPARC_BUILTIN_PDISTN
,
10156 SPARC_BUILTIN_FMEAN16
,
10157 SPARC_BUILTIN_FPADD64
,
10158 SPARC_BUILTIN_FPSUB64
,
10159 SPARC_BUILTIN_FPADDS16
,
10160 SPARC_BUILTIN_FPADDS16S
,
10161 SPARC_BUILTIN_FPSUBS16
,
10162 SPARC_BUILTIN_FPSUBS16S
,
10163 SPARC_BUILTIN_FPADDS32
,
10164 SPARC_BUILTIN_FPADDS32S
,
10165 SPARC_BUILTIN_FPSUBS32
,
10166 SPARC_BUILTIN_FPSUBS32S
,
10167 SPARC_BUILTIN_FUCMPLE8
,
10168 SPARC_BUILTIN_FUCMPNE8
,
10169 SPARC_BUILTIN_FUCMPGT8
,
10170 SPARC_BUILTIN_FUCMPEQ8
,
10171 SPARC_BUILTIN_FHADDS
,
10172 SPARC_BUILTIN_FHADDD
,
10173 SPARC_BUILTIN_FHSUBS
,
10174 SPARC_BUILTIN_FHSUBD
,
10175 SPARC_BUILTIN_FNHADDS
,
10176 SPARC_BUILTIN_FNHADDD
,
10177 SPARC_BUILTIN_UMULXHI
,
10178 SPARC_BUILTIN_XMULX
,
10179 SPARC_BUILTIN_XMULXHI
,
10184 static GTY (()) tree sparc_builtins
[(int) SPARC_BUILTIN_MAX
];
10185 static enum insn_code sparc_builtins_icode
[(int) SPARC_BUILTIN_MAX
];
10187 /* Add a SPARC builtin function with NAME, ICODE, CODE and TYPE. Return the
10188 function decl or NULL_TREE if the builtin was not added. */
10191 def_builtin (const char *name
, enum insn_code icode
, enum sparc_builtins code
,
10195 = add_builtin_function (name
, type
, code
, BUILT_IN_MD
, NULL
, NULL_TREE
);
10199 sparc_builtins
[code
] = t
;
10200 sparc_builtins_icode
[code
] = icode
;
10206 /* Likewise, but also marks the function as "const". */
10209 def_builtin_const (const char *name
, enum insn_code icode
,
10210 enum sparc_builtins code
, tree type
)
10212 tree t
= def_builtin (name
, icode
, code
, type
);
10215 TREE_READONLY (t
) = 1;
10220 /* Implement the TARGET_INIT_BUILTINS target hook.
10221 Create builtin functions for special SPARC instructions. */
10224 sparc_init_builtins (void)
10227 sparc_fpu_init_builtins ();
10230 sparc_vis_init_builtins ();
10233 /* Create builtin functions for FPU instructions. */
10236 sparc_fpu_init_builtins (void)
10239 = build_function_type_list (void_type_node
,
10240 build_pointer_type (unsigned_type_node
), 0);
10241 def_builtin ("__builtin_load_fsr", CODE_FOR_ldfsr
,
10242 SPARC_BUILTIN_LDFSR
, ftype
);
10243 def_builtin ("__builtin_store_fsr", CODE_FOR_stfsr
,
10244 SPARC_BUILTIN_STFSR
, ftype
);
10247 /* Create builtin functions for VIS instructions. */
10250 sparc_vis_init_builtins (void)
10252 tree v4qi
= build_vector_type (unsigned_intQI_type_node
, 4);
10253 tree v8qi
= build_vector_type (unsigned_intQI_type_node
, 8);
10254 tree v4hi
= build_vector_type (intHI_type_node
, 4);
10255 tree v2hi
= build_vector_type (intHI_type_node
, 2);
10256 tree v2si
= build_vector_type (intSI_type_node
, 2);
10257 tree v1si
= build_vector_type (intSI_type_node
, 1);
10259 tree v4qi_ftype_v4hi
= build_function_type_list (v4qi
, v4hi
, 0);
10260 tree v8qi_ftype_v2si_v8qi
= build_function_type_list (v8qi
, v2si
, v8qi
, 0);
10261 tree v2hi_ftype_v2si
= build_function_type_list (v2hi
, v2si
, 0);
10262 tree v4hi_ftype_v4qi
= build_function_type_list (v4hi
, v4qi
, 0);
10263 tree v8qi_ftype_v4qi_v4qi
= build_function_type_list (v8qi
, v4qi
, v4qi
, 0);
10264 tree v4hi_ftype_v4qi_v4hi
= build_function_type_list (v4hi
, v4qi
, v4hi
, 0);
10265 tree v4hi_ftype_v4qi_v2hi
= build_function_type_list (v4hi
, v4qi
, v2hi
, 0);
10266 tree v2si_ftype_v4qi_v2hi
= build_function_type_list (v2si
, v4qi
, v2hi
, 0);
10267 tree v4hi_ftype_v8qi_v4hi
= build_function_type_list (v4hi
, v8qi
, v4hi
, 0);
10268 tree v4hi_ftype_v4hi_v4hi
= build_function_type_list (v4hi
, v4hi
, v4hi
, 0);
10269 tree v2si_ftype_v2si_v2si
= build_function_type_list (v2si
, v2si
, v2si
, 0);
10270 tree v8qi_ftype_v8qi_v8qi
= build_function_type_list (v8qi
, v8qi
, v8qi
, 0);
10271 tree v2hi_ftype_v2hi_v2hi
= build_function_type_list (v2hi
, v2hi
, v2hi
, 0);
10272 tree v1si_ftype_v1si_v1si
= build_function_type_list (v1si
, v1si
, v1si
, 0);
10273 tree di_ftype_v8qi_v8qi_di
= build_function_type_list (intDI_type_node
,
10275 intDI_type_node
, 0);
10276 tree di_ftype_v8qi_v8qi
= build_function_type_list (intDI_type_node
,
10278 tree si_ftype_v8qi_v8qi
= build_function_type_list (intSI_type_node
,
10280 tree di_ftype_di_di
= build_function_type_list (intDI_type_node
,
10282 intDI_type_node
, 0);
10283 tree si_ftype_si_si
= build_function_type_list (intSI_type_node
,
10285 intSI_type_node
, 0);
10286 tree ptr_ftype_ptr_si
= build_function_type_list (ptr_type_node
,
10288 intSI_type_node
, 0);
10289 tree ptr_ftype_ptr_di
= build_function_type_list (ptr_type_node
,
10291 intDI_type_node
, 0);
10292 tree si_ftype_ptr_ptr
= build_function_type_list (intSI_type_node
,
10295 tree di_ftype_ptr_ptr
= build_function_type_list (intDI_type_node
,
10298 tree si_ftype_v4hi_v4hi
= build_function_type_list (intSI_type_node
,
10300 tree si_ftype_v2si_v2si
= build_function_type_list (intSI_type_node
,
10302 tree di_ftype_v4hi_v4hi
= build_function_type_list (intDI_type_node
,
10304 tree di_ftype_v2si_v2si
= build_function_type_list (intDI_type_node
,
10306 tree void_ftype_di
= build_function_type_list (void_type_node
,
10307 intDI_type_node
, 0);
10308 tree di_ftype_void
= build_function_type_list (intDI_type_node
,
10309 void_type_node
, 0);
10310 tree void_ftype_si
= build_function_type_list (void_type_node
,
10311 intSI_type_node
, 0);
10312 tree sf_ftype_sf_sf
= build_function_type_list (float_type_node
,
10314 float_type_node
, 0);
10315 tree df_ftype_df_df
= build_function_type_list (double_type_node
,
10317 double_type_node
, 0);
10319 /* Packing and expanding vectors. */
10320 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis
,
10321 SPARC_BUILTIN_FPACK16
, v4qi_ftype_v4hi
);
10322 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis
,
10323 SPARC_BUILTIN_FPACK32
, v8qi_ftype_v2si_v8qi
);
10324 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis
,
10325 SPARC_BUILTIN_FPACKFIX
, v2hi_ftype_v2si
);
10326 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis
,
10327 SPARC_BUILTIN_FEXPAND
, v4hi_ftype_v4qi
);
10328 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis
,
10329 SPARC_BUILTIN_FPMERGE
, v8qi_ftype_v4qi_v4qi
);
10331 /* Multiplications. */
10332 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis
,
10333 SPARC_BUILTIN_FMUL8X16
, v4hi_ftype_v4qi_v4hi
);
10334 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis
,
10335 SPARC_BUILTIN_FMUL8X16AU
, v4hi_ftype_v4qi_v2hi
);
10336 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis
,
10337 SPARC_BUILTIN_FMUL8X16AL
, v4hi_ftype_v4qi_v2hi
);
10338 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis
,
10339 SPARC_BUILTIN_FMUL8SUX16
, v4hi_ftype_v8qi_v4hi
);
10340 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis
,
10341 SPARC_BUILTIN_FMUL8ULX16
, v4hi_ftype_v8qi_v4hi
);
10342 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis
,
10343 SPARC_BUILTIN_FMULD8SUX16
, v2si_ftype_v4qi_v2hi
);
10344 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis
,
10345 SPARC_BUILTIN_FMULD8ULX16
, v2si_ftype_v4qi_v2hi
);
10347 /* Data aligning. */
10348 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis
,
10349 SPARC_BUILTIN_FALIGNDATAV4HI
, v4hi_ftype_v4hi_v4hi
);
10350 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis
,
10351 SPARC_BUILTIN_FALIGNDATAV8QI
, v8qi_ftype_v8qi_v8qi
);
10352 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis
,
10353 SPARC_BUILTIN_FALIGNDATAV2SI
, v2si_ftype_v2si_v2si
);
10354 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis
,
10355 SPARC_BUILTIN_FALIGNDATADI
, di_ftype_di_di
);
10357 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis
,
10358 SPARC_BUILTIN_WRGSR
, void_ftype_di
);
10359 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis
,
10360 SPARC_BUILTIN_RDGSR
, di_ftype_void
);
10364 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis
,
10365 SPARC_BUILTIN_ALIGNADDR
, ptr_ftype_ptr_di
);
10366 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis
,
10367 SPARC_BUILTIN_ALIGNADDRL
, ptr_ftype_ptr_di
);
10371 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis
,
10372 SPARC_BUILTIN_ALIGNADDR
, ptr_ftype_ptr_si
);
10373 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis
,
10374 SPARC_BUILTIN_ALIGNADDRL
, ptr_ftype_ptr_si
);
10377 /* Pixel distance. */
10378 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis
,
10379 SPARC_BUILTIN_PDIST
, di_ftype_v8qi_v8qi_di
);
10381 /* Edge handling. */
10384 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis
,
10385 SPARC_BUILTIN_EDGE8
, di_ftype_ptr_ptr
);
10386 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis
,
10387 SPARC_BUILTIN_EDGE8L
, di_ftype_ptr_ptr
);
10388 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis
,
10389 SPARC_BUILTIN_EDGE16
, di_ftype_ptr_ptr
);
10390 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis
,
10391 SPARC_BUILTIN_EDGE16L
, di_ftype_ptr_ptr
);
10392 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis
,
10393 SPARC_BUILTIN_EDGE32
, di_ftype_ptr_ptr
);
10394 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis
,
10395 SPARC_BUILTIN_EDGE32L
, di_ftype_ptr_ptr
);
10399 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis
,
10400 SPARC_BUILTIN_EDGE8
, si_ftype_ptr_ptr
);
10401 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis
,
10402 SPARC_BUILTIN_EDGE8L
, si_ftype_ptr_ptr
);
10403 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis
,
10404 SPARC_BUILTIN_EDGE16
, si_ftype_ptr_ptr
);
10405 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis
,
10406 SPARC_BUILTIN_EDGE16L
, si_ftype_ptr_ptr
);
10407 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis
,
10408 SPARC_BUILTIN_EDGE32
, si_ftype_ptr_ptr
);
10409 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis
,
10410 SPARC_BUILTIN_EDGE32L
, si_ftype_ptr_ptr
);
10413 /* Pixel compare. */
10416 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis
,
10417 SPARC_BUILTIN_FCMPLE16
, di_ftype_v4hi_v4hi
);
10418 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis
,
10419 SPARC_BUILTIN_FCMPLE32
, di_ftype_v2si_v2si
);
10420 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis
,
10421 SPARC_BUILTIN_FCMPNE16
, di_ftype_v4hi_v4hi
);
10422 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis
,
10423 SPARC_BUILTIN_FCMPNE32
, di_ftype_v2si_v2si
);
10424 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis
,
10425 SPARC_BUILTIN_FCMPGT16
, di_ftype_v4hi_v4hi
);
10426 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis
,
10427 SPARC_BUILTIN_FCMPGT32
, di_ftype_v2si_v2si
);
10428 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis
,
10429 SPARC_BUILTIN_FCMPEQ16
, di_ftype_v4hi_v4hi
);
10430 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis
,
10431 SPARC_BUILTIN_FCMPEQ32
, di_ftype_v2si_v2si
);
10435 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis
,
10436 SPARC_BUILTIN_FCMPLE16
, si_ftype_v4hi_v4hi
);
10437 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis
,
10438 SPARC_BUILTIN_FCMPLE32
, si_ftype_v2si_v2si
);
10439 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis
,
10440 SPARC_BUILTIN_FCMPNE16
, si_ftype_v4hi_v4hi
);
10441 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis
,
10442 SPARC_BUILTIN_FCMPNE32
, si_ftype_v2si_v2si
);
10443 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis
,
10444 SPARC_BUILTIN_FCMPGT16
, si_ftype_v4hi_v4hi
);
10445 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis
,
10446 SPARC_BUILTIN_FCMPGT32
, si_ftype_v2si_v2si
);
10447 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis
,
10448 SPARC_BUILTIN_FCMPEQ16
, si_ftype_v4hi_v4hi
);
10449 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis
,
10450 SPARC_BUILTIN_FCMPEQ32
, si_ftype_v2si_v2si
);
10453 /* Addition and subtraction. */
10454 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3
,
10455 SPARC_BUILTIN_FPADD16
, v4hi_ftype_v4hi_v4hi
);
10456 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3
,
10457 SPARC_BUILTIN_FPADD16S
, v2hi_ftype_v2hi_v2hi
);
10458 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3
,
10459 SPARC_BUILTIN_FPADD32
, v2si_ftype_v2si_v2si
);
10460 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3
,
10461 SPARC_BUILTIN_FPADD32S
, v1si_ftype_v1si_v1si
);
10462 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3
,
10463 SPARC_BUILTIN_FPSUB16
, v4hi_ftype_v4hi_v4hi
);
10464 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3
,
10465 SPARC_BUILTIN_FPSUB16S
, v2hi_ftype_v2hi_v2hi
);
10466 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3
,
10467 SPARC_BUILTIN_FPSUB32
, v2si_ftype_v2si_v2si
);
10468 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3
,
10469 SPARC_BUILTIN_FPSUB32S
, v1si_ftype_v1si_v1si
);
10471 /* Three-dimensional array addressing. */
10474 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis
,
10475 SPARC_BUILTIN_ARRAY8
, di_ftype_di_di
);
10476 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis
,
10477 SPARC_BUILTIN_ARRAY16
, di_ftype_di_di
);
10478 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis
,
10479 SPARC_BUILTIN_ARRAY32
, di_ftype_di_di
);
10483 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis
,
10484 SPARC_BUILTIN_ARRAY8
, si_ftype_si_si
);
10485 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis
,
10486 SPARC_BUILTIN_ARRAY16
, si_ftype_si_si
);
10487 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis
,
10488 SPARC_BUILTIN_ARRAY32
, si_ftype_si_si
);
10493 /* Edge handling. */
10496 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis
,
10497 SPARC_BUILTIN_EDGE8N
, di_ftype_ptr_ptr
);
10498 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis
,
10499 SPARC_BUILTIN_EDGE8LN
, di_ftype_ptr_ptr
);
10500 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis
,
10501 SPARC_BUILTIN_EDGE16N
, di_ftype_ptr_ptr
);
10502 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis
,
10503 SPARC_BUILTIN_EDGE16LN
, di_ftype_ptr_ptr
);
10504 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis
,
10505 SPARC_BUILTIN_EDGE32N
, di_ftype_ptr_ptr
);
10506 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis
,
10507 SPARC_BUILTIN_EDGE32LN
, di_ftype_ptr_ptr
);
10511 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis
,
10512 SPARC_BUILTIN_EDGE8N
, si_ftype_ptr_ptr
);
10513 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis
,
10514 SPARC_BUILTIN_EDGE8LN
, si_ftype_ptr_ptr
);
10515 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis
,
10516 SPARC_BUILTIN_EDGE16N
, si_ftype_ptr_ptr
);
10517 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis
,
10518 SPARC_BUILTIN_EDGE16LN
, si_ftype_ptr_ptr
);
10519 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis
,
10520 SPARC_BUILTIN_EDGE32N
, si_ftype_ptr_ptr
);
10521 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis
,
10522 SPARC_BUILTIN_EDGE32LN
, si_ftype_ptr_ptr
);
10525 /* Byte mask and shuffle. */
10527 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis
,
10528 SPARC_BUILTIN_BMASK
, di_ftype_di_di
);
10530 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis
,
10531 SPARC_BUILTIN_BMASK
, si_ftype_si_si
);
10532 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis
,
10533 SPARC_BUILTIN_BSHUFFLEV4HI
, v4hi_ftype_v4hi_v4hi
);
10534 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis
,
10535 SPARC_BUILTIN_BSHUFFLEV8QI
, v8qi_ftype_v8qi_v8qi
);
10536 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis
,
10537 SPARC_BUILTIN_BSHUFFLEV2SI
, v2si_ftype_v2si_v2si
);
10538 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis
,
10539 SPARC_BUILTIN_BSHUFFLEDI
, di_ftype_di_di
);
10546 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis
,
10547 SPARC_BUILTIN_CMASK8
, void_ftype_di
);
10548 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis
,
10549 SPARC_BUILTIN_CMASK16
, void_ftype_di
);
10550 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis
,
10551 SPARC_BUILTIN_CMASK32
, void_ftype_di
);
10555 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis
,
10556 SPARC_BUILTIN_CMASK8
, void_ftype_si
);
10557 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis
,
10558 SPARC_BUILTIN_CMASK16
, void_ftype_si
);
10559 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis
,
10560 SPARC_BUILTIN_CMASK32
, void_ftype_si
);
10563 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis
,
10564 SPARC_BUILTIN_FCHKSM16
, v4hi_ftype_v4hi_v4hi
);
10566 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3
,
10567 SPARC_BUILTIN_FSLL16
, v4hi_ftype_v4hi_v4hi
);
10568 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3
,
10569 SPARC_BUILTIN_FSLAS16
, v4hi_ftype_v4hi_v4hi
);
10570 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3
,
10571 SPARC_BUILTIN_FSRL16
, v4hi_ftype_v4hi_v4hi
);
10572 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3
,
10573 SPARC_BUILTIN_FSRA16
, v4hi_ftype_v4hi_v4hi
);
10574 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3
,
10575 SPARC_BUILTIN_FSLL32
, v2si_ftype_v2si_v2si
);
10576 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3
,
10577 SPARC_BUILTIN_FSLAS32
, v2si_ftype_v2si_v2si
);
10578 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3
,
10579 SPARC_BUILTIN_FSRL32
, v2si_ftype_v2si_v2si
);
10580 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3
,
10581 SPARC_BUILTIN_FSRA32
, v2si_ftype_v2si_v2si
);
10584 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis
,
10585 SPARC_BUILTIN_PDISTN
, di_ftype_v8qi_v8qi
);
10587 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis
,
10588 SPARC_BUILTIN_PDISTN
, si_ftype_v8qi_v8qi
);
10590 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis
,
10591 SPARC_BUILTIN_FMEAN16
, v4hi_ftype_v4hi_v4hi
);
10592 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis
,
10593 SPARC_BUILTIN_FPADD64
, di_ftype_di_di
);
10594 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis
,
10595 SPARC_BUILTIN_FPSUB64
, di_ftype_di_di
);
10597 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3
,
10598 SPARC_BUILTIN_FPADDS16
, v4hi_ftype_v4hi_v4hi
);
10599 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3
,
10600 SPARC_BUILTIN_FPADDS16S
, v2hi_ftype_v2hi_v2hi
);
10601 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3
,
10602 SPARC_BUILTIN_FPSUBS16
, v4hi_ftype_v4hi_v4hi
);
10603 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3
,
10604 SPARC_BUILTIN_FPSUBS16S
, v2hi_ftype_v2hi_v2hi
);
10605 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3
,
10606 SPARC_BUILTIN_FPADDS32
, v2si_ftype_v2si_v2si
);
10607 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3
,
10608 SPARC_BUILTIN_FPADDS32S
, v1si_ftype_v1si_v1si
);
10609 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3
,
10610 SPARC_BUILTIN_FPSUBS32
, v2si_ftype_v2si_v2si
);
10611 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3
,
10612 SPARC_BUILTIN_FPSUBS32S
, v1si_ftype_v1si_v1si
);
10616 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis
,
10617 SPARC_BUILTIN_FUCMPLE8
, di_ftype_v8qi_v8qi
);
10618 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis
,
10619 SPARC_BUILTIN_FUCMPNE8
, di_ftype_v8qi_v8qi
);
10620 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis
,
10621 SPARC_BUILTIN_FUCMPGT8
, di_ftype_v8qi_v8qi
);
10622 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis
,
10623 SPARC_BUILTIN_FUCMPEQ8
, di_ftype_v8qi_v8qi
);
10627 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis
,
10628 SPARC_BUILTIN_FUCMPLE8
, si_ftype_v8qi_v8qi
);
10629 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis
,
10630 SPARC_BUILTIN_FUCMPNE8
, si_ftype_v8qi_v8qi
);
10631 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis
,
10632 SPARC_BUILTIN_FUCMPGT8
, si_ftype_v8qi_v8qi
);
10633 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis
,
10634 SPARC_BUILTIN_FUCMPEQ8
, si_ftype_v8qi_v8qi
);
10637 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis
,
10638 SPARC_BUILTIN_FHADDS
, sf_ftype_sf_sf
);
10639 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis
,
10640 SPARC_BUILTIN_FHADDD
, df_ftype_df_df
);
10641 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis
,
10642 SPARC_BUILTIN_FHSUBS
, sf_ftype_sf_sf
);
10643 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis
,
10644 SPARC_BUILTIN_FHSUBD
, df_ftype_df_df
);
10645 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis
,
10646 SPARC_BUILTIN_FNHADDS
, sf_ftype_sf_sf
);
10647 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis
,
10648 SPARC_BUILTIN_FNHADDD
, df_ftype_df_df
);
10650 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis
,
10651 SPARC_BUILTIN_UMULXHI
, di_ftype_di_di
);
10652 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis
,
10653 SPARC_BUILTIN_XMULX
, di_ftype_di_di
);
10654 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis
,
10655 SPARC_BUILTIN_XMULXHI
, di_ftype_di_di
);
10659 /* Implement TARGET_BUILTIN_DECL hook. */
10662 sparc_builtin_decl (unsigned code
, bool initialize_p ATTRIBUTE_UNUSED
)
10664 if (code
>= SPARC_BUILTIN_MAX
)
10665 return error_mark_node
;
10667 return sparc_builtins
[code
];
/* Implement TARGET_EXPAND_BUILTIN hook.  */

static rtx
sparc_expand_builtin (tree exp, rtx target,
		      rtx subtarget ATTRIBUTE_UNUSED,
		      enum machine_mode tmode ATTRIBUTE_UNUSED,
		      int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode = sparc_builtins_icode[code];
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  call_expr_arg_iterator iter;
  int arg_count = 0;
  rtx pat, op[4];
  tree arg;

  if (nonvoid)
    {
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	op[0] = gen_reg_rtx (tmode);
      else
	op[0] = target;
    }

  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;
      int idx;

      if (arg == error_mark_node)
	return NULL_RTX;

      arg_count++;
      idx = arg_count - !nonvoid;
      insn_op = &insn_data[icode].operand[idx];
      op[arg_count] = expand_normal (arg);

      /* The LDFSR/STFSR builtins take the address of an FSR image.  */
      if (code == SPARC_BUILTIN_LDFSR || code == SPARC_BUILTIN_STFSR)
	{
	  if (!address_operand (op[arg_count], SImode))
	    {
	      op[arg_count] = convert_memory_address (Pmode, op[arg_count]);
	      op[arg_count] = copy_addr_to_reg (op[arg_count]);
	    }
	  op[arg_count] = gen_rtx_MEM (SImode, op[arg_count]);
	}

      else if (insn_op->mode == V1DImode
	       && GET_MODE (op[arg_count]) == DImode)
	op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);

      else if (insn_op->mode == V1SImode
	       && GET_MODE (op[arg_count]) == SImode)
	op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);

      if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
							insn_op->mode))
	op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
    }

  switch (arg_count)
    {
    case 0:
      pat = GEN_FCN (icode) (op[0]);
      break;
    case 1:
      if (nonvoid)
	pat = GEN_FCN (icode) (op[0], op[1]);
      else
	pat = GEN_FCN (icode) (op[1]);
      break;
    case 2:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return (nonvoid ? op[0] : const0_rtx);
}
/* Return the upper 16 bits of the 8x16 multiplication.  */

static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}
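/* Illustrative note (not from the original sources): the +128 term rounds the
   8.8 fixed-point product to nearest before the divide by 256 drops the low
   byte.  For example, with e8 = 1 and e16 = 384 the rounded result is
   (384 + 128) / 256 = 2, whereas plain truncation would give 384 / 256 = 1.  */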
/* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
   the result into the array N_ELTS, whose elements are of INNER_TYPE.  */

static void
sparc_handle_vis_mul8x16 (tree *n_elts, enum sparc_builtins fncode,
			  tree inner_type, tree cst0, tree cst1)
{
  unsigned i, num = VECTOR_CST_NELTS (cst0);
  int scale;

  switch (fncode)
    {
    case SPARC_BUILTIN_FMUL8X16:
      for (i = 0; i < num; ++i)
	{
	  int val
	    = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
				 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
	  n_elts[i] = build_int_cst (inner_type, val);
	}
      break;

    case SPARC_BUILTIN_FMUL8X16AU:
      scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));

      for (i = 0; i < num; ++i)
	{
	  int val
	    = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
				 scale);
	  n_elts[i] = build_int_cst (inner_type, val);
	}
      break;

    case SPARC_BUILTIN_FMUL8X16AL:
      scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));

      for (i = 0; i < num; ++i)
	{
	  int val
	    = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
				 scale);
	  n_elts[i] = build_int_cst (inner_type, val);
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Implement TARGET_FOLD_BUILTIN hook.

   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
		    tree *args, bool ignore)
{
  enum sparc_builtins code = (enum sparc_builtins) DECL_FUNCTION_CODE (fndecl);
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  tree arg0, arg1, arg2;

  if (ignore)
    switch (code)
      {
      case SPARC_BUILTIN_LDFSR:
      case SPARC_BUILTIN_STFSR:
      case SPARC_BUILTIN_ALIGNADDR:
      case SPARC_BUILTIN_WRGSR:
      case SPARC_BUILTIN_BMASK:
      case SPARC_BUILTIN_CMASK8:
      case SPARC_BUILTIN_CMASK16:
      case SPARC_BUILTIN_CMASK32:
	break;

      default:
	return build_zero_cst (rtype);
      }

  switch (code)
    {
    case SPARC_BUILTIN_FEXPAND:
      arg0 = args[0];
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
	{
	  tree inner_type = TREE_TYPE (rtype);
	  tree *n_elts;
	  unsigned i;

	  n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
	  for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
	    n_elts[i] = build_int_cst (inner_type,
				       TREE_INT_CST_LOW
					 (VECTOR_CST_ELT (arg0, i)) << 4);
	  return build_vector (rtype, n_elts);
	}
      break;

    case SPARC_BUILTIN_FMUL8X16:
    case SPARC_BUILTIN_FMUL8X16AU:
    case SPARC_BUILTIN_FMUL8X16AL:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
	{
	  tree inner_type = TREE_TYPE (rtype);
	  tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
	  sparc_handle_vis_mul8x16 (n_elts, code, inner_type, arg0, arg1);
	  return build_vector (rtype, n_elts);
	}
      break;

    case SPARC_BUILTIN_FPMERGE:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
	{
	  tree *n_elts = XALLOCAVEC (tree, 2 * VECTOR_CST_NELTS (arg0));
	  unsigned i;
	  for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
	    {
	      n_elts[2*i] = VECTOR_CST_ELT (arg0, i);
	      n_elts[2*i+1] = VECTOR_CST_ELT (arg1, i);
	    }

	  return build_vector (rtype, n_elts);
	}
      break;

    case SPARC_BUILTIN_PDIST:
    case SPARC_BUILTIN_PDISTN:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      if (code == SPARC_BUILTIN_PDIST)
	{
	  arg2 = args[2];
	  STRIP_NOPS (arg2);
	}
      else
	arg2 = integer_zero_node;

      if (TREE_CODE (arg0) == VECTOR_CST
	  && TREE_CODE (arg1) == VECTOR_CST
	  && TREE_CODE (arg2) == INTEGER_CST)
	{
	  bool overflow = false;
	  double_int result = TREE_INT_CST (arg2);
	  double_int tmp;
	  unsigned i;

	  for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
	    {
	      double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i));
	      double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i));

	      bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;

	      tmp = e1.neg_with_overflow (&neg1_ovf);
	      tmp = e0.add_with_sign (tmp, false, &add1_ovf);
	      if (tmp.is_negative ())
		tmp = tmp.neg_with_overflow (&neg2_ovf);
	      else
		neg2_ovf = false;
	      result = result.add_with_sign (tmp, false, &add2_ovf);
	      overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
	    }

	  gcc_assert (!overflow);

	  return build_int_cst_wide (rtype, result.low, result.high);
	}

    default:
      break;
    }

  return NULL_TREE;
}
10963 /* ??? This duplicates information provided to the compiler by the
10964 ??? scheduler description. Some day, teach genautomata to output
10965 ??? the latencies and then CSE will just use that. */
10968 sparc_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
10969 int *total
, bool speed ATTRIBUTE_UNUSED
)
10971 enum machine_mode mode
= GET_MODE (x
);
10972 bool float_mode_p
= FLOAT_MODE_P (mode
);
10977 if (INTVAL (x
) < 0x1000 && INTVAL (x
) >= -0x1000)
10995 if (GET_MODE (x
) == VOIDmode
10996 && ((CONST_DOUBLE_HIGH (x
) == 0
10997 && CONST_DOUBLE_LOW (x
) < 0x1000)
10998 || (CONST_DOUBLE_HIGH (x
) == -1
10999 && CONST_DOUBLE_LOW (x
) < 0
11000 && CONST_DOUBLE_LOW (x
) >= -0x1000)))
11007 /* If outer-code was a sign or zero extension, a cost
11008 of COSTS_N_INSNS (1) was already added in. This is
11009 why we are subtracting it back out. */
11010 if (outer_code
== ZERO_EXTEND
)
11012 *total
= sparc_costs
->int_zload
- COSTS_N_INSNS (1);
11014 else if (outer_code
== SIGN_EXTEND
)
11016 *total
= sparc_costs
->int_sload
- COSTS_N_INSNS (1);
11018 else if (float_mode_p
)
11020 *total
= sparc_costs
->float_load
;
11024 *total
= sparc_costs
->int_load
;
11032 *total
= sparc_costs
->float_plusminus
;
11034 *total
= COSTS_N_INSNS (1);
11041 gcc_assert (float_mode_p
);
11042 *total
= sparc_costs
->float_mul
;
11045 if (GET_CODE (sub
) == NEG
)
11046 sub
= XEXP (sub
, 0);
11047 *total
+= rtx_cost (sub
, FMA
, 0, speed
);
11050 if (GET_CODE (sub
) == NEG
)
11051 sub
= XEXP (sub
, 0);
11052 *total
+= rtx_cost (sub
, FMA
, 2, speed
);
      if (float_mode_p)
	*total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
	*total = COSTS_N_INSNS (25);
      else
	{
	  int bit_cost = 0;

	  if (sparc_costs->int_mul_bit_factor)
	    {
	      int nbits;

	      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
		{
		  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
		  for (nbits = 0; value != 0; value &= value - 1)
		    nbits++;
		}
	      else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
		       && GET_MODE (XEXP (x, 1)) == VOIDmode)
		{
		  rtx x1 = XEXP (x, 1);
		  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
		  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

		  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
		    nbits++;
		  for (; value2 != 0; value2 &= value2 - 1)
		    nbits++;
		}
	      else
		nbits = 7;

	      if (nbits < 3)
		nbits = 3;
	      bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
	      bit_cost = COSTS_N_INSNS (bit_cost);
	    }

	  if (mode == DImode)
	    *total = sparc_costs->int_mulX + bit_cost;
	  else
	    *total = sparc_costs->int_mul + bit_cost;
	}
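/* Illustrative note (not from the original sources): with a constant
   multiplier that has five set bits and an assumed int_mul_bit_factor of 2,
   nbits = 5 above, so

     bit_cost = COSTS_N_INSNS ((5 - 3) / 2) = COSTS_N_INSNS (1)

   is added on top of int_mul or int_mulX; a constant with three or fewer set
   bits adds no extra cost.  */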
11107 *total
= COSTS_N_INSNS (1) + sparc_costs
->shift_penalty
;
11116 if (mode
== DFmode
)
11117 *total
= sparc_costs
->float_div_df
;
11119 *total
= sparc_costs
->float_div_sf
;
11123 if (mode
== DImode
)
11124 *total
= sparc_costs
->int_divX
;
11126 *total
= sparc_costs
->int_div
;
11131 if (! float_mode_p
)
11133 *total
= COSTS_N_INSNS (1);
11140 case UNSIGNED_FLOAT
:
11144 case FLOAT_TRUNCATE
:
11145 *total
= sparc_costs
->float_move
;
11149 if (mode
== DFmode
)
11150 *total
= sparc_costs
->float_sqrt_df
;
11152 *total
= sparc_costs
->float_sqrt_sf
;
11157 *total
= sparc_costs
->float_cmp
;
11159 *total
= COSTS_N_INSNS (1);
11164 *total
= sparc_costs
->float_cmove
;
11166 *total
= sparc_costs
->int_cmove
;
11170 /* Handle the NAND vector patterns. */
11171 if (sparc_vector_mode_supported_p (GET_MODE (x
))
11172 && GET_CODE (XEXP (x
, 0)) == NOT
11173 && GET_CODE (XEXP (x
, 1)) == NOT
)
11175 *total
= COSTS_N_INSNS (1);
/* Return true if CLASS is either GENERAL_REGS or I64_REGS.  */

static inline bool
general_or_i64_p (reg_class_t rclass)
{
  return (rclass == GENERAL_REGS || rclass == I64_REGS);
}
/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t from, reg_class_t to)
{
  bool need_memory = false;

  if (from == FPCC_REGS || to == FPCC_REGS)
    need_memory = true;
  else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
	   || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
    {
      if (TARGET_VIS3)
	{
	  int size = GET_MODE_SIZE (mode);
	  if (size == 8 || size == 4)
	    {
	      if (! TARGET_ARCH32 || size == 4)
		return 4;
	      else
		return 6;
	    }
	}
      need_memory = true;
    }

  if (need_memory)
    {
      if (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4)
	return 12;

      return 6;
    }

  return 2;
}
/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the GOT helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
					     SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
			    adjust_address (slot, word_mode, UNITS_PER_WORD),
			    reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
			    reg2,
			    adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}
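/* Illustrative note (not from the original sources): the generated code is
   roughly, in its 32-bit flavor,

     sub  %sp, SIZE, %sp
     st   REG,  [%sp + SPARC_STACK_BIAS + 16*UNITS_PER_WORD]
     st   REG2, [%sp + SPARC_STACK_BIAS + 16*UNITS_PER_WORD + UNITS_PER_WORD]
     ... SEQ ...
     ld   [%sp + SPARC_STACK_BIAS + 16*UNITS_PER_WORD + UNITS_PER_WORD], REG2
     ld   [%sp + SPARC_STACK_BIAS + 16*UNITS_PER_WORD], REG
     add  %sp, SIZE, %sp

   so the two registers live just above the 16-word register save area for
   the duration of SEQ.  */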
11269 /* Output the assembler code for a thunk function. THUNK_DECL is the
11270 declaration for the thunk function itself, FUNCTION is the decl for
11271 the target function. DELTA is an immediate constant offset to be
11272 added to THIS. If VCALL_OFFSET is nonzero, the word at address
11273 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
11276 sparc_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
11277 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
11280 rtx this_rtx
, insn
, funexp
;
11281 unsigned int int_arg_first
;
11283 reload_completed
= 1;
11284 epilogue_completed
= 1;
11286 emit_note (NOTE_INSN_PROLOGUE_END
);
11290 sparc_leaf_function_p
= 1;
11292 int_arg_first
= SPARC_OUTGOING_INT_ARG_FIRST
;
11294 else if (flag_delayed_branch
)
11296 /* We will emit a regular sibcall below, so we need to instruct
11297 output_sibcall that we are in a leaf function. */
11298 sparc_leaf_function_p
= crtl
->uses_only_leaf_regs
= 1;
11300 /* This will cause final.c to invoke leaf_renumber_regs so we
11301 must behave as if we were in a not-yet-leafified function. */
11302 int_arg_first
= SPARC_INCOMING_INT_ARG_FIRST
;
11306 /* We will emit the sibcall manually below, so we will need to
11307 manually spill non-leaf registers. */
11308 sparc_leaf_function_p
= crtl
->uses_only_leaf_regs
= 0;
11310 /* We really are in a leaf function. */
11311 int_arg_first
= SPARC_OUTGOING_INT_ARG_FIRST
;
11314 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
11315 returns a structure, the structure return pointer is there instead. */
11317 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
11318 this_rtx
= gen_rtx_REG (Pmode
, int_arg_first
+ 1);
11320 this_rtx
= gen_rtx_REG (Pmode
, int_arg_first
);
11322 /* Add DELTA. When possible use a plain add, otherwise load it into
11323 a register first. */
11326 rtx delta_rtx
= GEN_INT (delta
);
11328 if (! SPARC_SIMM13_P (delta
))
11330 rtx scratch
= gen_rtx_REG (Pmode
, 1);
11331 emit_move_insn (scratch
, delta_rtx
);
11332 delta_rtx
= scratch
;
11335 /* THIS_RTX += DELTA. */
11336 emit_insn (gen_add2_insn (this_rtx
, delta_rtx
));
11339 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
11342 rtx vcall_offset_rtx
= GEN_INT (vcall_offset
);
11343 rtx scratch
= gen_rtx_REG (Pmode
, 1);
11345 gcc_assert (vcall_offset
< 0);
11347 /* SCRATCH = *THIS_RTX. */
11348 emit_move_insn (scratch
, gen_rtx_MEM (Pmode
, this_rtx
));
11350 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
11351 may not have any available scratch register at this point. */
11352 if (SPARC_SIMM13_P (vcall_offset
))
11354 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
11355 else if (! fixed_regs
[5]
11356 /* The below sequence is made up of at least 2 insns,
11357 while the default method may need only one. */
11358 && vcall_offset
< -8192)
11360 rtx scratch2
= gen_rtx_REG (Pmode
, 5);
11361 emit_move_insn (scratch2
, vcall_offset_rtx
);
11362 vcall_offset_rtx
= scratch2
;
11366 rtx increment
= GEN_INT (-4096);
11368 /* VCALL_OFFSET is a negative number whose typical range can be
11369 estimated as -32768..0 in 32-bit mode. In almost all cases
11370 it is therefore cheaper to emit multiple add insns than
11371 spilling and loading the constant into a register (at least
11373 while (! SPARC_SIMM13_P (vcall_offset
))
11375 emit_insn (gen_add2_insn (scratch
, increment
));
11376 vcall_offset
+= 4096;
11378 vcall_offset_rtx
= GEN_INT (vcall_offset
); /* cannot be 0 */
11381 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
11382 emit_move_insn (scratch
, gen_rtx_MEM (Pmode
,
11383 gen_rtx_PLUS (Pmode
,
11385 vcall_offset_rtx
)));
11387 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
11388 emit_insn (gen_add2_insn (this_rtx
, scratch
));
11391 /* Generate a tail call to the target function. */
11392 if (! TREE_USED (function
))
11394 assemble_external (function
);
11395 TREE_USED (function
) = 1;
11397 funexp
= XEXP (DECL_RTL (function
), 0);
11399 if (flag_delayed_branch
)
11401 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
11402 insn
= emit_call_insn (gen_sibcall (funexp
));
11403 SIBLING_CALL_P (insn
) = 1;
11407 /* The hoops we have to jump through in order to generate a sibcall
11408 without using delay slots... */
11409 rtx spill_reg
, seq
, scratch
= gen_rtx_REG (Pmode
, 1);
11413 spill_reg
= gen_rtx_REG (word_mode
, 15); /* %o7 */
11415 load_got_register (); /* clobbers %o7 */
11416 scratch
= sparc_legitimize_pic_address (funexp
, scratch
);
11417 seq
= get_insns ();
11419 emit_and_preserve (seq
, spill_reg
, pic_offset_table_rtx
);
11421 else if (TARGET_ARCH32
)
11423 emit_insn (gen_rtx_SET (VOIDmode
,
11425 gen_rtx_HIGH (SImode
, funexp
)));
11426 emit_insn (gen_rtx_SET (VOIDmode
,
11428 gen_rtx_LO_SUM (SImode
, scratch
, funexp
)));
11430 else /* TARGET_ARCH64 */
11432 switch (sparc_cmodel
)
11436 /* The destination can serve as a temporary. */
11437 sparc_emit_set_symbolic_const64 (scratch
, funexp
, scratch
);
11442 /* The destination cannot serve as a temporary. */
11443 spill_reg
= gen_rtx_REG (DImode
, 15); /* %o7 */
11445 sparc_emit_set_symbolic_const64 (scratch
, funexp
, spill_reg
);
11446 seq
= get_insns ();
11448 emit_and_preserve (seq
, spill_reg
, 0);
11452 gcc_unreachable ();
11456 emit_jump_insn (gen_indirect_jump (scratch
));
11461 /* Run just enough of rest_of_compilation to get the insns emitted.
11462 There's not really enough bulk here to make other passes such as
11463 instruction scheduling worth while. Note that use_thunk calls
11464 assemble_start_function and assemble_end_function. */
11465 insn
= get_insns ();
11466 shorten_branches (insn
);
11467 final_start_function (insn
, file
, 1);
11468 final (insn
, file
, 1);
11469 final_end_function ();
11471 reload_completed
= 0;
11472 epilogue_completed
= 0;
/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT vcall_offset,
			   const_tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}
/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;

    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;

    default:
      gcc_unreachable ();
    }

  output_addr_const (file, x);
  fputs (")", file);
}
11552 /* Do whatever processing is required at the end of a file. */
11555 sparc_file_end (void)
11557 /* If we need to emit the special GOT helper function, do so now. */
11558 if (got_helper_rtx
)
11560 const char *name
= XSTR (got_helper_rtx
, 0);
11561 const char *reg_name
= reg_names
[GLOBAL_OFFSET_TABLE_REGNUM
];
11562 #ifdef DWARF2_UNWIND_INFO
11566 if (USE_HIDDEN_LINKONCE
)
11568 tree decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
,
11569 get_identifier (name
),
11570 build_function_type_list (void_type_node
,
11572 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
11573 NULL_TREE
, void_type_node
);
11574 TREE_PUBLIC (decl
) = 1;
11575 TREE_STATIC (decl
) = 1;
11576 make_decl_one_only (decl
, DECL_ASSEMBLER_NAME (decl
));
11577 DECL_VISIBILITY (decl
) = VISIBILITY_HIDDEN
;
11578 DECL_VISIBILITY_SPECIFIED (decl
) = 1;
11579 resolve_unique_section (decl
, 0, flag_function_sections
);
11580 allocate_struct_function (decl
, true);
11581 cfun
->is_thunk
= 1;
11582 current_function_decl
= decl
;
11583 init_varasm_status ();
11584 assemble_start_function (decl
, name
);
11588 const int align
= floor_log2 (FUNCTION_BOUNDARY
/ BITS_PER_UNIT
);
11589 switch_to_section (text_section
);
11591 ASM_OUTPUT_ALIGN (asm_out_file
, align
);
11592 ASM_OUTPUT_LABEL (asm_out_file
, name
);
11595 #ifdef DWARF2_UNWIND_INFO
11596 do_cfi
= dwarf2out_do_cfi_asm ();
11598 fprintf (asm_out_file
, "\t.cfi_startproc\n");
11600 if (flag_delayed_branch
)
11601 fprintf (asm_out_file
, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
11602 reg_name
, reg_name
);
11604 fprintf (asm_out_file
, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
11605 reg_name
, reg_name
);
11606 #ifdef DWARF2_UNWIND_INFO
11608 fprintf (asm_out_file
, "\t.cfi_endproc\n");
11612 if (NEED_INDICATE_EXEC_STACK
)
11613 file_end_indicate_exec_stack ();
11615 #ifdef TARGET_SOLARIS
11616 solaris_file_end ();
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Expand a membar instruction for various use cases.  Both the LOAD_STORE
   and BEFORE_AFTER arguments are of the form X_Y.  They are two-bit masks
   where bit 0 indicates that X is true, and bit 1 indicates Y is true.  */

void
sparc_emit_membar_for_model (enum memmodel model,
			     int load_store, int before_after)
{
  /* Bits for the MEMBAR mmask field.  */
  const int LoadLoad = 1;
  const int StoreLoad = 2;
  const int LoadStore = 4;
  const int StoreStore = 8;

  int mm = 0, implied = 0;

  switch (sparc_memory_model)
    {
    case SMM_SC:
      /* Sequential Consistency.  All memory transactions are immediately
	 visible in sequential execution order.  No barriers needed.  */
      implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
      break;

    case SMM_TSO:
      /* Total Store Ordering: all memory transactions with store semantics
	 are followed by an implied StoreStore.  */
      implied |= StoreStore;

      /* If we're not looking for a raw barrier (before+after), then atomic
	 operations get the benefit of being both load and store.  */
      if (load_store == 3 && before_after == 1)
	implied |= StoreLoad;
      break;

    case SMM_PSO:
      /* Partial Store Ordering: all memory transactions with load semantics
	 are followed by an implied LoadLoad | LoadStore.  */
      implied |= LoadLoad | LoadStore;

      /* If we're not looking for a raw barrier (before+after), then atomic
	 operations get the benefit of being both load and store.  */
      if (load_store == 3 && before_after == 2)
	implied |= StoreLoad | StoreStore;
      break;

    case SMM_RMO:
      /* Relaxed Memory Ordering: no implicit bits.  */
      break;

    default:
      gcc_unreachable ();
    }

  if (before_after & 1)
    {
      if (model == MEMMODEL_RELEASE
	  || model == MEMMODEL_ACQ_REL
	  || model == MEMMODEL_SEQ_CST)
	{
	  if (load_store & 1)
	    mm |= LoadLoad | StoreLoad;
	  if (load_store & 2)
	    mm |= LoadStore | StoreStore;
	}
    }
  if (before_after & 2)
    {
      if (model == MEMMODEL_ACQUIRE
	  || model == MEMMODEL_ACQ_REL
	  || model == MEMMODEL_SEQ_CST)
	{
	  if (load_store & 1)
	    mm |= LoadLoad | LoadStore;
	  if (load_store & 2)
	    mm |= StoreLoad | StoreStore;
	}
    }

  /* Remove the bits implied by the system memory model.  */
  mm &= ~implied;

  /* For raw barriers (before+after), always emit a barrier.
     This will become a compile-time barrier if needed.  */
  if (mm || before_after == 3)
    emit_insn (gen_membar (GEN_INT (mm)));
}
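/* Illustrative note (not from the original sources): a call with
   model == MEMMODEL_SEQ_CST, load_store == 2 and before_after == 2 first
   computes mm = StoreLoad | StoreStore; under the default TSO model the
   StoreStore bit is then removed as implied, so only a "membar #StoreLoad"
   is actually emitted.  */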
/* Expand code to perform an 8 or 16-bit compare and swap by doing a 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
				  rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
			  gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = copy_to_reg (memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_XOR (SImode, off,
				       GEN_INT (GET_MODE (mem) == QImode
						? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
			  gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
			  gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
			  gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
			  gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
			  gen_rtx_IOR (SImode, newv, val)));

  emit_move_insn (bool_result, const1_rtx);

  emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
			  gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
				       res)));

  emit_move_insn (bool_result, const0_rtx);

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
				  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
			  gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}
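/* Illustrative sketch (not part of the port): the same technique expressed in
   plain C with the generic __atomic builtins.  The helper name is hypothetical,
   <stdint.h> is assumed, and little-endian byte numbering is used for brevity
   (the RTL above instead XORs the offset to handle SPARC's big-endianness):

     static unsigned char
     cas_byte (unsigned char *p, unsigned char expected, unsigned char desired)
     {
       unsigned int *wp = (unsigned int *) ((uintptr_t) p & ~(uintptr_t) 3);
       unsigned int shift = ((uintptr_t) p & 3) * 8;
       unsigned int mask = 0xffu << shift;
       unsigned int word = __atomic_load_n (wp, __ATOMIC_RELAXED);
       for (;;)
	 {
	   unsigned int oldw = (word & ~mask) | ((unsigned int) expected << shift);
	   unsigned int neww = (word & ~mask) | ((unsigned int) desired << shift);
	   if (__atomic_compare_exchange_n (wp, &oldw, neww, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
	     return expected;                              // byte swapped in
	   if ((oldw & mask) != ((unsigned int) expected << shift))
	     return (unsigned char) ((oldw & mask) >> shift);  // real mismatch
	   word = oldw;                                    // neighbors changed: retry
	 }
     }
*/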
/* Expand code to perform a compare-and-swap.  */

void
sparc_expand_compare_and_swap (rtx operands[])
{
  rtx bval, retval, mem, oldval, newval;
  enum machine_mode mode;
  enum memmodel model;

  bval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  model = (enum memmodel) INTVAL (operands[6]);
  mode = GET_MODE (mem);

  sparc_emit_membar_for_model (model, 3, 1);

  if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  if (mode == QImode || mode == HImode)
    sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
  else
    {
      rtx (*gen) (rtx, rtx, rtx, rtx);
      rtx x;

      if (mode == SImode)
	gen = gen_atomic_compare_and_swapsi_1;
      else
	gen = gen_atomic_compare_and_swapdi_1;
      emit_insn (gen (retval, mem, oldval, newval));

      x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
      if (x != bval)
	convert_move (bval, x, 1);
    }

  sparc_emit_membar_for_model (model, 3, 2);
}
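/* Illustrative note (not from the original sources): this expander sits
   behind the generic compare-and-swap builtins, so user code such as

     int expected = 0;
     __atomic_compare_exchange_n (&lock, &expected, 1, false,
				  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

   typically becomes a leading membar, a cas instruction and a trailing
   membar on a V9 target, with the boolean result recovered by comparing the
   returned value against EXPECTED.  */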
11874 sparc_expand_vec_perm_bmask (enum machine_mode vmode
, rtx sel
)
11878 sel
= gen_lowpart (DImode
, sel
);
11882 /* inp = xxxxxxxAxxxxxxxB */
11883 t_1
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (16),
11884 NULL_RTX
, 1, OPTAB_DIRECT
);
11885 /* t_1 = ....xxxxxxxAxxx. */
11886 sel
= expand_simple_binop (SImode
, AND
, gen_lowpart (SImode
, sel
),
11887 GEN_INT (3), NULL_RTX
, 1, OPTAB_DIRECT
);
11888 t_1
= expand_simple_binop (SImode
, AND
, gen_lowpart (SImode
, t_1
),
11889 GEN_INT (0x30000), NULL_RTX
, 1, OPTAB_DIRECT
);
11890 /* sel = .......B */
11891 /* t_1 = ...A.... */
11892 sel
= expand_simple_binop (SImode
, IOR
, sel
, t_1
, sel
, 1, OPTAB_DIRECT
);
11893 /* sel = ...A...B */
11894 sel
= expand_mult (SImode
, sel
, GEN_INT (0x4444), sel
, 1);
11895 /* sel = AAAABBBB * 4 */
11896 t_1
= force_reg (SImode
, GEN_INT (0x01230123));
11897 /* sel = { A*4, A*4+1, A*4+2, ... } */
11901 /* inp = xxxAxxxBxxxCxxxD */
11902 t_1
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (8),
11903 NULL_RTX
, 1, OPTAB_DIRECT
);
11904 t_2
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (16),
11905 NULL_RTX
, 1, OPTAB_DIRECT
);
11906 t_3
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (24),
11907 NULL_RTX
, 1, OPTAB_DIRECT
);
11908 /* t_1 = ..xxxAxxxBxxxCxx */
11909 /* t_2 = ....xxxAxxxBxxxC */
11910 /* t_3 = ......xxxAxxxBxx */
11911 sel
= expand_simple_binop (SImode
, AND
, gen_lowpart (SImode
, sel
),
11913 NULL_RTX
, 1, OPTAB_DIRECT
);
11914 t_1
= expand_simple_binop (SImode
, AND
, gen_lowpart (SImode
, t_1
),
11916 NULL_RTX
, 1, OPTAB_DIRECT
);
11917 t_2
= expand_simple_binop (SImode
, AND
, gen_lowpart (SImode
, t_2
),
11918 GEN_INT (0x070000),
11919 NULL_RTX
, 1, OPTAB_DIRECT
);
11920 t_3
= expand_simple_binop (SImode
, AND
, gen_lowpart (SImode
, t_3
),
11921 GEN_INT (0x07000000),
11922 NULL_RTX
, 1, OPTAB_DIRECT
);
11923 /* sel = .......D */
11924 /* t_1 = .....C.. */
11925 /* t_2 = ...B.... */
11926 /* t_3 = .A...... */
11927 sel
= expand_simple_binop (SImode
, IOR
, sel
, t_1
, sel
, 1, OPTAB_DIRECT
);
11928 t_2
= expand_simple_binop (SImode
, IOR
, t_2
, t_3
, t_2
, 1, OPTAB_DIRECT
);
11929 sel
= expand_simple_binop (SImode
, IOR
, sel
, t_2
, sel
, 1, OPTAB_DIRECT
);
11930 /* sel = .A.B.C.D */
11931 sel
= expand_mult (SImode
, sel
, GEN_INT (0x22), sel
, 1);
11932 /* sel = AABBCCDD * 2 */
11933 t_1
= force_reg (SImode
, GEN_INT (0x01010101));
11934 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
11938 /* input = xAxBxCxDxExFxGxH */
11939 sel
= expand_simple_binop (DImode
, AND
, sel
,
11940 GEN_INT ((HOST_WIDE_INT
)0x0f0f0f0f << 32
11942 NULL_RTX
, 1, OPTAB_DIRECT
);
11943 /* sel = .A.B.C.D.E.F.G.H */
11944 t_1
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (4),
11945 NULL_RTX
, 1, OPTAB_DIRECT
);
11946 /* t_1 = ..A.B.C.D.E.F.G. */
11947 sel
= expand_simple_binop (DImode
, IOR
, sel
, t_1
,
11948 NULL_RTX
, 1, OPTAB_DIRECT
);
11949 /* sel = .AABBCCDDEEFFGGH */
11950 sel
= expand_simple_binop (DImode
, AND
, sel
,
11951 GEN_INT ((HOST_WIDE_INT
)0xff00ff << 32
11953 NULL_RTX
, 1, OPTAB_DIRECT
);
11954 /* sel = ..AB..CD..EF..GH */
11955 t_1
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (8),
11956 NULL_RTX
, 1, OPTAB_DIRECT
);
11957 /* t_1 = ....AB..CD..EF.. */
11958 sel
= expand_simple_binop (DImode
, IOR
, sel
, t_1
,
11959 NULL_RTX
, 1, OPTAB_DIRECT
);
11960 /* sel = ..ABABCDCDEFEFGH */
11961 sel
= expand_simple_binop (DImode
, AND
, sel
,
11962 GEN_INT ((HOST_WIDE_INT
)0xffff << 32 | 0xffff),
11963 NULL_RTX
, 1, OPTAB_DIRECT
);
11964 /* sel = ....ABCD....EFGH */
11965 t_1
= expand_simple_binop (DImode
, LSHIFTRT
, sel
, GEN_INT (16),
11966 NULL_RTX
, 1, OPTAB_DIRECT
);
11967 /* t_1 = ........ABCD.... */
11968 sel
= gen_lowpart (SImode
, sel
);
11969 t_1
= gen_lowpart (SImode
, t_1
);
11973 gcc_unreachable ();
11976 /* Always perform the final addition/merge within the bmask insn. */
11977 emit_insn (gen_bmasksi_vis (gen_rtx_REG (SImode
, 0), sel
, t_1
));
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(crtl->is_leaf && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}
/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5 */
  /* then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
	   regno <= SPARC_LAST_V9_FP_REG;
	   regno++)
	fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
	   regno <= SPARC_LAST_V9_FCC_REG;
	   regno++)
	fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
	fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2 */
  /* then honor it.  Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs [SPARC_GSR_REG] = 1;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try and reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  enum machine_mode mode = GET_MODE (x);
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
	  || rclass == GENERAL_OR_FP_REGS
	  || rclass == GENERAL_OR_EXTRA_FP_REGS
	  || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
	  || (mode == TFmode && ! const_zero_operand (x, mode)))
	return NO_REGS;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	return GENERAL_REGS;

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  if (! FP_REG_CLASS_P (rclass)
	      || !(const_zero_operand (x, mode)
		   || const_all_ones_operand (x, mode)))
	    return NO_REGS;
	}
    }

  if (TARGET_VIS3
      && ! TARGET_ARCH64
      && (rclass == EXTRA_FP_REGS
	  || rclass == GENERAL_OR_EXTRA_FP_REGS))
    {
      int regno = true_regnum (x);

      if (SPARC_INT_REG_P (regno))
	return (rclass == EXTRA_FP_REGS
		? FP_REGS : GENERAL_OR_FP_REGS);
    }

  return rclass;
}
/* Output a wide multiply instruction in V8+ mode.  INSN is the instruction,
   OPERANDS are its operands and OPCODE is the mnemonic to be used.  */

const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *opcode)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
	{
	  output_asm_insn ("or\t%L1, %H1, %H1", operands);
	  sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
	  output_asm_insn (mulstr, operands);
	  return "srlx\t%L0, 32, %H0";
	}
      else
	{
	  output_asm_insn ("sllx\t%H1, 32, %3", operands);
	  output_asm_insn ("or\t%L1, %3, %3", operands);
	  sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
	  output_asm_insn (mulstr, operands);
	  output_asm_insn ("srlx\t%3, 32, %H0", operands);
	  return "mov\t%3, %L0";
	}
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
	{
	  output_asm_insn ("or\t%L1, %H1, %H1", operands);
	  sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
	  output_asm_insn (mulstr, operands);
	  return "srlx\t%L0, 32, %H0";
	}
      else
	{
	  output_asm_insn ("sllx\t%H1, 32, %3", operands);
	  output_asm_insn ("or\t%L1, %3, %3", operands);
	  sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
	  output_asm_insn (mulstr, operands);
	  output_asm_insn ("srlx\t%3, 32, %H0", operands);
	  return "mov\t%3, %L0";
	}
    }

  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}
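/* Illustrative note (not from the original sources): for the fully general
   register/register case (the last branch above) with opcode "mulx", the
   emitted sequence is along the lines of

     sllx  %H1, 32, %3      ! build 64-bit op1 in scratch %3
     sllx  %H2, 32, %4      ! build 64-bit op2 in scratch %4
     or    %L1, %3, %3
     or    %L2, %4, %4
     mulx  %3, %4, %3       ! 64-bit multiply
     srlx  %3, 32, %H0      ! split the result back into a register pair
     mov   %3, %L0

   using the scratch operands %3 and %4 supplied by the insn pattern.  */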
/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn.  MODE
   and INNER_MODE are the modes describing TARGET.  */

static void
vector_init_bshuffle (rtx target, rtx elt, enum machine_mode mode,
		      enum machine_mode inner_mode)
{
  rtx t1, final_insn, sel;
  int bmask;

  t1 = gen_reg_rtx (mode);

  elt = convert_modes (SImode, inner_mode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  switch (mode)
    {
    case V2SImode:
      final_insn = gen_bshufflev2si_vis (target, t1, t1);
      bmask = 0x45674567;
      break;
    case V4HImode:
      final_insn = gen_bshufflev4hi_vis (target, t1, t1);
      bmask = 0x67676767;
      break;
    case V8QImode:
      final_insn = gen_bshufflev8qi_vis (target, t1, t1);
      bmask = 0x77777777;
      break;
    default:
      gcc_unreachable ();
    }

  sel = force_reg (SImode, GEN_INT (bmask));
  emit_insn (gen_bmasksi_vis (gen_rtx_REG (SImode, 0), sel, const0_rtx));
  emit_insn (final_insn);
}
/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn.  */

static void
vector_init_fpmerge (rtx target, rtx elt)
{
  rtx t1, t2, t2_low, t3, t3_low;

  t1 = gen_reg_rtx (V4QImode);
  elt = convert_modes (SImode, QImode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  t2 = gen_reg_rtx (V8QImode);
  t2_low = gen_lowpart (V4QImode, t2);
  emit_insn (gen_fpmerge_vis (t2, t1, t1));

  t3 = gen_reg_rtx (V8QImode);
  t3_low = gen_lowpart (V4QImode, t3);
  emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));

  emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
}
/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn.  */

static void
vector_init_faligndata (rtx target, rtx elt)
{
  rtx t1 = gen_reg_rtx (V4HImode);
  int i;

  elt = convert_modes (SImode, HImode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
				  force_reg (SImode, GEN_INT (6)),
				  const0_rtx));

  for (i = 0; i < 4; i++)
    emit_insn (gen_faligndatav4hi_vis (target, t1, target));
}
/* Emit code to initialize TARGET to values for individual fields VALS.  */

void
sparc_expand_vector_init (rtx target, rtx vals)
{
  const enum machine_mode mode = GET_MODE (target);
  const enum machine_mode inner_mode = GET_MODE_INNER (mode);
  const int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  bool all_same = true;
  rtx mem;

  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
	n_var++;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
    {
      if (GET_MODE_SIZE (inner_mode) == 4)
	{
	  emit_move_insn (gen_lowpart (SImode, target),
			  gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
	  return;
	}
      else if (GET_MODE_SIZE (inner_mode) == 8)
	{
	  emit_move_insn (gen_lowpart (DImode, target),
			  gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
	  return;
	}
    }
  else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
	   && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
    {
      emit_move_insn (gen_highpart (word_mode, target),
		      gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
      emit_move_insn (gen_lowpart (word_mode, target),
		      gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
      return;
    }

  if (all_same && GET_MODE_SIZE (mode) == 8)
    {
      if (TARGET_VIS2)
	{
	  vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
	  return;
	}
      if (mode == V8QImode)
	{
	  vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
	  return;
	}
      if (mode == V4HImode)
	{
	  vector_init_faligndata (target, XVECEXP (vals, 0, 0));
	  return;
	}
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
	  || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
	      && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
	&& symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
	   && text_segment_operand (x, mode)))
      && flag_pic == 0)
    {
      if (in_p)
	sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
	sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
	 to move 8-byte values in 4-byte pieces.  This only works via
	 FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
	 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
	 an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
	  || ((general_or_i64_p (rclass)
	       || rclass == GENERAL_OR_FP_REGS)
	      && SPARC_FP_REG_P (regno)))
	{
	  sri->extra_cost = 2;
	  return FP_REGS;
	}
    }

  return NO_REGS;
}
/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE.  OPERANDS[1] is the operator of the condition.  */

bool
sparc_expand_conditional_move (enum machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  enum machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
    return false;

  if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  cmp_mode = GET_MODE (XEXP (cmp, 0));
  rc = GET_CODE (cmp);

  dst = operands[0];
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
	dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
	rc = reverse_condition_maybe_unordered (rc);
      else
	rc = reverse_condition (rc);
    }

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}
/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
   into OPERANDS[0] in MODE, depending on the outcome of the comparison of
   OPERANDS[4] and OPERANDS[5].  OPERANDS[3] is the operator of the condition.
   FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
   code to be used for the condition mask.  */

void
sparc_expand_vcond (enum machine_mode mode, rtx *operands, int ccode, int fcode)
{
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  enum rtx_code code = GET_CODE (operands[3]);

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
  if (code == LT || code == GE)
    {
      rtx t;

      code = swap_condition (code);
      t = cop0; cop0 = cop1; cop1 = t;
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
			 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
			 fcode);

  cmask = gen_rtx_UNSPEC (DImode,
			  gen_rtvec (2, mask, gsr),
			  ccode);

  bshuf = gen_rtx_UNSPEC (mode,
			  gen_rtvec (3, operands[1], operands[2], gsr),
			  UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (VOIDmode, mask, fcmp));
  emit_insn (gen_rtx_SET (VOIDmode, gsr, cmask));

  emit_insn (gen_rtx_SET (VOIDmode, operands[0], bshuf));
}
/* On sparc, any mode which naturally allocates into the float
   registers should return 4 here.  */

unsigned int
sparc_regmode_natural_size (enum machine_mode mode)
{
  int size = UNITS_PER_WORD;

  if (TARGET_ARCH64)
    {
      enum mode_class mclass = GET_MODE_CLASS (mode);

      if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
	size = 4;
    }

  return size;
}
/* Return TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   For V9 we have to deal with the fact that only the lower 32 floating
   point registers are 32-bit addressable.  */

bool
sparc_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  enum mode_class mclass1, mclass2;
  unsigned short size1, size2;

  if (mode1 == mode2)
    return true;

  mclass1 = GET_MODE_CLASS (mode1);
  mclass2 = GET_MODE_CLASS (mode2);
  if (mclass1 != mclass2)
    return false;

  if (! TARGET_V9)
    return true;

  /* Classes are the same and we are V9 so we have to deal with upper
     vs. lower floating point registers.  If one of the modes is a
     4-byte mode, and the other is not, we have to mark them as not
     tieable because only the lower 32 floating point registers are
     addressable 32-bits at a time.

     We can't just test explicitly for SFmode, otherwise we won't
     cover the vector mode cases properly.  */

  if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
    return true;

  size1 = GET_MODE_SIZE (mode1);
  size2 = GET_MODE_SIZE (mode2);
  if ((size1 > 4 && size2 == 4)
      || (size2 > 4 && size1 == 4))
    return false;

  return true;
}
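/* Illustrative note (not from the original sources): on V9 this makes DFmode
   and TFmode tieable (both are wider than 4 bytes), while SFmode and DFmode
   are not, since 4-byte accesses only work in the lower 32 FP registers.  */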
/* Implement TARGET_CSTORE_MODE.  */

static enum machine_mode
sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 ? DImode : SImode);
}
/* Return the compound expression made of T1 and T2.  */

static inline tree
compound_expr (tree t1, tree t2)
{
  return build2 (COMPOUND_EXPR, void_type_node, t1, t2);
}
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
sparc_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_FPU)
    return;

  const unsigned HOST_WIDE_INT accrued_exception_mask = 0x1f << 5;
  const unsigned HOST_WIDE_INT trap_enable_mask = 0x1f << 23;

  /* We generate the equivalent of feholdexcept (&fenv_var):

       unsigned int fenv_var;
       __builtin_store_fsr (&fenv_var);

       unsigned int tmp1_var;
       tmp1_var = fenv_var & ~(accrued_exception_mask | trap_enable_mask);

       __builtin_load_fsr (&tmp1_var);  */

  tree fenv_var = create_tmp_var (unsigned_type_node, NULL);
  mark_addressable (fenv_var);
  tree fenv_addr = build_fold_addr_expr (fenv_var);
  tree stfsr = sparc_builtins[SPARC_BUILTIN_STFSR];
  tree hold_stfsr = build_call_expr (stfsr, 1, fenv_addr);

  tree tmp1_var = create_tmp_var (unsigned_type_node, NULL);
  mark_addressable (tmp1_var);
  tree masked_fenv_var
    = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
	      build_int_cst (unsigned_type_node,
			     ~(accrued_exception_mask | trap_enable_mask)));
  tree hold_mask
    = build2 (MODIFY_EXPR, void_type_node, tmp1_var, masked_fenv_var);

  tree tmp1_addr = build_fold_addr_expr (tmp1_var);
  tree ldfsr = sparc_builtins[SPARC_BUILTIN_LDFSR];
  tree hold_ldfsr = build_call_expr (ldfsr, 1, tmp1_addr);

  *hold = compound_expr (compound_expr (hold_stfsr, hold_mask), hold_ldfsr);

  /* We reload the value of tmp1_var to clear the exceptions:

       __builtin_load_fsr (&tmp1_var);  */

  *clear = build_call_expr (ldfsr, 1, tmp1_addr);

  /* We generate the equivalent of feupdateenv (&fenv_var):

       unsigned int tmp2_var;
       __builtin_store_fsr (&tmp2_var);

       __builtin_load_fsr (&fenv_var);

       if (SPARC_LOW_FE_EXCEPT_VALUES)
	 tmp2_var >>= 5;

       __atomic_feraiseexcept ((int) tmp2_var);  */

  tree tmp2_var = create_tmp_var (unsigned_type_node, NULL);
  mark_addressable (tmp2_var);
  tree tmp3_addr = build_fold_addr_expr (tmp2_var);
  tree update_stfsr = build_call_expr (stfsr, 1, tmp3_addr);

  tree update_ldfsr = build_call_expr (ldfsr, 1, fenv_addr);

  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  tree update_call
    = build_call_expr (atomic_feraiseexcept, 1,
		       fold_convert (integer_type_node, tmp2_var));

  if (SPARC_LOW_FE_EXCEPT_VALUES)
    {
      tree shifted_tmp2_var
	= build2 (RSHIFT_EXPR, unsigned_type_node, tmp2_var,
		  build_int_cst (unsigned_type_node, 5));
      tree update_shift
	= build2 (MODIFY_EXPR, void_type_node, tmp2_var, shifted_tmp2_var);
      update_call = compound_expr (update_shift, update_call);
    }

  *update
    = compound_expr (compound_expr (update_stfsr, update_ldfsr), update_call);
}
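/* Illustrative note (not from the original sources): the FSR layout assumed
   by the masks above places the accrued-exception (aexc) field in bits 5-9
   and the trap-enable mask (TEM) in bits 23-27.  The hold sequence therefore
   saves the FSR, clears those two fields and reloads it; the update sequence
   re-raises in software whatever exceptions accrued during the protected
   region.  */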
12704 #include "gt-sparc.h"