PR middle-end/54635
[official-gcc.git] / gcc / config / sparc / sparc.c
/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "gimple.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "opts.h"
/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;

  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     and not variable.  */
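  /* For example, on a CPU with int_mul_bit_factor == 2 (see
     ultrasparc_costs below), a multiply whose operand has its highest
     set bit at position 11 adds (11 - 3) / 2 = 4 to the base int_mul
     cost.  */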
  const int int_mul_bit_factor;

  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
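/* For example, %i0..%i5 (regs 24-29) are remapped to %o0..%o5 (regs 8-13)
   and %i7 (reg 31) to %o7 (reg 15), since a leaf function does not
   allocate a register window; -1 marks a register that may not appear
   in a leaf function.  */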
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_frame_size cfun->machine->frame_size
#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
#define sparc_frame_base_reg cfun->machine->frame_base_reg
#define sparc_frame_base_offset cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static void sparc_handle_vis_mul8x16 (tree *, int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static void sparc_reorg (void);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   enum machine_mode,
					   secondary_reload_info *);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

struct gcc_target targetm = TARGET_INITIALIZER;
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { "leon", MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
  };
  const struct cpu_table *cpu;
  unsigned int i;
  int fpu;
  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
	{
	  bool invert;
	  int mask;

	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (! strcmp (q, "all"))
	    mask = MASK_DEBUG_ALL;
	  else if (! strcmp (q, "options"))
	    mask = MASK_DEBUG_OPTIONS;
	  else
	    error ("unknown -mdebug-%s switch", q);

	  if (invert)
	    sparc_debug &= ~mask;
	  else
	    sparc_debug |= mask;
	}
    }

  if (TARGET_DEBUG_OPTIONS)
    {
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */
  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
    {
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      fprintf (stderr, "sparc_cpu: %s\n",
	       cpu_table[(int) sparc_cpu].name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);
    }

  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
		   & ~(MASK_FMAF | MASK_VIS3)
#endif
		   );

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* -mvis2 implies -mvis.  */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis.  */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit.  */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;

  /* Use the deprecated v8 insns for sparc64 in 32-bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64-bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32-bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara4_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    }
  if (sparc_memory_model == SMM_DEFAULT)
    {
      /* Choose the memory model for the operating system.  */
      enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
      if (os_default != SMM_DEFAULT)
	sparc_memory_model = os_default;
      /* Choose the most relaxed model for the processor.  */
      else if (TARGET_V9)
	sparc_memory_model = SMM_RMO;
      else if (TARGET_V8)
	sparc_memory_model = SMM_PSO;
      else
	sparc_memory_model = SMM_SC;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */
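/* (Example: 1.0f has the SFmode bit image 0x3f800000, whose low 10 bits
   are zero; it fails SPARC_SIMM13_P but satisfies SPARC_SETHI_P, so a
   single sethi can load it.)  */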
int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partition into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
	 always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && (const_zero_operand (operands[1], mode)
	      || const_all_ones_operand (operands[1], mode)))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && ! can_create_pseudo_p ())))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    case TImode:
      {
	rtx high, low;
	/* TImode isn't available in 32-bit mode.  */
	split_double (operands[1], &high, &low);
	emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
			      high));
	emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
			      low));
      }
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp = op0;

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
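      /* For example, with op1 == 0x12345678 the two sets below are
	 temp = 0x12345400 (the sethi part, low 10 bits cleared) and
	 then op0 = temp | 0x278 (the low 10 bits).  */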
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;
    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;
    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;
    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
1730 #if HOST_BITS_PER_WIDE_INT == 32
1731 static void
1732 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1734 gcc_unreachable ();
1736 #else
1737 /* These avoid problems when cross compiling. If we do not
1738 go through all this hair then the optimizer will see
1739 invalid REG_EQUAL notes or in some cases none at all. */
1740 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1741 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1742 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1743 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1745 /* The optimizer is not to assume anything about exactly
1746 which bits are set for a HIGH, they are unspecified.
1747 Unfortunately this leads to many missed optimizations
1748 during CSE. We mask out the non-HIGH bits, and matches
1749 a plain movdi, to alleviate this problem. */
1750 static rtx
1751 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1753 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1756 static rtx
1757 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1759 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1762 static rtx
1763 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1765 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1768 static rtx
1769 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1771 return gen_rtx_XOR (DImode, src, GEN_INT (val));
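/* Editorial sketch, not part of the build: the 0x3ff masks above mirror
   the hardware split between sethi, which sets bits 31:10 of a 32-bit
   value, and a 10-bit immediate.  Worked on a hypothetical value:  */
#if 0
unsigned long long example = 0x12345678ULL;
unsigned long long high22  = example & ~0x3ffULL;  /* sethi part: 0x12345400 */
unsigned long long low10   = example &  0x3ffULL;  /* or/xor part: 0x278 */
/* (high22 | low10) == example, reconstructing the original value.  */
#endif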
1774 /* Worker routines for 64-bit constant formation on arch64.
1775 One of the key things to do in these emissions is
1776 to create as many temp REGs as possible. This makes it
1777 possible for half-built constants to be reused when
1778 similar values are required later on.
1779 Without doing this, the optimizer cannot see such
1780 opportunities. */
1782 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1783 unsigned HOST_WIDE_INT, int);
1785 static void
1786 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1787 unsigned HOST_WIDE_INT low_bits, int is_neg)
1789 unsigned HOST_WIDE_INT high_bits;
1791 if (is_neg)
1792 high_bits = (~low_bits) & 0xffffffff;
1793 else
1794 high_bits = low_bits;
1796 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1797 if (!is_neg)
1799 emit_insn (gen_rtx_SET (VOIDmode, op0,
1800 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1802 else
1804 /* If we are XOR'ing with -1, then we should emit a one's complement
1805 instead. This way the combiner will notice logical operations
1806 such as ANDN later on and substitute. */
1807 if ((low_bits & 0x3ff) == 0x3ff)
1809 emit_insn (gen_rtx_SET (VOIDmode, op0,
1810 gen_rtx_NOT (DImode, temp)));
1812 else
1814 emit_insn (gen_rtx_SET (VOIDmode, op0,
1815 gen_safe_XOR64 (temp,
1816 (-(HOST_WIDE_INT)0x400
1817 | (low_bits & 0x3ff)))));
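/* A worked example of the negated path above (our illustration, not
   GCC-verified output): to load 0xffffffff87654321, low_bits is
   0x87654321, so high_bits = ~low_bits = 0x789abcde and we emit

	sethi	%hi(0x789abcde), %temp	! temp = 0x789abc00
	xor	%temp, -0xdf, %reg	! -0x400 | 0x321 == -0xdf

   and 0x789abc00 ^ 0xffffffffffffff21 is indeed 0xffffffff87654321.  */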
1822 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1823 unsigned HOST_WIDE_INT, int);
1825 static void
1826 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1827 unsigned HOST_WIDE_INT high_bits,
1828 unsigned HOST_WIDE_INT low_immediate,
1829 int shift_count)
1831 rtx temp2 = op0;
1833 if ((high_bits & 0xfffffc00) != 0)
1835 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1836 if ((high_bits & ~0xfffffc00) != 0)
1837 emit_insn (gen_rtx_SET (VOIDmode, op0,
1838 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1839 else
1840 temp2 = temp;
1842 else
1844 emit_insn (gen_safe_SET64 (temp, high_bits));
1845 temp2 = temp;
1848 /* Now shift it up into place. */
1849 emit_insn (gen_rtx_SET (VOIDmode, op0,
1850 gen_rtx_ASHIFT (DImode, temp2,
1851 GEN_INT (shift_count))));
1853 /* If there is a low immediate piece, finish up by
1854 putting that in as well. */
1855 if (low_immediate != 0)
1856 emit_insn (gen_rtx_SET (VOIDmode, op0,
1857 gen_safe_OR64 (op0, low_immediate)));
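/* Illustrative only: for high_bits = 0x12345678, low_immediate = 0x55
   and shift_count = 32, the routine above produces the classic 4-insn
   pattern (register names are hypothetical):

	sethi	%hi(0x12345678), %temp
	or	%temp, 0x278, %reg
	sllx	%reg, 32, %reg
	or	%reg, 0x55, %reg	! %reg = 0x1234567800000055  */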
1860 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1861 unsigned HOST_WIDE_INT);
1863 /* Full 64-bit constant decomposition. Even though this is the
1864 'worst' case, we still optimize a few things away. */
1865 static void
1866 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1867 unsigned HOST_WIDE_INT high_bits,
1868 unsigned HOST_WIDE_INT low_bits)
1870 rtx sub_temp = op0;
1872 if (can_create_pseudo_p ())
1873 sub_temp = gen_reg_rtx (DImode);
1875 if ((high_bits & 0xfffffc00) != 0)
1877 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1878 if ((high_bits & ~0xfffffc00) != 0)
1879 emit_insn (gen_rtx_SET (VOIDmode,
1880 sub_temp,
1881 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1882 else
1883 sub_temp = temp;
1885 else
1887 emit_insn (gen_safe_SET64 (temp, high_bits));
1888 sub_temp = temp;
1891 if (can_create_pseudo_p ())
1893 rtx temp2 = gen_reg_rtx (DImode);
1894 rtx temp3 = gen_reg_rtx (DImode);
1895 rtx temp4 = gen_reg_rtx (DImode);
1897 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1898 gen_rtx_ASHIFT (DImode, sub_temp,
1899 GEN_INT (32))));
1901 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1902 if ((low_bits & ~0xfffffc00) != 0)
1904 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1905 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1906 emit_insn (gen_rtx_SET (VOIDmode, op0,
1907 gen_rtx_PLUS (DImode, temp4, temp3)));
1909 else
1911 emit_insn (gen_rtx_SET (VOIDmode, op0,
1912 gen_rtx_PLUS (DImode, temp4, temp2)));
1915 else
1917 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1918 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1919 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1920 int to_shift = 12;
1922 /* We are in the middle of reload, so this is really
1923 painful. However, we still make an attempt to
1924 avoid emitting truly stupid code. */
1925 if (low1 != const0_rtx)
1927 emit_insn (gen_rtx_SET (VOIDmode, op0,
1928 gen_rtx_ASHIFT (DImode, sub_temp,
1929 GEN_INT (to_shift))));
1930 emit_insn (gen_rtx_SET (VOIDmode, op0,
1931 gen_rtx_IOR (DImode, op0, low1)));
1932 sub_temp = op0;
1933 to_shift = 12;
1935 else
1937 to_shift += 12;
1939 if (low2 != const0_rtx)
1941 emit_insn (gen_rtx_SET (VOIDmode, op0,
1942 gen_rtx_ASHIFT (DImode, sub_temp,
1943 GEN_INT (to_shift))));
1944 emit_insn (gen_rtx_SET (VOIDmode, op0,
1945 gen_rtx_IOR (DImode, op0, low2)));
1946 sub_temp = op0;
1947 to_shift = 8;
1949 else
1951 to_shift += 8;
1953 emit_insn (gen_rtx_SET (VOIDmode, op0,
1954 gen_rtx_ASHIFT (DImode, sub_temp,
1955 GEN_INT (to_shift))));
1956 if (low3 != const0_rtx)
1957 emit_insn (gen_rtx_SET (VOIDmode, op0,
1958 gen_rtx_IOR (DImode, op0, low3)));
1959 /* phew... */
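/* Editorial worked example for the reload path above (not taken from
   GCC output): with low_bits = 0xdeadbeef the three chunks are
   low1 = 0xdea (bits 31:20), low2 = 0xdbe (bits 19:8) and low3 = 0xef
   (bits 7:0), so op0 is assembled, 32 bits of shift in total, as
   ((((sub_temp << 12) | 0xdea) << 12) | 0xdbe) << 8 | 0xef.  */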
1963 /* Analyze a 64-bit constant for certain properties. */
1964 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1965 unsigned HOST_WIDE_INT,
1966 int *, int *, int *);
1968 static void
1969 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1970 unsigned HOST_WIDE_INT low_bits,
1971 int *hbsp, int *lbsp, int *abbasp)
1973 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1974 int i;
1976 lowest_bit_set = highest_bit_set = -1;
1977 i = 0;
1980 if ((lowest_bit_set == -1)
1981 && ((low_bits >> i) & 1))
1982 lowest_bit_set = i;
1983 if ((highest_bit_set == -1)
1984 && ((high_bits >> (32 - i - 1)) & 1))
1985 highest_bit_set = (64 - i - 1);
1987 while (++i < 32
1988 && ((highest_bit_set == -1)
1989 || (lowest_bit_set == -1)));
1990 if (i == 32)
1992 i = 0;
1995 if ((lowest_bit_set == -1)
1996 && ((high_bits >> i) & 1))
1997 lowest_bit_set = i + 32;
1998 if ((highest_bit_set == -1)
1999 && ((low_bits >> (32 - i - 1)) & 1))
2000 highest_bit_set = 32 - i - 1;
2002 while (++i < 32
2003 && ((highest_bit_set == -1)
2004 || (lowest_bit_set == -1)));
2006 /* If there are no bits set this should have gone out
2007 as one instruction! */
2008 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2009 all_bits_between_are_set = 1;
2010 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2012 if (i < 32)
2014 if ((low_bits & (1 << i)) != 0)
2015 continue;
2017 else
2019 if ((high_bits & (1 << (i - 32))) != 0)
2020 continue;
2022 all_bits_between_are_set = 0;
2023 break;
2025 *hbsp = highest_bit_set;
2026 *lbsp = lowest_bit_set;
2027 *abbasp = all_bits_between_are_set;
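/* Hypothetical example of the analysis above: for the constant
   0x0000001fffff0000 (high_bits = 0x1f, low_bits = 0xffff0000) it
   reports lowest_bit_set = 16, highest_bit_set = 36 and
   all_bits_between_are_set = 1, i.e. a contiguous 21-bit run.  */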
2030 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2032 static int
2033 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2034 unsigned HOST_WIDE_INT low_bits)
2036 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2038 if (high_bits == 0
2039 || high_bits == 0xffffffff)
2040 return 1;
2042 analyze_64bit_constant (high_bits, low_bits,
2043 &highest_bit_set, &lowest_bit_set,
2044 &all_bits_between_are_set);
2046 if ((highest_bit_set == 63
2047 || lowest_bit_set == 0)
2048 && all_bits_between_are_set != 0)
2049 return 1;
2051 if ((highest_bit_set - lowest_bit_set) < 21)
2052 return 1;
2054 return 0;
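/* Our reading of the predicate above: it accepts constants whose upper
   32 bits are all-zeros or all-ones (sethi+or or sethi+xor), contiguous
   runs of set bits touching bit 0 or bit 63 (mov/sethi plus one shift),
   and any run narrower than 21 bits (sethi plus one shift).  */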
2057 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2058 unsigned HOST_WIDE_INT,
2059 int, int);
2061 static unsigned HOST_WIDE_INT
2062 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2063 unsigned HOST_WIDE_INT low_bits,
2064 int lowest_bit_set, int shift)
2066 HOST_WIDE_INT hi, lo;
2068 if (lowest_bit_set < 32)
2070 lo = (low_bits >> lowest_bit_set) << shift;
2071 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2073 else
2075 lo = 0;
2076 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2078 gcc_assert (! (hi & lo));
2079 return (hi | lo);
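/* Editorial trace, not part of the build: continuing the example above,
   create_simple_focus_bits (0x1f, 0xffff0000, 16, 10) computes
   lo = (0xffff0000 >> 16) << 10 = 0x03fffc00 and
   hi = ((0x1f << 16) << 10) = 0x7c000000, returning 0x7ffffc00, i.e.
   the 21-bit run re-based so it lines up with a sethi operand.  */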
2082 /* Here we are sure to be arch64 and this is an integer constant
2083 being loaded into a register. Emit the most efficient
2084 insn sequence possible. Detection of all the 1-insn cases
2085 has been done already. */
2086 static void
2087 sparc_emit_set_const64 (rtx op0, rtx op1)
2089 unsigned HOST_WIDE_INT high_bits, low_bits;
2090 int lowest_bit_set, highest_bit_set;
2091 int all_bits_between_are_set;
2092 rtx temp = 0;
2094 /* Sanity check that we know what we are working with. */
2095 gcc_assert (TARGET_ARCH64
2096 && (GET_CODE (op0) == SUBREG
2097 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2099 if (! can_create_pseudo_p ())
2100 temp = op0;
2102 if (GET_CODE (op1) != CONST_INT)
2104 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2105 return;
2108 if (! temp)
2109 temp = gen_reg_rtx (DImode);
2111 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2112 low_bits = (INTVAL (op1) & 0xffffffff);
2114 /* low_bits bits 0 --> 31
2115 high_bits bits 32 --> 63 */
2117 analyze_64bit_constant (high_bits, low_bits,
2118 &highest_bit_set, &lowest_bit_set,
2119 &all_bits_between_are_set);
2121 /* First try for a 2-insn sequence. */
2123 /* These situations are preferred because the optimizer can
2124 * do more things with them:
2125 * 1) mov -1, %reg
2126 * sllx %reg, shift, %reg
2127 * 2) mov -1, %reg
2128 * srlx %reg, shift, %reg
2129 * 3) mov some_small_const, %reg
2130 * sllx %reg, shift, %reg
2132 if (((highest_bit_set == 63
2133 || lowest_bit_set == 0)
2134 && all_bits_between_are_set != 0)
2135 || ((highest_bit_set - lowest_bit_set) < 12))
2137 HOST_WIDE_INT the_const = -1;
2138 int shift = lowest_bit_set;
2140 if ((highest_bit_set != 63
2141 && lowest_bit_set != 0)
2142 || all_bits_between_are_set == 0)
2144 the_const =
2145 create_simple_focus_bits (high_bits, low_bits,
2146 lowest_bit_set, 0);
2148 else if (lowest_bit_set == 0)
2149 shift = -(63 - highest_bit_set);
2151 gcc_assert (SPARC_SIMM13_P (the_const));
2152 gcc_assert (shift != 0);
2154 emit_insn (gen_safe_SET64 (temp, the_const));
2155 if (shift > 0)
2156 emit_insn (gen_rtx_SET (VOIDmode,
2157 op0,
2158 gen_rtx_ASHIFT (DImode,
2159 temp,
2160 GEN_INT (shift))));
2161 else if (shift < 0)
2162 emit_insn (gen_rtx_SET (VOIDmode,
2163 op0,
2164 gen_rtx_LSHIFTRT (DImode,
2165 temp,
2166 GEN_INT (-shift))));
2167 return;
2170 /* Now a range of 22 or fewer bits set somewhere.
2171 * 1) sethi %hi(focus_bits), %reg
2172 * sllx %reg, shift, %reg
2173 * 2) sethi %hi(focus_bits), %reg
2174 * srlx %reg, shift, %reg
2176 if ((highest_bit_set - lowest_bit_set) < 21)
2178 unsigned HOST_WIDE_INT focus_bits =
2179 create_simple_focus_bits (high_bits, low_bits,
2180 lowest_bit_set, 10);
2182 gcc_assert (SPARC_SETHI_P (focus_bits));
2183 gcc_assert (lowest_bit_set != 10);
2185 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2187 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2188 if (lowest_bit_set < 10)
2189 emit_insn (gen_rtx_SET (VOIDmode,
2190 op0,
2191 gen_rtx_LSHIFTRT (DImode, temp,
2192 GEN_INT (10 - lowest_bit_set))));
2193 else if (lowest_bit_set > 10)
2194 emit_insn (gen_rtx_SET (VOIDmode,
2195 op0,
2196 gen_rtx_ASHIFT (DImode, temp,
2197 GEN_INT (lowest_bit_set - 10))));
2198 return;
2201 /* 1) sethi %hi(low_bits), %reg
2202 * or %reg, %lo(low_bits), %reg
2203 * 2) sethi %hi(~low_bits), %reg
2204 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2206 if (high_bits == 0
2207 || high_bits == 0xffffffff)
2209 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2210 (high_bits == 0xffffffff));
2211 return;
2214 /* Now, try 3-insn sequences. */
2216 /* 1) sethi %hi(high_bits), %reg
2217 * or %reg, %lo(high_bits), %reg
2218 * sllx %reg, 32, %reg
2220 if (low_bits == 0)
2222 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2223 return;
2226 /* We may be able to do something quick
2227 when the constant is negated, so try that. */
2228 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2229 (~low_bits) & 0xfffffc00))
2231 /* NOTE: The trailing bits get XOR'd so we need the
2232 non-negated bits, not the negated ones. */
2233 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2235 if ((((~high_bits) & 0xffffffff) == 0
2236 && ((~low_bits) & 0x80000000) == 0)
2237 || (((~high_bits) & 0xffffffff) == 0xffffffff
2238 && ((~low_bits) & 0x80000000) != 0))
2240 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2242 if ((SPARC_SETHI_P (fast_int)
2243 && (~high_bits & 0xffffffff) == 0)
2244 || SPARC_SIMM13_P (fast_int))
2245 emit_insn (gen_safe_SET64 (temp, fast_int));
2246 else
2247 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2249 else
2251 rtx negated_const;
2252 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2253 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2254 sparc_emit_set_const64 (temp, negated_const);
2257 /* If we are XOR'ing with -1, then we should emit a one's complement
2258 instead. This way the combiner will notice logical operations
2259 such as ANDN later on and substitute. */
2260 if (trailing_bits == 0x3ff)
2262 emit_insn (gen_rtx_SET (VOIDmode, op0,
2263 gen_rtx_NOT (DImode, temp)));
2265 else
2267 emit_insn (gen_rtx_SET (VOIDmode,
2268 op0,
2269 gen_safe_XOR64 (temp,
2270 (-0x400 | trailing_bits))));
2272 return;
2275 /* 1) sethi %hi(xxx), %reg
2276 * or %reg, %lo(xxx), %reg
2277 * sllx %reg, yyy, %reg
2279 * ??? This is just a generalized version of the low_bits==0
2280 * thing above, FIXME...
2282 if ((highest_bit_set - lowest_bit_set) < 32)
2284 unsigned HOST_WIDE_INT focus_bits =
2285 create_simple_focus_bits (high_bits, low_bits,
2286 lowest_bit_set, 0);
2288 /* We can't get here in this state. */
2289 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2291 /* So what we know is that the set bits straddle the
2292 middle of the 64-bit word. */
2293 sparc_emit_set_const64_quick2 (op0, temp,
2294 focus_bits, 0,
2295 lowest_bit_set);
2296 return;
2299 /* 1) sethi %hi(high_bits), %reg
2300 * or %reg, %lo(high_bits), %reg
2301 * sllx %reg, 32, %reg
2302 * or %reg, low_bits, %reg
2304 if (SPARC_SIMM13_P(low_bits)
2305 && ((int)low_bits > 0))
2307 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2308 return;
2311 /* The easiest way, when all else fails, is full decomposition. */
2312 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2314 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
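/* Editorial summary of the dispatch order in sparc_emit_set_const64,
   derived from the code above with hypothetical sample constants:
   mov+shift handles edge-touching or sub-12-bit runs (e.g. -1 << 40);
   sethi+shift handles runs under 21 bits (e.g. 0x1fffff0000, via
   sethi %hi(0x7ffffc00) then sllx by 6); then come the quick1/quick2
   helpers, the negated-constant retry, and finally the full longway
   decomposition.  */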
2316 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2317 return the mode to be used for the comparison. For floating-point,
2318 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2319 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2320 processing is needed. */
2322 enum machine_mode
2323 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2325 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2327 switch (op)
2329 case EQ:
2330 case NE:
2331 case UNORDERED:
2332 case ORDERED:
2333 case UNLT:
2334 case UNLE:
2335 case UNGT:
2336 case UNGE:
2337 case UNEQ:
2338 case LTGT:
2339 return CCFPmode;
2341 case LT:
2342 case LE:
2343 case GT:
2344 case GE:
2345 return CCFPEmode;
2347 default:
2348 gcc_unreachable ();
2351 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2352 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2354 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2355 return CCX_NOOVmode;
2356 else
2357 return CC_NOOVmode;
2359 else
2361 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2362 return CCXmode;
2363 else
2364 return CCmode;
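/* Illustrative examples for the mode selection above (our own, not
   exhaustive): LT on a floating-point compare selects CCFPEmode (the
   signaling fcmpe variant), UNLT selects CCFPmode, and comparing the
   result of a DImode PLUS against zero on arch64 selects CCX_NOOVmode,
   recording that the overflow bit is not meaningful there.  */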
2368 /* Emit the compare insn and return the CC reg for a CODE comparison
2369 with operands X and Y. */
2371 static rtx
2372 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2374 enum machine_mode mode;
2375 rtx cc_reg;
2377 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2378 return x;
2380 mode = SELECT_CC_MODE (code, x, y);
2382 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2383 fcc regs (cse can't tell they're really call clobbered regs and will
2384 remove a duplicate comparison even if there is an intervening function
2385 call - it will then try to reload the cc reg via an int reg which is why
2386 we need the movcc patterns). It is possible to provide the movcc
2387 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2388 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2389 to tell cse that CCFPE mode registers (even pseudos) are call
2390 clobbered. */
2392 /* ??? This is an experiment. Rather than making changes to cse which may
2393 or may not be easy/clean, we do our own cse. This is possible because
2394 we will generate hard registers. Cse knows they're call clobbered (it
2395 doesn't know the same thing about pseudos). If we guess wrong, no big
2396 deal, but if we win, great! */
2398 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2399 #if 1 /* experiment */
2401 int reg;
2402 /* We cycle through the registers to ensure they're all exercised. */
2403 static int next_fcc_reg = 0;
2404 /* Previous x,y for each fcc reg. */
2405 static rtx prev_args[4][2];
2407 /* Scan prev_args for x,y. */
2408 for (reg = 0; reg < 4; reg++)
2409 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2410 break;
2411 if (reg == 4)
2413 reg = next_fcc_reg;
2414 prev_args[reg][0] = x;
2415 prev_args[reg][1] = y;
2416 next_fcc_reg = (next_fcc_reg + 1) & 3;
2418 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2420 #else
2421 cc_reg = gen_reg_rtx (mode);
2422 #endif /* ! experiment */
2423 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2424 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2425 else
2426 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2428 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2429 will only result in an unrecognizable insn, so there is no point in asserting. */
2430 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2432 return cc_reg;
2436 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2439 gen_compare_reg (rtx cmp)
2441 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2444 /* This function is used for v9 only.
2445 DEST is the target of the Scc insn.
2446 CODE is the code for an Scc's comparison.
2447 X and Y are the values we compare.
2449 This function is needed to turn
2451 (set (reg:SI 110)
2452 (gt (reg:CCX 100 %icc)
2453 (const_int 0)))
2454 into
2455 (set (reg:SI 110)
2456 (gt:DI (reg:CCX 100 %icc)
2457 (const_int 0)))
2459 I.e. the instruction recognizer needs to see the mode of the comparison to
2460 find the right instruction. We could use "gt:DI" right in the
2461 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2463 static int
2464 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2466 if (! TARGET_ARCH64
2467 && (GET_MODE (x) == DImode
2468 || GET_MODE (dest) == DImode))
2469 return 0;
2471 /* Try to use the movrCC insns. */
2472 if (TARGET_ARCH64
2473 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2474 && y == const0_rtx
2475 && v9_regcmp_p (compare_code))
2477 rtx op0 = x;
2478 rtx temp;
2480 /* Special case for op0 != 0. This can be done with one instruction if
2481 dest == x. */
2483 if (compare_code == NE
2484 && GET_MODE (dest) == DImode
2485 && rtx_equal_p (op0, dest))
2487 emit_insn (gen_rtx_SET (VOIDmode, dest,
2488 gen_rtx_IF_THEN_ELSE (DImode,
2489 gen_rtx_fmt_ee (compare_code, DImode,
2490 op0, const0_rtx),
2491 const1_rtx,
2492 dest)));
2493 return 1;
2496 if (reg_overlap_mentioned_p (dest, op0))
2498 /* Handle the case where dest == x.
2499 We "early clobber" the result. */
2500 op0 = gen_reg_rtx (GET_MODE (x));
2501 emit_move_insn (op0, x);
2504 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2505 if (GET_MODE (op0) != DImode)
2507 temp = gen_reg_rtx (DImode);
2508 convert_move (temp, op0, 0);
2510 else
2511 temp = op0;
2512 emit_insn (gen_rtx_SET (VOIDmode, dest,
2513 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2514 gen_rtx_fmt_ee (compare_code, DImode,
2515 temp, const0_rtx),
2516 const1_rtx,
2517 dest)));
2518 return 1;
2520 else
2522 x = gen_compare_reg_1 (compare_code, x, y);
2523 y = const0_rtx;
2525 gcc_assert (GET_MODE (x) != CC_NOOVmode
2526 && GET_MODE (x) != CCX_NOOVmode);
2528 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2529 emit_insn (gen_rtx_SET (VOIDmode, dest,
2530 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2531 gen_rtx_fmt_ee (compare_code,
2532 GET_MODE (x), x, y),
2533 const1_rtx, dest)));
2534 return 1;
2539 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2540 without jumps using the addx/subx instructions. */
2542 bool
2543 emit_scc_insn (rtx operands[])
2545 rtx tem;
2546 rtx x;
2547 rtx y;
2548 enum rtx_code code;
2550 /* The quad-word fp compare library routines all return nonzero to indicate
2551 true, which is different from the equivalent libgcc routines, so we must
2552 handle them specially here. */
2553 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2555 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2556 GET_CODE (operands[1]));
2557 operands[2] = XEXP (operands[1], 0);
2558 operands[3] = XEXP (operands[1], 1);
2561 code = GET_CODE (operands[1]);
2562 x = operands[2];
2563 y = operands[3];
2565 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2566 more applications). The exception to this is "reg != 0" which can
2567 be done in one instruction on v9 (so we do it). */
2568 if (code == EQ)
2570 if (GET_MODE (x) == SImode)
2572 rtx pat = gen_seqsi_special (operands[0], x, y);
2573 emit_insn (pat);
2574 return true;
2576 else if (GET_MODE (x) == DImode)
2578 rtx pat = gen_seqdi_special (operands[0], x, y);
2579 emit_insn (pat);
2580 return true;
2584 if (code == NE)
2586 if (GET_MODE (x) == SImode)
2588 rtx pat = gen_snesi_special (operands[0], x, y);
2589 emit_insn (pat);
2590 return true;
2592 else if (GET_MODE (x) == DImode)
2594 rtx pat;
2595 if (TARGET_VIS3)
2596 pat = gen_snedi_special_vis3 (operands[0], x, y);
2597 else
2598 pat = gen_snedi_special (operands[0], x, y);
2599 emit_insn (pat);
2600 return true;
2604 if (TARGET_V9
2605 && TARGET_ARCH64
2606 && GET_MODE (x) == DImode
2607 && !(TARGET_VIS3
2608 && (code == GTU || code == LTU))
2609 && gen_v9_scc (operands[0], code, x, y))
2610 return true;
2612 /* We can do LTU and GEU using the addx/subx instructions too. And
2613 for GTU/LEU, if both operands are registers, swap them and fall
2614 back to the easy case. */
2615 if (code == GTU || code == LEU)
2617 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2618 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2620 tem = x;
2621 x = y;
2622 y = tem;
2623 code = swap_condition (code);
2627 if (code == LTU
2628 || (!TARGET_VIS3 && code == GEU))
2630 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2631 gen_rtx_fmt_ee (code, SImode,
2632 gen_compare_reg_1 (code, x, y),
2633 const0_rtx)));
2634 return true;
2637 /* All the possibilities to use addx/subx-based sequences have been
2638 exhausted; try for a 3-instruction sequence using v9 conditional
2639 moves. */
2640 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
2641 return true;
2643 /* Nope, do branches. */
2644 return false;
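/* Illustrative only: the LTU case above relies on the carry chain,
   conceptually (register names are examples)

	subcc	%o0, %o1, %g0	! compare; sets carry if %o0 < %o1 unsigned
	addx	%g0, 0, %dest	! %dest = carry, i.e. the LTU result

   which is why LTU/GEU (and swapped GTU/LEU) need no branches.  */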
2647 /* Emit a conditional jump insn for the v9 architecture using comparison code
2648 CODE and jump target LABEL.
2649 This function exists to take advantage of the v9 brxx insns. */
2651 static void
2652 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2654 emit_jump_insn (gen_rtx_SET (VOIDmode,
2655 pc_rtx,
2656 gen_rtx_IF_THEN_ELSE (VOIDmode,
2657 gen_rtx_fmt_ee (code, GET_MODE (op0),
2658 op0, const0_rtx),
2659 gen_rtx_LABEL_REF (VOIDmode, label),
2660 pc_rtx)));
2663 void
2664 emit_conditional_branch_insn (rtx operands[])
2666 /* The quad-word fp compare library routines all return nonzero to indicate
2667 true, which is different from the equivalent libgcc routines, so we must
2668 handle them specially here. */
2669 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2671 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2672 GET_CODE (operands[0]));
2673 operands[1] = XEXP (operands[0], 0);
2674 operands[2] = XEXP (operands[0], 1);
2677 if (TARGET_ARCH64 && operands[2] == const0_rtx
2678 && GET_CODE (operands[1]) == REG
2679 && GET_MODE (operands[1]) == DImode)
2681 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2682 return;
2685 operands[1] = gen_compare_reg (operands[0]);
2686 operands[2] = const0_rtx;
2687 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2688 operands[1], operands[2]);
2689 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2690 operands[3]));
2694 /* Generate a DFmode part of a hard TFmode register.
2695 REG is the TFmode hard register, LOW is 1 for the
2696 low 64 bits of the register and 0 otherwise.
2699 gen_df_reg (rtx reg, int low)
2701 int regno = REGNO (reg);
2703 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2704 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
2705 return gen_rtx_REG (DFmode, regno);
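/* Hypothetical example: SPARC is big-endian, so for a TFmode value
   living in %f0-%f3, gen_df_reg (reg, 0) returns DFmode %f0 (the high
   half) and gen_df_reg (reg, 1) returns %f2 (the low half); integer
   TFmode registers on arch64 step by 1 instead of 2.  */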
2708 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2709 Unlike normal calls, TFmode operands are passed by reference. It is
2710 assumed that no more than 3 operands are required. */
2712 static void
2713 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2715 rtx ret_slot = NULL, arg[3], func_sym;
2716 int i;
2718 /* We only expect to be called for conversions, unary, and binary ops. */
2719 gcc_assert (nargs == 2 || nargs == 3);
2721 for (i = 0; i < nargs; ++i)
2723 rtx this_arg = operands[i];
2724 rtx this_slot;
2726 /* TFmode arguments and return values are passed by reference. */
2727 if (GET_MODE (this_arg) == TFmode)
2729 int force_stack_temp;
2731 force_stack_temp = 0;
2732 if (TARGET_BUGGY_QP_LIB && i == 0)
2733 force_stack_temp = 1;
2735 if (GET_CODE (this_arg) == MEM
2736 && ! force_stack_temp)
2738 tree expr = MEM_EXPR (this_arg);
2739 if (expr)
2740 mark_addressable (expr);
2741 this_arg = XEXP (this_arg, 0);
2743 else if (CONSTANT_P (this_arg)
2744 && ! force_stack_temp)
2746 this_slot = force_const_mem (TFmode, this_arg);
2747 this_arg = XEXP (this_slot, 0);
2749 else
2751 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
2753 /* Operand 0 is the return value. We'll copy it out later. */
2754 if (i > 0)
2755 emit_move_insn (this_slot, this_arg);
2756 else
2757 ret_slot = this_slot;
2759 this_arg = XEXP (this_slot, 0);
2763 arg[i] = this_arg;
2766 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2768 if (GET_MODE (operands[0]) == TFmode)
2770 if (nargs == 2)
2771 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2772 arg[0], GET_MODE (arg[0]),
2773 arg[1], GET_MODE (arg[1]));
2774 else
2775 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2776 arg[0], GET_MODE (arg[0]),
2777 arg[1], GET_MODE (arg[1]),
2778 arg[2], GET_MODE (arg[2]));
2780 if (ret_slot)
2781 emit_move_insn (operands[0], ret_slot);
2783 else
2785 rtx ret;
2787 gcc_assert (nargs == 2);
2789 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2790 GET_MODE (operands[0]), 1,
2791 arg[1], GET_MODE (arg[1]));
2793 if (ret != operands[0])
2794 emit_move_insn (operands[0], ret);
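/* Sketch of the resulting ABI shape, assuming the standard _Qp_* entry
   points (the prototype is our annotation, not from this file):

	void _Qp_add (long double *res, const long double *x,
		      const long double *y);

   Every TFmode value, including the result, travels by address, which
   is what the slot/ret_slot machinery above arranges.  */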
2798 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2800 static void
2801 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2803 const char *func;
2805 switch (code)
2807 case PLUS:
2808 func = "_Qp_add";
2809 break;
2810 case MINUS:
2811 func = "_Qp_sub";
2812 break;
2813 case MULT:
2814 func = "_Qp_mul";
2815 break;
2816 case DIV:
2817 func = "_Qp_div";
2818 break;
2819 default:
2820 gcc_unreachable ();
2823 emit_soft_tfmode_libcall (func, 3, operands);
2826 static void
2827 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2829 const char *func;
2831 gcc_assert (code == SQRT);
2832 func = "_Qp_sqrt";
2834 emit_soft_tfmode_libcall (func, 2, operands);
2837 static void
2838 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2840 const char *func;
2842 switch (code)
2844 case FLOAT_EXTEND:
2845 switch (GET_MODE (operands[1]))
2847 case SFmode:
2848 func = "_Qp_stoq";
2849 break;
2850 case DFmode:
2851 func = "_Qp_dtoq";
2852 break;
2853 default:
2854 gcc_unreachable ();
2856 break;
2858 case FLOAT_TRUNCATE:
2859 switch (GET_MODE (operands[0]))
2861 case SFmode:
2862 func = "_Qp_qtos";
2863 break;
2864 case DFmode:
2865 func = "_Qp_qtod";
2866 break;
2867 default:
2868 gcc_unreachable ();
2870 break;
2872 case FLOAT:
2873 switch (GET_MODE (operands[1]))
2875 case SImode:
2876 func = "_Qp_itoq";
2877 if (TARGET_ARCH64)
2878 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2879 break;
2880 case DImode:
2881 func = "_Qp_xtoq";
2882 break;
2883 default:
2884 gcc_unreachable ();
2886 break;
2888 case UNSIGNED_FLOAT:
2889 switch (GET_MODE (operands[1]))
2891 case SImode:
2892 func = "_Qp_uitoq";
2893 if (TARGET_ARCH64)
2894 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2895 break;
2896 case DImode:
2897 func = "_Qp_uxtoq";
2898 break;
2899 default:
2900 gcc_unreachable ();
2902 break;
2904 case FIX:
2905 switch (GET_MODE (operands[0]))
2907 case SImode:
2908 func = "_Qp_qtoi";
2909 break;
2910 case DImode:
2911 func = "_Qp_qtox";
2912 break;
2913 default:
2914 gcc_unreachable ();
2916 break;
2918 case UNSIGNED_FIX:
2919 switch (GET_MODE (operands[0]))
2921 case SImode:
2922 func = "_Qp_qtoui";
2923 break;
2924 case DImode:
2925 func = "_Qp_qtoux";
2926 break;
2927 default:
2928 gcc_unreachable ();
2930 break;
2932 default:
2933 gcc_unreachable ();
2936 emit_soft_tfmode_libcall (func, 2, operands);
2939 /* Expand a hard-float TFmode operation. All arguments must be in
2940 registers. */
2942 static void
2943 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2945 rtx op, dest;
2947 if (GET_RTX_CLASS (code) == RTX_UNARY)
2949 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2950 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2952 else
2954 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2955 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2956 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2957 operands[1], operands[2]);
2960 if (register_operand (operands[0], VOIDmode))
2961 dest = operands[0];
2962 else
2963 dest = gen_reg_rtx (GET_MODE (operands[0]));
2965 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2967 if (dest != operands[0])
2968 emit_move_insn (operands[0], dest);
2971 void
2972 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2974 if (TARGET_HARD_QUAD)
2975 emit_hard_tfmode_operation (code, operands);
2976 else
2977 emit_soft_tfmode_binop (code, operands);
2980 void
2981 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2983 if (TARGET_HARD_QUAD)
2984 emit_hard_tfmode_operation (code, operands);
2985 else
2986 emit_soft_tfmode_unop (code, operands);
2989 void
2990 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2992 if (TARGET_HARD_QUAD)
2993 emit_hard_tfmode_operation (code, operands);
2994 else
2995 emit_soft_tfmode_cvt (code, operands);
2998 /* Return nonzero if a branch/jump/call instruction will be emitting
2999 a nop into its delay slot. */
3002 empty_delay_slot (rtx insn)
3004 rtx seq;
3006 /* If no previous instruction (should not happen), return true. */
3007 if (PREV_INSN (insn) == NULL)
3008 return 1;
3010 seq = NEXT_INSN (PREV_INSN (insn));
3011 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3012 return 0;
3014 return 1;
3017 /* Return nonzero if TRIAL can go into the call delay slot. */
3020 tls_call_delay (rtx trial)
3022 rtx pat;
3024 /* Binutils allows
3025 call __tls_get_addr, %tgd_call (foo)
3026 add %l7, %o0, %o0, %tgd_add (foo)
3027 while Sun as/ld does not. */
3028 if (TARGET_GNU_TLS || !TARGET_TLS)
3029 return 1;
3031 pat = PATTERN (trial);
3033 /* We must reject tgd_add{32|64}, i.e.
3034 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3035 and tldm_add{32|64}, i.e.
3036 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3037 for Sun as/ld. */
3038 if (GET_CODE (pat) == SET
3039 && GET_CODE (SET_SRC (pat)) == PLUS)
3041 rtx unspec = XEXP (SET_SRC (pat), 1);
3043 if (GET_CODE (unspec) == UNSPEC
3044 && (XINT (unspec, 1) == UNSPEC_TLSGD
3045 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3046 return 0;
3049 return 1;
3052 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3053 instruction. RETURN_P is true if the v9 variant 'return' is to be
3054 considered in the test too.
3056 TRIAL must be a SET whose destination is a REG appropriate for the
3057 'restore' instruction or, if RETURN_P is true, for the 'return'
3058 instruction. */
3060 static int
3061 eligible_for_restore_insn (rtx trial, bool return_p)
3063 rtx pat = PATTERN (trial);
3064 rtx src = SET_SRC (pat);
3065 bool src_is_freg = false;
3066 rtx src_reg;
3068 /* Since we can now do moves between float and integer registers when
3069 VIS3 is enabled, we have to catch this case. We can allow such
3070 moves when doing a 'return', however. */
3071 src_reg = src;
3072 if (GET_CODE (src_reg) == SUBREG)
3073 src_reg = SUBREG_REG (src_reg);
3074 if (GET_CODE (src_reg) == REG
3075 && SPARC_FP_REG_P (REGNO (src_reg)))
3076 src_is_freg = true;
3078 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3079 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3080 && arith_operand (src, GET_MODE (src))
3081 && ! src_is_freg)
3083 if (TARGET_ARCH64)
3084 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3085 else
3086 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3089 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3090 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3091 && arith_double_operand (src, GET_MODE (src))
3092 && ! src_is_freg)
3093 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3095 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3096 else if (! TARGET_FPU && register_operand (src, SFmode))
3097 return 1;
3099 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3100 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3101 return 1;
3103 /* If we have the 'return' instruction, anything that does not use
3104 local or output registers and can go into a delay slot wins. */
3105 else if (return_p
3106 && TARGET_V9
3107 && !epilogue_renumber (&pat, 1)
3108 && get_attr_in_uncond_branch_delay (trial)
3109 == IN_UNCOND_BRANCH_DELAY_TRUE)
3110 return 1;
3112 /* The 'restore src1,src2,dest' pattern for SImode. */
3113 else if (GET_CODE (src) == PLUS
3114 && register_operand (XEXP (src, 0), SImode)
3115 && arith_operand (XEXP (src, 1), SImode))
3116 return 1;
3118 /* The 'restore src1,src2,dest' pattern for DImode. */
3119 else if (GET_CODE (src) == PLUS
3120 && register_operand (XEXP (src, 0), DImode)
3121 && arith_double_operand (XEXP (src, 1), DImode))
3122 return 1;
3124 /* The 'restore src1,%lo(src2),dest' pattern. */
3125 else if (GET_CODE (src) == LO_SUM
3126 && ! TARGET_CM_MEDMID
3127 && ((register_operand (XEXP (src, 0), SImode)
3128 && immediate_operand (XEXP (src, 1), SImode))
3129 || (TARGET_ARCH64
3130 && register_operand (XEXP (src, 0), DImode)
3131 && immediate_operand (XEXP (src, 1), DImode))))
3132 return 1;
3134 /* The 'restore src,src,dest' pattern. */
3135 else if (GET_CODE (src) == ASHIFT
3136 && (register_operand (XEXP (src, 0), SImode)
3137 || register_operand (XEXP (src, 0), DImode))
3138 && XEXP (src, 1) == const1_rtx)
3139 return 1;
3141 return 0;
3144 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3147 eligible_for_return_delay (rtx trial)
3149 int regno;
3150 rtx pat;
3152 if (GET_CODE (trial) != INSN)
3153 return 0;
3155 if (get_attr_length (trial) != 1)
3156 return 0;
3158 /* If the function uses __builtin_eh_return, the eh_return machinery
3159 occupies the delay slot. */
3160 if (crtl->calls_eh_return)
3161 return 0;
3163 /* In the case of a leaf or flat function, anything can go into the slot. */
3164 if (sparc_leaf_function_p || TARGET_FLAT)
3165 return
3166 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
3168 pat = PATTERN (trial);
3169 if (GET_CODE (pat) == PARALLEL)
3171 int i;
3173 if (! TARGET_V9)
3174 return 0;
3175 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3177 rtx expr = XVECEXP (pat, 0, i);
3178 if (GET_CODE (expr) != SET)
3179 return 0;
3180 if (GET_CODE (SET_DEST (expr)) != REG)
3181 return 0;
3182 regno = REGNO (SET_DEST (expr));
3183 if (regno >= 8 && regno < 24)
3184 return 0;
3186 return !epilogue_renumber (&pat, 1)
3187 && (get_attr_in_uncond_branch_delay (trial)
3188 == IN_UNCOND_BRANCH_DELAY_TRUE);
3191 if (GET_CODE (pat) != SET)
3192 return 0;
3194 if (GET_CODE (SET_DEST (pat)) != REG)
3195 return 0;
3197 regno = REGNO (SET_DEST (pat));
3199 /* Otherwise, only operations which can be done in tandem with
3200 a `restore' or `return' insn can go into the delay slot. */
3201 if (regno >= 8 && regno < 24)
3202 return 0;
3204 /* If this instruction sets up a floating-point register and we have a return
3205 instruction, it can probably go in. But restore will not work
3206 with FP_REGS. */
3207 if (! SPARC_INT_REG_P (regno))
3208 return (TARGET_V9
3209 && !epilogue_renumber (&pat, 1)
3210 && get_attr_in_uncond_branch_delay (trial)
3211 == IN_UNCOND_BRANCH_DELAY_TRUE);
3213 return eligible_for_restore_insn (trial, true);
3216 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3219 eligible_for_sibcall_delay (rtx trial)
3221 rtx pat;
3223 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3224 return 0;
3226 if (get_attr_length (trial) != 1)
3227 return 0;
3229 pat = PATTERN (trial);
3231 if (sparc_leaf_function_p || TARGET_FLAT)
3233 /* If the tail call is done using the call instruction,
3234 we have to restore %o7 in the delay slot. */
3235 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3236 return 0;
3238 /* %g1 is used to build the function address. */
3239 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3240 return 0;
3242 return 1;
3245 /* Otherwise, only operations which can be done in tandem with
3246 a `restore' insn can go into the delay slot. */
3247 if (GET_CODE (SET_DEST (pat)) != REG
3248 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3249 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
3250 return 0;
3252 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3253 in most cases. */
3254 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3255 return 0;
3257 return eligible_for_restore_insn (trial, false);
3260 /* Determine if it's legal to put X into the constant pool. This
3261 is not possible if X contains the address of a symbol that is
3262 not constant (TLS) or not known at final link time (PIC). */
3264 static bool
3265 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3267 switch (GET_CODE (x))
3269 case CONST_INT:
3270 case CONST_DOUBLE:
3271 case CONST_VECTOR:
3272 /* Accept all non-symbolic constants. */
3273 return false;
3275 case LABEL_REF:
3276 /* Labels are OK iff we are non-PIC. */
3277 return flag_pic != 0;
3279 case SYMBOL_REF:
3280 /* 'Naked' TLS symbol references are never OK,
3281 non-TLS symbols are OK iff we are non-PIC. */
3282 if (SYMBOL_REF_TLS_MODEL (x))
3283 return true;
3284 else
3285 return flag_pic != 0;
3287 case CONST:
3288 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3289 case PLUS:
3290 case MINUS:
3291 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3292 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3293 case UNSPEC:
3294 return true;
3295 default:
3296 gcc_unreachable ();
3300 /* Global Offset Table support. */
3301 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3302 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3304 /* Return the SYMBOL_REF for the Global Offset Table. */
3306 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3308 static rtx
3309 sparc_got (void)
3311 if (!sparc_got_symbol)
3312 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3314 return sparc_got_symbol;
3317 /* Ensure that we are not using patterns that are not OK with PIC. */
3320 check_pic (int i)
3322 rtx op;
3324 switch (flag_pic)
3326 case 1:
3327 op = recog_data.operand[i];
3328 gcc_assert (GET_CODE (op) != SYMBOL_REF
3329 && (GET_CODE (op) != CONST
3330 || (GET_CODE (XEXP (op, 0)) == MINUS
3331 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3332 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3333 case 2:
3334 default:
3335 return 1;
3339 /* Return true if X is an address which needs a temporary register when
3340 reloaded while generating PIC code. */
3343 pic_address_needs_scratch (rtx x)
3345 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3346 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3347 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3348 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3349 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3350 return 1;
3352 return 0;
3355 /* Determine if a given RTX is a valid constant. We already know this
3356 satisfies CONSTANT_P. */
3358 static bool
3359 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3361 switch (GET_CODE (x))
3363 case CONST:
3364 case SYMBOL_REF:
3365 if (sparc_tls_referenced_p (x))
3366 return false;
3367 break;
3369 case CONST_DOUBLE:
3370 if (GET_MODE (x) == VOIDmode)
3371 return true;
3373 /* Floating-point constants are generally not OK.
3374 The only exceptions are 0.0 and all-ones in VIS. */
3375 if (TARGET_VIS
3376 && SCALAR_FLOAT_MODE_P (mode)
3377 && (const_zero_operand (x, mode)
3378 || const_all_ones_operand (x, mode)))
3379 return true;
3381 return false;
3383 case CONST_VECTOR:
3384 /* Vector constants are generally not OK.
3385 The only exceptions are 0 and -1 in VIS. */
3386 if (TARGET_VIS
3387 && (const_zero_operand (x, mode)
3388 || const_all_ones_operand (x, mode)))
3389 return true;
3391 return false;
3393 default:
3394 break;
3397 return true;
3400 /* Determine if a given RTX is a valid constant address. */
3402 bool
3403 constant_address_p (rtx x)
3405 switch (GET_CODE (x))
3407 case LABEL_REF:
3408 case CONST_INT:
3409 case HIGH:
3410 return true;
3412 case CONST:
3413 if (flag_pic && pic_address_needs_scratch (x))
3414 return false;
3415 return sparc_legitimate_constant_p (Pmode, x);
3417 case SYMBOL_REF:
3418 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3420 default:
3421 return false;
3425 /* Nonzero if the constant value X is a legitimate general operand
3426 when generating PIC code. It is given that flag_pic is on and
3427 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3429 bool
3430 legitimate_pic_operand_p (rtx x)
3432 if (pic_address_needs_scratch (x))
3433 return false;
3434 if (sparc_tls_referenced_p (x))
3435 return false;
3436 return true;
3439 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3440 (CONST_INT_P (X) \
3441 && INTVAL (X) >= -0x1000 \
3442 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3444 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3445 (CONST_INT_P (X) \
3446 && INTVAL (X) >= -0x1000 \
3447 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
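/* Editorial note with worked bounds, derived from the macros above:
   simm13 offsets span [-0x1000, 0xfff]; subtracting GET_MODE_SIZE keeps
   the last sub-word of a multi-word access addressable, e.g. DFmode
   (size 8) offsets must stay below 0x1000 - 8 = 0xff8.  The OLO10
   variant stops below 0xc00 - size to leave room for a %lo() addend
   of up to 0x3ff.  */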
3449 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3451 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3452 ordinarily. This changes a bit when generating PIC. */
3454 static bool
3455 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3457 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3459 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3460 rs1 = addr;
3461 else if (GET_CODE (addr) == PLUS)
3463 rs1 = XEXP (addr, 0);
3464 rs2 = XEXP (addr, 1);
3466 /* Canonicalize. REG comes first; if there are no regs,
3467 LO_SUM comes first. */
3468 if (!REG_P (rs1)
3469 && GET_CODE (rs1) != SUBREG
3470 && (REG_P (rs2)
3471 || GET_CODE (rs2) == SUBREG
3472 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3474 rs1 = XEXP (addr, 1);
3475 rs2 = XEXP (addr, 0);
3478 if ((flag_pic == 1
3479 && rs1 == pic_offset_table_rtx
3480 && !REG_P (rs2)
3481 && GET_CODE (rs2) != SUBREG
3482 && GET_CODE (rs2) != LO_SUM
3483 && GET_CODE (rs2) != MEM
3484 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3485 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3486 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3487 || ((REG_P (rs1)
3488 || GET_CODE (rs1) == SUBREG)
3489 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3491 imm1 = rs2;
3492 rs2 = NULL;
3494 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3495 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3497 /* We prohibit REG + REG for TFmode when there are no quad move insns
3498 and we consequently need to split. We do this because REG+REG
3499 is not an offsettable address. If we get the situation in reload
3500 where source and destination of a movtf pattern are both MEMs with
3501 REG+REG address, then only one of them gets converted to an
3502 offsettable address. */
3503 if (mode == TFmode
3504 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
3505 return 0;
3507 /* Likewise for TImode, but in all cases. */
3508 if (mode == TImode)
3509 return 0;
3511 /* We prohibit REG + REG on ARCH32 if not optimizing for
3512 DFmode/DImode because then mem_min_alignment is likely to be zero
3513 after reload and the forced split would lack a matching splitter
3514 pattern. */
3515 if (TARGET_ARCH32 && !optimize
3516 && (mode == DFmode || mode == DImode))
3517 return 0;
3519 else if (USE_AS_OFFSETABLE_LO10
3520 && GET_CODE (rs1) == LO_SUM
3521 && TARGET_ARCH64
3522 && ! TARGET_CM_MEDMID
3523 && RTX_OK_FOR_OLO10_P (rs2, mode))
3525 rs2 = NULL;
3526 imm1 = XEXP (rs1, 1);
3527 rs1 = XEXP (rs1, 0);
3528 if (!CONSTANT_P (imm1)
3529 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3530 return 0;
3533 else if (GET_CODE (addr) == LO_SUM)
3535 rs1 = XEXP (addr, 0);
3536 imm1 = XEXP (addr, 1);
3538 if (!CONSTANT_P (imm1)
3539 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3540 return 0;
3542 /* We can't allow TFmode in 32-bit mode, because an offset greater
3543 than the alignment (8) may cause the LO_SUM to overflow. */
3544 if (mode == TFmode && TARGET_ARCH32)
3545 return 0;
3547 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3548 return 1;
3549 else
3550 return 0;
3552 if (GET_CODE (rs1) == SUBREG)
3553 rs1 = SUBREG_REG (rs1);
3554 if (!REG_P (rs1))
3555 return 0;
3557 if (rs2)
3559 if (GET_CODE (rs2) == SUBREG)
3560 rs2 = SUBREG_REG (rs2);
3561 if (!REG_P (rs2))
3562 return 0;
3565 if (strict)
3567 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3568 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3569 return 0;
3571 else
3573 if ((! SPARC_INT_REG_P (REGNO (rs1))
3574 && REGNO (rs1) != FRAME_POINTER_REGNUM
3575 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3576 || (rs2
3577 && (! SPARC_INT_REG_P (REGNO (rs2))
3578 && REGNO (rs2) != FRAME_POINTER_REGNUM
3579 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3580 return 0;
3582 return 1;
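/* Illustrative legitimate addresses under the rules above (assembly
   notation is ours): [%o0 + %o1] (REG+REG, though not for TImode, nor
   TFmode without hard-quad support), [%o0 + 44] (REG+simm13), and
   [%o0 + %lo(sym)] (LO_SUM, for non-TLS symbols only).  */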
3585 /* Return the SYMBOL_REF for the tls_get_addr function. */
3587 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3589 static rtx
3590 sparc_tls_get_addr (void)
3592 if (!sparc_tls_symbol)
3593 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3595 return sparc_tls_symbol;
3598 /* Return the Global Offset Table to be used in TLS mode. */
3600 static rtx
3601 sparc_tls_got (void)
3603 /* In PIC mode, this is just the PIC offset table. */
3604 if (flag_pic)
3606 crtl->uses_pic_offset_table = 1;
3607 return pic_offset_table_rtx;
3610 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3611 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3612 if (TARGET_SUN_TLS && TARGET_ARCH32)
3614 load_got_register ();
3615 return global_offset_table_rtx;
3618 /* In all other cases, we load a new pseudo with the GOT symbol. */
3619 return copy_to_reg (sparc_got ());
3622 /* Return true if X contains a thread-local symbol. */
3624 static bool
3625 sparc_tls_referenced_p (rtx x)
3627 if (!TARGET_HAVE_TLS)
3628 return false;
3630 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3631 x = XEXP (XEXP (x, 0), 0);
3633 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3634 return true;
3636 /* That's all we handle in sparc_legitimize_tls_address for now. */
3637 return false;
3640 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3641 this (thread-local) address. */
3643 static rtx
3644 sparc_legitimize_tls_address (rtx addr)
3646 rtx temp1, temp2, temp3, ret, o0, got, insn;
3648 gcc_assert (can_create_pseudo_p ());
3650 if (GET_CODE (addr) == SYMBOL_REF)
3651 switch (SYMBOL_REF_TLS_MODEL (addr))
3653 case TLS_MODEL_GLOBAL_DYNAMIC:
3654 start_sequence ();
3655 temp1 = gen_reg_rtx (SImode);
3656 temp2 = gen_reg_rtx (SImode);
3657 ret = gen_reg_rtx (Pmode);
3658 o0 = gen_rtx_REG (Pmode, 8);
3659 got = sparc_tls_got ();
3660 emit_insn (gen_tgd_hi22 (temp1, addr));
3661 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3662 if (TARGET_ARCH32)
3664 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3665 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3666 addr, const1_rtx));
3668 else
3670 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3671 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3672 addr, const1_rtx));
3674 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3675 insn = get_insns ();
3676 end_sequence ();
3677 emit_libcall_block (insn, ret, o0, addr);
3678 break;
3680 case TLS_MODEL_LOCAL_DYNAMIC:
3681 start_sequence ();
3682 temp1 = gen_reg_rtx (SImode);
3683 temp2 = gen_reg_rtx (SImode);
3684 temp3 = gen_reg_rtx (Pmode);
3685 ret = gen_reg_rtx (Pmode);
3686 o0 = gen_rtx_REG (Pmode, 8);
3687 got = sparc_tls_got ();
3688 emit_insn (gen_tldm_hi22 (temp1));
3689 emit_insn (gen_tldm_lo10 (temp2, temp1));
3690 if (TARGET_ARCH32)
3692 emit_insn (gen_tldm_add32 (o0, got, temp2));
3693 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3694 const1_rtx));
3696 else
3698 emit_insn (gen_tldm_add64 (o0, got, temp2));
3699 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3700 const1_rtx));
3702 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3703 insn = get_insns ();
3704 end_sequence ();
3705 emit_libcall_block (insn, temp3, o0,
3706 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3707 UNSPEC_TLSLD_BASE));
3708 temp1 = gen_reg_rtx (SImode);
3709 temp2 = gen_reg_rtx (SImode);
3710 emit_insn (gen_tldo_hix22 (temp1, addr));
3711 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3712 if (TARGET_ARCH32)
3713 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3714 else
3715 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3716 break;
3718 case TLS_MODEL_INITIAL_EXEC:
3719 temp1 = gen_reg_rtx (SImode);
3720 temp2 = gen_reg_rtx (SImode);
3721 temp3 = gen_reg_rtx (Pmode);
3722 got = sparc_tls_got ();
3723 emit_insn (gen_tie_hi22 (temp1, addr));
3724 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3725 if (TARGET_ARCH32)
3726 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3727 else
3728 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3729 if (TARGET_SUN_TLS)
3731 ret = gen_reg_rtx (Pmode);
3732 if (TARGET_ARCH32)
3733 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3734 temp3, addr));
3735 else
3736 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3737 temp3, addr));
3739 else
3740 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3741 break;
3743 case TLS_MODEL_LOCAL_EXEC:
3744 temp1 = gen_reg_rtx (Pmode);
3745 temp2 = gen_reg_rtx (Pmode);
3746 if (TARGET_ARCH32)
3748 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3749 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3751 else
3753 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3754 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3756 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3757 break;
3759 default:
3760 gcc_unreachable ();
3763 else if (GET_CODE (addr) == CONST)
3765 rtx base, offset;
3767 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3769 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3770 offset = XEXP (XEXP (addr, 0), 1);
3772 base = force_operand (base, NULL_RTX);
3773 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3774 offset = force_reg (Pmode, offset);
3775 ret = gen_rtx_PLUS (Pmode, base, offset);
3778 else
3779 gcc_unreachable (); /* for now ... */
3781 return ret;
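/* Sketch of the simplest model above, local-exec (reg 7 in the code is
   %g7, the thread pointer); our reconstruction, not verified compiler
   output:

	sethi	%tle_hix22(sym), %t1
	xor	%t1, %tle_lox10(sym), %t2
	add	%g7, %t2, %ret		! %ret = &sym in this thread

   The other models add GOT loads and, for GD/LD, a __tls_get_addr call.  */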
3784 /* Legitimize PIC addresses. If the address is already position-independent,
3785 we return ORIG. Newly generated position-independent addresses go into a
3786 reg. This is REG if nonzero, otherwise we allocate register(s) as
3787 necessary. */
3789 static rtx
3790 sparc_legitimize_pic_address (rtx orig, rtx reg)
3792 bool gotdata_op = false;
3794 if (GET_CODE (orig) == SYMBOL_REF
3795 /* See the comment in sparc_expand_move. */
3796 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3798 rtx pic_ref, address;
3799 rtx insn;
3801 if (reg == 0)
3803 gcc_assert (can_create_pseudo_p ());
3804 reg = gen_reg_rtx (Pmode);
3807 if (flag_pic == 2)
3809 /* If not during reload, allocate another temp reg here for loading
3810 in the address, so that these instructions can be optimized
3811 properly. */
3812 rtx temp_reg = (! can_create_pseudo_p ()
3813 ? reg : gen_reg_rtx (Pmode));
3815 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3816 won't get confused into thinking that these two instructions
3817 are loading in the true address of the symbol. If in the
3818 future a PIC rtx exists, that should be used instead. */
3819 if (TARGET_ARCH64)
3821 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3822 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3824 else
3826 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3827 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3829 address = temp_reg;
3830 gotdata_op = true;
3832 else
3833 address = orig;
3835 crtl->uses_pic_offset_table = 1;
3836 if (gotdata_op)
3838 if (TARGET_ARCH64)
3839 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3840 pic_offset_table_rtx,
3841 address, orig));
3842 else
3843 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3844 pic_offset_table_rtx,
3845 address, orig));
3847 else
3849 pic_ref
3850 = gen_const_mem (Pmode,
3851 gen_rtx_PLUS (Pmode,
3852 pic_offset_table_rtx, address));
3853 insn = emit_move_insn (reg, pic_ref);
3856 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3857 by the loop optimizer. */
3858 set_unique_reg_note (insn, REG_EQUAL, orig);
3859 return reg;
3861 else if (GET_CODE (orig) == CONST)
3863 rtx base, offset;
3865 if (GET_CODE (XEXP (orig, 0)) == PLUS
3866 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3867 return orig;
3869 if (reg == 0)
3871 gcc_assert (can_create_pseudo_p ());
3872 reg = gen_reg_rtx (Pmode);
3875 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3876 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3877 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3878 base == reg ? NULL_RTX : reg);
3880 if (GET_CODE (offset) == CONST_INT)
3882 if (SMALL_INT (offset))
3883 return plus_constant (Pmode, base, INTVAL (offset));
3884 else if (can_create_pseudo_p ())
3885 offset = force_reg (Pmode, offset);
3886 else
3887 /* If we reach here, then something is seriously wrong. */
3888 gcc_unreachable ();
3890 return gen_rtx_PLUS (Pmode, base, offset);
3892 else if (GET_CODE (orig) == LABEL_REF)
3893 /* ??? We ought to be checking that the register is live instead, in case
3894 it is eliminated. */
3895 crtl->uses_pic_offset_table = 1;
3897 return orig;
3900 /* Try machine-dependent ways of modifying an illegitimate address X
3901 to be legitimate. If we find one, return the new, valid address.
3903 OLDX is the address as it was before break_out_memory_refs was called.
3904 In some cases it is useful to look at this to decide what needs to be done.
3906 MODE is the mode of the operand pointed to by X.
3908 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3910 static rtx
3911 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3912 enum machine_mode mode)
3914 rtx orig_x = x;
3916 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3917 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3918 force_operand (XEXP (x, 0), NULL_RTX));
3919 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3920 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3921 force_operand (XEXP (x, 1), NULL_RTX));
3922 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3923 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3924 XEXP (x, 1));
3925 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3926 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3927 force_operand (XEXP (x, 1), NULL_RTX));
3929 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3930 return x;
3932 if (sparc_tls_referenced_p (x))
3933 x = sparc_legitimize_tls_address (x);
3934 else if (flag_pic)
3935 x = sparc_legitimize_pic_address (x, NULL_RTX);
3936 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3937 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3938 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3939 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3940 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3941 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3942 else if (GET_CODE (x) == SYMBOL_REF
3943 || GET_CODE (x) == CONST
3944 || GET_CODE (x) == LABEL_REF)
3945 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3947 return x;
3950 /* Delegitimize an address that was legitimized by the above function. */
3952 static rtx
3953 sparc_delegitimize_address (rtx x)
3955 x = delegitimize_mem_from_attrs (x);
3957 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3958 switch (XINT (XEXP (x, 1), 1))
3960 case UNSPEC_MOVE_PIC:
3961 case UNSPEC_TLSLE:
3962 x = XVECEXP (XEXP (x, 1), 0, 0);
3963 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3964 break;
3965 default:
3966 break;
3969 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3970 if (GET_CODE (x) == MINUS
3971 && REG_P (XEXP (x, 0))
3972 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3973 && GET_CODE (XEXP (x, 1)) == LO_SUM
3974 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3975 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3977 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3978 gcc_assert (GET_CODE (x) == LABEL_REF);
3981 return x;
3984 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3985 replace the input X, or the original X if no replacement is called for.
3986 The output parameter *WIN is 1 if the calling macro should goto WIN,
3987 0 if it should not.
3989 For SPARC, we wish to handle addresses by splitting them into
3990 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3991 This cuts the number of extra insns by one.
3993 Do nothing when generating PIC code and the address is a symbolic
3994 operand or requires a scratch register. */
3996 rtx
3997 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3998 int opnum, int type,
3999 int ind_levels ATTRIBUTE_UNUSED, int *win)
4001 /* Decompose SImode constants into HIGH+LO_SUM. */
4002 if (CONSTANT_P (x)
4003 && (mode != TFmode || TARGET_ARCH64)
4004 && GET_MODE (x) == SImode
4005 && GET_CODE (x) != LO_SUM
4006 && GET_CODE (x) != HIGH
4007 && sparc_cmodel <= CM_MEDLOW
4008 && !(flag_pic
4009 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
4011 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
4012 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4013 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4014 opnum, (enum reload_type)type);
4015 *win = 1;
4016 return x;
4019 /* We have to recognize what we have already generated above. */
4020 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
4022 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4023 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4024 opnum, (enum reload_type)type);
4025 *win = 1;
4026 return x;
4029 *win = 0;
4030 return x;
4033 /* Return true if ADDR (a legitimate address expression)
4034 has an effect that depends on the machine mode it is used for.
4036 In PIC mode,
4038 (mem:HI [%l7+a])
4040 is not equivalent to
4042 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
4044 because [%l7+a+1] is interpreted as the address of (a+1). */
4047 static bool
4048 sparc_mode_dependent_address_p (const_rtx addr,
4049 addr_space_t as ATTRIBUTE_UNUSED)
4051 if (flag_pic && GET_CODE (addr) == PLUS)
4053 rtx op0 = XEXP (addr, 0);
4054 rtx op1 = XEXP (addr, 1);
4055 if (op0 == pic_offset_table_rtx
4056 && symbolic_operand (op1, VOIDmode))
4057 return true;
4060 return false;
4063 #ifdef HAVE_GAS_HIDDEN
4064 # define USE_HIDDEN_LINKONCE 1
4065 #else
4066 # define USE_HIDDEN_LINKONCE 0
4067 #endif
4069 static void
4070 get_pc_thunk_name (char name[32], unsigned int regno)
4072 const char *reg_name = reg_names[regno];
4074 /* Skip the leading '%' as that cannot be used in a
4075 symbol name. */
4076 reg_name += 1;
4078 if (USE_HIDDEN_LINKONCE)
4079 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4080 else
4081 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
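/* Usage sketch (illustrative, hypothetical helper; not referenced
   anywhere): the PIC register %l7 is hard register 23, so the PIC
   prologue effectively asks for this name.  */
static void ATTRIBUTE_UNUSED
example_pc_thunk_name (void)
{
  char name[32];

  /* Yields "__sparc_get_pc_thunk.l7" when USE_HIDDEN_LINKONCE.  */
  get_pc_thunk_name (name, 23);
}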
4084 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4086 static rtx
4087 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4089 int orig_flag_pic = flag_pic;
4090 rtx insn;
4092 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4093 flag_pic = 0;
4094 if (TARGET_ARCH64)
4095 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4096 else
4097 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4098 flag_pic = orig_flag_pic;
4100 return insn;
4103 /* Emit code to load the GOT register. */
4105 void
4106 load_got_register (void)
4108 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4109 if (!global_offset_table_rtx)
4110 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4112 if (TARGET_VXWORKS_RTP)
4113 emit_insn (gen_vxworks_load_got ());
4114 else
4116 /* The GOT symbol is subject to a PC-relative relocation so we need a
4117 helper function to add the PC value and thus get the final value. */
4118 if (!got_helper_rtx)
4120 char name[32];
4121 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4122 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4125 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4126 got_helper_rtx,
4127 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4130 /* Need to emit this whether or not we obey regdecls,
4131 since setjmp/longjmp can cause life info to screw up.
4132 ??? In the case where we don't obey regdecls, this is not sufficient
4133 since we may not fall out the bottom. */
4134 emit_use (global_offset_table_rtx);
4137 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4138 address of the call target. */
4140 void
4141 sparc_emit_call_insn (rtx pat, rtx addr)
4143 rtx insn;
4145 insn = emit_call_insn (pat);
4147 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4148 if (TARGET_VXWORKS_RTP
4149 && flag_pic
4150 && GET_CODE (addr) == SYMBOL_REF
4151 && (SYMBOL_REF_DECL (addr)
4152 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4153 : !SYMBOL_REF_LOCAL_P (addr)))
4155 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4156 crtl->uses_pic_offset_table = 1;
4160 /* Return 1 if RTX is a MEM which is known to be aligned to at
4161 least a DESIRED byte boundary. */
4163 int
4164 mem_min_alignment (rtx mem, int desired)
4166 rtx addr, base, offset;
4168 /* If it's not a MEM we can't accept it. */
4169 if (GET_CODE (mem) != MEM)
4170 return 0;
4172 /* Obviously... */
4173 if (!TARGET_UNALIGNED_DOUBLES
4174 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4175 return 1;
4177 /* ??? The rest of the function predates MEM_ALIGN so
4178 there is probably a bit of redundancy. */
4179 addr = XEXP (mem, 0);
4180 base = offset = NULL_RTX;
4181 if (GET_CODE (addr) == PLUS)
4183 if (GET_CODE (XEXP (addr, 0)) == REG)
4185 base = XEXP (addr, 0);
4187 /* What we are saying here is that if the base
4188 REG is aligned properly, the compiler will make
4189 sure any REG-based index upon it will be
4190 aligned properly as well. */
4191 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4192 offset = XEXP (addr, 1);
4193 else
4194 offset = const0_rtx;
4197 else if (GET_CODE (addr) == REG)
4199 base = addr;
4200 offset = const0_rtx;
4203 if (base != NULL_RTX)
4205 int regno = REGNO (base);
4207 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4209 /* Check if the compiler has recorded some information
4210 about the alignment of the base REG. If reload has
4211 completed, we already matched with proper alignments.
4212 If not running global_alloc, reload might give us an
4213 unaligned pointer to the local stack, though. */
4214 if (((cfun != 0
4215 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4216 || (optimize && reload_completed))
4217 && (INTVAL (offset) & (desired - 1)) == 0)
4218 return 1;
4220 else
4222 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4223 return 1;
4226 else if (! TARGET_UNALIGNED_DOUBLES
4227 || CONSTANT_P (addr)
4228 || GET_CODE (addr) == LO_SUM)
4230 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4231 is true, in which case we can only assume that an access is aligned if
4232 it is to a constant address, or the address involves a LO_SUM. */
4233 return 1;
4236 /* An obviously unaligned address. */
4237 return 0;
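/* Usage sketch (illustrative, hypothetical helper): expanders and
   peepholes use the predicate above to decide whether a 64-bit access
   may be performed with a single ldd/std instruction.  */
static bool ATTRIBUTE_UNUSED
example_can_use_ldd (rtx mem)
{
  /* An 8-byte aligned MEM is safe for a doubleword access.  */
  return mem_min_alignment (mem, 8) != 0;
}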
4241 /* Vectors to keep interesting information about registers where it can easily
4242 be got. We used to use the actual mode value as the bit number, but there
4243 are more than 32 modes now. Instead we use two tables: one indexed by
4244 hard register number, and one indexed by mode. */
4246 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4247 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4248 mapped into one sparc_mode_class mode. */
4250 enum sparc_mode_class {
4251 S_MODE, D_MODE, T_MODE, O_MODE,
4252 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4253 CC_MODE, CCFP_MODE
4256 /* Modes for single-word and smaller quantities. */
4257 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4259 /* Modes for double-word and smaller quantities. */
4260 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4262 /* Modes for quad-word and smaller quantities. */
4263 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4265 /* Modes for 8-word and smaller quantities. */
4266 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4268 /* Modes for single-float quantities. We must allow any single word or
4269 smaller quantity. This is because the fix/float conversion instructions
4270 take integer inputs/outputs from the float registers. */
4271 #define SF_MODES (S_MODES)
4273 /* Modes for double-float and smaller quantities. */
4274 #define DF_MODES (D_MODES)
4276 /* Modes for quad-float and smaller quantities. */
4277 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4279 /* Modes for quad-float pairs and smaller quantities. */
4280 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4282 /* Modes for double-float only quantities. */
4283 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4285 /* Modes for quad-float and double-float only quantities. */
4286 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4288 /* Modes for quad-float pairs and double-float only quantities. */
4289 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4291 /* Modes for condition codes. */
4292 #define CC_MODES (1 << (int) CC_MODE)
4293 #define CCFP_MODES (1 << (int) CCFP_MODE)
4295 /* Value is 1 if register/mode pair is acceptable on sparc.
4296 The funny mixture of D and T modes is because integer operations
4297 do not specially operate on tetra quantities, so non-quad-aligned
4298 registers can hold quadword quantities (except %o4 and %i4 because
4299 they cross fixed registers). */
4301 /* This points to either the 32 bit or the 64 bit version. */
4302 const int *hard_regno_mode_classes;
4304 static const int hard_32bit_mode_classes[] = {
4305 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4306 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4307 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4308 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4310 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4311 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4312 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4313 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4315 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4316 and none can hold SFmode/SImode values. */
4317 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4318 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4319 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4320 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4322 /* %fcc[0123] */
4323 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4325 /* %icc, %sfp, %gsr */
4326 CC_MODES, 0, D_MODES
4329 static const int hard_64bit_mode_classes[] = {
4330 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4331 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4332 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4333 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4335 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4336 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4337 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4338 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4340 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4341 and none can hold SFmode/SImode values. */
4342 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4343 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4344 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4345 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4347 /* %fcc[0123] */
4348 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4350 /* %icc, %sfp, %gsr */
4351 CC_MODES, 0, D_MODES
4354 int sparc_mode_class [NUM_MACHINE_MODES];
4356 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
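/* Illustrative sketch (not referenced anywhere): the two tables above
   are meant to be consumed as a simple bit test, along the lines of
   the HARD_REGNO_MODE_OK macro in sparc.h.  */
static inline int ATTRIBUTE_UNUSED
example_hard_regno_mode_ok (int regno, int mode)
{
  return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
}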
4358 static void
4359 sparc_init_modes (void)
4361 int i;
4363 for (i = 0; i < NUM_MACHINE_MODES; i++)
4365 switch (GET_MODE_CLASS (i))
4367 case MODE_INT:
4368 case MODE_PARTIAL_INT:
4369 case MODE_COMPLEX_INT:
4370 if (GET_MODE_SIZE (i) <= 4)
4371 sparc_mode_class[i] = 1 << (int) S_MODE;
4372 else if (GET_MODE_SIZE (i) == 8)
4373 sparc_mode_class[i] = 1 << (int) D_MODE;
4374 else if (GET_MODE_SIZE (i) == 16)
4375 sparc_mode_class[i] = 1 << (int) T_MODE;
4376 else if (GET_MODE_SIZE (i) == 32)
4377 sparc_mode_class[i] = 1 << (int) O_MODE;
4378 else
4379 sparc_mode_class[i] = 0;
4380 break;
4381 case MODE_VECTOR_INT:
4382 if (GET_MODE_SIZE (i) <= 4)
4383 sparc_mode_class[i] = 1 << (int)SF_MODE;
4384 else if (GET_MODE_SIZE (i) == 8)
4385 sparc_mode_class[i] = 1 << (int)DF_MODE;
4386 break;
4387 case MODE_FLOAT:
4388 case MODE_COMPLEX_FLOAT:
4389 if (GET_MODE_SIZE (i) <= 4)
4390 sparc_mode_class[i] = 1 << (int) SF_MODE;
4391 else if (GET_MODE_SIZE (i) == 8)
4392 sparc_mode_class[i] = 1 << (int) DF_MODE;
4393 else if (GET_MODE_SIZE (i) == 16)
4394 sparc_mode_class[i] = 1 << (int) TF_MODE;
4395 else if (GET_MODE_SIZE (i) == 32)
4396 sparc_mode_class[i] = 1 << (int) OF_MODE;
4397 else
4398 sparc_mode_class[i] = 0;
4399 break;
4400 case MODE_CC:
4401 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4402 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4403 else
4404 sparc_mode_class[i] = 1 << (int) CC_MODE;
4405 break;
4406 default:
4407 sparc_mode_class[i] = 0;
4408 break;
4412 if (TARGET_ARCH64)
4413 hard_regno_mode_classes = hard_64bit_mode_classes;
4414 else
4415 hard_regno_mode_classes = hard_32bit_mode_classes;
4417 /* Initialize the array used by REGNO_REG_CLASS. */
4418 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4420 if (i < 16 && TARGET_V8PLUS)
4421 sparc_regno_reg_class[i] = I64_REGS;
4422 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4423 sparc_regno_reg_class[i] = GENERAL_REGS;
4424 else if (i < 64)
4425 sparc_regno_reg_class[i] = FP_REGS;
4426 else if (i < 96)
4427 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4428 else if (i < 100)
4429 sparc_regno_reg_class[i] = FPCC_REGS;
4430 else
4431 sparc_regno_reg_class[i] = NO_REGS;
4435 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4437 static inline bool
4438 save_global_or_fp_reg_p (unsigned int regno,
4439 int leaf_function ATTRIBUTE_UNUSED)
4441 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4444 /* Return whether the return address register (%i7) is needed. */
4446 static inline bool
4447 return_addr_reg_needed_p (int leaf_function)
4449 /* If it is live, for example because of __builtin_return_address (0). */
4450 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4451 return true;
4453 /* Otherwise, it is needed as a save register if %o7 is clobbered. */
4454 if (!leaf_function
4455 /* Loading the GOT register clobbers %o7. */
4456 || crtl->uses_pic_offset_table
4457 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4458 return true;
4460 return false;
4463 /* Return whether REGNO, a local or in register, must be saved/restored. */
4465 static bool
4466 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4468 /* General case: call-saved registers live at some point. */
4469 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4470 return true;
4472 /* Frame pointer register (%fp) if needed. */
4473 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4474 return true;
4476 /* Return address register (%i7) if needed. */
4477 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4478 return true;
4480 /* GOT register (%l7) if needed. */
4481 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4482 return true;
4484 /* If the function accesses prior frames, the frame pointer and the return
4485 address of the previous frame must be saved on the stack. */
4486 if (crtl->accesses_prior_frames
4487 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4488 return true;
4490 return false;
4493 /* Compute the frame size required by the function. This function is called
4494 during the reload pass and also by sparc_expand_prologue. */
4496 HOST_WIDE_INT
4497 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4499 HOST_WIDE_INT frame_size, apparent_frame_size;
4500 int args_size, n_global_fp_regs = 0;
4501 bool save_local_in_regs_p = false;
4502 unsigned int i;
4504 /* If the function allocates dynamic stack space, the dynamic offset is
4505 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4506 if (leaf_function && !cfun->calls_alloca)
4507 args_size = 0;
4508 else
4509 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4511 /* Calculate space needed for global registers. */
4512 if (TARGET_ARCH64)
4513 for (i = 0; i < 8; i++)
4514 if (save_global_or_fp_reg_p (i, 0))
4515 n_global_fp_regs += 2;
4516 else
4517 for (i = 0; i < 8; i += 2)
4518 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4519 n_global_fp_regs += 2;
4521 /* In the flat window model, find out which local and in registers need to
4522 be saved. We don't reserve space in the current frame for them as they
4523 will be spilled into the register window save area of the caller's frame.
4524 However, as soon as we use this register window save area, we must create
4525 that of the current frame to make it the live one. */
4526 if (TARGET_FLAT)
4527 for (i = 16; i < 32; i++)
4528 if (save_local_or_in_reg_p (i, leaf_function))
4530 save_local_in_regs_p = true;
4531 break;
4534 /* Calculate space needed for FP registers. */
4535 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4536 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4537 n_global_fp_regs += 2;
4539 if (size == 0
4540 && n_global_fp_regs == 0
4541 && args_size == 0
4542 && !save_local_in_regs_p)
4543 frame_size = apparent_frame_size = 0;
4544 else
4546 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4547 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4548 apparent_frame_size += n_global_fp_regs * 4;
4550 /* We need to add the size of the outgoing argument area. */
4551 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4553 /* And that of the register window save area. */
4554 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4556 /* Finally, bump to the appropriate alignment. */
4557 frame_size = SPARC_STACK_ALIGN (frame_size);
4560 /* Set up values for use in prologue and epilogue. */
4561 sparc_frame_size = frame_size;
4562 sparc_apparent_frame_size = apparent_frame_size;
4563 sparc_n_global_fp_regs = n_global_fp_regs;
4564 sparc_save_local_in_regs_p = save_local_in_regs_p;
4566 return frame_size;
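/* Worked example (illustrative): for SIZE = 20 bytes of locals and no
   saved global/FP registers, the double-word rounding above yields

     apparent_frame_size = (20 - STARTING_FRAME_OFFSET + 7) & -8

   i.e. 24 if STARTING_FRAME_OFFSET is 0; the rounded outgoing argument
   area and FIRST_PARM_OFFSET are then added on top and the total is
   bumped to SPARC_STACK_ALIGN, as coded above.  */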
4569 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
4571 int
4572 sparc_initial_elimination_offset (int to)
4574 int offset;
4576 if (to == STACK_POINTER_REGNUM)
4577 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
4578 else
4579 offset = 0;
4581 offset += SPARC_STACK_BIAS;
4582 return offset;
4585 /* Output any necessary .register pseudo-ops. */
4587 void
4588 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4590 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4591 int i;
4593 if (TARGET_ARCH32)
4594 return;
4596 /* Check if %g[2367] were used without
4597 .register being printed for them already. */
4598 for (i = 2; i < 8; i++)
4600 if (df_regs_ever_live_p (i)
4601 && ! sparc_hard_reg_printed [i])
4603 sparc_hard_reg_printed [i] = 1;
4604 /* %g7 is used as TLS base register, use #ignore
4605 for it instead of #scratch. */
4606 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4607 i == 7 ? "ignore" : "scratch");
4609 if (i == 3) i = 5;
4611 #endif
4614 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4616 #if PROBE_INTERVAL > 4096
4617 #error Cannot use indexed addressing mode for stack probing
4618 #endif
4620 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4621 inclusive. These are offsets from the current stack pointer.
4623 Note that we don't use the REG+REG addressing mode for the probes because
4624 of the stack bias in 64-bit mode.  And it doesn't really buy us anything,
4625 so the advantage of having a single code path wins here. */
4627 static void
4628 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4630 rtx g1 = gen_rtx_REG (Pmode, 1);
4632 /* See if we have a constant small number of probes to generate. If so,
4633 that's the easy case. */
4634 if (size <= PROBE_INTERVAL)
4636 emit_move_insn (g1, GEN_INT (first));
4637 emit_insn (gen_rtx_SET (VOIDmode, g1,
4638 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4639 emit_stack_probe (plus_constant (Pmode, g1, -size));
4642 /* The run-time loop is made up of 10 insns in the generic case while the
4643 compile-time loop is made up of 4+2*(n-2) insns for n intervals. */
4644 else if (size <= 5 * PROBE_INTERVAL)
4646 HOST_WIDE_INT i;
4648 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4649 emit_insn (gen_rtx_SET (VOIDmode, g1,
4650 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4651 emit_stack_probe (g1);
4653 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4654 it exceeds SIZE. If only two probes are needed, this will not
4655 generate any code. Then probe at FIRST + SIZE. */
4656 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4658 emit_insn (gen_rtx_SET (VOIDmode, g1,
4659 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
4660 emit_stack_probe (g1);
4663 emit_stack_probe (plus_constant (Pmode, g1,
4664 (i - PROBE_INTERVAL) - size));
4667 /* Otherwise, do the same as above, but in a loop. Note that we must be
4668 extra careful with variables wrapping around because we might be at
4669 the very top (or the very bottom) of the address space and we have
4670 to be able to handle this case properly; in particular, we use an
4671 equality test for the loop condition. */
4672 else
4674 HOST_WIDE_INT rounded_size;
4675 rtx g4 = gen_rtx_REG (Pmode, 4);
4677 emit_move_insn (g1, GEN_INT (first));
4680 /* Step 1: round SIZE to the previous multiple of the interval. */
4682 rounded_size = size & -PROBE_INTERVAL;
4683 emit_move_insn (g4, GEN_INT (rounded_size));
4686 /* Step 2: compute initial and final value of the loop counter. */
4688 /* TEST_ADDR = SP + FIRST. */
4689 emit_insn (gen_rtx_SET (VOIDmode, g1,
4690 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4692 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4693 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4696 /* Step 3: the loop
4698 while (TEST_ADDR != LAST_ADDR)
4700 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4701 probe at TEST_ADDR
4704 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4705 until it is equal to ROUNDED_SIZE. */
4707 if (TARGET_64BIT)
4708 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4709 else
4710 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4713 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4714 that SIZE is equal to ROUNDED_SIZE. */
4716 if (size != rounded_size)
4717 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
4720 /* Make sure nothing is scheduled before we are done. */
4721 emit_insn (gen_blockage ());
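/* Illustrative sketch (not part of GCC, hypothetical helper): the
   probing strategy implemented above, restated in plain C.  A zero is
   stored every PROBE_INTERVAL bytes below SP + FIRST, with a final
   probe at FIRST + SIZE if SIZE is not a multiple of the interval.  */
static void ATTRIBUTE_UNUSED
example_probe_stack_range (char *sp, HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  HOST_WIDE_INT rounded_size = size & -PROBE_INTERVAL;
  HOST_WIDE_INT i;

  /* Probe at FIRST + N * PROBE_INTERVAL for N = 1, 2, ...  */
  for (i = PROBE_INTERVAL; i <= rounded_size; i += PROBE_INTERVAL)
    sp[-(first + i)] = 0;

  /* Final probe at FIRST + SIZE, as in step 4 above.  */
  if (size != rounded_size)
    sp[-(first + size)] = 0;
}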
4724 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4725 absolute addresses. */
4727 const char *
4728 output_probe_stack_range (rtx reg1, rtx reg2)
4730 static int labelno = 0;
4731 char loop_lab[32], end_lab[32];
4732 rtx xops[2];
4734 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4735 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4737 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4739 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4740 xops[0] = reg1;
4741 xops[1] = reg2;
4742 output_asm_insn ("cmp\t%0, %1", xops);
4743 if (TARGET_ARCH64)
4744 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4745 else
4746 fputs ("\tbe\t", asm_out_file);
4747 assemble_name_raw (asm_out_file, end_lab);
4748 fputc ('\n', asm_out_file);
4750 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4751 xops[1] = GEN_INT (-PROBE_INTERVAL);
4752 output_asm_insn (" add\t%0, %1, %0", xops);
4754 /* Probe at TEST_ADDR and branch. */
4755 if (TARGET_ARCH64)
4756 fputs ("\tba,pt\t%xcc,", asm_out_file);
4757 else
4758 fputs ("\tba\t", asm_out_file);
4759 assemble_name_raw (asm_out_file, loop_lab);
4760 fputc ('\n', asm_out_file);
4761 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4762 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4764 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4766 return "";
4769 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4770 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4771 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4772 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4773 the action to be performed if it returns false. Return the new offset. */
4775 typedef bool (*sorr_pred_t) (unsigned int, int);
4776 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4778 static int
4779 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4780 int offset, int leaf_function, sorr_pred_t save_p,
4781 sorr_act_t action_true, sorr_act_t action_false)
4783 unsigned int i;
4784 rtx mem, insn;
4786 if (TARGET_ARCH64 && high <= 32)
4788 int fp_offset = -1;
4790 for (i = low; i < high; i++)
4792 if (save_p (i, leaf_function))
4794 mem = gen_frame_mem (DImode, plus_constant (Pmode,
4795 base, offset));
4796 if (action_true == SORR_SAVE)
4798 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4799 RTX_FRAME_RELATED_P (insn) = 1;
4801 else /* action_true == SORR_RESTORE */
4803 /* The frame pointer must be restored last since its old
4804 value may be used as the base address for the frame.  This
4805 is problematic only in 64-bit mode because of the lack
4806 of a double-word load instruction. */
4807 if (i == HARD_FRAME_POINTER_REGNUM)
4808 fp_offset = offset;
4809 else
4810 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4812 offset += 8;
4814 else if (action_false == SORR_ADVANCE)
4815 offset += 8;
4818 if (fp_offset >= 0)
4820 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
4821 emit_move_insn (hard_frame_pointer_rtx, mem);
4824 else
4826 for (i = low; i < high; i += 2)
4828 bool reg0 = save_p (i, leaf_function);
4829 bool reg1 = save_p (i + 1, leaf_function);
4830 enum machine_mode mode;
4831 int regno;
4833 if (reg0 && reg1)
4835 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
4836 regno = i;
4838 else if (reg0)
4840 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4841 regno = i;
4843 else if (reg1)
4845 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4846 regno = i + 1;
4847 offset += 4;
4849 else
4851 if (action_false == SORR_ADVANCE)
4852 offset += 8;
4853 continue;
4856 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
4857 if (action_true == SORR_SAVE)
4859 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4860 RTX_FRAME_RELATED_P (insn) = 1;
4861 if (mode == DImode)
4863 rtx set1, set2;
4864 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
4865 offset));
4866 set1 = gen_rtx_SET (VOIDmode, mem,
4867 gen_rtx_REG (SImode, regno));
4868 RTX_FRAME_RELATED_P (set1) = 1;
4869 mem
4870 = gen_frame_mem (SImode, plus_constant (Pmode, base,
4871 offset + 4));
4872 set2 = gen_rtx_SET (VOIDmode, mem,
4873 gen_rtx_REG (SImode, regno + 1));
4874 RTX_FRAME_RELATED_P (set2) = 1;
4875 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4876 gen_rtx_PARALLEL (VOIDmode,
4877 gen_rtvec (2, set1, set2)));
4880 else /* action_true == SORR_RESTORE */
4881 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4883 /* Always preserve double-word alignment. */
4884 offset = (offset + 8) & -8;
4888 return offset;
4891 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4893 static rtx
4894 emit_adjust_base_to_offset (rtx base, int offset)
4896 /* ??? This might be optimized a little as %g1 might already have a
4897 value close enough that a single add insn will do. */
4898 /* ??? Although, all of this is probably only a temporary fix because
4899 if %g1 can hold a function result, then sparc_expand_epilogue will
4900 lose (the result will be clobbered). */
4901 rtx new_base = gen_rtx_REG (Pmode, 1);
4902 emit_move_insn (new_base, GEN_INT (offset));
4903 emit_insn (gen_rtx_SET (VOIDmode,
4904 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4905 return new_base;
4908 /* Emit code to save/restore call-saved global and FP registers. */
4910 static void
4911 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4913 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4915 base = emit_adjust_base_to_offset (base, offset);
4916 offset = 0;
4919 offset
4920 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4921 save_global_or_fp_reg_p, action, SORR_NONE);
4922 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4923 save_global_or_fp_reg_p, action, SORR_NONE);
4926 /* Emit code to save/restore call-saved local and in registers. */
4928 static void
4929 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4931 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4933 base = emit_adjust_base_to_offset (base, offset);
4934 offset = 0;
4937 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4938 save_local_or_in_reg_p, action, SORR_ADVANCE);
4941 /* Emit a window_save insn. */
4943 static rtx
4944 emit_window_save (rtx increment)
4946 rtx insn = emit_insn (gen_window_save (increment));
4947 RTX_FRAME_RELATED_P (insn) = 1;
4949 /* The incoming return address (%o7) is saved in %i7. */
4950 add_reg_note (insn, REG_CFA_REGISTER,
4951 gen_rtx_SET (VOIDmode,
4952 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4953 gen_rtx_REG (Pmode,
4954 INCOMING_RETURN_ADDR_REGNUM)));
4956 /* The window save event. */
4957 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4959 /* The CFA is %fp, the hard frame pointer. */
4960 add_reg_note (insn, REG_CFA_DEF_CFA,
4961 plus_constant (Pmode, hard_frame_pointer_rtx,
4962 INCOMING_FRAME_SP_OFFSET));
4964 return insn;
4967 /* Generate an increment for the stack pointer. */
4969 static rtx
4970 gen_stack_pointer_inc (rtx increment)
4972 return gen_rtx_SET (VOIDmode,
4973 stack_pointer_rtx,
4974 gen_rtx_PLUS (Pmode,
4975 stack_pointer_rtx,
4976 increment));
4979 /* Generate a decrement for the stack pointer. */
4981 static rtx
4982 gen_stack_pointer_dec (rtx decrement)
4984 return gen_rtx_SET (VOIDmode,
4985 stack_pointer_rtx,
4986 gen_rtx_MINUS (Pmode,
4987 stack_pointer_rtx,
4988 decrement));
4991 /* Expand the function prologue. The prologue is responsible for reserving
4992 storage for the frame, saving the call-saved registers and loading the
4993 GOT register if needed. */
4995 void
4996 sparc_expand_prologue (void)
4998 HOST_WIDE_INT size;
4999 rtx insn;
5001 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5002 on the final value of the flag means deferring the prologue/epilogue
5003 expansion until just before the second scheduling pass, which is too
5004 late to emit multiple epilogues or return insns.
5006 Of course we are making the assumption that the value of the flag
5007 will not change between now and its final value. Of the three parts
5008 of the formula, only the last one can reasonably vary. Let's take a
5009 closer look, after assuming that the first two are set to true
5010 (otherwise the last value is effectively silenced).
5012 If only_leaf_regs_used returns false, the global predicate will also
5013 be false so the actual frame size calculated below will be positive.
5014 As a consequence, the save_register_window insn will be emitted in
5015 the instruction stream; now this insn explicitly references %fp
5016 which is not a leaf register so only_leaf_regs_used will always
5017 return false subsequently.
5019 If only_leaf_regs_used returns true, we hope that the subsequent
5020 optimization passes won't cause non-leaf registers to pop up. For
5021 example, the regrename pass has special provisions to not rename to
5022 non-leaf registers in a leaf function. */
5023 sparc_leaf_function_p
5024 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5026 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5028 if (flag_stack_usage_info)
5029 current_function_static_stack_size = size;
5031 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5032 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5034 if (size == 0)
5035 ; /* do nothing. */
5036 else if (sparc_leaf_function_p)
5038 rtx size_int_rtx = GEN_INT (-size);
5040 if (size <= 4096)
5041 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5042 else if (size <= 8192)
5044 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5045 RTX_FRAME_RELATED_P (insn) = 1;
5047 /* %sp is still the CFA register. */
5048 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5050 else
5052 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5053 emit_move_insn (size_rtx, size_int_rtx);
5054 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5055 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5056 gen_stack_pointer_inc (size_int_rtx));
5059 RTX_FRAME_RELATED_P (insn) = 1;
5061 else
5063 rtx size_int_rtx = GEN_INT (-size);
5065 if (size <= 4096)
5066 emit_window_save (size_int_rtx);
5067 else if (size <= 8192)
5069 emit_window_save (GEN_INT (-4096));
5071 /* %sp is not the CFA register anymore. */
5072 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5074 /* Make sure no %fp-based store is issued until after the frame is
5075 established. The offset between the frame pointer and the stack
5076 pointer is calculated relative to the value of the stack pointer
5077 at the end of the function prologue, and moving instructions that
5078 access the stack via the frame pointer between the instructions
5079 that decrement the stack pointer could result in accessing the
5080 register window save area, which is volatile. */
5081 emit_insn (gen_frame_blockage ());
5083 else
5085 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5086 emit_move_insn (size_rtx, size_int_rtx);
5087 emit_window_save (size_rtx);
5091 if (sparc_leaf_function_p)
5093 sparc_frame_base_reg = stack_pointer_rtx;
5094 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5096 else
5098 sparc_frame_base_reg = hard_frame_pointer_rtx;
5099 sparc_frame_base_offset = SPARC_STACK_BIAS;
5102 if (sparc_n_global_fp_regs > 0)
5103 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5104 sparc_frame_base_offset
5105 - sparc_apparent_frame_size,
5106 SORR_SAVE);
5108 /* Load the GOT register if needed. */
5109 if (crtl->uses_pic_offset_table)
5110 load_got_register ();
5112 /* Advertise that the data calculated just above are now valid. */
5113 sparc_prologue_data_valid_p = true;
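/* For reference (illustrative): with a 104-byte frame the above emits
   "add %sp, -104, %sp" for a leaf function and a "save %sp, -104, %sp"
   window save otherwise; frames between 4096 and 8192 bytes are split
   into two stack adjustments, and larger ones go through %g1.  */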
5116 /* Expand the function prologue in the flat window model.  The prologue is
5117 responsible for reserving storage for the frame, saving the call-saved
5118 registers and loading the GOT register if needed. */
5120 void
5121 sparc_flat_expand_prologue (void)
5123 HOST_WIDE_INT size;
5124 rtx insn;
5126 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
5128 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5130 if (flag_stack_usage_info)
5131 current_function_static_stack_size = size;
5133 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5134 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5136 if (sparc_save_local_in_regs_p)
5137 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5138 SORR_SAVE);
5140 if (size == 0)
5141 ; /* do nothing. */
5142 else
5144 rtx size_int_rtx, size_rtx;
5146 size_rtx = size_int_rtx = GEN_INT (-size);
5148 /* We establish the frame (i.e. decrement the stack pointer) first, even
5149 if we use a frame pointer, because we cannot clobber any call-saved
5150 registers, including the frame pointer, if we haven't created a new
5151 register save area, for the sake of compatibility with the ABI. */
5152 if (size <= 4096)
5153 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5154 else if (size <= 8192 && !frame_pointer_needed)
5156 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5157 RTX_FRAME_RELATED_P (insn) = 1;
5158 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5160 else
5162 size_rtx = gen_rtx_REG (Pmode, 1);
5163 emit_move_insn (size_rtx, size_int_rtx);
5164 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5165 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5166 gen_stack_pointer_inc (size_int_rtx));
5168 RTX_FRAME_RELATED_P (insn) = 1;
5170 /* Ensure nothing is scheduled until after the frame is established. */
5171 emit_insn (gen_blockage ());
5173 if (frame_pointer_needed)
5175 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5176 gen_rtx_MINUS (Pmode,
5177 stack_pointer_rtx,
5178 size_rtx)));
5179 RTX_FRAME_RELATED_P (insn) = 1;
5181 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5182 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5183 plus_constant (Pmode, stack_pointer_rtx,
5184 size)));
5187 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5189 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5190 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5192 insn = emit_move_insn (i7, o7);
5193 RTX_FRAME_RELATED_P (insn) = 1;
5195 add_reg_note (insn, REG_CFA_REGISTER,
5196 gen_rtx_SET (VOIDmode, i7, o7));
5198 /* Prevent this instruction from ever being considered dead,
5199 even if this function has no epilogue. */
5200 emit_use (i7);
5204 if (frame_pointer_needed)
5206 sparc_frame_base_reg = hard_frame_pointer_rtx;
5207 sparc_frame_base_offset = SPARC_STACK_BIAS;
5209 else
5211 sparc_frame_base_reg = stack_pointer_rtx;
5212 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5215 if (sparc_n_global_fp_regs > 0)
5216 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5217 sparc_frame_base_offset
5218 - sparc_apparent_frame_size,
5219 SORR_SAVE);
5221 /* Load the GOT register if needed. */
5222 if (crtl->uses_pic_offset_table)
5223 load_got_register ();
5225 /* Advertise that the data calculated just above are now valid. */
5226 sparc_prologue_data_valid_p = true;
5229 /* This function generates the assembly code for function entry, which boils
5230 down to emitting the necessary .register directives. */
5232 static void
5233 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5235 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5236 if (!TARGET_FLAT)
5237 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
5239 sparc_output_scratch_registers (file);
5242 /* Expand the function epilogue, either normal or part of a sibcall.
5243 We emit all the instructions except the return or the call. */
5245 void
5246 sparc_expand_epilogue (bool for_eh)
5248 HOST_WIDE_INT size = sparc_frame_size;
5250 if (sparc_n_global_fp_regs > 0)
5251 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5252 sparc_frame_base_offset
5253 - sparc_apparent_frame_size,
5254 SORR_RESTORE);
5256 if (size == 0 || for_eh)
5257 ; /* do nothing. */
5258 else if (sparc_leaf_function_p)
5260 if (size <= 4096)
5261 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5262 else if (size <= 8192)
5264 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5265 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5267 else
5269 rtx reg = gen_rtx_REG (Pmode, 1);
5270 emit_move_insn (reg, GEN_INT (-size));
5271 emit_insn (gen_stack_pointer_dec (reg));
5276 /* Expand the function epilogue in the flat window model, either normal or
5277 part of a sibcall.  We emit all the instructions except the return or the call. */
5279 void
5280 sparc_flat_expand_epilogue (bool for_eh)
5282 HOST_WIDE_INT size = sparc_frame_size;
5284 if (sparc_n_global_fp_regs > 0)
5285 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5286 sparc_frame_base_offset
5287 - sparc_apparent_frame_size,
5288 SORR_RESTORE);
5290 /* If we have a frame pointer, we need both to restore it before the
5291 frame is destroyed and to use its current value in destroying the frame.
5292 Since we don't have an atomic way to do that in the flat window model,
5293 we save the current value into a temporary register (%g1). */
5294 if (frame_pointer_needed && !for_eh)
5295 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5297 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5298 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5299 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5301 if (sparc_save_local_in_regs_p)
5302 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5303 sparc_frame_base_offset,
5304 SORR_RESTORE);
5306 if (size == 0 || for_eh)
5307 ; /* do nothing. */
5308 else if (frame_pointer_needed)
5310 /* Make sure the frame is destroyed after everything else is done. */
5311 emit_insn (gen_blockage ());
5313 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5315 else
5317 /* Likewise. */
5318 emit_insn (gen_blockage ());
5320 if (size <= 4096)
5321 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5322 else if (size <= 8192)
5324 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5325 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5327 else
5329 rtx reg = gen_rtx_REG (Pmode, 1);
5330 emit_move_insn (reg, GEN_INT (-size));
5331 emit_insn (gen_stack_pointer_dec (reg));
5336 /* Return true if it is appropriate to emit `return' instructions in the
5337 body of a function. */
5339 bool
5340 sparc_can_use_return_insn_p (void)
5342 return sparc_prologue_data_valid_p
5343 && sparc_n_global_fp_regs == 0
5344 && (TARGET_FLAT
5345 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
5346 : (sparc_frame_size == 0 || !sparc_leaf_function_p));
5349 /* This function generates the assembly code for function exit. */
5351 static void
5352 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5354 /* If the last two instructions of a function are "call foo; dslot;"
5355 the return address might point to the first instruction in the next
5356 function and we have to output a dummy nop for the sake of sane
5357 backtraces in such cases. This is pointless for sibling calls since
5358 the return address is explicitly adjusted. */
5360 rtx insn, last_real_insn;
5362 insn = get_last_insn ();
5364 last_real_insn = prev_real_insn (insn);
5365 if (last_real_insn
5366 && GET_CODE (last_real_insn) == INSN
5367 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5368 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5370 if (last_real_insn
5371 && CALL_P (last_real_insn)
5372 && !SIBLING_CALL_P (last_real_insn))
5373 fputs("\tnop\n", file);
5375 sparc_output_deferred_case_vectors ();
5378 /* Output a 'restore' instruction. */
5380 static void
5381 output_restore (rtx pat)
5383 rtx operands[3];
5385 if (! pat)
5387 fputs ("\t restore\n", asm_out_file);
5388 return;
5391 gcc_assert (GET_CODE (pat) == SET);
5393 operands[0] = SET_DEST (pat);
5394 pat = SET_SRC (pat);
5396 switch (GET_CODE (pat))
5398 case PLUS:
5399 operands[1] = XEXP (pat, 0);
5400 operands[2] = XEXP (pat, 1);
5401 output_asm_insn (" restore %r1, %2, %Y0", operands);
5402 break;
5403 case LO_SUM:
5404 operands[1] = XEXP (pat, 0);
5405 operands[2] = XEXP (pat, 1);
5406 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5407 break;
5408 case ASHIFT:
5409 operands[1] = XEXP (pat, 0);
5410 gcc_assert (XEXP (pat, 1) == const1_rtx);
5411 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5412 break;
5413 default:
5414 operands[1] = pat;
5415 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5416 break;
5420 /* Output a return. */
5422 const char *
5423 output_return (rtx insn)
5425 if (crtl->calls_eh_return)
5427 /* If the function uses __builtin_eh_return, the eh_return
5428 machinery occupies the delay slot. */
5429 gcc_assert (!final_sequence);
5431 if (flag_delayed_branch)
5433 if (!TARGET_FLAT && TARGET_V9)
5434 fputs ("\treturn\t%i7+8\n", asm_out_file);
5435 else
5437 if (!TARGET_FLAT)
5438 fputs ("\trestore\n", asm_out_file);
5440 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5443 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5445 else
5447 if (!TARGET_FLAT)
5448 fputs ("\trestore\n", asm_out_file);
5450 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5451 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5454 else if (sparc_leaf_function_p || TARGET_FLAT)
5456 /* This is a leaf or flat function so we don't have to bother restoring
5457 the register window, which frees us from dealing with the convoluted
5458 semantics of restore/return. We simply output the jump to the
5459 return address and the insn in the delay slot (if any). */
5461 return "jmp\t%%o7+%)%#";
5463 else
5465 /* This is a regular function so we have to restore the register window.
5466 We may have a pending insn for the delay slot, which will be either
5467 combined with the 'restore' instruction or put in the delay slot of
5468 the 'return' instruction. */
5470 if (final_sequence)
5472 rtx delay, pat;
5474 delay = NEXT_INSN (insn);
5475 gcc_assert (delay);
5477 pat = PATTERN (delay);
5479 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5481 epilogue_renumber (&pat, 0);
5482 return "return\t%%i7+%)%#";
5484 else
5486 output_asm_insn ("jmp\t%%i7+%)", NULL);
5487 output_restore (pat);
5488 PATTERN (delay) = gen_blockage ();
5489 INSN_CODE (delay) = -1;
5492 else
5494 /* The delay slot is empty. */
5495 if (TARGET_V9)
5496 return "return\t%%i7+%)\n\t nop";
5497 else if (flag_delayed_branch)
5498 return "jmp\t%%i7+%)\n\t restore";
5499 else
5500 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5504 return "";
5507 /* Output a sibling call. */
5509 const char *
5510 output_sibcall (rtx insn, rtx call_operand)
5512 rtx operands[1];
5514 gcc_assert (flag_delayed_branch);
5516 operands[0] = call_operand;
5518 if (sparc_leaf_function_p || TARGET_FLAT)
5520 /* This is a leaf or flat function so we don't have to bother restoring
5521 the register window. We simply output the jump to the function and
5522 the insn in the delay slot (if any). */
5524 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5526 if (final_sequence)
5527 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5528 operands);
5529 else
5530 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5531 it into a branch if possible. */
5532 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5533 operands);
5535 else
5537 /* This is a regular function so we have to restore the register window.
5538 We may have a pending insn for the delay slot, which will be combined
5539 with the 'restore' instruction. */
5541 output_asm_insn ("call\t%a0, 0", operands);
5543 if (final_sequence)
5545 rtx delay = NEXT_INSN (insn);
5546 gcc_assert (delay);
5548 output_restore (PATTERN (delay));
5550 PATTERN (delay) = gen_blockage ();
5551 INSN_CODE (delay) = -1;
5553 else
5554 output_restore (NULL_RTX);
5557 return "";
5560 /* Functions for handling argument passing.
5562 For 32-bit, the first 6 args are normally in registers and the rest are
5563 pushed. Any arg that starts within the first 6 words is at least
5564 partially passed in a register unless its data type forbids.
5566 For 64-bit, the argument registers are laid out as an array of 16 elements
5567 and arguments are added sequentially. The first 6 int args and up to the
5568 first 16 fp args (depending on size) are passed in regs.
5570 Slot Stack Integral Float Float in structure Double Long Double
5571 ---- ----- -------- ----- ------------------ ------ -----------
5572 15 [SP+248] %f31 %f30,%f31 %d30
5573 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5574 13 [SP+232] %f27 %f26,%f27 %d26
5575 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5576 11 [SP+216] %f23 %f22,%f23 %d22
5577 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5578 9 [SP+200] %f19 %f18,%f19 %d18
5579 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5580 7 [SP+184] %f15 %f14,%f15 %d14
5581 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5582 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5583 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5584 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5585 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5586 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5587 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5589 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5591 Integral arguments are always passed as 64-bit quantities appropriately
5592 extended.
5594 Passing of floating point values is handled as follows.
5595 If a prototype is in scope:
5596 If the value is in a named argument (i.e. not a stdarg function or a
5597 value not part of the `...') then the value is passed in the appropriate
5598 fp reg.
5599 If the value is part of the `...' and is passed in one of the first 6
5600 slots then the value is passed in the appropriate int reg.
5601 If the value is part of the `...' and is not passed in one of the first 6
5602 slots then the value is passed in memory.
5603 If a prototype is not in scope:
5604 If the value is one of the first 6 arguments the value is passed in the
5605 appropriate integer reg and the appropriate fp reg.
5606 If the value is not one of the first 6 arguments the value is passed in
5607 the appropriate fp reg and in memory.
5610 Summary of the calling conventions implemented by GCC on the SPARC:
5612 32-bit ABI:
5613 size argument return value
5615 small integer <4 int. reg. int. reg.
5616 word 4 int. reg. int. reg.
5617 double word 8 int. reg. int. reg.
5619 _Complex small integer <8 int. reg. int. reg.
5620 _Complex word 8 int. reg. int. reg.
5621 _Complex double word 16 memory int. reg.
5623 vector integer <=8 int. reg. FP reg.
5624 vector integer >8 memory memory
5626 float 4 int. reg. FP reg.
5627 double 8 int. reg. FP reg.
5628 long double 16 memory memory
5630 _Complex float 8 memory FP reg.
5631 _Complex double 16 memory FP reg.
5632 _Complex long double 32 memory FP reg.
5634 vector float any memory memory
5636 aggregate any memory memory
5640 64-bit ABI:
5641 size argument return value
5643 small integer <8 int. reg. int. reg.
5644 word 8 int. reg. int. reg.
5645 double word 16 int. reg. int. reg.
5647 _Complex small integer <16 int. reg. int. reg.
5648 _Complex word 16 int. reg. int. reg.
5649 _Complex double word 32 memory int. reg.
5651 vector integer <=16 FP reg. FP reg.
5652 vector integer 16<s<=32 memory FP reg.
5653 vector integer >32 memory memory
5655 float 4 FP reg. FP reg.
5656 double 8 FP reg. FP reg.
5657 long double 16 FP reg. FP reg.
5659 _Complex float 8 FP reg. FP reg.
5660 _Complex double 16 FP reg. FP reg.
5661 _Complex long double 32 memory FP reg.
5663 vector float <=16 FP reg. FP reg.
5664 vector float 16<s<=32 memory FP reg.
5665 vector float >32 memory memory
5667 aggregate <=16 reg. reg.
5668 aggregate 16<s<=32 memory reg.
5669 aggregate >32 memory memory
5673 Note #1: complex floating-point types follow the extended SPARC ABIs as
5674 implemented by the Sun compiler.
5676 Note #2: integral vector types follow the scalar floating-point types
5677 conventions to match what is implemented by the Sun VIS SDK.
5679 Note #3: floating-point vector types follow the aggregate types
5680 conventions. */
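/* Illustrative example (derived from the table above; hypothetical
   prototype): under the 64-bit ABI, for

       void f (int i, double d, struct { float x; float y; } s);

   slot 0 passes I in %o0, slot 1 passes D in %d2, and slot 2 passes S,
   an 8-byte structure with float fields, in the pair %f4,%f5.  */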
5683 /* Maximum number of int regs for args. */
5684 #define SPARC_INT_ARG_MAX 6
5685 /* Maximum number of fp regs for args. */
5686 #define SPARC_FP_ARG_MAX 16
5688 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
5690 /* Handle the INIT_CUMULATIVE_ARGS macro.
5691 Initialize a variable CUM of type CUMULATIVE_ARGS
5692 for a call to a function whose data type is FNTYPE.
5693 For a library call, FNTYPE is 0. */
5695 void
5696 init_cumulative_args (struct sparc_args *cum, tree fntype,
5697 rtx libname ATTRIBUTE_UNUSED,
5698 tree fndecl ATTRIBUTE_UNUSED)
5700 cum->words = 0;
5701 cum->prototype_p = fntype && prototype_p (fntype);
5702 cum->libcall_p = fntype == 0;
5705 /* Handle promotion of pointer and integer arguments. */
5707 static enum machine_mode
5708 sparc_promote_function_mode (const_tree type,
5709 enum machine_mode mode,
5710 int *punsignedp,
5711 const_tree fntype ATTRIBUTE_UNUSED,
5712 int for_return ATTRIBUTE_UNUSED)
5714 if (type != NULL_TREE && POINTER_TYPE_P (type))
5716 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5717 return Pmode;
5720 /* Integral arguments are passed as full words, as per the ABI. */
5721 if (GET_MODE_CLASS (mode) == MODE_INT
5722 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5723 return word_mode;
5725 return mode;
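/* Example (illustrative): given the promotion above, a 32-bit `short'
   argument or return value travels as a full sign-extended word, so

       short g (short x);

   receives X already extended to word_mode in its register.  */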
5728 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5730 static bool
5731 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5733 return TARGET_ARCH64 ? true : false;
5736 /* Scan the record type TYPE and return the following predicates:
5737 - INTREGS_P: the record contains at least one field or sub-field
5738 that is eligible for promotion in integer registers.
5739 - FP_REGS_P: the record contains at least one field or sub-field
5740 that is eligible for promotion in floating-point registers.
5741 - PACKED_P: the record contains at least one field that is packed.
5743 Sub-fields are not taken into account for the PACKED_P predicate. */
5745 static void
5746 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5747 int *packed_p)
5749 tree field;
5751 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5753 if (TREE_CODE (field) == FIELD_DECL)
5755 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5756 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5757 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5758 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5759 && TARGET_FPU)
5760 *fpregs_p = 1;
5761 else
5762 *intregs_p = 1;
5764 if (packed_p && DECL_PACKED (field))
5765 *packed_p = 1;
5770 /* Compute the slot number to pass an argument in.
5771 Return the slot number or -1 if passing on the stack.
5773 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5774 the preceding args and about the function being called.
5775 MODE is the argument's machine mode.
5776 TYPE is the data type of the argument (as a tree).
5777 This is null for libcalls where that information may
5778 not be available.
5779 NAMED is nonzero if this argument is a named parameter
5780 (otherwise it is an extra parameter matching an ellipsis).
5781 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5782 *PREGNO records the register number to use if scalar type.
5783 *PPADDING records the amount of padding needed in words. */
5785 static int
5786 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5787 const_tree type, bool named, bool incoming_p,
5788 int *pregno, int *ppadding)
5790 int regbase = (incoming_p
5791 ? SPARC_INCOMING_INT_ARG_FIRST
5792 : SPARC_OUTGOING_INT_ARG_FIRST);
5793 int slotno = cum->words;
5794 enum mode_class mclass;
5795 int regno;
5797 *ppadding = 0;
5799 if (type && TREE_ADDRESSABLE (type))
5800 return -1;
5802 if (TARGET_ARCH32
5803 && mode == BLKmode
5804 && type
5805 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5806 return -1;
5808 /* For SPARC64, objects requiring 16-byte alignment get it. */
5809 if (TARGET_ARCH64
5810 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5811 && (slotno & 1) != 0)
5812 slotno++, *ppadding = 1;
5814 mclass = GET_MODE_CLASS (mode);
5815 if (type && TREE_CODE (type) == VECTOR_TYPE)
5817 /* Vector types deserve special treatment because they are
5818 polymorphic wrt their mode, depending upon whether VIS
5819 instructions are enabled. */
5820 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5822 /* The SPARC port defines no floating-point vector modes. */
5823 gcc_assert (mode == BLKmode);
5825 else
5827 /* Integral vector types should either have a vector
5828 mode or an integral mode, because we are guaranteed
5829 by pass_by_reference that their size is not greater
5830 than 16 bytes and TImode is 16-byte wide. */
5831 gcc_assert (mode != BLKmode);
5833 /* Vector integers are handled like floats according to
5834 the Sun VIS SDK. */
5835 mclass = MODE_FLOAT;
5839 switch (mclass)
5841 case MODE_FLOAT:
5842 case MODE_COMPLEX_FLOAT:
5843 case MODE_VECTOR_INT:
5844 if (TARGET_ARCH64 && TARGET_FPU && named)
5846 if (slotno >= SPARC_FP_ARG_MAX)
5847 return -1;
5848 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5849 /* Arguments filling only one single FP register are
5850 right-justified in the outer double FP register. */
5851 if (GET_MODE_SIZE (mode) <= 4)
5852 regno++;
5853 break;
5855 /* fallthrough */
5857 case MODE_INT:
5858 case MODE_COMPLEX_INT:
5859 if (slotno >= SPARC_INT_ARG_MAX)
5860 return -1;
5861 regno = regbase + slotno;
5862 break;
5864 case MODE_RANDOM:
5865 if (mode == VOIDmode)
5866 /* MODE is VOIDmode when generating the actual call. */
5867 return -1;
5869 gcc_assert (mode == BLKmode);
5871 if (TARGET_ARCH32
5872 || !type
5873 || (TREE_CODE (type) != VECTOR_TYPE
5874 && TREE_CODE (type) != RECORD_TYPE))
5876 if (slotno >= SPARC_INT_ARG_MAX)
5877 return -1;
5878 regno = regbase + slotno;
5880 else /* TARGET_ARCH64 && type */
5882 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5884 /* First see what kinds of registers we would need. */
5885 if (TREE_CODE (type) == VECTOR_TYPE)
5886 fpregs_p = 1;
5887 else
5888 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5890 /* The ABI obviously doesn't specify how packed structures
5891 are passed. These are defined to be passed in int regs
5892 if possible, otherwise memory. */
5893 if (packed_p || !named)
5894 fpregs_p = 0, intregs_p = 1;
5896 /* If all arg slots are filled, then must pass on stack. */
5897 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5898 return -1;
5900 /* If there are only int args and all int arg slots are filled,
5901 then must pass on stack. */
5902 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5903 return -1;
5905 /* Note that even if all int arg slots are filled, fp members may
5906 still be passed in regs if such regs are available.
5907 *PREGNO isn't set because there may be more than one, it's up
5908 to the caller to compute them. */
5909 return slotno;
5911 break;
5913 default :
5914 gcc_unreachable ();
5917 *pregno = regno;
5918 return slotno;
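/* A worked example, assuming TARGET_ARCH64 and TARGET_FPU, for

     void f (int a, double b, float c);

   A lands in slot 0 (an integer register), B in slot 1 (the FP register
   pair starting at SPARC_FP_ARG_FIRST + 2), and C in slot 2, where the
   4-byte value gets the odd register of its pair
   (SPARC_FP_ARG_FIRST + 4 + 1), i.e. right-justified per the rule
   above.  */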
5921 /* Handle recursive register counting for structure field layout. */
5923 struct function_arg_record_value_parms
5925 rtx ret; /* return expression being built. */
5926 int slotno; /* slot number of the argument. */
5927 int named; /* whether the argument is named. */
5928 int regbase; /* regno of the base register. */
5929 int stack; /* 1 if part of the argument is on the stack. */
5930 int intoffset; /* offset of the first pending integer field. */
5931 unsigned int nregs; /* number of words passed in registers. */
5934 static void function_arg_record_value_3
5935 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5936 static void function_arg_record_value_2
5937 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5938 static void function_arg_record_value_1
5939 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5940 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5941 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5943 /* A subroutine of function_arg_record_value. Traverse the structure
5944 recursively and determine how many registers will be required. */
5946 static void
5947 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5948 struct function_arg_record_value_parms *parms,
5949 bool packed_p)
5951 tree field;
5953 /* We need to compute how many registers are needed so we can
5954 allocate the PARALLEL but before we can do that we need to know
5955 whether there are any packed fields. The ABI obviously doesn't
5956 specify how structures are passed in this case, so they are
5957 defined to be passed in int regs if possible, otherwise memory,
5958 regardless of whether there are fp values present. */
5960 if (! packed_p)
5961 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5963 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5965 packed_p = true;
5966 break;
5970 /* Compute how many registers we need. */
5971 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5973 if (TREE_CODE (field) == FIELD_DECL)
5975 HOST_WIDE_INT bitpos = startbitpos;
5977 if (DECL_SIZE (field) != 0)
5979 if (integer_zerop (DECL_SIZE (field)))
5980 continue;
5982 if (host_integerp (bit_position (field), 1))
5983 bitpos += int_bit_position (field);
5986 /* ??? FIXME: else assume zero offset. */
5988 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5989 function_arg_record_value_1 (TREE_TYPE (field),
5990 bitpos,
5991 parms,
5992 packed_p);
5993 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5994 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5995 && TARGET_FPU
5996 && parms->named
5997 && ! packed_p)
5999 if (parms->intoffset != -1)
6001 unsigned int startbit, endbit;
6002 int intslots, this_slotno;
6004 startbit = parms->intoffset & -BITS_PER_WORD;
6005 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6007 intslots = (endbit - startbit) / BITS_PER_WORD;
6008 this_slotno = parms->slotno + parms->intoffset
6009 / BITS_PER_WORD;
6011 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6013 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6014 /* We need to pass this field on the stack. */
6015 parms->stack = 1;
6018 parms->nregs += intslots;
6019 parms->intoffset = -1;
6022 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
6023 If it weren't true, we wouldn't be here. */
6024 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6025 && DECL_MODE (field) == BLKmode)
6026 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6027 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6028 parms->nregs += 2;
6029 else
6030 parms->nregs += 1;
6032 else
6034 if (parms->intoffset == -1)
6035 parms->intoffset = bitpos;
6041 /* A subroutine of function_arg_record_value. Assign the bits of the
6042 structure between parms->intoffset and bitpos to integer registers. */
6044 static void
6045 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
6046 struct function_arg_record_value_parms *parms)
6048 enum machine_mode mode;
6049 unsigned int regno;
6050 unsigned int startbit, endbit;
6051 int this_slotno, intslots, intoffset;
6052 rtx reg;
6054 if (parms->intoffset == -1)
6055 return;
6057 intoffset = parms->intoffset;
6058 parms->intoffset = -1;
6060 startbit = intoffset & -BITS_PER_WORD;
6061 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6062 intslots = (endbit - startbit) / BITS_PER_WORD;
6063 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
6065 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
6066 if (intslots <= 0)
6067 return;
6069 /* If this is the trailing part of a word, only load that much into
6070 the register. Otherwise load the whole register. Note that in
6071 the latter case we may pick up unwanted bits. It's not a problem
6072 at the moment, but we may wish to revisit this. */
6074 if (intoffset % BITS_PER_WORD != 0)
6075 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
6076 MODE_INT);
6077 else
6078 mode = word_mode;
6080 intoffset /= BITS_PER_UNIT;
6083 regno = parms->regbase + this_slotno;
6084 reg = gen_rtx_REG (mode, regno);
6085 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6086 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6088 this_slotno += 1;
6089 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
6090 mode = word_mode;
6091 parms->nregs += 1;
6092 intslots -= 1;
6094 while (intslots > 0);
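/* Numeric example, with BITS_PER_WORD == 64: integer fields pending from
   INTOFFSET == 32 up to a float field at BITPOS == 128 give

     startbit = 32 & -64          = 0
     endbit   = (128 + 63) & -64  = 128
     intslots = (128 - 0) / 64    = 2

   so two integer registers are emitted, the first in a narrowed SImode
   because INTOFFSET is not word-aligned.  */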
6097 /* A subroutine of function_arg_record_value. Traverse the structure
6098 recursively and assign bits to floating point registers. Track which
6099 bits in between need integer registers; invoke function_arg_record_value_3
6100 to make that happen. */
6102 static void
6103 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
6104 struct function_arg_record_value_parms *parms,
6105 bool packed_p)
6107 tree field;
6109 if (! packed_p)
6110 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6112 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6114 packed_p = true;
6115 break;
6119 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6121 if (TREE_CODE (field) == FIELD_DECL)
6123 HOST_WIDE_INT bitpos = startbitpos;
6125 if (DECL_SIZE (field) != 0)
6127 if (integer_zerop (DECL_SIZE (field)))
6128 continue;
6130 if (host_integerp (bit_position (field), 1))
6131 bitpos += int_bit_position (field);
6134 /* ??? FIXME: else assume zero offset. */
6136 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6137 function_arg_record_value_2 (TREE_TYPE (field),
6138 bitpos,
6139 parms,
6140 packed_p);
6141 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6142 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6143 && TARGET_FPU
6144 && parms->named
6145 && ! packed_p)
6147 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
6148 int regno, nregs, pos;
6149 enum machine_mode mode = DECL_MODE (field);
6150 rtx reg;
6152 function_arg_record_value_3 (bitpos, parms);
6154 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6155 && mode == BLKmode)
6157 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6158 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6160 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6162 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6163 nregs = 2;
6165 else
6166 nregs = 1;
6168 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6169 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6170 regno++;
6171 reg = gen_rtx_REG (mode, regno);
6172 pos = bitpos / BITS_PER_UNIT;
6173 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6174 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6175 parms->nregs += 1;
6176 while (--nregs > 0)
6178 regno += GET_MODE_SIZE (mode) / 4;
6179 reg = gen_rtx_REG (mode, regno);
6180 pos += GET_MODE_SIZE (mode);
6181 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6182 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6183 parms->nregs += 1;
6186 else
6188 if (parms->intoffset == -1)
6189 parms->intoffset = bitpos;
6195 /* Used by function_arg and sparc_function_value_1 to implement the complex
6196 conventions of the 64-bit ABI for passing and returning structures.
6197 Return an expression valid as a return value for the FUNCTION_ARG
6198 and TARGET_FUNCTION_VALUE.
6200 TYPE is the data type of the argument (as a tree).
6201 This is null for libcalls where that information may
6202 not be available.
6203 MODE is the argument's machine mode.
6204 SLOTNO is the index number of the argument's slot in the parameter array.
6205 NAMED is nonzero if this argument is a named parameter
6206 (otherwise it is an extra parameter matching an ellipsis).
6207 REGBASE is the regno of the base register for the parameter array. */
6209 static rtx
6210 function_arg_record_value (const_tree type, enum machine_mode mode,
6211 int slotno, int named, int regbase)
6213 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6214 struct function_arg_record_value_parms parms;
6215 unsigned int nregs;
6217 parms.ret = NULL_RTX;
6218 parms.slotno = slotno;
6219 parms.named = named;
6220 parms.regbase = regbase;
6221 parms.stack = 0;
6223 /* Compute how many registers we need. */
6224 parms.nregs = 0;
6225 parms.intoffset = 0;
6226 function_arg_record_value_1 (type, 0, &parms, false);
6228 /* Take into account pending integer fields. */
6229 if (parms.intoffset != -1)
6231 unsigned int startbit, endbit;
6232 int intslots, this_slotno;
6234 startbit = parms.intoffset & -BITS_PER_WORD;
6235 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6236 intslots = (endbit - startbit) / BITS_PER_WORD;
6237 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
6239 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6241 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6242 /* We need to pass this field on the stack. */
6243 parms.stack = 1;
6246 parms.nregs += intslots;
6248 nregs = parms.nregs;
6250 /* Allocate the vector and handle some annoying special cases. */
6251 if (nregs == 0)
6253 /* ??? Empty structure has no value? Duh? */
6254 if (typesize <= 0)
6256 /* Though there's nothing really to store, return a word register
6257 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6258 leads to breakage because there are zero bytes to
6259 load. */
6260 return gen_rtx_REG (mode, regbase);
6262 else
6264 /* ??? C++ has structures with no fields, and yet a size. Give up
6265 for now and pass everything back in integer registers. */
6266 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6268 if (nregs + slotno > SPARC_INT_ARG_MAX)
6269 nregs = SPARC_INT_ARG_MAX - slotno;
6271 gcc_assert (nregs != 0);
6273 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6275 /* If at least one field must be passed on the stack, generate
6276 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6277 also be passed on the stack. We can't do much better because the
6278 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6279 of structures for which the fields passed exclusively in registers
6280 are not at the beginning of the structure. */
6281 if (parms.stack)
6282 XVECEXP (parms.ret, 0, 0)
6283 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6285 /* Fill in the entries. */
6286 parms.nregs = 0;
6287 parms.intoffset = 0;
6288 function_arg_record_value_2 (type, 0, &parms, false);
6289 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6291 gcc_assert (parms.nregs == nregs);
6293 return parms.ret;
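/* For instance, passing a named 16-byte

     struct { double d; long l; };

   starting at slot 0 on the outgoing side produces roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   with the FP member in a float register and the integer member in the
   second integer argument register.  */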
6296 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6297 of the 64-bit ABI for passing and returning unions.
6298 Return an expression valid as a return value for the FUNCTION_ARG
6299 and TARGET_FUNCTION_VALUE.
6301 SIZE is the size in bytes of the union.
6302 MODE is the argument's machine mode.
6303 SLOTNO is the argument's slot index; REGNO is the hard register the union will be passed in. */
6305 static rtx
6306 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6307 int regno)
6309 int nwords = ROUND_ADVANCE (size), i;
6310 rtx regs;
6312 /* See comment in previous function for empty structures. */
6313 if (nwords == 0)
6314 return gen_rtx_REG (mode, regno);
6316 if (slotno == SPARC_INT_ARG_MAX - 1)
6317 nwords = 1;
6319 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6321 for (i = 0; i < nwords; i++)
6323 /* Unions are passed left-justified. */
6324 XVECEXP (regs, 0, i)
6325 = gen_rtx_EXPR_LIST (VOIDmode,
6326 gen_rtx_REG (word_mode, regno),
6327 GEN_INT (UNITS_PER_WORD * i));
6328 regno++;
6331 return regs;
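/* For instance, an 8-byte union in slot 0 on TARGET_ARCH64 becomes

     (parallel [(expr_list (reg:DI %o0) (const_int 0))])

   starting at byte offset 0 of the slot, i.e. left-justified.  */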
6334 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6335 for passing and returning large (BLKmode) vectors.
6336 Return an expression valid as a return value for the FUNCTION_ARG
6337 and TARGET_FUNCTION_VALUE.
6339 SIZE is the size in bytes of the vector (at least 8 bytes).
6340 REGNO is the FP hard register the vector will be passed in. */
6342 static rtx
6343 function_arg_vector_value (int size, int regno)
6345 int i, nregs = size / 8;
6346 rtx regs;
6348 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6350 for (i = 0; i < nregs; i++)
6352 XVECEXP (regs, 0, i)
6353 = gen_rtx_EXPR_LIST (VOIDmode,
6354 gen_rtx_REG (DImode, regno + 2*i),
6355 GEN_INT (i*8));
6358 return regs;
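/* For instance, a 16-byte vector passed starting at %f0 yields

     (parallel [(expr_list (reg:DI %f0) (const_int 0))
                (expr_list (reg:DI %f2) (const_int 8))])

   i.e. one DImode piece per 8 bytes, in consecutive double FP
   registers.  */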
6361 /* Determine where to put an argument to a function.
6362 Value is zero to push the argument on the stack,
6363 or a hard register in which to store the argument.
6365 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6366 the preceding args and about the function being called.
6367 MODE is the argument's machine mode.
6368 TYPE is the data type of the argument (as a tree).
6369 This is null for libcalls where that information may
6370 not be available.
6371 NAMED is true if this argument is a named parameter
6372 (otherwise it is an extra parameter matching an ellipsis).
6373 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6374 TARGET_FUNCTION_INCOMING_ARG. */
6376 static rtx
6377 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6378 const_tree type, bool named, bool incoming_p)
6380 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6382 int regbase = (incoming_p
6383 ? SPARC_INCOMING_INT_ARG_FIRST
6384 : SPARC_OUTGOING_INT_ARG_FIRST);
6385 int slotno, regno, padding;
6386 enum mode_class mclass = GET_MODE_CLASS (mode);
6388 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6389 &regno, &padding);
6390 if (slotno == -1)
6391 return 0;
6393 /* Vector types deserve special treatment because they are polymorphic wrt
6394 their mode, depending upon whether VIS instructions are enabled. */
6395 if (type && TREE_CODE (type) == VECTOR_TYPE)
6397 HOST_WIDE_INT size = int_size_in_bytes (type);
6398 gcc_assert ((TARGET_ARCH32 && size <= 8)
6399 || (TARGET_ARCH64 && size <= 16));
6401 if (mode == BLKmode)
6402 return function_arg_vector_value (size,
6403 SPARC_FP_ARG_FIRST + 2*slotno);
6404 else
6405 mclass = MODE_FLOAT;
6408 if (TARGET_ARCH32)
6409 return gen_rtx_REG (mode, regno);
6411 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6412 and are promoted to registers if possible. */
6413 if (type && TREE_CODE (type) == RECORD_TYPE)
6415 HOST_WIDE_INT size = int_size_in_bytes (type);
6416 gcc_assert (size <= 16);
6418 return function_arg_record_value (type, mode, slotno, named, regbase);
6421 /* Unions up to 16 bytes in size are passed in integer registers. */
6422 else if (type && TREE_CODE (type) == UNION_TYPE)
6424 HOST_WIDE_INT size = int_size_in_bytes (type);
6425 gcc_assert (size <= 16);
6427 return function_arg_union_value (size, mode, slotno, regno);
6430 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6431 but also have the slot allocated for them.
6432 If no prototype is in scope, fp values in register slots get passed
6433 in two places, either fp regs and int regs or fp regs and memory. */
6434 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6435 && SPARC_FP_REG_P (regno))
6437 rtx reg = gen_rtx_REG (mode, regno);
6438 if (cum->prototype_p || cum->libcall_p)
6440 /* "* 2" because fp reg numbers are recorded in 4 byte
6441 quantities. */
6442 #if 0
6443 /* ??? This will cause the value to be passed in the fp reg and
6444 in the stack. When a prototype exists we want to pass the
6445 value in the reg but reserve space on the stack. That's an
6446 optimization, and is deferred [for a bit]. */
6447 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6448 return gen_rtx_PARALLEL (mode,
6449 gen_rtvec (2,
6450 gen_rtx_EXPR_LIST (VOIDmode,
6451 NULL_RTX, const0_rtx),
6452 gen_rtx_EXPR_LIST (VOIDmode,
6453 reg, const0_rtx)));
6454 else
6455 #else
6456 /* ??? It seems that passing back a register even when past
6457 the area declared by REG_PARM_STACK_SPACE will allocate
6458 space appropriately, and will not copy the data onto the
6459 stack, exactly as we desire.
6461 This is due to locate_and_pad_parm being called in
6462 expand_call whenever reg_parm_stack_space > 0, which
6463 while beneficial to our example here, would seem to be
6464 in error from what had been intended. Ho hum... -- r~ */
6465 #endif
6466 return reg;
6468 else
6470 rtx v0, v1;
6472 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6474 int intreg;
6476 /* On incoming, we don't need to know that the value
6477 is passed in %f0 and %i0, and it confuses other parts,
6478 causing needless spillage even in the simplest cases. */
6479 if (incoming_p)
6480 return reg;
6482 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6483 + (regno - SPARC_FP_ARG_FIRST) / 2);
6485 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6486 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6487 const0_rtx);
6488 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6490 else
6492 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6493 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6494 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6499 /* All other aggregate types are passed in an integer register in a mode
6500 corresponding to the size of the type. */
6501 else if (type && AGGREGATE_TYPE_P (type))
6503 HOST_WIDE_INT size = int_size_in_bytes (type);
6504 gcc_assert (size <= 16);
6506 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6509 return gen_rtx_REG (mode, regno);
6512 /* Handle the TARGET_FUNCTION_ARG target hook. */
6514 static rtx
6515 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6516 const_tree type, bool named)
6518 return sparc_function_arg_1 (cum, mode, type, named, false);
6521 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6523 static rtx
6524 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6525 const_tree type, bool named)
6527 return sparc_function_arg_1 (cum, mode, type, named, true);
6530 /* For sparc64, objects requiring 16-byte alignment are passed that way. */
6532 static unsigned int
6533 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6535 return ((TARGET_ARCH64
6536 && (GET_MODE_ALIGNMENT (mode) == 128
6537 || (type && TYPE_ALIGN (type) == 128)))
6538 ? 128
6539 : PARM_BOUNDARY);
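/* For instance, on TARGET_ARCH64 a '__int128' or 'long double' argument
   (128-bit alignment) is placed on a 16-byte boundary in the parameter
   array, everything else on the default PARM_BOUNDARY.  */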
6542 /* For an arg passed partly in registers and partly in memory,
6543 this is the number of bytes of registers used.
6544 For args passed entirely in registers or entirely in memory, zero.
6546 Any arg that starts in the first 6 regs but won't entirely fit in them
6547 needs partial registers on v8. On v9, structures with integer
6548 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6549 values that begin in the last fp reg [where "last fp reg" varies with the
6550 mode] will be split between that reg and memory. */
6552 static int
6553 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6554 tree type, bool named)
6556 int slotno, regno, padding;
6558 /* We pass false for incoming_p here; it doesn't matter. */
6559 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6560 false, &regno, &padding);
6562 if (slotno == -1)
6563 return 0;
6565 if (TARGET_ARCH32)
6567 if ((slotno + (mode == BLKmode
6568 ? ROUND_ADVANCE (int_size_in_bytes (type))
6569 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6570 > SPARC_INT_ARG_MAX)
6571 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6573 else
6575 /* We are guaranteed by pass_by_reference that the size of the
6576 argument is not greater than 16 bytes, so we only need to return
6577 one word if the argument is partially passed in registers. */
6579 if (type && AGGREGATE_TYPE_P (type))
6581 int size = int_size_in_bytes (type);
6583 if (size > UNITS_PER_WORD
6584 && slotno == SPARC_INT_ARG_MAX - 1)
6585 return UNITS_PER_WORD;
6587 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6588 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6589 && ! (TARGET_FPU && named)))
6591 /* The complex types are passed as packed types. */
6592 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6593 && slotno == SPARC_INT_ARG_MAX - 1)
6594 return UNITS_PER_WORD;
6596 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6598 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6599 > SPARC_FP_ARG_MAX)
6600 return UNITS_PER_WORD;
6604 return 0;
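/* For instance, on TARGET_ARCH32 a 'double' whose first word falls in
   slot 5 needs slots 5 and 6; since only 6 slots exist, (6 - 5) * 4
   == 4 bytes travel in %o5 and the remaining 4 bytes go on the
   stack.  */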
6607 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6608 Specify whether to pass the argument by reference. */
6610 static bool
6611 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6612 enum machine_mode mode, const_tree type,
6613 bool named ATTRIBUTE_UNUSED)
6615 if (TARGET_ARCH32)
6616 /* Original SPARC 32-bit ABI says that structures and unions,
6617 and quad-precision floats are passed by reference. For Pascal,
6618 also pass arrays by reference. All other base types are passed
6619 in registers.
6621 Extended ABI (as implemented by the Sun compiler) says that all
6622 complex floats are passed by reference. Pass complex integers
6623 in registers up to 8 bytes. More generally, enforce the 2-word
6624 cap for passing arguments in registers.
6626 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6627 integers are passed like floats of the same size, that is in
6628 registers up to 8 bytes. Pass all vector floats by reference
6629 like structure and unions. */
6630 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6631 || mode == SCmode
6632 /* Catch CDImode, TFmode, DCmode and TCmode. */
6633 || GET_MODE_SIZE (mode) > 8
6634 || (type
6635 && TREE_CODE (type) == VECTOR_TYPE
6636 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6637 else
6638 /* Original SPARC 64-bit ABI says that structures and unions
6639 smaller than 16 bytes are passed in registers, as well as
6640 all other base types.
6642 Extended ABI (as implemented by the Sun compiler) says that
6643 complex floats are passed in registers up to 16 bytes. Pass
6644 all complex integers in registers up to 16 bytes. More generally,
6645 enforce the 2-word cap for passing arguments in registers.
6647 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6648 integers are passed like floats of the same size, that is in
6649 registers (up to 16 bytes). Pass all vector floats like structure
6650 and unions. */
6651 return ((type
6652 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6653 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6654 /* Catch CTImode and TCmode. */
6655 || GET_MODE_SIZE (mode) > 16);
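/* A few consequences: on TARGET_ARCH32, any struct and '_Complex float'
   (SCmode) are passed by reference while '_Complex int' (8 bytes) stays
   in registers; on TARGET_ARCH64, a 16-byte struct is passed by value
   but a 17-byte one by reference, and 'long double' (TFmode, 16 bytes)
   is passed by value.  */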
6658 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6659 Update the data in CUM to advance over an argument
6660 of mode MODE and data type TYPE.
6661 TYPE is null for libcalls where that information may not be available. */
6663 static void
6664 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6665 const_tree type, bool named)
6667 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6668 int regno, padding;
6670 /* We pass false for incoming_p here; it doesn't matter. */
6671 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6673 /* If argument requires leading padding, add it. */
6674 cum->words += padding;
6676 if (TARGET_ARCH32)
6678 cum->words += (mode != BLKmode
6679 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6680 : ROUND_ADVANCE (int_size_in_bytes (type)));
6682 else
6684 if (type && AGGREGATE_TYPE_P (type))
6686 int size = int_size_in_bytes (type);
6688 if (size <= 8)
6689 ++cum->words;
6690 else if (size <= 16)
6691 cum->words += 2;
6692 else /* passed by reference */
6693 ++cum->words;
6695 else
6697 cum->words += (mode != BLKmode
6698 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6699 : ROUND_ADVANCE (int_size_in_bytes (type)));
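/* For instance, on TARGET_ARCH64, advancing over the argument list
   'int, double, struct { char c[16]; }' takes CUM->words through
   0 -> 1 -> 2 -> 4: one slot per scalar, two for the 16-byte
   aggregate.  */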
6704 /* Handle the FUNCTION_ARG_PADDING macro.
6705 For the 64-bit ABI, structs are always stored left-justified in their
6706 argument slot. */
6708 enum direction
6709 function_arg_padding (enum machine_mode mode, const_tree type)
6711 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6712 return upward;
6714 /* Fall back to the default. */
6715 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6718 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6719 Specify whether to return the return value in memory. */
6721 static bool
6722 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6724 if (TARGET_ARCH32)
6725 /* Original SPARC 32-bit ABI says that structures and unions,
6726 and quad-precision floats are returned in memory. All other
6727 base types are returned in registers.
6729 Extended ABI (as implemented by the Sun compiler) says that
6730 all complex floats are returned in registers (8 FP registers
6731 at most for '_Complex long double'). Return all complex integers
6732 in registers (4 at most for '_Complex long long').
6734 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6735 integers are returned like floats of the same size, that is in
6736 registers up to 8 bytes and in memory otherwise. Return all
6737 vector floats in memory like structure and unions; note that
6738 they always have BLKmode like the latter. */
6739 return (TYPE_MODE (type) == BLKmode
6740 || TYPE_MODE (type) == TFmode
6741 || (TREE_CODE (type) == VECTOR_TYPE
6742 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6743 else
6744 /* Original SPARC 64-bit ABI says that structures and unions
6745 smaller than 32 bytes are returned in registers, as well as
6746 all other base types.
6748 Extended ABI (as implemented by the Sun compiler) says that all
6749 complex floats are returned in registers (8 FP registers at most
6750 for '_Complex long double'). Return all complex integers in
6751 registers (4 at most for '_Complex TItype').
6753 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6754 integers are returned like floats of the same size, that is in
6755 registers. Return all vector floats like structure and unions;
6756 note that they always have BLKmode like the latter. */
6757 return (TYPE_MODE (type) == BLKmode
6758 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
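/* A few consequences: on TARGET_ARCH32 every struct, union and
   'long double' is returned in memory; on TARGET_ARCH64 a 32-byte
   struct still comes back in registers while a 33-byte one goes
   through memory.  */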
6761 /* Handle the TARGET_STRUCT_VALUE target hook.
6762 Return where to find the structure return value address. */
6764 static rtx
6765 sparc_struct_value_rtx (tree fndecl, int incoming)
6767 if (TARGET_ARCH64)
6768 return 0;
6769 else
6771 rtx mem;
6773 if (incoming)
6774 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
6775 STRUCT_VALUE_OFFSET));
6776 else
6777 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
6778 STRUCT_VALUE_OFFSET));
6780 /* Only follow the SPARC ABI for fixed-size structure returns.
6781 Variable-size structure returns are handled per the normal
6782 procedures in GCC. This is enabled by -mstd-struct-return. */
6783 if (incoming == 2
6784 && sparc_std_struct_return
6785 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6786 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6788 /* We must check and adjust the return address, since the
6789 caller may or may not have really provided a return
6790 object. */
6791 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6792 rtx scratch = gen_reg_rtx (SImode);
6793 rtx endlab = gen_label_rtx ();
6795 /* Calculate the return object size */
6796 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6797 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6798 /* Construct a temporary return value */
6799 rtx temp_val
6800 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6802 /* Implement SPARC 32-bit psABI callee return struct checking:
6804 Fetch the instruction where we will return to and see if
6805 it's an unimp instruction (the most significant 10 bits
6806 will be zero). */
6807 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6808 plus_constant (Pmode,
6809 ret_reg, 8)));
6810 /* Assume the size is valid and pre-adjust */
6811 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6812 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6813 0, endlab);
6814 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6815 /* Write the address of the memory pointed to by temp_val into
6816 the memory pointed to by mem */
6817 emit_move_insn (mem, XEXP (temp_val, 0));
6818 emit_label (endlab);
6821 return mem;
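/* The check above implements the 32-bit psABI convention whereby a
   caller that really provides a return object follows the call with a
   size marker, roughly

       call  f
        nop
       unimp 12    ! low 12 bits hold the size of the returned struct

   If the word at return address + 8 matches the expected size, the
   pre-adjusted return address (+4) is kept so the callee skips the
   unimp; otherwise the adjustment is undone and the freshly allocated
   temporary is substituted for the missing return object.  */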
6825 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6826 For v9, function return values are subject to the same rules as arguments,
6827 except that up to 32 bytes may be returned in registers. */
6829 static rtx
6830 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6831 bool outgoing)
6833 /* Beware that the two values are swapped here wrt function_arg. */
6834 int regbase = (outgoing
6835 ? SPARC_INCOMING_INT_ARG_FIRST
6836 : SPARC_OUTGOING_INT_ARG_FIRST);
6837 enum mode_class mclass = GET_MODE_CLASS (mode);
6838 int regno;
6840 /* Vector types deserve special treatment because they are polymorphic wrt
6841 their mode, depending upon whether VIS instructions are enabled. */
6842 if (type && TREE_CODE (type) == VECTOR_TYPE)
6844 HOST_WIDE_INT size = int_size_in_bytes (type);
6845 gcc_assert ((TARGET_ARCH32 && size <= 8)
6846 || (TARGET_ARCH64 && size <= 32));
6848 if (mode == BLKmode)
6849 return function_arg_vector_value (size,
6850 SPARC_FP_ARG_FIRST);
6851 else
6852 mclass = MODE_FLOAT;
6855 if (TARGET_ARCH64 && type)
6857 /* Structures up to 32 bytes in size are returned in registers. */
6858 if (TREE_CODE (type) == RECORD_TYPE)
6860 HOST_WIDE_INT size = int_size_in_bytes (type);
6861 gcc_assert (size <= 32);
6863 return function_arg_record_value (type, mode, 0, 1, regbase);
6866 /* Unions up to 32 bytes in size are returned in integer registers. */
6867 else if (TREE_CODE (type) == UNION_TYPE)
6869 HOST_WIDE_INT size = int_size_in_bytes (type);
6870 gcc_assert (size <= 32);
6872 return function_arg_union_value (size, mode, 0, regbase);
6875 /* Objects that require it are returned in FP registers. */
6876 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6879 /* All other aggregate types are returned in an integer register in a
6880 mode corresponding to the size of the type. */
6881 else if (AGGREGATE_TYPE_P (type))
6883 /* All other aggregate types are passed in an integer register
6884 in a mode corresponding to the size of the type. */
6885 HOST_WIDE_INT size = int_size_in_bytes (type);
6886 gcc_assert (size <= 32);
6888 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6890 /* ??? We probably should have made the same ABI change in
6891 3.4.0 as the one we made for unions. The latter was
6892 required by the SCD though, while the former is not
6893 specified, so we favored compatibility and efficiency.
6895 Now we're stuck for aggregates larger than 16 bytes,
6896 because OImode vanished in the meantime. Let's not
6897 try to be unduly clever, and simply follow the ABI
6898 for unions in that case. */
6899 if (mode == BLKmode)
6900 return function_arg_union_value (size, mode, 0, regbase);
6901 else
6902 mclass = MODE_INT;
6905 /* We should only have pointer and integer types at this point. This
6906 must match sparc_promote_function_mode. */
6907 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6908 mode = word_mode;
6911 /* We should only have pointer and integer types at this point. This must
6912 match sparc_promote_function_mode. */
6913 else if (TARGET_ARCH32
6914 && mclass == MODE_INT
6915 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6916 mode = word_mode;
6918 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6919 regno = SPARC_FP_ARG_FIRST;
6920 else
6921 regno = regbase;
6923 return gen_rtx_REG (mode, regno);
6926 /* Handle TARGET_FUNCTION_VALUE.
6927 On the SPARC, the value is found in the first "output" register, but the
6928 called function leaves it in the first "input" register. */
6930 static rtx
6931 sparc_function_value (const_tree valtype,
6932 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6933 bool outgoing)
6935 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6938 /* Handle TARGET_LIBCALL_VALUE. */
6940 static rtx
6941 sparc_libcall_value (enum machine_mode mode,
6942 const_rtx fun ATTRIBUTE_UNUSED)
6944 return sparc_function_value_1 (NULL_TREE, mode, false);
6947 /* Handle FUNCTION_VALUE_REGNO_P.
6948 On the SPARC, the first "output" reg is used for integer values, and the
6949 first floating point register is used for floating point values. */
6951 static bool
6952 sparc_function_value_regno_p (const unsigned int regno)
6954 return (regno == 8 || regno == 32);
6957 /* Do what is necessary for `va_start'. We look at the current function
6958 to determine if stdarg or varargs is used and return the address of
6959 the first unnamed parameter. */
6961 static rtx
6962 sparc_builtin_saveregs (void)
6964 int first_reg = crtl->args.info.words;
6965 rtx address;
6966 int regno;
6968 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6969 emit_move_insn (gen_rtx_MEM (word_mode,
6970 gen_rtx_PLUS (Pmode,
6971 frame_pointer_rtx,
6972 GEN_INT (FIRST_PARM_OFFSET (0)
6973 + (UNITS_PER_WORD
6974 * regno)))),
6975 gen_rtx_REG (word_mode,
6976 SPARC_INCOMING_INT_ARG_FIRST + regno));
6978 address = gen_rtx_PLUS (Pmode,
6979 frame_pointer_rtx,
6980 GEN_INT (FIRST_PARM_OFFSET (0)
6981 + UNITS_PER_WORD * first_reg));
6983 return address;
6986 /* Implement `va_start' for stdarg. */
6988 static void
6989 sparc_va_start (tree valist, rtx nextarg)
6991 nextarg = expand_builtin_saveregs ();
6992 std_expand_builtin_va_start (valist, nextarg);
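/* For instance, for 'void f (int n, ...)' on TARGET_ARCH32,
   CRTL->args.info.words is 1 after the named parameter, so incoming
   registers %i1..%i5 are dumped into their reserved stack slots and
   va_start makes the va_list point at the slot of the first anonymous
   word.  */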
6995 /* Implement `va_arg' for stdarg. */
6997 static tree
6998 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6999 gimple_seq *post_p)
7001 HOST_WIDE_INT size, rsize, align;
7002 tree addr, incr;
7003 bool indirect;
7004 tree ptrtype = build_pointer_type (type);
7006 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7008 indirect = true;
7009 size = rsize = UNITS_PER_WORD;
7010 align = 0;
7012 else
7014 indirect = false;
7015 size = int_size_in_bytes (type);
7016 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7017 align = 0;
7019 if (TARGET_ARCH64)
7021 /* For SPARC64, objects requiring 16-byte alignment get it. */
7022 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7023 align = 2 * UNITS_PER_WORD;
7025 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7026 are left-justified in their slots. */
7027 if (AGGREGATE_TYPE_P (type))
7029 if (size == 0)
7030 size = rsize = UNITS_PER_WORD;
7031 else
7032 size = rsize;
7037 incr = valist;
7038 if (align)
7040 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7041 incr = fold_convert (sizetype, incr);
7042 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7043 size_int (-align));
7044 incr = fold_convert (ptr_type_node, incr);
7047 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7048 addr = incr;
7050 if (BYTES_BIG_ENDIAN && size < rsize)
7051 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7053 if (indirect)
7055 addr = fold_convert (build_pointer_type (ptrtype), addr);
7056 addr = build_va_arg_indirect_ref (addr);
7059 /* If the address isn't aligned properly for the type, we need a temporary.
7060 FIXME: This is inefficient; usually we can do this in registers. */
7061 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7063 tree tmp = create_tmp_var (type, "va_arg_tmp");
7064 tree dest_addr = build_fold_addr_expr (tmp);
7065 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7066 3, dest_addr, addr, size_int (rsize));
7067 TREE_ADDRESSABLE (tmp) = 1;
7068 gimplify_and_add (copy, pre_p);
7069 addr = dest_addr;
7072 else
7073 addr = fold_convert (ptrtype, addr);
7075 incr = fold_build_pointer_plus_hwi (incr, rsize);
7076 gimplify_assign (valist, incr, post_p);
7078 return build_va_arg_indirect_ref (addr);
7081 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7082 Specify whether the vector mode is supported by the hardware. */
7084 static bool
7085 sparc_vector_mode_supported_p (enum machine_mode mode)
7087 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
7090 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7092 static enum machine_mode
7093 sparc_preferred_simd_mode (enum machine_mode mode)
7095 if (TARGET_VIS)
7096 switch (mode)
7098 case SImode:
7099 return V2SImode;
7100 case HImode:
7101 return V4HImode;
7102 case QImode:
7103 return V8QImode;
7105 default:;
7108 return word_mode;
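/* For instance, a loop over 'int' data vectorizes with V2SImode (two
   32-bit lanes per 64-bit FP register) when VIS is enabled; without
   VIS, word_mode is returned and the vectorizer gets no SIMD mode to
   work with.  */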
7111 /* Return the string to output an unconditional branch to LABEL, which is
7112 the operand number of the label.
7114 DEST is the destination insn (i.e. the label), INSN is the source. */
7116 const char *
7117 output_ubranch (rtx dest, int label, rtx insn)
7119 static char string[64];
7120 bool v9_form = false;
7121 char *p;
7123 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
7125 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7126 - INSN_ADDRESSES (INSN_UID (insn)));
7127 /* Leave some instructions for "slop". */
7128 if (delta >= -260000 && delta < 260000)
7129 v9_form = true;
7132 if (v9_form)
7133 strcpy (string, "ba%*,pt\t%%xcc, ");
7134 else
7135 strcpy (string, "b%*\t");
7137 p = strchr (string, '\0');
7138 *p++ = '%';
7139 *p++ = 'l';
7140 *p++ = '0' + label;
7141 *p++ = '%';
7142 *p++ = '(';
7143 *p = '\0';
7145 return string;
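/* For instance, with operand 0 the label, the returned templates look
   like

     "b%*\t%l0%("              ! v7/v8 form, also used for far targets
     "ba%*,pt\t%%xcc, %l0%("   ! short-range v9 form

   where %* and %( are print_operand punctuation that, roughly, emit the
   ',a' annul marker and a trailing nop when the delay slot is
   unfilled.  */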
7148 /* Return the string to output a conditional branch to LABEL, which is
7149 the operand number of the label. OP is the conditional expression.
7150 XEXP (OP, 0) is assumed to be a condition code register (integer or
7151 floating point) and its mode specifies what kind of comparison we made.
7153 DEST is the destination insn (i.e. the label), INSN is the source.
7155 REVERSED is nonzero if we should reverse the sense of the comparison.
7157 ANNUL is nonzero if we should generate an annulling branch. */
7159 const char *
7160 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7161 rtx insn)
7163 static char string[64];
7164 enum rtx_code code = GET_CODE (op);
7165 rtx cc_reg = XEXP (op, 0);
7166 enum machine_mode mode = GET_MODE (cc_reg);
7167 const char *labelno, *branch;
7168 int spaces = 8, far;
7169 char *p;
7171 /* v9 branches are limited to +-1MB. If it is too far away,
7172 change
7174 bne,pt %xcc, .LC30
7176 to
7178 be,pn %xcc, .+12
7179  nop
7180 ba .LC30
7182 and
7184 fbne,a,pn %fcc2, .LC29
7186 to
7188 fbe,pt %fcc2, .+16
7189  nop
7190 ba .LC29 */
7192 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7193 if (reversed ^ far)
7195 /* Reversal of FP compares takes care -- an ordered compare
7196 becomes an unordered compare and vice versa. */
7197 if (mode == CCFPmode || mode == CCFPEmode)
7198 code = reverse_condition_maybe_unordered (code);
7199 else
7200 code = reverse_condition (code);
7203 /* Start by writing the branch condition. */
7204 if (mode == CCFPmode || mode == CCFPEmode)
7206 switch (code)
7208 case NE:
7209 branch = "fbne";
7210 break;
7211 case EQ:
7212 branch = "fbe";
7213 break;
7214 case GE:
7215 branch = "fbge";
7216 break;
7217 case GT:
7218 branch = "fbg";
7219 break;
7220 case LE:
7221 branch = "fble";
7222 break;
7223 case LT:
7224 branch = "fbl";
7225 break;
7226 case UNORDERED:
7227 branch = "fbu";
7228 break;
7229 case ORDERED:
7230 branch = "fbo";
7231 break;
7232 case UNGT:
7233 branch = "fbug";
7234 break;
7235 case UNLT:
7236 branch = "fbul";
7237 break;
7238 case UNEQ:
7239 branch = "fbue";
7240 break;
7241 case UNGE:
7242 branch = "fbuge";
7243 break;
7244 case UNLE:
7245 branch = "fbule";
7246 break;
7247 case LTGT:
7248 branch = "fblg";
7249 break;
7251 default:
7252 gcc_unreachable ();
7255 /* ??? !v9: FP branches cannot be preceded by another floating point
7256 insn. Because there is currently no concept of pre-delay slots,
7257 we can fix this only by always emitting a nop before a floating
7258 point branch. */
7260 string[0] = '\0';
7261 if (! TARGET_V9)
7262 strcpy (string, "nop\n\t");
7263 strcat (string, branch);
7265 else
7267 switch (code)
7269 case NE:
7270 branch = "bne";
7271 break;
7272 case EQ:
7273 branch = "be";
7274 break;
7275 case GE:
7276 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7277 branch = "bpos";
7278 else
7279 branch = "bge";
7280 break;
7281 case GT:
7282 branch = "bg";
7283 break;
7284 case LE:
7285 branch = "ble";
7286 break;
7287 case LT:
7288 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7289 branch = "bneg";
7290 else
7291 branch = "bl";
7292 break;
7293 case GEU:
7294 branch = "bgeu";
7295 break;
7296 case GTU:
7297 branch = "bgu";
7298 break;
7299 case LEU:
7300 branch = "bleu";
7301 break;
7302 case LTU:
7303 branch = "blu";
7304 break;
7306 default:
7307 gcc_unreachable ();
7309 strcpy (string, branch);
7311 spaces -= strlen (branch);
7312 p = strchr (string, '\0');
7314 /* Now add the annulling, the label, and a possible noop. */
7315 if (annul && ! far)
7317 strcpy (p, ",a");
7318 p += 2;
7319 spaces -= 2;
7322 if (TARGET_V9)
7324 rtx note;
7325 int v8 = 0;
7327 if (! far && insn && INSN_ADDRESSES_SET_P ())
7329 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7330 - INSN_ADDRESSES (INSN_UID (insn)));
7331 /* Leave some instructions for "slop". */
7332 if (delta < -260000 || delta >= 260000)
7333 v8 = 1;
7336 if (mode == CCFPmode || mode == CCFPEmode)
7338 static char v9_fcc_labelno[] = "%%fccX, ";
7339 /* Set the char indicating the number of the fcc reg to use. */
7340 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7341 labelno = v9_fcc_labelno;
7342 if (v8)
7344 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7345 labelno = "";
7348 else if (mode == CCXmode || mode == CCX_NOOVmode)
7350 labelno = "%%xcc, ";
7351 gcc_assert (! v8);
7353 else
7355 labelno = "%%icc, ";
7356 if (v8)
7357 labelno = "";
7360 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7362 strcpy (p,
7363 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7364 ? ",pt" : ",pn");
7365 p += 3;
7366 spaces -= 3;
7369 else
7370 labelno = "";
7372 if (spaces > 0)
7373 *p++ = '\t';
7374 else
7375 *p++ = ' ';
7376 strcpy (p, labelno);
7377 p = strchr (p, '\0');
7378 if (far)
7380 strcpy (p, ".+12\n\t nop\n\tb\t");
7381 /* Skip the next insn if requested or
7382 if we know that it will be a nop. */
7383 if (annul || ! final_sequence)
7384 p[3] = '6';
7385 p += 14;
7387 *p++ = '%';
7388 *p++ = 'l';
7389 *p++ = label + '0';
7390 *p++ = '%';
7391 *p++ = '#';
7392 *p = '\0';
7394 return string;
7397 /* Emit a library call comparison between floating point X and Y.
7398 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7399 Return the new operator to be used in the comparison sequence.
7401 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7402 values as arguments instead of the TFmode registers themselves;
7403 that's why we cannot call emit_float_lib_cmp. */
7405 rtx
7406 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7408 const char *qpfunc;
7409 rtx slot0, slot1, result, tem, tem2, libfunc;
7410 enum machine_mode mode;
7411 enum rtx_code new_comparison;
7413 switch (comparison)
7415 case EQ:
7416 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7417 break;
7419 case NE:
7420 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7421 break;
7423 case GT:
7424 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7425 break;
7427 case GE:
7428 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7429 break;
7431 case LT:
7432 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7433 break;
7435 case LE:
7436 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7437 break;
7439 case ORDERED:
7440 case UNORDERED:
7441 case UNGT:
7442 case UNLT:
7443 case UNEQ:
7444 case UNGE:
7445 case UNLE:
7446 case LTGT:
7447 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7448 break;
7450 default:
7451 gcc_unreachable ();
7454 if (TARGET_ARCH64)
7456 if (MEM_P (x))
7458 tree expr = MEM_EXPR (x);
7459 if (expr)
7460 mark_addressable (expr);
7461 slot0 = x;
7463 else
7465 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
7466 emit_move_insn (slot0, x);
7469 if (MEM_P (y))
7471 tree expr = MEM_EXPR (y);
7472 if (expr)
7473 mark_addressable (expr);
7474 slot1 = y;
7476 else
7478 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
7479 emit_move_insn (slot1, y);
7482 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7483 emit_library_call (libfunc, LCT_NORMAL,
7484 DImode, 2,
7485 XEXP (slot0, 0), Pmode,
7486 XEXP (slot1, 0), Pmode);
7487 mode = DImode;
7489 else
7491 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7492 emit_library_call (libfunc, LCT_NORMAL,
7493 SImode, 2,
7494 x, TFmode, y, TFmode);
7495 mode = SImode;
7499 /* Immediately move the result of the libcall into a pseudo
7500 register so reload doesn't clobber the value if it needs
7501 the return register for a spill reg. */
7502 result = gen_reg_rtx (mode);
7503 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7505 switch (comparison)
7507 default:
7508 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7509 case ORDERED:
7510 case UNORDERED:
7511 new_comparison = (comparison == UNORDERED ? EQ : NE);
7512 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
7513 case UNGT:
7514 case UNGE:
7515 new_comparison = (comparison == UNGT ? GT : NE);
7516 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7517 case UNLE:
7518 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7519 case UNLT:
7520 tem = gen_reg_rtx (mode);
7521 if (TARGET_ARCH32)
7522 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7523 else
7524 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7525 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7526 case UNEQ:
7527 case LTGT:
7528 tem = gen_reg_rtx (mode);
7529 if (TARGET_ARCH32)
7530 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7531 else
7532 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7533 tem2 = gen_reg_rtx (mode);
7534 if (TARGET_ARCH32)
7535 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7536 else
7537 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7538 new_comparison = (comparison == UNEQ ? EQ : NE);
7539 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7542 gcc_unreachable ();
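/* The decoding above relies on the comparison libcall returning

     0 = equal,  1 = less,  2 = greater,  3 = unordered

   e.g. UNORDERED is 'result == 3', UNGT is 'result > 1', UNLT is
   '(result & 1) != 0' (bit 0 is set for both less and unordered), and
   UNEQ/LTGT add 1 and test bit 1 of the sum.  */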
7545 /* Generate an unsigned DImode to FP conversion. This is the same code
7546 optabs would emit if we didn't have TFmode patterns. */
7548 void
7549 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7551 rtx neglab, donelab, i0, i1, f0, in, out;
7553 out = operands[0];
7554 in = force_reg (DImode, operands[1]);
7555 neglab = gen_label_rtx ();
7556 donelab = gen_label_rtx ();
7557 i0 = gen_reg_rtx (DImode);
7558 i1 = gen_reg_rtx (DImode);
7559 f0 = gen_reg_rtx (mode);
7561 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7563 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7564 emit_jump_insn (gen_jump (donelab));
7565 emit_barrier ();
7567 emit_label (neglab);
7569 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7570 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7571 emit_insn (gen_iordi3 (i0, i0, i1));
7572 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7573 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7575 emit_label (donelab);
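/* The sequence above is roughly equivalent to this C, for the DFmode
   case:

     double floatunsdi (unsigned long x)
     {
       if ((long) x >= 0)
         return (double) x;
       unsigned long i = (x >> 1) | (x & 1);
       double f = (double) (long) i;
       return f + f;
     }

   halving a too-large value (keeping the low bit sticky for correct
   rounding) and doubling the result back.  */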
7578 /* Generate an FP to unsigned DImode conversion. This is the same code
7579 optabs would emit if we didn't have TFmode patterns. */
7581 void
7582 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7584 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7586 out = operands[0];
7587 in = force_reg (mode, operands[1]);
7588 neglab = gen_label_rtx ();
7589 donelab = gen_label_rtx ();
7590 i0 = gen_reg_rtx (DImode);
7591 i1 = gen_reg_rtx (DImode);
7592 limit = gen_reg_rtx (mode);
7593 f0 = gen_reg_rtx (mode);
7595 emit_move_insn (limit,
7596 CONST_DOUBLE_FROM_REAL_VALUE (
7597 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7598 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7600 emit_insn (gen_rtx_SET (VOIDmode,
7601 out,
7602 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7603 emit_jump_insn (gen_jump (donelab));
7604 emit_barrier ();
7606 emit_label (neglab);
7608 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7609 emit_insn (gen_rtx_SET (VOIDmode,
7611 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7612 emit_insn (gen_movdi (i1, const1_rtx));
7613 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7614 emit_insn (gen_xordi3 (out, i0, i1));
7616 emit_label (donelab);
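/* Roughly equivalent C for the DFmode case, with LIMIT == 2^63:

     unsigned long fixunsdi (double x)
     {
       if (x < 9223372036854775808.0)
         return (long) x;
       return (unsigned long) (long) (x - 9223372036854775808.0)
              ^ (1UL << 63);
     }

   i.e. values beyond the signed range are biased down before the signed
   conversion and the sign bit is patched back in afterwards.  */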
7619 /* Return the string to output a conditional branch to LABEL, testing
7620 register REG. LABEL is the operand number of the label; REG is the
7621 operand number of the reg. OP is the conditional expression. The mode
7622 of REG says what kind of comparison we made.
7624 DEST is the destination insn (i.e. the label), INSN is the source.
7626 REVERSED is nonzero if we should reverse the sense of the comparison.
7628 ANNUL is nonzero if we should generate an annulling branch. */
7630 const char *
7631 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7632 int annul, rtx insn)
7634 static char string[64];
7635 enum rtx_code code = GET_CODE (op);
7636 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7637 rtx note;
7638 int far;
7639 char *p;
7641 /* Branches on a register are limited to +-128KB. If it is too far away,
7642 change
7644 brnz,pt %g1, .LC30
7646 to
7648 brz,pn %g1, .+12
7649  nop
7650 ba,pt %xcc, .LC30
7652 and
7654 brgez,a,pn %o1, .LC29
7656 to
7658 brlz,pt %o1, .+16
7659  nop
7660 ba,pt %xcc, .LC29 */
7662 far = get_attr_length (insn) >= 3;
7664 /* If not floating-point or if EQ or NE, we can just reverse the code. */
7665 if (reversed ^ far)
7666 code = reverse_condition (code);
7668 /* Only 64 bit versions of these instructions exist. */
7669 gcc_assert (mode == DImode);
7671 /* Start by writing the branch condition. */
7673 switch (code)
7675 case NE:
7676 strcpy (string, "brnz");
7677 break;
7679 case EQ:
7680 strcpy (string, "brz");
7681 break;
7683 case GE:
7684 strcpy (string, "brgez");
7685 break;
7687 case LT:
7688 strcpy (string, "brlz");
7689 break;
7691 case LE:
7692 strcpy (string, "brlez");
7693 break;
7695 case GT:
7696 strcpy (string, "brgz");
7697 break;
7699 default:
7700 gcc_unreachable ();
7703 p = strchr (string, '\0');
7705 /* Now add the annulling, reg, label, and nop. */
7706 if (annul && ! far)
7708 strcpy (p, ",a");
7709 p += 2;
7712 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7714 strcpy (p,
7715 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7716 ? ",pt" : ",pn");
7717 p += 3;
7720 *p = p < string + 8 ? '\t' : ' ';
7721 p++;
7722 *p++ = '%';
7723 *p++ = '0' + reg;
7724 *p++ = ',';
7725 *p++ = ' ';
7726 if (far)
7728 int veryfar = 1, delta;
7730 if (INSN_ADDRESSES_SET_P ())
7732 delta = (INSN_ADDRESSES (INSN_UID (dest))
7733 - INSN_ADDRESSES (INSN_UID (insn)));
7734 /* Leave some instructions for "slop". */
7735 if (delta >= -260000 && delta < 260000)
7736 veryfar = 0;
7739 strcpy (p, ".+12\n\t nop\n\t");
7740 /* Skip the next insn if requested or
7741 if we know that it will be a nop. */
7742 if (annul || ! final_sequence)
7743 p[3] = '6';
7744 p += 12;
7745 if (veryfar)
7747 strcpy (p, "b\t");
7748 p += 2;
7750 else
7752 strcpy (p, "ba,pt\t%%xcc, ");
7753 p += 13;
7756 *p++ = '%';
7757 *p++ = 'l';
7758 *p++ = '0' + label;
7759 *p++ = '%';
7760 *p++ = '#';
7761 *p = '\0';
7763 return string;
7766 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7767 Such instructions cannot be used in the delay slot of the return insn on v9.
7768 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
7771 static int
7772 epilogue_renumber (register rtx *where, int test)
7774 register const char *fmt;
7775 register int i;
7776 register enum rtx_code code;
7778 if (*where == 0)
7779 return 0;
7781 code = GET_CODE (*where);
7783 switch (code)
7785 case REG:
7786 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7787 return 1;
7788 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7789 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7790 case SCRATCH:
7791 case CC0:
7792 case PC:
7793 case CONST_INT:
7794 case CONST_DOUBLE:
7795 return 0;
7797 /* Do not replace the frame pointer with the stack pointer because
7798 it can cause the delayed instruction to load below the stack.
7799 This occurs when instructions like:
7801 (set (reg/i:SI 24 %i0)
7802 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7803 (const_int -20 [0xffffffec])) 0))
7805 are in the delay slot of the return insn. */
7806 case PLUS:
7807 if (GET_CODE (XEXP (*where, 0)) == REG
7808 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7809 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7810 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7811 return 1;
7812 break;
7814 case MEM:
7815 if (SPARC_STACK_BIAS
7816 && GET_CODE (XEXP (*where, 0)) == REG
7817 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7818 return 1;
7819 break;
7821 default:
7822 break;
7825 fmt = GET_RTX_FORMAT (code);
7827 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7829 if (fmt[i] == 'E')
7831 register int j;
7832 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7833 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7834 return 1;
7836 else if (fmt[i] == 'e'
7837 && epilogue_renumber (&(XEXP (*where, i)), test))
7838 return 1;
7840 return 0;
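/* Example (illustrative): when TEST is 0 and the return delay slot
   contains

	(set (reg:SI 24 %i0) (reg:SI 25 %i1))

   the recursion above rewrites both registers through OUTGOING_REGNO,
   yielding (set (reg:SI 8 %o0) (reg:SI 9 %o1)), which remains valid
   after the register window has been popped.  */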
7843 /* Leaf functions and non-leaf functions have different needs. */
7845 static const int
7846 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7848 static const int
7849 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7851 static const int *const reg_alloc_orders[] = {
7852 reg_leaf_alloc_order,
7853 reg_nonleaf_alloc_order};
7855 void
7856 order_regs_for_local_alloc (void)
7858 static int last_order_nonleaf = 1;
7860 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7862 last_order_nonleaf = !last_order_nonleaf;
7863 memcpy ((char *) reg_alloc_order,
7864 (const char *) reg_alloc_orders[last_order_nonleaf],
7865 FIRST_PSEUDO_REGISTER * sizeof (int));
7869 /* Return 1 if REG and MEM are legitimate enough to allow the various
7870 mem<-->reg splits to be run. */
7872 int
7873 sparc_splitdi_legitimate (rtx reg, rtx mem)
7875 /* Punt if we are here by mistake. */
7876 gcc_assert (reload_completed);
7878 /* We must have an offsettable memory reference. */
7879 if (! offsettable_memref_p (mem))
7880 return 0;
7882 /* If we have legitimate args for ldd/std, we do not want
7883 the split to happen. */
7884 if ((REGNO (reg) % 2) == 0
7885 && mem_min_alignment (mem, 8))
7886 return 0;
7888 /* Success. */
7889 return 1;
7892 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
7894 int
7895 sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
7897 int regno1, regno2;
7899 if (GET_CODE (reg1) == SUBREG)
7900 reg1 = SUBREG_REG (reg1);
7901 if (GET_CODE (reg1) != REG)
7902 return 0;
7903 regno1 = REGNO (reg1);
7905 if (GET_CODE (reg2) == SUBREG)
7906 reg2 = SUBREG_REG (reg2);
7907 if (GET_CODE (reg2) != REG)
7908 return 0;
7909 regno2 = REGNO (reg2);
7911 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
7912 return 1;
7914 if (TARGET_VIS3)
7916 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
7917 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
7918 return 1;
7921 return 0;
7924 /* Return 1 if x and y are some kind of REG and they refer to
7925 different hard registers. This test is guaranteed to be
7926 run after reload. */
7928 int
7929 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7931 if (GET_CODE (x) != REG)
7932 return 0;
7933 if (GET_CODE (y) != REG)
7934 return 0;
7935 if (REGNO (x) == REGNO (y))
7936 return 0;
7937 return 1;
7940 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7941 This makes them candidates for using ldd and std insns.
7943 Note reg1 and reg2 *must* be hard registers. */
7945 int
7946 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7948 /* We might have been passed a SUBREG. */
7949 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7950 return 0;
7952 if (REGNO (reg1) % 2 != 0)
7953 return 0;
7955 /* Integer ldd is deprecated in SPARC V9. */
7956 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
7957 return 0;
7959 return (REGNO (reg1) == REGNO (reg2) - 1);
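/* For instance (illustrative): %f2/%f3 (an even/odd adjacent pair)
   qualify, %f3/%f4 and %f2/%f5 do not, and %o2/%o3 qualify only
   before V9, where the integer ldd/std pair is deprecated.  */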
7962 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7963 an ldd or std insn.
7965 This can only happen when addr1 and addr2, the addresses in mem1
7966 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7967 addr1 must also be aligned on a 64-bit boundary.
7969 Also, if dependent_reg_rtx is not null, it should not be used to
7970 compute the address for mem1, i.e. we cannot optimize a sequence
7971 like:
7972 ld [%o0], %o0
7973 ld [%o0 + 4], %o1
7974 to
7975 ldd [%o0], %o0
7976 nor:
7977 ld [%g3 + 4], %g3
7978 ld [%g3], %g2
7979 to
7980 ldd [%g3], %g2
7982 But, note that the transformation from:
7983 ld [%g2 + 4], %g3
7984 ld [%g2], %g2
7985 to
7986 ldd [%g2], %g2
7987 is perfectly fine. Thus, the peephole2 patterns always pass us
7988 the destination register of the first load, never the second one.
7990 For stores we don't have a similar problem, so dependent_reg_rtx is
7991 NULL_RTX. */
7993 int
7994 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7996 rtx addr1, addr2;
7997 unsigned int reg1;
7998 HOST_WIDE_INT offset1;
8000 /* The mems cannot be volatile. */
8001 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
8002 return 0;
8004 /* MEM1 should be aligned on a 64-bit boundary. */
8005 if (MEM_ALIGN (mem1) < 64)
8006 return 0;
8008 addr1 = XEXP (mem1, 0);
8009 addr2 = XEXP (mem2, 0);
8011 /* Extract a register number and offset (if used) from the first addr. */
8012 if (GET_CODE (addr1) == PLUS)
8014 /* If not a REG, return zero. */
8015 if (GET_CODE (XEXP (addr1, 0)) != REG)
8016 return 0;
8017 else
8019 reg1 = REGNO (XEXP (addr1, 0));
8020 /* The offset must be constant! */
8021 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
8022 return 0;
8023 offset1 = INTVAL (XEXP (addr1, 1));
8026 else if (GET_CODE (addr1) != REG)
8027 return 0;
8028 else
8030 reg1 = REGNO (addr1);
8031 /* This was a simple (mem (reg)) expression. Offset is 0. */
8032 offset1 = 0;
8035 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
8036 if (GET_CODE (addr2) != PLUS)
8037 return 0;
8039 if (GET_CODE (XEXP (addr2, 0)) != REG
8040 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
8041 return 0;
8043 if (reg1 != REGNO (XEXP (addr2, 0)))
8044 return 0;
8046 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
8047 return 0;
8049 /* The first offset must be evenly divisible by 8 to ensure the
8050 address is 64 bit aligned. */
8051 if (offset1 % 8 != 0)
8052 return 0;
8054 /* The offset for the second addr must be 4 more than the first addr. */
8055 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
8056 return 0;
8058 /* All the tests passed. addr1 and addr2 are valid for ldd and std
8059 instructions. */
8060 return 1;
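/* A quick sanity check of the arithmetic (illustrative values): for
   [%o0 + 8] and [%o0 + 12] we get reg1 == %o0 and offset1 == 8, so
   offset1 % 8 == 0 and 12 == offset1 + 4: the pair can become
   "ldd [%o0 + 8], ...".  The pair [%o0 + 4] / [%o0 + 8] fails the
   alignment test on offset1 and is rejected.  */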
8063 /* Return 1 if reg is a pseudo, or is the first register in
8064 a hard register pair. This makes it suitable for use in
8065 ldd and std insns. */
8067 int
8068 register_ok_for_ldd (rtx reg)
8070 /* We might have been passed a SUBREG. */
8071 if (!REG_P (reg))
8072 return 0;
8074 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
8075 return (REGNO (reg) % 2 == 0);
8077 return 1;
8080 /* Return 1 if OP is a memory whose address is known to be
8081 aligned to an 8-byte boundary, or a pseudo during reload.
8082 This makes it suitable for use in ldd and std insns. */
8084 int
8085 memory_ok_for_ldd (rtx op)
8087 if (MEM_P (op))
8089 /* In 64-bit mode, we assume that the address is word-aligned. */
8090 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
8091 return 0;
8093 if (! can_create_pseudo_p ()
8094 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
8095 return 0;
8097 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
8099 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
8100 return 0;
8102 else
8103 return 0;
8105 return 1;
8108 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8110 static bool
8111 sparc_print_operand_punct_valid_p (unsigned char code)
8113 if (code == '#'
8114 || code == '*'
8115 || code == '('
8116 || code == ')'
8117 || code == '_'
8118 || code == '&')
8119 return true;
8121 return false;
8124 /* Implement TARGET_PRINT_OPERAND.
8125 Print operand X (an rtx) in assembler syntax to file FILE.
8126 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8127 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8129 static void
8130 sparc_print_operand (FILE *file, rtx x, int code)
8132 switch (code)
8134 case '#':
8135 /* Output an insn in a delay slot. */
8136 if (final_sequence)
8137 sparc_indent_opcode = 1;
8138 else
8139 fputs ("\n\t nop", file);
8140 return;
8141 case '*':
8142 /* Output an annul flag if there's nothing for the delay slot and we
8143 are optimizing. This is always used with '(' below.
8144 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8145 this is a dbx bug. So, we only do this when optimizing.
8146 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8147 Always emit a nop in case the next instruction is a branch. */
8148 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8149 fputs (",a", file);
8150 return;
8151 case '(':
8152 /* Output a 'nop' if there's nothing for the delay slot and we are
8153 not optimizing. This is always used with '*' above. */
8154 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8155 fputs ("\n\t nop", file);
8156 else if (final_sequence)
8157 sparc_indent_opcode = 1;
8158 return;
8159 case ')':
8160 /* Output the right displacement from the saved PC on function return.
8161 The caller may have placed an "unimp" insn immediately after the call
8162 so we have to account for it. This insn is used in the 32-bit ABI
8163 when calling a function that returns a non zero-sized structure. The
8164 64-bit ABI doesn't have it. Be careful to have this test be the same
8165 as that for the call. The exception is when sparc_std_struct_return
8166 is enabled, the psABI is followed exactly and the adjustment is made
8167 by the code in sparc_struct_value_rtx. The call emitted is the same
8168 when sparc_std_struct_return is enabled. */
8169 if (!TARGET_ARCH64
8170 && cfun->returns_struct
8171 && !sparc_std_struct_return
8172 && DECL_SIZE (DECL_RESULT (current_function_decl))
8173 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8174 == INTEGER_CST
8175 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8176 fputs ("12", file);
8177 else
8178 fputc ('8', file);
8179 return;
8180 case '_':
8181 /* Output the Embedded Medium/Anywhere code model base register. */
8182 fputs (EMBMEDANY_BASE_REG, file);
8183 return;
8184 case '&':
8185 /* Print some local dynamic TLS name. */
8186 assemble_name (file, get_some_local_dynamic_name ());
8187 return;
8189 case 'Y':
8190 /* Adjust the operand to take into account a RESTORE operation. */
8191 if (GET_CODE (x) == CONST_INT)
8192 break;
8193 else if (GET_CODE (x) != REG)
8194 output_operand_lossage ("invalid %%Y operand");
8195 else if (REGNO (x) < 8)
8196 fputs (reg_names[REGNO (x)], file);
8197 else if (REGNO (x) >= 24 && REGNO (x) < 32)
8198 fputs (reg_names[REGNO (x)-16], file);
8199 else
8200 output_operand_lossage ("invalid %%Y operand");
8201 return;
8202 case 'L':
8203 /* Print out the low order register name of a register pair. */
8204 if (WORDS_BIG_ENDIAN)
8205 fputs (reg_names[REGNO (x)+1], file);
8206 else
8207 fputs (reg_names[REGNO (x)], file);
8208 return;
8209 case 'H':
8210 /* Print out the high order register name of a register pair. */
8211 if (WORDS_BIG_ENDIAN)
8212 fputs (reg_names[REGNO (x)], file);
8213 else
8214 fputs (reg_names[REGNO (x)+1], file);
8215 return;
8216 case 'R':
8217 /* Print out the second register name of a register pair or quad.
8218 I.e., R (%o0) => %o1. */
8219 fputs (reg_names[REGNO (x)+1], file);
8220 return;
8221 case 'S':
8222 /* Print out the third register name of a register quad.
8223 I.e., S (%o0) => %o2. */
8224 fputs (reg_names[REGNO (x)+2], file);
8225 return;
8226 case 'T':
8227 /* Print out the fourth register name of a register quad.
8228 I.e., T (%o0) => %o3. */
8229 fputs (reg_names[REGNO (x)+3], file);
8230 return;
8231 case 'x':
8232 /* Print a condition code register. */
8233 if (REGNO (x) == SPARC_ICC_REG)
8235 /* We don't handle CC[X]_NOOVmode because they're not supposed
8236 to occur here. */
8237 if (GET_MODE (x) == CCmode)
8238 fputs ("%icc", file);
8239 else if (GET_MODE (x) == CCXmode)
8240 fputs ("%xcc", file);
8241 else
8242 gcc_unreachable ();
8244 else
8245 /* %fccN register */
8246 fputs (reg_names[REGNO (x)], file);
8247 return;
8248 case 'm':
8249 /* Print the operand's address only. */
8250 output_address (XEXP (x, 0));
8251 return;
8252 case 'r':
8253 /* In this case we need a register. Use %g0 if the
8254 operand is const0_rtx. */
8255 if (x == const0_rtx
8256 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
8258 fputs ("%g0", file);
8259 return;
8261 else
8262 break;
8264 case 'A':
8265 switch (GET_CODE (x))
8267 case IOR: fputs ("or", file); break;
8268 case AND: fputs ("and", file); break;
8269 case XOR: fputs ("xor", file); break;
8270 default: output_operand_lossage ("invalid %%A operand");
8272 return;
8274 case 'B':
8275 switch (GET_CODE (x))
8277 case IOR: fputs ("orn", file); break;
8278 case AND: fputs ("andn", file); break;
8279 case XOR: fputs ("xnor", file); break;
8280 default: output_operand_lossage ("invalid %%B operand");
8282 return;
8284 /* This is used by the conditional move instructions. */
8285 case 'C':
8287 enum rtx_code rc = GET_CODE (x);
8289 switch (rc)
8291 case NE: fputs ("ne", file); break;
8292 case EQ: fputs ("e", file); break;
8293 case GE: fputs ("ge", file); break;
8294 case GT: fputs ("g", file); break;
8295 case LE: fputs ("le", file); break;
8296 case LT: fputs ("l", file); break;
8297 case GEU: fputs ("geu", file); break;
8298 case GTU: fputs ("gu", file); break;
8299 case LEU: fputs ("leu", file); break;
8300 case LTU: fputs ("lu", file); break;
8301 case LTGT: fputs ("lg", file); break;
8302 case UNORDERED: fputs ("u", file); break;
8303 case ORDERED: fputs ("o", file); break;
8304 case UNLT: fputs ("ul", file); break;
8305 case UNLE: fputs ("ule", file); break;
8306 case UNGT: fputs ("ug", file); break;
8307 case UNGE: fputs ("uge", file); break;
8308 case UNEQ: fputs ("ue", file); break;
8309 default: output_operand_lossage ("invalid %%C operand");
8311 return;
8314 /* These are used by the movr instruction pattern. */
8315 case 'D':
8317 enum rtx_code rc = GET_CODE (x);
8318 switch (rc)
8320 case NE: fputs ("ne", file); break;
8321 case EQ: fputs ("e", file); break;
8322 case GE: fputs ("gez", file); break;
8323 case LT: fputs ("lz", file); break;
8324 case LE: fputs ("lez", file); break;
8325 case GT: fputs ("gz", file); break;
8326 default: output_operand_lossage ("invalid %%D operand");
8328 return;
8331 case 'b':
8333 /* Print a sign-extended character. */
8334 int i = trunc_int_for_mode (INTVAL (x), QImode);
8335 fprintf (file, "%d", i);
8336 return;
8339 case 'f':
8340 /* Operand must be a MEM; write its address. */
8341 if (GET_CODE (x) != MEM)
8342 output_operand_lossage ("invalid %%f operand");
8343 output_address (XEXP (x, 0));
8344 return;
8346 case 's':
8348 /* Print a sign-extended 32-bit value. */
8349 HOST_WIDE_INT i;
8350 if (GET_CODE(x) == CONST_INT)
8351 i = INTVAL (x);
8352 else if (GET_CODE(x) == CONST_DOUBLE)
8353 i = CONST_DOUBLE_LOW (x);
8354 else
8356 output_operand_lossage ("invalid %%s operand");
8357 return;
8359 i = trunc_int_for_mode (i, SImode);
8360 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8361 return;
8364 case 0:
8365 /* Do nothing special. */
8366 break;
8368 default:
8369 /* Undocumented flag. */
8370 output_operand_lossage ("invalid operand output code");
8373 if (GET_CODE (x) == REG)
8374 fputs (reg_names[REGNO (x)], file);
8375 else if (GET_CODE (x) == MEM)
8377 fputc ('[', file);
8378 /* Poor Sun assembler doesn't understand absolute addressing. */
8379 if (CONSTANT_P (XEXP (x, 0)))
8380 fputs ("%g0+", file);
8381 output_address (XEXP (x, 0));
8382 fputc (']', file);
8384 else if (GET_CODE (x) == HIGH)
8386 fputs ("%hi(", file);
8387 output_addr_const (file, XEXP (x, 0));
8388 fputc (')', file);
8390 else if (GET_CODE (x) == LO_SUM)
8392 sparc_print_operand (file, XEXP (x, 0), 0);
8393 if (TARGET_CM_MEDMID)
8394 fputs ("+%l44(", file);
8395 else
8396 fputs ("+%lo(", file);
8397 output_addr_const (file, XEXP (x, 1));
8398 fputc (')', file);
8400 else if (GET_CODE (x) == CONST_DOUBLE
8401 && (GET_MODE (x) == VOIDmode
8402 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8404 if (CONST_DOUBLE_HIGH (x) == 0)
8405 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8406 else if (CONST_DOUBLE_HIGH (x) == -1
8407 && CONST_DOUBLE_LOW (x) < 0)
8408 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8409 else
8410 output_operand_lossage ("long long constant not a valid immediate operand");
8412 else if (GET_CODE (x) == CONST_DOUBLE)
8413 output_operand_lossage ("floating point constant not a valid immediate operand");
8414 else { output_addr_const (file, x); }
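/* Usage sketch (hypothetical template "b%*%C1\t%l0%("): the '*' prints
   ",a" when the delay slot is empty and we are optimizing on a pre-V9
   CPU, while '(' prints a literal "\n\t nop" in the complementary
   case, so a single template covers both the annulled and the padded
   form.  Likewise "%#" appends a nop unless the slot was filled.  */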
8417 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8419 static void
8420 sparc_print_operand_address (FILE *file, rtx x)
8422 register rtx base, index = 0;
8423 int offset = 0;
8424 register rtx addr = x;
8426 if (REG_P (addr))
8427 fputs (reg_names[REGNO (addr)], file);
8428 else if (GET_CODE (addr) == PLUS)
8430 if (CONST_INT_P (XEXP (addr, 0)))
8431 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8432 else if (CONST_INT_P (XEXP (addr, 1)))
8433 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8434 else
8435 base = XEXP (addr, 0), index = XEXP (addr, 1);
8436 if (GET_CODE (base) == LO_SUM)
8438 gcc_assert (USE_AS_OFFSETABLE_LO10
8439 && TARGET_ARCH64
8440 && ! TARGET_CM_MEDMID);
8441 output_operand (XEXP (base, 0), 0);
8442 fputs ("+%lo(", file);
8443 output_address (XEXP (base, 1));
8444 fprintf (file, ")+%d", offset);
8446 else
8448 fputs (reg_names[REGNO (base)], file);
8449 if (index == 0)
8450 fprintf (file, "%+d", offset);
8451 else if (REG_P (index))
8452 fprintf (file, "+%s", reg_names[REGNO (index)]);
8453 else if (GET_CODE (index) == SYMBOL_REF
8454 || GET_CODE (index) == LABEL_REF
8455 || GET_CODE (index) == CONST)
8456 fputc ('+', file), output_addr_const (file, index);
8457 else gcc_unreachable ();
8460 else if (GET_CODE (addr) == MINUS
8461 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8463 output_addr_const (file, XEXP (addr, 0));
8464 fputs ("-(", file);
8465 output_addr_const (file, XEXP (addr, 1));
8466 fputs ("-.)", file);
8468 else if (GET_CODE (addr) == LO_SUM)
8470 output_operand (XEXP (addr, 0), 0);
8471 if (TARGET_CM_MEDMID)
8472 fputs ("+%l44(", file);
8473 else
8474 fputs ("+%lo(", file);
8475 output_address (XEXP (addr, 1));
8476 fputc (')', file);
8478 else if (flag_pic
8479 && GET_CODE (addr) == CONST
8480 && GET_CODE (XEXP (addr, 0)) == MINUS
8481 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8482 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8483 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8485 addr = XEXP (addr, 0);
8486 output_addr_const (file, XEXP (addr, 0));
8487 /* Group the args of the second CONST in parenthesis. */
8488 fputs ("-(", file);
8489 /* Skip past the second CONST--it does nothing for us. */
8490 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8491 /* Close the parenthesis. */
8492 fputc (')', file);
8494 else
8496 output_addr_const (file, addr);
8500 /* Target hook for assembling integer objects. The sparc version has
8501 special handling for aligned DI-mode objects. */
8503 static bool
8504 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8506 /* ??? We only output .xword's for symbols and only then in environments
8507 where the assembler can handle them. */
8508 if (aligned_p && size == 8
8509 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8511 if (TARGET_V9)
8513 assemble_integer_with_op ("\t.xword\t", x);
8514 return true;
8516 else
8518 assemble_aligned_integer (4, const0_rtx);
8519 assemble_aligned_integer (4, x);
8520 return true;
8523 return default_assemble_integer (x, size, aligned_p);
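/* Output sketch (illustrative): for an aligned 8-byte reference to a
   symbol "foo" this emits

	.xword	foo

   on V9, and on pre-V9 targets roughly the big-endian pair

	.word	0
	.word	foo

   since a 32-bit symbol value can only occupy the low word.  */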
8526 /* Return the value of a code used in the .proc pseudo-op that says
8527 what kind of result this function returns. For non-C types, we pick
8528 the closest C type. */
8530 #ifndef SHORT_TYPE_SIZE
8531 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8532 #endif
8534 #ifndef INT_TYPE_SIZE
8535 #define INT_TYPE_SIZE BITS_PER_WORD
8536 #endif
8538 #ifndef LONG_TYPE_SIZE
8539 #define LONG_TYPE_SIZE BITS_PER_WORD
8540 #endif
8542 #ifndef LONG_LONG_TYPE_SIZE
8543 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8544 #endif
8546 #ifndef FLOAT_TYPE_SIZE
8547 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8548 #endif
8550 #ifndef DOUBLE_TYPE_SIZE
8551 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8552 #endif
8554 #ifndef LONG_DOUBLE_TYPE_SIZE
8555 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8556 #endif
8558 unsigned long
8559 sparc_type_code (register tree type)
8561 register unsigned long qualifiers = 0;
8562 register unsigned shift;
8564 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8565 setting more, since some assemblers will give an error for this. Also,
8566 we must be careful to avoid shifts of 32 bits or more to avoid getting
8567 unpredictable results. */
8569 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8571 switch (TREE_CODE (type))
8573 case ERROR_MARK:
8574 return qualifiers;
8576 case ARRAY_TYPE:
8577 qualifiers |= (3 << shift);
8578 break;
8580 case FUNCTION_TYPE:
8581 case METHOD_TYPE:
8582 qualifiers |= (2 << shift);
8583 break;
8585 case POINTER_TYPE:
8586 case REFERENCE_TYPE:
8587 case OFFSET_TYPE:
8588 qualifiers |= (1 << shift);
8589 break;
8591 case RECORD_TYPE:
8592 return (qualifiers | 8);
8594 case UNION_TYPE:
8595 case QUAL_UNION_TYPE:
8596 return (qualifiers | 9);
8598 case ENUMERAL_TYPE:
8599 return (qualifiers | 10);
8601 case VOID_TYPE:
8602 return (qualifiers | 16);
8604 case INTEGER_TYPE:
8605 /* If this is a range type, consider it to be the underlying
8606 type. */
8607 if (TREE_TYPE (type) != 0)
8608 break;
8610 /* Carefully distinguish all the standard types of C,
8611 without messing up if the language is not C. We do this by
8612 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8613 look at both the names and the above fields, but that's redundant.
8614 Any type whose size is between two C types will be considered
8615 to be the wider of the two types. Also, we do not have a
8616 special code to use for "long long", so anything wider than
8617 long is treated the same. Note that we can't distinguish
8618 between "int" and "long" in this code if they are the same
8619 size, but that's fine, since neither can the assembler. */
8621 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8622 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8624 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8625 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8627 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8628 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8630 else
8631 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8633 case REAL_TYPE:
8634 /* If this is a range type, consider it to be the underlying
8635 type. */
8636 if (TREE_TYPE (type) != 0)
8637 break;
8639 /* Carefully distinguish all the standard types of C,
8640 without messing up if the language is not C. */
8642 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8643 return (qualifiers | 6);
8645 else
8646 return (qualifiers | 7);
8648 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8649 /* ??? We need to distinguish between double and float complex types,
8650 but I don't know how yet because I can't reach this code from
8651 existing front-ends. */
8652 return (qualifiers | 7); /* Who knows? */
8654 case VECTOR_TYPE:
8655 case BOOLEAN_TYPE: /* Boolean truth value type. */
8656 case LANG_TYPE:
8657 case NULLPTR_TYPE:
8658 return qualifiers;
8660 default:
8661 gcc_unreachable (); /* Not a type! */
8665 return qualifiers;
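/* Worked example (illustrative): for "int **" the loop sees two
   POINTER_TYPEs and then the INTEGER_TYPE, so

     qualifiers = (1 << 6) | (1 << 8) = 0x140

   and the INTEGER_TYPE case returns 0x140 | 4 = 0x144, i.e. base code
   4 ("int") with two levels of indirection encoded above bit 6.  */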
8668 /* Nested function support. */
8670 /* Emit RTL insns to initialize the variable parts of a trampoline.
8671 FNADDR is an RTX for the address of the function's pure code.
8672 CXT is an RTX for the static chain value for the function.
8674 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8675 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8676 (to store insns). This is a bit excessive. Perhaps a different
8677 mechanism would be better here.
8679 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8681 static void
8682 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8684 /* SPARC 32-bit trampoline:
8686 sethi %hi(fn), %g1
8687 sethi %hi(static), %g2
8688 jmp %g1+%lo(fn)
8689 or %g2, %lo(static), %g2
8691 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8692 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
8695 emit_move_insn
8696 (adjust_address (m_tramp, SImode, 0),
8697 expand_binop (SImode, ior_optab,
8698 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8699 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8700 NULL_RTX, 1, OPTAB_DIRECT));
8702 emit_move_insn
8703 (adjust_address (m_tramp, SImode, 4),
8704 expand_binop (SImode, ior_optab,
8705 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8706 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8707 NULL_RTX, 1, OPTAB_DIRECT));
8709 emit_move_insn
8710 (adjust_address (m_tramp, SImode, 8),
8711 expand_binop (SImode, ior_optab,
8712 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8713 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8714 NULL_RTX, 1, OPTAB_DIRECT));
8716 emit_move_insn
8717 (adjust_address (m_tramp, SImode, 12),
8718 expand_binop (SImode, ior_optab,
8719 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8720 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8721 NULL_RTX, 1, OPTAB_DIRECT));
8723 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8724 aligned on a 16 byte boundary so one flush clears it all. */
8725 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8726 if (sparc_cpu != PROCESSOR_ULTRASPARC
8727 && sparc_cpu != PROCESSOR_ULTRASPARC3
8728 && sparc_cpu != PROCESSOR_NIAGARA
8729 && sparc_cpu != PROCESSOR_NIAGARA2
8730 && sparc_cpu != PROCESSOR_NIAGARA3
8731 && sparc_cpu != PROCESSOR_NIAGARA4)
8732 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8734 /* Call __enable_execute_stack after writing onto the stack to make sure
8735 the stack address is accessible. */
8736 #ifdef HAVE_ENABLE_EXECUTE_STACK
8737 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8738 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8739 #endif
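/* Numeric sketch (made-up address): for fn == 0x12345678 the first
   word becomes 0x03000000 | (0x12345678 >> 10) == 0x03048d15, i.e.
   "sethi %hi(0x12345678), %g1", and the third word becomes
   0x81c06000 | (0x12345678 & 0x3ff) == 0x81c06278, i.e.
   "jmp %g1 + %lo(0x12345678)".  */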
8743 /* The 64-bit version is simpler because it makes more sense to load the
8744 values as "immediate" data out of the trampoline. It's also easier since
8745 we can read the PC without clobbering a register. */
8747 static void
8748 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8750 /* SPARC 64-bit trampoline:
8752 rd %pc, %g1
8753 ldx [%g1+24], %g5
8754 jmp %g5
8755 ldx [%g1+16], %g5
8756 +16 bytes data
8759 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8760 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8761 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8762 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8763 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8764 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8765 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8766 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8767 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8768 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8769 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8771 if (sparc_cpu != PROCESSOR_ULTRASPARC
8772 && sparc_cpu != PROCESSOR_ULTRASPARC3
8773 && sparc_cpu != PROCESSOR_NIAGARA
8774 && sparc_cpu != PROCESSOR_NIAGARA2
8775 && sparc_cpu != PROCESSOR_NIAGARA3
8776 && sparc_cpu != PROCESSOR_NIAGARA4)
8777 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8779 /* Call __enable_execute_stack after writing onto the stack to make sure
8780 the stack address is accessible. */
8781 #ifdef HAVE_ENABLE_EXECUTE_STACK
8782 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8783 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8784 #endif
8787 /* Worker for TARGET_TRAMPOLINE_INIT. */
8789 static void
8790 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8792 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8793 cxt = force_reg (Pmode, cxt);
8794 if (TARGET_ARCH64)
8795 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8796 else
8797 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8800 /* Adjust the cost of a scheduling dependency. Return the new cost of
8801 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8803 static int
8804 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8806 enum attr_type insn_type;
8808 if (! recog_memoized (insn))
8809 return 0;
8811 insn_type = get_attr_type (insn);
8813 if (REG_NOTE_KIND (link) == 0)
8815 /* Data dependency; DEP_INSN writes a register that INSN reads some
8816 cycles later. */
8818 /* If a load, then the dependence must be on the memory address;
8819 add an extra "cycle". Note that the cost could be two cycles
8820 if the reg was written late in an instruction group; we cannot tell
8821 here. */
8822 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8823 return cost + 3;
8825 /* Get the delay only if the address of the store is the dependence. */
8826 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8828 rtx pat = PATTERN(insn);
8829 rtx dep_pat = PATTERN (dep_insn);
8831 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8832 return cost; /* This should not happen! */
8834 /* The dependency between the two instructions was on the data that
8835 is being stored. Assume that this implies that the address of the
8836 store is not dependent. */
8837 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8838 return cost;
8840 return cost + 3; /* An approximation. */
8843 /* A shift instruction cannot receive its data from an instruction
8844 in the same cycle; add a one cycle penalty. */
8845 if (insn_type == TYPE_SHIFT)
8846 return cost + 3; /* Split before cascade into shift. */
8848 else
8850 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8851 INSN writes some cycles later. */
8853 /* These are only significant for the fpu unit; writing a fp reg before
8854 the fpu has finished with it stalls the processor. */
8856 /* Reusing an integer register causes no problems. */
8857 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8858 return 0;
8861 return cost;
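/* Example dependence (illustrative RTL): for

	(set (reg:SI 9) (reg:SI 8))		<- DEP_INSN
	(set (mem:SI ...) (reg:SI 9))		<- INSN, a store

   SET_DEST of the dependence equals SET_SRC of the store, so only the
   stored data is dependent and the cost is left alone; had register 9
   fed the store address instead, three cycles would be added.  */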
8864 static int
8865 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8867 enum attr_type insn_type, dep_type;
8868 rtx pat = PATTERN(insn);
8869 rtx dep_pat = PATTERN (dep_insn);
8871 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8872 return cost;
8874 insn_type = get_attr_type (insn);
8875 dep_type = get_attr_type (dep_insn);
8877 switch (REG_NOTE_KIND (link))
8879 case 0:
8880 /* Data dependency; DEP_INSN writes a register that INSN reads some
8881 cycles later. */
8883 switch (insn_type)
8885 case TYPE_STORE:
8886 case TYPE_FPSTORE:
8887 /* Get the delay iff the address of the store is the dependence. */
8888 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8889 return cost;
8891 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8892 return cost;
8893 return cost + 3;
8895 case TYPE_LOAD:
8896 case TYPE_SLOAD:
8897 case TYPE_FPLOAD:
8898 /* If a load, then the dependence must be on the memory address. If
8899 the addresses aren't equal, then it might be a false dependency. */
8900 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8902 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8903 || GET_CODE (SET_DEST (dep_pat)) != MEM
8904 || GET_CODE (SET_SRC (pat)) != MEM
8905 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8906 XEXP (SET_SRC (pat), 0)))
8907 return cost + 2;
8909 return cost + 8;
8911 break;
8913 case TYPE_BRANCH:
8914 /* Compare to branch latency is 0. There is no benefit from
8915 separating compare and branch. */
8916 if (dep_type == TYPE_COMPARE)
8917 return 0;
8918 /* Floating point compare to branch latency is less than
8919 compare to conditional move. */
8920 if (dep_type == TYPE_FPCMP)
8921 return cost - 1;
8922 break;
8923 default:
8924 break;
8926 break;
8928 case REG_DEP_ANTI:
8929 /* Anti-dependencies only penalize the fpu unit. */
8930 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8931 return 0;
8932 break;
8934 default:
8935 break;
8938 return cost;
8941 static int
8942 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
8944 switch (sparc_cpu)
8946 case PROCESSOR_SUPERSPARC:
8947 cost = supersparc_adjust_cost (insn, link, dep, cost);
8948 break;
8949 case PROCESSOR_HYPERSPARC:
8950 case PROCESSOR_SPARCLITE86X:
8951 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8952 break;
8953 default:
8954 break;
8956 return cost;
8959 static void
8960 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8961 int sched_verbose ATTRIBUTE_UNUSED,
8962 int max_ready ATTRIBUTE_UNUSED)
8965 static int
8966 sparc_use_sched_lookahead (void)
8968 if (sparc_cpu == PROCESSOR_NIAGARA
8969 || sparc_cpu == PROCESSOR_NIAGARA2
8970 || sparc_cpu == PROCESSOR_NIAGARA3)
8971 return 0;
8972 if (sparc_cpu == PROCESSOR_NIAGARA4)
8973 return 2;
8974 if (sparc_cpu == PROCESSOR_ULTRASPARC
8975 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8976 return 4;
8977 if ((1 << sparc_cpu) &
8978 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8979 (1 << PROCESSOR_SPARCLITE86X)))
8980 return 3;
8981 return 0;
8984 static int
8985 sparc_issue_rate (void)
8987 switch (sparc_cpu)
8989 case PROCESSOR_NIAGARA:
8990 case PROCESSOR_NIAGARA2:
8991 case PROCESSOR_NIAGARA3:
8992 default:
8993 return 1;
8994 case PROCESSOR_NIAGARA4:
8995 case PROCESSOR_V9:
8996 /* Assume V9 processors are capable of at least dual-issue. */
8997 return 2;
8998 case PROCESSOR_SUPERSPARC:
8999 return 3;
9000 case PROCESSOR_HYPERSPARC:
9001 case PROCESSOR_SPARCLITE86X:
9002 return 2;
9003 case PROCESSOR_ULTRASPARC:
9004 case PROCESSOR_ULTRASPARC3:
9005 return 4;
9009 static int
9010 set_extends (rtx insn)
9012 register rtx pat = PATTERN (insn);
9014 switch (GET_CODE (SET_SRC (pat)))
9016 /* Load and some shift instructions zero extend. */
9017 case MEM:
9018 case ZERO_EXTEND:
9019 /* sethi clears the high bits */
9020 case HIGH:
9021 /* LO_SUM is used with sethi. sethi clears the high
9022 bits and the values used with lo_sum are positive. */
9023 case LO_SUM:
9024 /* Store flag stores 0 or 1 */
9025 case LT: case LTU:
9026 case GT: case GTU:
9027 case LE: case LEU:
9028 case GE: case GEU:
9029 case EQ:
9030 case NE:
9031 return 1;
9032 case AND:
9034 rtx op0 = XEXP (SET_SRC (pat), 0);
9035 rtx op1 = XEXP (SET_SRC (pat), 1);
9036 if (GET_CODE (op1) == CONST_INT)
9037 return INTVAL (op1) >= 0;
9038 if (GET_CODE (op0) != REG)
9039 return 0;
9040 if (sparc_check_64 (op0, insn) == 1)
9041 return 1;
9042 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9044 case IOR:
9045 case XOR:
9047 rtx op0 = XEXP (SET_SRC (pat), 0);
9048 rtx op1 = XEXP (SET_SRC (pat), 1);
9049 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
9050 return 0;
9051 if (GET_CODE (op1) == CONST_INT)
9052 return INTVAL (op1) >= 0;
9053 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9055 case LSHIFTRT:
9056 return GET_MODE (SET_SRC (pat)) == SImode;
9057 /* Positive integers leave the high bits zero. */
9058 case CONST_DOUBLE:
9059 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
9060 case CONST_INT:
9061 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
9062 case ASHIFTRT:
9063 case SIGN_EXTEND:
9064 return - (GET_MODE (SET_SRC (pat)) == SImode);
9065 case REG:
9066 return sparc_check_64 (SET_SRC (pat), insn);
9067 default:
9068 return 0;
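/* Examples (illustrative): a (set (reg:DI ...) (zero_extend:DI ...))
   and an SImode LSHIFTRT both yield 1 (upper 32 bits known zero), an
   SImode SIGN_EXTEND or ASHIFTRT yields -1 (upper bits are a sign
   copy), and anything unrecognized falls through to 0 (unknown).  */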
9072 /* We _ought_ to have only one kind per function, but... */
9073 static GTY(()) rtx sparc_addr_diff_list;
9074 static GTY(()) rtx sparc_addr_list;
9076 void
9077 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
9079 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9080 if (diff)
9081 sparc_addr_diff_list
9082 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
9083 else
9084 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
9087 static void
9088 sparc_output_addr_vec (rtx vec)
9090 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9091 int idx, vlen = XVECLEN (body, 0);
9093 #ifdef ASM_OUTPUT_ADDR_VEC_START
9094 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9095 #endif
9097 #ifdef ASM_OUTPUT_CASE_LABEL
9098 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9099 NEXT_INSN (lab));
9100 #else
9101 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9102 #endif
9104 for (idx = 0; idx < vlen; idx++)
9106 ASM_OUTPUT_ADDR_VEC_ELT
9107 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9110 #ifdef ASM_OUTPUT_ADDR_VEC_END
9111 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9112 #endif
9115 static void
9116 sparc_output_addr_diff_vec (rtx vec)
9118 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9119 rtx base = XEXP (XEXP (body, 0), 0);
9120 int idx, vlen = XVECLEN (body, 1);
9122 #ifdef ASM_OUTPUT_ADDR_VEC_START
9123 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9124 #endif
9126 #ifdef ASM_OUTPUT_CASE_LABEL
9127 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9128 NEXT_INSN (lab));
9129 #else
9130 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9131 #endif
9133 for (idx = 0; idx < vlen; idx++)
9135 ASM_OUTPUT_ADDR_DIFF_ELT
9136 (asm_out_file,
9137 body,
9138 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
9139 CODE_LABEL_NUMBER (base));
9142 #ifdef ASM_OUTPUT_ADDR_VEC_END
9143 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9144 #endif
9147 static void
9148 sparc_output_deferred_case_vectors (void)
9150 rtx t;
9151 int align;
9153 if (sparc_addr_list == NULL_RTX
9154 && sparc_addr_diff_list == NULL_RTX)
9155 return;
9157 /* Align to cache line in the function's code section. */
9158 switch_to_section (current_function_section ());
9160 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9161 if (align > 0)
9162 ASM_OUTPUT_ALIGN (asm_out_file, align);
9164 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
9165 sparc_output_addr_vec (XEXP (t, 0));
9166 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
9167 sparc_output_addr_diff_vec (XEXP (t, 0));
9169 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
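/* Output sketch (label numbers invented): a deferred difference
   vector with base label 5 and three cases prints roughly

	.L5:
	.word	.L8-.L5
	.word	.L9-.L5
	.word	.L11-.L5

   after the ASM_OUTPUT_ALIGN above has aligned the table.  */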
9172 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
9173 unknown. Return 1 if the high bits are zero, -1 if the register is
9174 sign extended. */
9175 int
9176 sparc_check_64 (rtx x, rtx insn)
9178 /* If a register is set only once it is safe to ignore insns this
9179 code does not know how to handle. The loop will either recognize
9180 the single set and return the correct value or fail to recognize
9181 it and return 0. */
9182 int set_once = 0;
9183 rtx y = x;
9185 gcc_assert (GET_CODE (x) == REG);
9187 if (GET_MODE (x) == DImode)
9188 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
9190 if (flag_expensive_optimizations
9191 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
9192 set_once = 1;
9194 if (insn == 0)
9196 if (set_once)
9197 insn = get_last_insn_anywhere ();
9198 else
9199 return 0;
9202 while ((insn = PREV_INSN (insn)))
9204 switch (GET_CODE (insn))
9206 case JUMP_INSN:
9207 case NOTE:
9208 break;
9209 case CODE_LABEL:
9210 case CALL_INSN:
9211 default:
9212 if (! set_once)
9213 return 0;
9214 break;
9215 case INSN:
9217 rtx pat = PATTERN (insn);
9218 if (GET_CODE (pat) != SET)
9219 return 0;
9220 if (rtx_equal_p (x, SET_DEST (pat)))
9221 return set_extends (insn);
9222 if (y && rtx_equal_p (y, SET_DEST (pat)))
9223 return set_extends (insn);
9224 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
9225 return 0;
9229 return 0;
9232 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
9233 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
9235 const char *
9236 output_v8plus_shift (rtx insn, rtx *operands, const char *opcode)
9238 static char asm_code[60];
9240 /* The scratch register is only required when the destination
9241 register is not a 64-bit global or out register. */
9242 if (which_alternative != 2)
9243 operands[3] = operands[0];
9245 /* We can only shift by constants <= 63. */
9246 if (GET_CODE (operands[2]) == CONST_INT)
9247 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
9249 if (GET_CODE (operands[1]) == CONST_INT)
9251 output_asm_insn ("mov\t%1, %3", operands);
9253 else
9255 output_asm_insn ("sllx\t%H1, 32, %3", operands);
9256 if (sparc_check_64 (operands[1], insn) <= 0)
9257 output_asm_insn ("srl\t%L1, 0, %L1", operands);
9258 output_asm_insn ("or\t%L1, %3, %3", operands);
9261 strcpy (asm_code, opcode);
9263 if (which_alternative != 2)
9264 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
9265 else
9266 return
9267 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
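/* Expansion sketch (illustrative, scratch %3 == %0 here): for "sllx"
   with a non-constant 64-bit input the routine emits roughly

	sllx	%H1, 32, %3	! high word into the upper half
	srl	%L1, 0, %L1	! zero-extend the low word if need be
	or	%L1, %3, %3	! full 64-bit value in %3
	sllx	%3, %2, %L0	! the shift proper
	srlx	%L0, 32, %H0	! split the result back into a pair

   so a 64-bit shift survives on V8+, where only the global and out
   registers are 64 bits wide.  */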
9270 /* Output rtl to increment the profiler label LABELNO
9271 for profiling a function entry. */
9273 void
9274 sparc_profile_hook (int labelno)
9276 char buf[32];
9277 rtx lab, fun;
9279 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
9280 if (NO_PROFILE_COUNTERS)
9282 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
9284 else
9286 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9287 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
9288 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
9292 #ifdef TARGET_SOLARIS
9293 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9295 static void
9296 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9297 tree decl ATTRIBUTE_UNUSED)
9299 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
9301 solaris_elf_asm_comdat_section (name, flags, decl);
9302 return;
9305 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9307 if (!(flags & SECTION_DEBUG))
9308 fputs (",#alloc", asm_out_file);
9309 if (flags & SECTION_WRITE)
9310 fputs (",#write", asm_out_file);
9311 if (flags & SECTION_TLS)
9312 fputs (",#tls", asm_out_file);
9313 if (flags & SECTION_CODE)
9314 fputs (",#execinstr", asm_out_file);
9316 /* ??? Handle SECTION_BSS. */
9318 fputc ('\n', asm_out_file);
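/* Sample directive (illustrative): a writable TLS data section comes
   out as

	.section ".tdata",#alloc,#write,#tls

   using the Solaris as flag syntax instead of the usual ELF flags
   string.  */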
9320 #endif /* TARGET_SOLARIS */
9322 /* We do not allow indirect calls to be optimized into sibling calls.
9324 We cannot use sibling calls when delayed branches are disabled
9325 because they will likely require the call delay slot to be filled.
9327 Also, on SPARC 32-bit we cannot emit a sibling call when the
9328 current function returns a structure. This is because the "unimp
9329 after call" convention would cause the callee to return to the
9330 wrong place. The generic code already disallows cases where the
9331 function being called returns a structure.
9333 It may seem strange how this last case could occur. Usually there
9334 is code after the call which jumps to epilogue code which dumps the
9335 return value into the struct return area. That ought to invalidate
9336 the sibling call right? Well, in the C++ case we can end up passing
9337 the pointer to the struct return area to a constructor (which returns
9338 void) and then nothing else happens. Such a sibling call would look
9339 valid without the added check here.
9341 VxWorks PIC PLT entries require the global pointer to be initialized
9342 on entry. We therefore can't emit sibling calls to them. */
9343 static bool
9344 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9346 return (decl
9347 && flag_delayed_branch
9348 && (TARGET_ARCH64 || ! cfun->returns_struct)
9349 && !(TARGET_VXWORKS_RTP
9350 && flag_pic
9351 && !targetm.binds_local_p (decl)));
9354 /* libfunc renaming. */
9356 static void
9357 sparc_init_libfuncs (void)
9359 if (TARGET_ARCH32)
9361 /* Use the subroutines that Sun's library provides for integer
9362 multiply and divide. The `*' prevents an underscore from
9363 being prepended by the compiler. .umul is a little faster
9364 than .mul. */
9365 set_optab_libfunc (smul_optab, SImode, "*.umul");
9366 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9367 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9368 set_optab_libfunc (smod_optab, SImode, "*.rem");
9369 set_optab_libfunc (umod_optab, SImode, "*.urem");
9371 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
9372 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9373 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9374 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9375 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9376 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9378 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
9379 is because with soft-float, the SFmode and DFmode sqrt
9380 instructions will be absent, and the compiler will notice and
9381 try to use the TFmode sqrt instruction for calls to the
9382 builtin function sqrt, but this fails. */
9383 if (TARGET_FPU)
9384 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9386 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9387 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9388 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9389 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9390 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9391 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9393 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9394 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9395 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9396 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9398 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9399 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9400 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9401 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9403 if (DITF_CONVERSION_LIBFUNCS)
9405 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9406 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9407 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9408 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9411 if (SUN_CONVERSION_LIBFUNCS)
9413 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9414 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9415 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9416 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9419 if (TARGET_ARCH64)
9421 /* In the SPARC 64bit ABI, SImode multiply and divide functions
9422 do not exist in the library. Make sure the compiler does not
9423 emit calls to them by accident. (It should always use the
9424 hardware instructions.) */
9425 set_optab_libfunc (smul_optab, SImode, 0);
9426 set_optab_libfunc (sdiv_optab, SImode, 0);
9427 set_optab_libfunc (udiv_optab, SImode, 0);
9428 set_optab_libfunc (smod_optab, SImode, 0);
9429 set_optab_libfunc (umod_optab, SImode, 0);
9431 if (SUN_INTEGER_MULTIPLY_64)
9433 set_optab_libfunc (smul_optab, DImode, "__mul64");
9434 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9435 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9436 set_optab_libfunc (smod_optab, DImode, "__rem64");
9437 set_optab_libfunc (umod_optab, DImode, "__urem64");
9440 if (SUN_CONVERSION_LIBFUNCS)
9442 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9443 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9444 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9445 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
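/* Effect sketch (illustrative): on 32-bit SPARC without hardware quad
   support, a "long double" multiply such as

     long double f (long double x, long double y) { return x * y; }

   is not expanded inline; the smul_optab entry above turns it into a
   call to the ABI routine _Q_mul.  */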
9450 static tree def_builtin(const char *name, int code, tree type)
9452 return add_builtin_function(name, type, code, BUILT_IN_MD, NULL,
9453 NULL_TREE);
9456 static tree def_builtin_const(const char *name, int code, tree type)
9458 tree t = def_builtin(name, code, type);
9460 if (t)
9461 TREE_READONLY (t) = 1;
9463 return t;
9466 /* Implement the TARGET_INIT_BUILTINS target hook.
9467 Create builtin functions for special SPARC instructions. */
9469 static void
9470 sparc_init_builtins (void)
9472 if (TARGET_VIS)
9473 sparc_vis_init_builtins ();
9476 /* Create builtin functions for VIS 1.0 instructions. */
9478 static void
9479 sparc_vis_init_builtins (void)
9481 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9482 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9483 tree v4hi = build_vector_type (intHI_type_node, 4);
9484 tree v2hi = build_vector_type (intHI_type_node, 2);
9485 tree v2si = build_vector_type (intSI_type_node, 2);
9486 tree v1si = build_vector_type (intSI_type_node, 1);
9488 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9489 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9490 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9491 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9492 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9493 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9494 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9495 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9496 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9497 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9498 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9499 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9500 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
9501 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
9502 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9503 v8qi, v8qi,
9504 intDI_type_node, 0);
9505 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
9506 v8qi, v8qi, 0);
9507 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
9508 v8qi, v8qi, 0);
9509 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9510 intDI_type_node,
9511 intDI_type_node, 0);
9512 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
9513 intSI_type_node,
9514 intSI_type_node, 0);
9515 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9516 ptr_type_node,
9517 intSI_type_node, 0);
9518 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9519 ptr_type_node,
9520 intDI_type_node, 0);
9521 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
9522 ptr_type_node,
9523 ptr_type_node, 0);
9524 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
9525 ptr_type_node,
9526 ptr_type_node, 0);
9527 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
9528 v4hi, v4hi, 0);
9529 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
9530 v2si, v2si, 0);
9531 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
9532 v4hi, v4hi, 0);
9533 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
9534 v2si, v2si, 0);
9535 tree void_ftype_di = build_function_type_list (void_type_node,
9536 intDI_type_node, 0);
9537 tree di_ftype_void = build_function_type_list (intDI_type_node,
9538 void_type_node, 0);
9539 tree void_ftype_si = build_function_type_list (void_type_node,
9540 intSI_type_node, 0);
9541 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
9542 float_type_node,
9543 float_type_node, 0);
9544 tree df_ftype_df_df = build_function_type_list (double_type_node,
9545 double_type_node,
9546 double_type_node, 0);
9548 /* Packing and expanding vectors. */
9549 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
9550 v4qi_ftype_v4hi);
9551 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9552 v8qi_ftype_v2si_v8qi);
9553 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9554 v2hi_ftype_v2si);
9555 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
9556 v4hi_ftype_v4qi);
9557 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9558 v8qi_ftype_v4qi_v4qi);
9560 /* Multiplications. */
9561 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9562 v4hi_ftype_v4qi_v4hi);
9563 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9564 v4hi_ftype_v4qi_v2hi);
9565 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9566 v4hi_ftype_v4qi_v2hi);
9567 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9568 v4hi_ftype_v8qi_v4hi);
9569 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9570 v4hi_ftype_v8qi_v4hi);
9571 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9572 v2si_ftype_v4qi_v2hi);
9573 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9574 v2si_ftype_v4qi_v2hi);
9576 /* Data aligning. */
9577 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9578 v4hi_ftype_v4hi_v4hi);
9579 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9580 v8qi_ftype_v8qi_v8qi);
9581 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9582 v2si_ftype_v2si_v2si);
9583 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
9584 di_ftype_di_di);
9586 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
9587 void_ftype_di);
9588 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
9589 di_ftype_void);
9591 if (TARGET_ARCH64)
9593 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9594 ptr_ftype_ptr_di);
9595 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
9596 ptr_ftype_ptr_di);
9598 else
9600 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9601 ptr_ftype_ptr_si);
9602 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
9603 ptr_ftype_ptr_si);
9606 /* Pixel distance. */
9607 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9608 di_ftype_v8qi_v8qi_di);
9610 /* Edge handling. */
9611 if (TARGET_ARCH64)
9613 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
9614 di_ftype_ptr_ptr);
9615 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
9616 di_ftype_ptr_ptr);
9617 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
9618 di_ftype_ptr_ptr);
9619 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
9620 di_ftype_ptr_ptr);
9621 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
9622 di_ftype_ptr_ptr);
9623 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
9624 di_ftype_ptr_ptr);
9625 if (TARGET_VIS2)
9627 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
9628 di_ftype_ptr_ptr);
9629 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
9630 di_ftype_ptr_ptr);
9631 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
9632 di_ftype_ptr_ptr);
9633 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
9634 di_ftype_ptr_ptr);
9635 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
9636 di_ftype_ptr_ptr);
9637 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
9638 di_ftype_ptr_ptr);
9641 else
9643 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
9644 si_ftype_ptr_ptr);
9645 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
9646 si_ftype_ptr_ptr);
9647 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
9648 si_ftype_ptr_ptr);
9649 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
9650 si_ftype_ptr_ptr);
9651 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
9652 si_ftype_ptr_ptr);
9653 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
9654 si_ftype_ptr_ptr);
9655 if (TARGET_VIS2)
9657 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
9658 si_ftype_ptr_ptr);
9659 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
9660 si_ftype_ptr_ptr);
9661 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
9662 si_ftype_ptr_ptr);
9663 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
9664 si_ftype_ptr_ptr);
9665 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
9666 si_ftype_ptr_ptr);
9667 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
9668 si_ftype_ptr_ptr);
9672 /* Pixel compare. */
9673 if (TARGET_ARCH64)
9675 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
9676 di_ftype_v4hi_v4hi);
9677 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
9678 di_ftype_v2si_v2si);
9679 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
9680 di_ftype_v4hi_v4hi);
9681 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
9682 di_ftype_v2si_v2si);
9683 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
9684 di_ftype_v4hi_v4hi);
9685 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
9686 di_ftype_v2si_v2si);
9687 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
9688 di_ftype_v4hi_v4hi);
9689 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
9690 di_ftype_v2si_v2si);
9692 else
9694 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
9695 si_ftype_v4hi_v4hi);
9696 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
9697 si_ftype_v2si_v2si);
9698 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
9699 si_ftype_v4hi_v4hi);
9700 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
9701 si_ftype_v2si_v2si);
9702 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
9703 si_ftype_v4hi_v4hi);
9704 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
9705 si_ftype_v2si_v2si);
9706 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
9707 si_ftype_v4hi_v4hi);
9708 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
9709 si_ftype_v2si_v2si);
9712 /* Addition and subtraction. */
9713 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
9714 v4hi_ftype_v4hi_v4hi);
9715 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
9716 v2hi_ftype_v2hi_v2hi);
9717 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
9718 v2si_ftype_v2si_v2si);
9719 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
9720 v1si_ftype_v1si_v1si);
9721 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
9722 v4hi_ftype_v4hi_v4hi);
9723 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
9724 v2hi_ftype_v2hi_v2hi);
9725 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
9726 v2si_ftype_v2si_v2si);
9727 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
9728 v1si_ftype_v1si_v1si);
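/* Illustrative sketch, not part of the port: the partitioned add/subtract
   builtins above map directly onto GCC generic vector types, e.g. (the
   typedef name is hypothetical)

     typedef short vec16x4 __attribute__ ((vector_size (8)));

     vec16x4
     add4 (vec16x4 a, vec16x4 b)
     {
       return __builtin_vis_fpadd16 (a, b);   // a single fpadd16 insn
     }
*/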
9730 /* Three-dimensional array addressing. */
9731 if (TARGET_ARCH64)
9733 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
9734 di_ftype_di_di);
9735 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
9736 di_ftype_di_di);
9737 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
9738 di_ftype_di_di);
9740 else
9742 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
9743 si_ftype_si_si);
9744 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
9745 si_ftype_si_si);
9746 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
9747 si_ftype_si_si);
9750 if (TARGET_VIS2)
9752 /* Byte mask and shuffle */
9753 if (TARGET_ARCH64)
9754 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
9755 di_ftype_di_di);
9756 else
9757 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
9758 si_ftype_si_si);
9759 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
9760 v4hi_ftype_v4hi_v4hi);
9761 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
9762 v8qi_ftype_v8qi_v8qi);
9763 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
9764 v2si_ftype_v2si_v2si);
9765 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
9766 di_ftype_di_di);
9769 if (TARGET_VIS3)
9771 if (TARGET_ARCH64)
9773 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
9774 void_ftype_di);
9775 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
9776 void_ftype_di);
9777 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
9778 void_ftype_di);
9780 else
9782 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
9783 void_ftype_si);
9784 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
9785 void_ftype_si);
9786 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
9787 void_ftype_si);
9790 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
9791 v4hi_ftype_v4hi_v4hi);
9793 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
9794 v4hi_ftype_v4hi_v4hi);
9795 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
9796 v4hi_ftype_v4hi_v4hi);
9797 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
9798 v4hi_ftype_v4hi_v4hi);
9799 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
9800 v4hi_ftype_v4hi_v4hi);
9801 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
9802 v2si_ftype_v2si_v2si);
9803 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
9804 v2si_ftype_v2si_v2si);
9805 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
9806 v2si_ftype_v2si_v2si);
9807 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
9808 v2si_ftype_v2si_v2si);
9810 if (TARGET_ARCH64)
9811 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
9812 di_ftype_v8qi_v8qi);
9813 else
9814 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
9815 si_ftype_v8qi_v8qi);
9817 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
9818 v4hi_ftype_v4hi_v4hi);
9819 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
9820 di_ftype_di_di);
9821 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
9822 di_ftype_di_di);
9824 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
9825 v4hi_ftype_v4hi_v4hi);
9826 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
9827 v2hi_ftype_v2hi_v2hi);
9828 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
9829 v4hi_ftype_v4hi_v4hi);
9830 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
9831 v2hi_ftype_v2hi_v2hi);
9832 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
9833 v2si_ftype_v2si_v2si);
9834 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
9835 v1si_ftype_v1si_v1si);
9836 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
9837 v2si_ftype_v2si_v2si);
9838 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
9839 v1si_ftype_v1si_v1si);
9841 if (TARGET_ARCH64)
9843 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
9844 di_ftype_v8qi_v8qi);
9845 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
9846 di_ftype_v8qi_v8qi);
9847 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
9848 di_ftype_v8qi_v8qi);
9849 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
9850 di_ftype_v8qi_v8qi);
9852 else
9854 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
9855 si_ftype_v8qi_v8qi);
9856 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
9857 si_ftype_v8qi_v8qi);
9858 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
9859 si_ftype_v8qi_v8qi);
9860 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
9861 si_ftype_v8qi_v8qi);
9864 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
9865 sf_ftype_sf_sf);
9866 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
9867 df_ftype_df_df);
9868 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
9869 sf_ftype_sf_sf);
9870 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
9871 df_ftype_df_df);
9872 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
9873 sf_ftype_sf_sf);
9874 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
9875 df_ftype_df_df);
9877 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
9878 di_ftype_di_di);
9879 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
9880 di_ftype_di_di);
9881 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
9882 di_ftype_di_di);
9886 /* Handle TARGET_EXPAND_BUILTIN target hook.
9887 Expand builtin functions for SPARC intrinsics. */
9889 static rtx
9890 sparc_expand_builtin (tree exp, rtx target,
9891 rtx subtarget ATTRIBUTE_UNUSED,
9892 enum machine_mode tmode ATTRIBUTE_UNUSED,
9893 int ignore ATTRIBUTE_UNUSED)
9895 tree arg;
9896 call_expr_arg_iterator iter;
9897 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9898 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9899 rtx pat, op[4];
9900 int arg_count = 0;
9901 bool nonvoid;
9903 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9905 if (nonvoid)
9907 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9908 if (!target
9909 || GET_MODE (target) != tmode
9910 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9911 op[0] = gen_reg_rtx (tmode);
9912 else
9913 op[0] = target;
9915 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9917 const struct insn_operand_data *insn_op;
9918 int idx;
9920 if (arg == error_mark_node)
9921 return NULL_RTX;
9923 arg_count++;
9924 idx = arg_count - !nonvoid;
9925 insn_op = &insn_data[icode].operand[idx];
9926 op[arg_count] = expand_normal (arg);
9928 if (insn_op->mode == V1DImode
9929 && GET_MODE (op[arg_count]) == DImode)
9930 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
9931 else if (insn_op->mode == V1SImode
9932 && GET_MODE (op[arg_count]) == SImode)
9933 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
9935 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
9936 insn_op->mode))
9937 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
9940 switch (arg_count)
9942 case 0:
9943 pat = GEN_FCN (icode) (op[0]);
9944 break;
9945 case 1:
9946 if (nonvoid)
9947 pat = GEN_FCN (icode) (op[0], op[1]);
9948 else
9949 pat = GEN_FCN (icode) (op[1]);
9950 break;
9951 case 2:
9952 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9953 break;
9954 case 3:
9955 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9956 break;
9957 default:
9958 gcc_unreachable ();
9961 if (!pat)
9962 return NULL_RTX;
9964 emit_insn (pat);
9966 if (nonvoid)
9967 return op[0];
9968 else
9969 return const0_rtx;
9972 static int
9973 sparc_vis_mul8x16 (int e8, int e16)
9975 return (e8 * e16 + 128) / 256;
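/* A worked example of the rounding above: e8 = 100, e16 = 300 gives
   (100 * 300 + 128) / 256 = 30128 / 256 = 117, i.e. 30000/256 = 117.1875
   rounded to nearest; for non-negative products the +128 bias implements
   round-to-nearest and the C division then truncates.  */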
9978 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
9979 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
9981 static void
9982 sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
9983 tree cst0, tree cst1)
9985 unsigned i, num = VECTOR_CST_NELTS (cst0);
9986 int scale;
9988 switch (fncode)
9990 case CODE_FOR_fmul8x16_vis:
9991 for (i = 0; i < num; ++i)
9993 int val
9994 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
9995 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
9996 n_elts[i] = build_int_cst (inner_type, val);
9998 break;
10000 case CODE_FOR_fmul8x16au_vis:
10001 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
10003 for (i = 0; i < num; ++i)
10005 int val
10006 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
10007 scale);
10008 n_elts[i] = build_int_cst (inner_type, val);
10010 break;
10012 case CODE_FOR_fmul8x16al_vis:
10013 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
10015 for (i = 0; i < num; ++i)
10017 int val
10018 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
10019 scale);
10020 n_elts[i] = build_int_cst (inner_type, val);
10022 break;
10024 default:
10025 gcc_unreachable ();
10029 /* Handle TARGET_FOLD_BUILTIN target hook.
10030 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
10031 result of the function call is ignored. NULL_TREE is returned if the
10032 function could not be folded. */
10034 static tree
10035 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
10036 tree *args, bool ignore)
10038 tree arg0, arg1, arg2;
10039 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
10040 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
10042 if (ignore)
10044 /* Note that a switch statement instead of the sequence of tests would
10045 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
10046 and that would yield multiple alternatives with identical values. */
10047 if (icode == CODE_FOR_alignaddrsi_vis
10048 || icode == CODE_FOR_alignaddrdi_vis
10049 || icode == CODE_FOR_wrgsr_vis
10050 || icode == CODE_FOR_bmasksi_vis
10051 || icode == CODE_FOR_bmaskdi_vis
10052 || icode == CODE_FOR_cmask8si_vis
10053 || icode == CODE_FOR_cmask8di_vis
10054 || icode == CODE_FOR_cmask16si_vis
10055 || icode == CODE_FOR_cmask16di_vis
10056 || icode == CODE_FOR_cmask32si_vis
10057 || icode == CODE_FOR_cmask32di_vis)
10058 ;
10059 else
10060 return build_zero_cst (rtype);
10063 switch (icode)
10065 case CODE_FOR_fexpand_vis:
10066 arg0 = args[0];
10067 STRIP_NOPS (arg0);
10069 if (TREE_CODE (arg0) == VECTOR_CST)
10071 tree inner_type = TREE_TYPE (rtype);
10072 tree *n_elts;
10073 unsigned i;
10075 n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
10076 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10077 n_elts[i] = build_int_cst (inner_type,
10078 TREE_INT_CST_LOW
10079 (VECTOR_CST_ELT (arg0, i)) << 4);
10080 return build_vector (rtype, n_elts);
10082 break;
10084 case CODE_FOR_fmul8x16_vis:
10085 case CODE_FOR_fmul8x16au_vis:
10086 case CODE_FOR_fmul8x16al_vis:
10087 arg0 = args[0];
10088 arg1 = args[1];
10089 STRIP_NOPS (arg0);
10090 STRIP_NOPS (arg1);
10092 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10094 tree inner_type = TREE_TYPE (rtype);
10095 tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
10096 sparc_handle_vis_mul8x16 (n_elts, icode, inner_type, arg0, arg1);
10097 return build_vector (rtype, n_elts);
10099 break;
10101 case CODE_FOR_fpmerge_vis:
10102 arg0 = args[0];
10103 arg1 = args[1];
10104 STRIP_NOPS (arg0);
10105 STRIP_NOPS (arg1);
10107 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10109 tree *n_elts = XALLOCAVEC (tree, 2 * VECTOR_CST_NELTS (arg0));
10110 unsigned i;
10111 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10113 n_elts[2*i] = VECTOR_CST_ELT (arg0, i);
10114 n_elts[2*i+1] = VECTOR_CST_ELT (arg1, i);
10117 return build_vector (rtype, n_elts);
10119 break;
10121 case CODE_FOR_pdist_vis:
10122 arg0 = args[0];
10123 arg1 = args[1];
10124 arg2 = args[2];
10125 STRIP_NOPS (arg0);
10126 STRIP_NOPS (arg1);
10127 STRIP_NOPS (arg2);
10129 if (TREE_CODE (arg0) == VECTOR_CST
10130 && TREE_CODE (arg1) == VECTOR_CST
10131 && TREE_CODE (arg2) == INTEGER_CST)
10133 bool overflow = false;
10134 double_int di_arg2 = TREE_INT_CST (arg2);
10135 double_int tmp;
10136 unsigned i;
10138 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10140 double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i));
10141 double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i));
10143 bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
10145 tmp = e1.neg_with_overflow (&neg1_ovf);
10146 tmp = e0.add_with_sign (tmp, false, &add1_ovf);
10147 if (tmp.is_negative ())
10148 tmp = tmp.neg_with_overflow (&neg2_ovf);
10150 tmp = di_arg2.add_with_sign (tmp, false, &add2_ovf);
10151 overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
10154 gcc_assert (!overflow);
10156 return build_int_cst_wide (rtype, tmp.low, tmp.high);
10159 default:
10160 break;
10163 return NULL_TREE;
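/* Illustrative sketch, not part of the port: with constant arguments the
   folder above evaluates VIS builtins at compile time.  For instance (the
   typedef names are hypothetical)

     typedef char vec8x4 __attribute__ ((vector_size (4)));
     typedef short vec16x4 __attribute__ ((vector_size (8)));

     vec16x4 v = __builtin_vis_fexpand ((vec8x4) { 1, 2, 3, 4 });

   folds to the constant { 16, 32, 48, 64 }, each element shifted left
   by 4 as in the CODE_FOR_fexpand_vis case above.  */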
10166 /* ??? This duplicates information provided to the compiler by the
10167 ??? scheduler description. Some day, teach genautomata to output
10168 ??? the latencies and then CSE will just use that. */
10170 static bool
10171 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
10172 int *total, bool speed ATTRIBUTE_UNUSED)
10174 enum machine_mode mode = GET_MODE (x);
10175 bool float_mode_p = FLOAT_MODE_P (mode);
10177 switch (code)
10179 case CONST_INT:
10180 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
10182 *total = 0;
10183 return true;
10185 /* FALLTHRU */
10187 case HIGH:
10188 *total = 2;
10189 return true;
10191 case CONST:
10192 case LABEL_REF:
10193 case SYMBOL_REF:
10194 *total = 4;
10195 return true;
10197 case CONST_DOUBLE:
10198 if (GET_MODE (x) == VOIDmode
10199 && ((CONST_DOUBLE_HIGH (x) == 0
10200 && CONST_DOUBLE_LOW (x) < 0x1000)
10201 || (CONST_DOUBLE_HIGH (x) == -1
10202 && CONST_DOUBLE_LOW (x) < 0
10203 && CONST_DOUBLE_LOW (x) >= -0x1000)))
10204 *total = 0;
10205 else
10206 *total = 8;
10207 return true;
10209 case MEM:
10210 /* If outer-code was a sign or zero extension, a cost
10211 of COSTS_N_INSNS (1) was already added in. This is
10212 why we are subtracting it back out. */
10213 if (outer_code == ZERO_EXTEND)
10215 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
10217 else if (outer_code == SIGN_EXTEND)
10219 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
10221 else if (float_mode_p)
10223 *total = sparc_costs->float_load;
10225 else
10227 *total = sparc_costs->int_load;
10230 return true;
10232 case PLUS:
10233 case MINUS:
10234 if (float_mode_p)
10235 *total = sparc_costs->float_plusminus;
10236 else
10237 *total = COSTS_N_INSNS (1);
10238 return false;
10240 case FMA:
10242 rtx sub;
10244 gcc_assert (float_mode_p);
10245 *total = sparc_costs->float_mul;
10247 sub = XEXP (x, 0);
10248 if (GET_CODE (sub) == NEG)
10249 sub = XEXP (sub, 0);
10250 *total += rtx_cost (sub, FMA, 0, speed);
10252 sub = XEXP (x, 2);
10253 if (GET_CODE (sub) == NEG)
10254 sub = XEXP (sub, 0);
10255 *total += rtx_cost (sub, FMA, 2, speed);
10256 return true;
10259 case MULT:
10260 if (float_mode_p)
10261 *total = sparc_costs->float_mul;
10262 else if (! TARGET_HARD_MUL)
10263 *total = COSTS_N_INSNS (25);
10264 else
10266 int bit_cost;
10268 bit_cost = 0;
10269 if (sparc_costs->int_mul_bit_factor)
10271 int nbits;
10273 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
10275 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
10276 for (nbits = 0; value != 0; value &= value - 1)
10277 nbits++;
10279 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10280 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10282 rtx x1 = XEXP (x, 1);
10283 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10284 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10286 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10287 nbits++;
10288 for (; value2 != 0; value2 &= value2 - 1)
10289 nbits++;
10291 else
10292 nbits = 7;
10294 if (nbits < 3)
10295 nbits = 3;
10296 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10297 bit_cost = COSTS_N_INSNS (bit_cost);
10300 if (mode == DImode)
10301 *total = sparc_costs->int_mulX + bit_cost;
10302 else
10303 *total = sparc_costs->int_mul + bit_cost;
10305 return false;
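/* For example, multiplying by the constant 0x1234 (binary
   0001 0010 0011 0100, five set bits) yields nbits = 5 above, hence a
   surcharge of COSTS_N_INSNS ((5 - 3) / int_mul_bit_factor) on top of
   the base int_mul/int_mulX cost.  */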
10307 case ASHIFT:
10308 case ASHIFTRT:
10309 case LSHIFTRT:
10310 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10311 return false;
10313 case DIV:
10314 case UDIV:
10315 case MOD:
10316 case UMOD:
10317 if (float_mode_p)
10319 if (mode == DFmode)
10320 *total = sparc_costs->float_div_df;
10321 else
10322 *total = sparc_costs->float_div_sf;
10324 else
10326 if (mode == DImode)
10327 *total = sparc_costs->int_divX;
10328 else
10329 *total = sparc_costs->int_div;
10331 return false;
10333 case NEG:
10334 if (! float_mode_p)
10336 *total = COSTS_N_INSNS (1);
10337 return false;
10339 /* FALLTHRU */
10341 case ABS:
10342 case FLOAT:
10343 case UNSIGNED_FLOAT:
10344 case FIX:
10345 case UNSIGNED_FIX:
10346 case FLOAT_EXTEND:
10347 case FLOAT_TRUNCATE:
10348 *total = sparc_costs->float_move;
10349 return false;
10351 case SQRT:
10352 if (mode == DFmode)
10353 *total = sparc_costs->float_sqrt_df;
10354 else
10355 *total = sparc_costs->float_sqrt_sf;
10356 return false;
10358 case COMPARE:
10359 if (float_mode_p)
10360 *total = sparc_costs->float_cmp;
10361 else
10362 *total = COSTS_N_INSNS (1);
10363 return false;
10365 case IF_THEN_ELSE:
10366 if (float_mode_p)
10367 *total = sparc_costs->float_cmove;
10368 else
10369 *total = sparc_costs->int_cmove;
10370 return false;
10372 case IOR:
10373 /* Handle the NAND vector patterns. */
10374 if (sparc_vector_mode_supported_p (GET_MODE (x))
10375 && GET_CODE (XEXP (x, 0)) == NOT
10376 && GET_CODE (XEXP (x, 1)) == NOT)
10378 *total = COSTS_N_INSNS (1);
10379 return true;
10381 else
10382 return false;
10384 default:
10385 return false;
10389 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10391 static inline bool
10392 general_or_i64_p (reg_class_t rclass)
10394 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10397 /* Implement TARGET_REGISTER_MOVE_COST. */
10399 static int
10400 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10401 reg_class_t from, reg_class_t to)
10403 bool need_memory = false;
10405 if (from == FPCC_REGS || to == FPCC_REGS)
10406 need_memory = true;
10407 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10408 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
10410 if (TARGET_VIS3)
10412 int size = GET_MODE_SIZE (mode);
10413 if (size == 8 || size == 4)
10415 if (! TARGET_ARCH32 || size == 4)
10416 return 4;
10417 else
10418 return 6;
10421 need_memory = true;
10424 if (need_memory)
10426 if (sparc_cpu == PROCESSOR_ULTRASPARC
10427 || sparc_cpu == PROCESSOR_ULTRASPARC3
10428 || sparc_cpu == PROCESSOR_NIAGARA
10429 || sparc_cpu == PROCESSOR_NIAGARA2
10430 || sparc_cpu == PROCESSOR_NIAGARA3
10431 || sparc_cpu == PROCESSOR_NIAGARA4)
10432 return 12;
10434 return 6;
10437 return 2;
10440 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10441 This is achieved by means of a manual dynamic stack space allocation in
10442 the current frame. We make the assumption that SEQ doesn't contain any
10443 function calls, with the possible exception of calls to the GOT helper. */
10445 static void
10446 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10448 /* We must preserve the lowest 16 words for the register save area. */
10449 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10450 /* We really need only 2 words of fresh stack space. */
10451 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10453 rtx slot
10454 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
10455 SPARC_STACK_BIAS + offset));
10457 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
10458 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10459 if (reg2)
10460 emit_insn (gen_rtx_SET (VOIDmode,
10461 adjust_address (slot, word_mode, UNITS_PER_WORD),
10462 reg2));
10463 emit_insn (seq);
10464 if (reg2)
10465 emit_insn (gen_rtx_SET (VOIDmode,
10466 reg2,
10467 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10468 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10469 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10472 /* Output the assembler code for a thunk function. THUNK_DECL is the
10473 declaration for the thunk function itself, FUNCTION is the decl for
10474 the target function. DELTA is an immediate constant offset to be
10475 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10476 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10478 static void
10479 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10480 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10481 tree function)
10483 rtx this_rtx, insn, funexp;
10484 unsigned int int_arg_first;
10486 reload_completed = 1;
10487 epilogue_completed = 1;
10489 emit_note (NOTE_INSN_PROLOGUE_END);
10491 if (TARGET_FLAT)
10493 sparc_leaf_function_p = 1;
10495 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10497 else if (flag_delayed_branch)
10499 /* We will emit a regular sibcall below, so we need to instruct
10500 output_sibcall that we are in a leaf function. */
10501 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
10503 /* This will cause final.c to invoke leaf_renumber_regs so we
10504 must behave as if we were in a not-yet-leafified function. */
10505 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
10507 else
10509 /* We will emit the sibcall manually below, so we will need to
10510 manually spill non-leaf registers. */
10511 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
10513 /* We really are in a leaf function. */
10514 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10517 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10518 returns a structure, the structure return pointer is there instead. */
10519 if (TARGET_ARCH64
10520 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10521 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
10522 else
10523 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
10525 /* Add DELTA. When possible use a plain add, otherwise load it into
10526 a register first. */
10527 if (delta)
10529 rtx delta_rtx = GEN_INT (delta);
10531 if (! SPARC_SIMM13_P (delta))
10533 rtx scratch = gen_rtx_REG (Pmode, 1);
10534 emit_move_insn (scratch, delta_rtx);
10535 delta_rtx = scratch;
10538 /* THIS_RTX += DELTA. */
10539 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
10542 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10543 if (vcall_offset)
10545 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10546 rtx scratch = gen_rtx_REG (Pmode, 1);
10548 gcc_assert (vcall_offset < 0);
10550 /* SCRATCH = *THIS_RTX. */
10551 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
10553 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10554 may not have any available scratch register at this point. */
10555 if (SPARC_SIMM13_P (vcall_offset))
10556 ;
10557 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
10558 else if (! fixed_regs[5]
10559 /* The below sequence is made up of at least 2 insns,
10560 while the default method may need only one. */
10561 && vcall_offset < -8192)
10563 rtx scratch2 = gen_rtx_REG (Pmode, 5);
10564 emit_move_insn (scratch2, vcall_offset_rtx);
10565 vcall_offset_rtx = scratch2;
10567 else
10569 rtx increment = GEN_INT (-4096);
10571 /* VCALL_OFFSET is a negative number whose typical range can be
10572 estimated as -32768..0 in 32-bit mode. In almost all cases
10573 it is therefore cheaper to emit multiple add insns than
10574 spilling and loading the constant into a register (at least
10575 6 insns). */
10576 while (! SPARC_SIMM13_P (vcall_offset))
10578 emit_insn (gen_add2_insn (scratch, increment));
10579 vcall_offset += 4096;
10581 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
10584 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10585 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
10586 gen_rtx_PLUS (Pmode,
10587 scratch,
10588 vcall_offset_rtx)));
10590 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10591 emit_insn (gen_add2_insn (this_rtx, scratch));
10594 /* Generate a tail call to the target function. */
10595 if (! TREE_USED (function))
10597 assemble_external (function);
10598 TREE_USED (function) = 1;
10600 funexp = XEXP (DECL_RTL (function), 0);
10602 if (flag_delayed_branch)
10604 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10605 insn = emit_call_insn (gen_sibcall (funexp));
10606 SIBLING_CALL_P (insn) = 1;
10608 else
10610 /* The hoops we have to jump through in order to generate a sibcall
10611 without using delay slots... */
10612 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
10614 if (flag_pic)
10616 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
10617 start_sequence ();
10618 load_got_register (); /* clobbers %o7 */
10619 scratch = sparc_legitimize_pic_address (funexp, scratch);
10620 seq = get_insns ();
10621 end_sequence ();
10622 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
10624 else if (TARGET_ARCH32)
10626 emit_insn (gen_rtx_SET (VOIDmode,
10627 scratch,
10628 gen_rtx_HIGH (SImode, funexp)));
10629 emit_insn (gen_rtx_SET (VOIDmode,
10630 scratch,
10631 gen_rtx_LO_SUM (SImode, scratch, funexp)));
10633 else /* TARGET_ARCH64 */
10635 switch (sparc_cmodel)
10637 case CM_MEDLOW:
10638 case CM_MEDMID:
10639 /* The destination can serve as a temporary. */
10640 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
10641 break;
10643 case CM_MEDANY:
10644 case CM_EMBMEDANY:
10645 /* The destination cannot serve as a temporary. */
10646 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
10647 start_sequence ();
10648 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
10649 seq = get_insns ();
10650 end_sequence ();
10651 emit_and_preserve (seq, spill_reg, 0);
10652 break;
10654 default:
10655 gcc_unreachable ();
10659 emit_jump_insn (gen_indirect_jump (scratch));
10662 emit_barrier ();
10664 /* Run just enough of rest_of_compilation to get the insns emitted.
10665 There's not really enough bulk here to make other passes such as
10666 instruction scheduling worthwhile. Note that use_thunk calls
10667 assemble_start_function and assemble_end_function. */
10668 insn = get_insns ();
10669 shorten_branches (insn);
10670 final_start_function (insn, file, 1);
10671 final (insn, file, 1);
10672 final_end_function ();
10674 reload_completed = 0;
10675 epilogue_completed = 0;
10678 /* Return true if sparc_output_mi_thunk would be able to output the
10679 assembler code for the thunk function specified by the arguments
10680 it is passed, and false otherwise. */
10681 static bool
10682 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
10683 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
10684 HOST_WIDE_INT vcall_offset,
10685 const_tree function ATTRIBUTE_UNUSED)
10687 /* Bound the loop used in the default method above. */
10688 return (vcall_offset >= -32768 || ! fixed_regs[5]);
10691 /* We use the machine specific reorg pass to enable workarounds for errata. */
10693 static void
10694 sparc_reorg (void)
10696 rtx insn, next;
10698 /* The only erratum we handle for now is that of the AT697F processor. */
10699 if (!sparc_fix_at697f)
10700 return;
10702 /* We need to have the (essentially) final form of the insn stream in order
10703 to properly detect the various hazards. Run delay slot scheduling. */
10704 if (optimize > 0 && flag_delayed_branch)
10706 cleanup_barriers ();
10707 dbr_schedule (get_insns ());
10710 /* Now look for specific patterns in the insn stream. */
10711 for (insn = get_insns (); insn; insn = next)
10713 bool insert_nop = false;
10714 rtx set;
10716 /* Look for a single-word load into an odd-numbered FP register. */
10717 if (NONJUMP_INSN_P (insn)
10718 && (set = single_set (insn)) != NULL_RTX
10719 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
10720 && MEM_P (SET_SRC (set))
10721 && REG_P (SET_DEST (set))
10722 && REGNO (SET_DEST (set)) > 31
10723 && REGNO (SET_DEST (set)) % 2 != 0)
10725 /* The wrong dependency is on the enclosing double register. */
10726 unsigned int x = REGNO (SET_DEST (set)) - 1;
10727 unsigned int src1, src2, dest;
10728 int code;
10730 /* If the insn has a delay slot, then it cannot be problematic. */
10731 next = next_active_insn (insn);
10732 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
10733 code = -1;
10734 else
10736 extract_insn (next);
10737 code = INSN_CODE (next);
10740 switch (code)
10742 case CODE_FOR_adddf3:
10743 case CODE_FOR_subdf3:
10744 case CODE_FOR_muldf3:
10745 case CODE_FOR_divdf3:
10746 dest = REGNO (recog_data.operand[0]);
10747 src1 = REGNO (recog_data.operand[1]);
10748 src2 = REGNO (recog_data.operand[2]);
10749 if (src1 != src2)
10751 /* Case [1-4]:
10752 ld [address], %fx+1
10753 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10754 if ((src1 == x || src2 == x)
10755 && (dest == src1 || dest == src2))
10756 insert_nop = true;
10758 else
10760 /* Case 5:
10761 ld [address], %fx+1
10762 FPOPd %fx, %fx, %fx */
10763 if (src1 == x
10764 && dest == src1
10765 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
10766 insert_nop = true;
10768 break;
10770 case CODE_FOR_sqrtdf2:
10771 dest = REGNO (recog_data.operand[0]);
10772 src1 = REGNO (recog_data.operand[1]);
10773 /* Case 6:
10774 ld [address], %fx+1
10775 fsqrtd %fx, %fx */
10776 if (src1 == x && dest == src1)
10777 insert_nop = true;
10778 break;
10780 default:
10781 break;
10784 else
10785 next = NEXT_INSN (insn);
10787 if (insert_nop)
10788 emit_insn_after (gen_nop (), insn);
10792 /* How to allocate a 'struct machine_function'. */
10794 static struct machine_function *
10795 sparc_init_machine_status (void)
10797 return ggc_alloc_cleared_machine_function ();
10800 /* Locate some local-dynamic symbol still in use by this function
10801 so that we can print its name in local-dynamic base patterns. */
10803 static const char *
10804 get_some_local_dynamic_name (void)
10806 rtx insn;
10808 if (cfun->machine->some_ld_name)
10809 return cfun->machine->some_ld_name;
10811 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10812 if (INSN_P (insn)
10813 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10814 return cfun->machine->some_ld_name;
10816 gcc_unreachable ();
10819 static int
10820 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10822 rtx x = *px;
10824 if (x
10825 && GET_CODE (x) == SYMBOL_REF
10826 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10828 cfun->machine->some_ld_name = XSTR (x, 0);
10829 return 1;
10832 return 0;
10835 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10836 We need to emit DTP-relative relocations. */
10838 static void
10839 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
10841 switch (size)
10843 case 4:
10844 fputs ("\t.word\t%r_tls_dtpoff32(", file);
10845 break;
10846 case 8:
10847 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
10848 break;
10849 default:
10850 gcc_unreachable ();
10852 output_addr_const (file, x);
10853 fputs (")", file);
10856 /* Do whatever processing is required at the end of a file. */
10858 static void
10859 sparc_file_end (void)
10861 /* If we need to emit the special GOT helper function, do so now. */
10862 if (got_helper_rtx)
10864 const char *name = XSTR (got_helper_rtx, 0);
10865 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
10866 #ifdef DWARF2_UNWIND_INFO
10867 bool do_cfi;
10868 #endif
10870 if (USE_HIDDEN_LINKONCE)
10872 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
10873 get_identifier (name),
10874 build_function_type_list (void_type_node,
10875 NULL_TREE));
10876 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
10877 NULL_TREE, void_type_node);
10878 TREE_PUBLIC (decl) = 1;
10879 TREE_STATIC (decl) = 1;
10880 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
10881 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
10882 DECL_VISIBILITY_SPECIFIED (decl) = 1;
10883 resolve_unique_section (decl, 0, flag_function_sections);
10884 allocate_struct_function (decl, true);
10885 cfun->is_thunk = 1;
10886 current_function_decl = decl;
10887 init_varasm_status ();
10888 assemble_start_function (decl, name);
10890 else
10892 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10893 switch_to_section (text_section);
10894 if (align > 0)
10895 ASM_OUTPUT_ALIGN (asm_out_file, align);
10896 ASM_OUTPUT_LABEL (asm_out_file, name);
10899 #ifdef DWARF2_UNWIND_INFO
10900 do_cfi = dwarf2out_do_cfi_asm ();
10901 if (do_cfi)
10902 fprintf (asm_out_file, "\t.cfi_startproc\n");
10903 #endif
10904 if (flag_delayed_branch)
10905 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10906 reg_name, reg_name);
10907 else
10908 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10909 reg_name, reg_name);
10910 #ifdef DWARF2_UNWIND_INFO
10911 if (do_cfi)
10912 fprintf (asm_out_file, "\t.cfi_endproc\n");
10913 #endif
10916 if (NEED_INDICATE_EXEC_STACK)
10917 file_end_indicate_exec_stack ();
10919 #ifdef TARGET_SOLARIS
10920 solaris_file_end ();
10921 #endif
10924 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10925 /* Implement TARGET_MANGLE_TYPE. */
10927 static const char *
10928 sparc_mangle_type (const_tree type)
10930 if (!TARGET_64BIT
10931 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10932 && TARGET_LONG_DOUBLE_128)
10933 return "g";
10935 /* For all other types, use normal C++ mangling. */
10936 return NULL;
10938 #endif
10940 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
10941 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
10942 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
10944 void
10945 sparc_emit_membar_for_model (enum memmodel model,
10946 int load_store, int before_after)
10948 /* Bits for the MEMBAR mmask field. */
10949 const int LoadLoad = 1;
10950 const int StoreLoad = 2;
10951 const int LoadStore = 4;
10952 const int StoreStore = 8;
10954 int mm = 0, implied = 0;
10956 switch (sparc_memory_model)
10958 case SMM_SC:
10959 /* Sequential Consistency. All memory transactions are immediately
10960 visible in sequential execution order. No barriers needed. */
10961 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
10962 break;
10964 case SMM_TSO:
10965 /* Total Store Ordering: all memory transactions with store semantics
10966 are followed by an implied StoreStore. */
10967 implied |= StoreStore;
10968 /* FALLTHRU */
10970 case SMM_PSO:
10971 /* Partial Store Ordering: all memory transactions with load semantics
10972 are followed by an implied LoadLoad | LoadStore. */
10973 implied |= LoadLoad | LoadStore;
10975 /* If we're not looking for a raw barrier (before+after), then atomic
10976 operations get the benefit of being both load and store. */
10977 if (load_store == 3 && before_after == 2)
10978 implied |= StoreLoad | StoreStore;
10979 /* FALLTHRU */
10981 case SMM_RMO:
10982 /* Relaxed Memory Ordering: no implicit bits. */
10983 break;
10985 default:
10986 gcc_unreachable ();
10989 if (before_after & 1)
10991 if (model == MEMMODEL_ACQUIRE
10992 || model == MEMMODEL_ACQ_REL
10993 || model == MEMMODEL_SEQ_CST)
10995 if (load_store & 1)
10996 mm |= LoadLoad | LoadStore;
10997 if (load_store & 2)
10998 mm |= StoreLoad | StoreStore;
11001 if (before_after & 2)
11003 if (model == MEMMODEL_RELEASE
11004 || model == MEMMODEL_ACQ_REL
11005 || model == MEMMODEL_SEQ_CST)
11007 if (load_store & 1)
11008 mm |= LoadLoad | StoreLoad;
11009 if (load_store & 2)
11010 mm |= LoadStore | StoreStore;
11014 /* Remove the bits implied by the system memory model. */
11015 mm &= ~implied;
11017 /* For raw barriers (before+after), always emit a barrier.
11018 This will become a compile-time barrier if needed. */
11019 if (mm || before_after == 3)
11020 emit_insn (gen_membar (GEN_INT (mm)));
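/* Worked example: for model == MEMMODEL_SEQ_CST with load_store == 2
   (store) and before_after == 1 under SMM_TSO, the code above sets
   mm = StoreLoad | StoreStore, while TSO (falling through to the PSO
   case) implies StoreStore | LoadLoad | LoadStore; after masking only
   StoreLoad remains, so a single membar #StoreLoad is emitted, which is
   the one ordering TSO does not provide.  */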
11023 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing a 32-bit
11024 compare and swap on the word containing the byte or half-word. */
11026 static void
11027 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
11028 rtx oldval, rtx newval)
11030 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
11031 rtx addr = gen_reg_rtx (Pmode);
11032 rtx off = gen_reg_rtx (SImode);
11033 rtx oldv = gen_reg_rtx (SImode);
11034 rtx newv = gen_reg_rtx (SImode);
11035 rtx oldvalue = gen_reg_rtx (SImode);
11036 rtx newvalue = gen_reg_rtx (SImode);
11037 rtx res = gen_reg_rtx (SImode);
11038 rtx resv = gen_reg_rtx (SImode);
11039 rtx memsi, val, mask, end_label, loop_label, cc;
11041 emit_insn (gen_rtx_SET (VOIDmode, addr,
11042 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
11044 if (Pmode != SImode)
11045 addr1 = gen_lowpart (SImode, addr1);
11046 emit_insn (gen_rtx_SET (VOIDmode, off,
11047 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
11049 memsi = gen_rtx_MEM (SImode, addr);
11050 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
11051 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
11053 val = copy_to_reg (memsi);
11055 emit_insn (gen_rtx_SET (VOIDmode, off,
11056 gen_rtx_XOR (SImode, off,
11057 GEN_INT (GET_MODE (mem) == QImode
11058 ? 3 : 2))));
11060 emit_insn (gen_rtx_SET (VOIDmode, off,
11061 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
11063 if (GET_MODE (mem) == QImode)
11064 mask = force_reg (SImode, GEN_INT (0xff));
11065 else
11066 mask = force_reg (SImode, GEN_INT (0xffff));
11068 emit_insn (gen_rtx_SET (VOIDmode, mask,
11069 gen_rtx_ASHIFT (SImode, mask, off)));
11071 emit_insn (gen_rtx_SET (VOIDmode, val,
11072 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
11073 val)));
11075 oldval = gen_lowpart (SImode, oldval);
11076 emit_insn (gen_rtx_SET (VOIDmode, oldv,
11077 gen_rtx_ASHIFT (SImode, oldval, off)));
11079 newval = gen_lowpart_common (SImode, newval);
11080 emit_insn (gen_rtx_SET (VOIDmode, newv,
11081 gen_rtx_ASHIFT (SImode, newval, off)));
11083 emit_insn (gen_rtx_SET (VOIDmode, oldv,
11084 gen_rtx_AND (SImode, oldv, mask)));
11086 emit_insn (gen_rtx_SET (VOIDmode, newv,
11087 gen_rtx_AND (SImode, newv, mask)));
11089 end_label = gen_label_rtx ();
11090 loop_label = gen_label_rtx ();
11091 emit_label (loop_label);
11093 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
11094 gen_rtx_IOR (SImode, oldv, val)));
11096 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
11097 gen_rtx_IOR (SImode, newv, val)));
11099 emit_move_insn (bool_result, const1_rtx);
11101 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
11103 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
11105 emit_insn (gen_rtx_SET (VOIDmode, resv,
11106 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
11107 res)));
11109 emit_move_insn (bool_result, const0_rtx);
11111 cc = gen_compare_reg_1 (NE, resv, val);
11112 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
11114 /* Use cbranchcc4 to separate the compare and branch! */
11115 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
11116 cc, const0_rtx, loop_label));
11118 emit_label (end_label);
11120 emit_insn (gen_rtx_SET (VOIDmode, res,
11121 gen_rtx_AND (SImode, res, mask)));
11123 emit_insn (gen_rtx_SET (VOIDmode, res,
11124 gen_rtx_LSHIFTRT (SImode, res, off)));
11126 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
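/* Worked example of the masking above, for a QImode access whose address
   has low bits 01 (big-endian): off = 1 is XORed with 3 giving 2, then
   shifted left by 3 giving a bit offset of 16, so mask = 0xff << 16
   = 0x00ff0000, exactly the bits that byte occupies within its
   naturally aligned word.  */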
11129 /* Expand code to perform a compare-and-swap. */
11131 void
11132 sparc_expand_compare_and_swap (rtx operands[])
11134 rtx bval, retval, mem, oldval, newval;
11135 enum machine_mode mode;
11136 enum memmodel model;
11138 bval = operands[0];
11139 retval = operands[1];
11140 mem = operands[2];
11141 oldval = operands[3];
11142 newval = operands[4];
11143 model = (enum memmodel) INTVAL (operands[6]);
11144 mode = GET_MODE (mem);
11146 sparc_emit_membar_for_model (model, 3, 1);
11148 if (reg_overlap_mentioned_p (retval, oldval))
11149 oldval = copy_to_reg (oldval);
11151 if (mode == QImode || mode == HImode)
11152 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
11153 else
11155 rtx (*gen) (rtx, rtx, rtx, rtx);
11156 rtx x;
11158 if (mode == SImode)
11159 gen = gen_atomic_compare_and_swapsi_1;
11160 else
11161 gen = gen_atomic_compare_and_swapdi_1;
11162 emit_insn (gen (retval, mem, oldval, newval));
11164 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
11165 if (x != bval)
11166 convert_move (bval, x, 1);
11169 sparc_emit_membar_for_model (model, 3, 2);
11172 void
11173 sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
11175 rtx t_1, t_2, t_3;
11177 sel = gen_lowpart (DImode, sel);
11178 switch (vmode)
11180 case V2SImode:
11181 /* inp = xxxxxxxAxxxxxxxB */
11182 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11183 NULL_RTX, 1, OPTAB_DIRECT);
11184 /* t_1 = ....xxxxxxxAxxxx */
11185 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
11186 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
11187 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
11188 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
11189 /* sel = .......B */
11190 /* t_1 = ...A.... */
11191 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
11192 /* sel = ...A...B */
11193 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
11194 /* sel = AAAABBBB * 4 */
11195 t_1 = force_reg (SImode, GEN_INT (0x01230123));
11196 /* sel = { A*4, A*4+1, A*4+2, ... } */
11197 break;
11199 case V4HImode:
11200 /* inp = xxxAxxxBxxxCxxxD */
11201 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11202 NULL_RTX, 1, OPTAB_DIRECT);
11203 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11204 NULL_RTX, 1, OPTAB_DIRECT);
11205 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
11206 NULL_RTX, 1, OPTAB_DIRECT);
11207 /* t_1 = ..xxxAxxxBxxxCxx */
11208 /* t_2 = ....xxxAxxxBxxxC */
11209 /* t_3 = ......xxxAxxxBxx */
11210 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
11211 GEN_INT (0x07),
11212 NULL_RTX, 1, OPTAB_DIRECT);
11213 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
11214 GEN_INT (0x0700),
11215 NULL_RTX, 1, OPTAB_DIRECT);
11216 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
11217 GEN_INT (0x070000),
11218 NULL_RTX, 1, OPTAB_DIRECT);
11219 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
11220 GEN_INT (0x07000000),
11221 NULL_RTX, 1, OPTAB_DIRECT);
11222 /* sel = .......D */
11223 /* t_1 = .....C.. */
11224 /* t_2 = ...B.... */
11225 /* t_3 = .A...... */
11226 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
11227 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
11228 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
11229 /* sel = .A.B.C.D */
11230 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
11231 /* sel = AABBCCDD * 2 */
11232 t_1 = force_reg (SImode, GEN_INT (0x01010101));
11233 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
11234 break;
11236 case V8QImode:
11237 /* input = xAxBxCxDxExFxGxH */
11238 sel = expand_simple_binop (DImode, AND, sel,
11239 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
11240 | 0x0f0f0f0f),
11241 NULL_RTX, 1, OPTAB_DIRECT);
11242 /* sel = .A.B.C.D.E.F.G.H */
11243 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
11244 NULL_RTX, 1, OPTAB_DIRECT);
11245 /* t_1 = ..A.B.C.D.E.F.G. */
11246 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11247 NULL_RTX, 1, OPTAB_DIRECT);
11248 /* sel = .AABBCCDDEEFFGGH */
11249 sel = expand_simple_binop (DImode, AND, sel,
11250 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
11251 | 0xff00ff),
11252 NULL_RTX, 1, OPTAB_DIRECT);
11253 /* sel = ..AB..CD..EF..GH */
11254 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11255 NULL_RTX, 1, OPTAB_DIRECT);
11256 /* t_1 = ....AB..CD..EF.. */
11257 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11258 NULL_RTX, 1, OPTAB_DIRECT);
11259 /* sel = ..ABABCDCDEFEFGH */
11260 sel = expand_simple_binop (DImode, AND, sel,
11261 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
11262 NULL_RTX, 1, OPTAB_DIRECT);
11263 /* sel = ....ABCD....EFGH */
11264 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11265 NULL_RTX, 1, OPTAB_DIRECT);
11266 /* t_1 = ........ABCD.... */
11267 sel = gen_lowpart (SImode, sel);
11268 t_1 = gen_lowpart (SImode, t_1);
11269 break;
11271 default:
11272 gcc_unreachable ();
11275 /* Always perform the final addition/merge within the bmask insn. */
11276 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
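/* Worked example for the V4HImode case above: the constant selector
   {1, 0, 3, 2} is reduced to sel = 0x01000302 (.1.0.3.2) by the mask/IOR
   steps, the multiply by 0x22 doubles each index into both nibbles of its
   byte (0x22006644), and the 0x01010101 added inside the bmask insn turns
   each element index X into the byte pair (2X, 2X+1), giving the GSR mask
   0x23016745, i.e. a bshuffle that swaps adjacent halfwords.  */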
11279 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
11281 static bool
11282 sparc_frame_pointer_required (void)
11284 /* If the stack pointer is dynamically modified in the function, it cannot
11285 serve as the frame pointer. */
11286 if (cfun->calls_alloca)
11287 return true;
11289 /* If the function receives nonlocal gotos, it needs to save the frame
11290 pointer in the nonlocal_goto_save_area object. */
11291 if (cfun->has_nonlocal_label)
11292 return true;
11294 /* In flat mode, that's it. */
11295 if (TARGET_FLAT)
11296 return false;
11298 /* Otherwise, the frame pointer is required if the function isn't leaf. */
11299 return !(crtl->is_leaf && only_leaf_regs_used ());
11302 /* The way this is structured, we can't eliminate SFP in favor of SP
11303 if the frame pointer is required: we want to use the SFP->HFP elimination
11304 in that case. But the test in update_eliminables doesn't know we are
11305 assuming below that we only do the former elimination. */
11307 static bool
11308 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
11310 return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
11313 /* Return the hard frame pointer directly to bypass the stack bias. */
11315 static rtx
11316 sparc_builtin_setjmp_frame_value (void)
11318 return hard_frame_pointer_rtx;
11321 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
11322 they won't be allocated. */
11324 static void
11325 sparc_conditional_register_usage (void)
11327 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
11329 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
11330 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
11332 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
11333 then honor it. */
11334 if (TARGET_ARCH32 && fixed_regs[5])
11335 fixed_regs[5] = 1;
11336 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
11337 fixed_regs[5] = 0;
11338 if (! TARGET_V9)
11340 int regno;
11341 for (regno = SPARC_FIRST_V9_FP_REG;
11342 regno <= SPARC_LAST_V9_FP_REG;
11343 regno++)
11344 fixed_regs[regno] = 1;
11345 /* %fcc0 is used by v8 and v9. */
11346 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
11347 regno <= SPARC_LAST_V9_FCC_REG;
11348 regno++)
11349 fixed_regs[regno] = 1;
11351 if (! TARGET_FPU)
11353 int regno;
11354 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
11355 fixed_regs[regno] = 1;
11357 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
11358 then honor it. Likewise with g3 and g4. */
11359 if (fixed_regs[2] == 2)
11360 fixed_regs[2] = ! TARGET_APP_REGS;
11361 if (fixed_regs[3] == 2)
11362 fixed_regs[3] = ! TARGET_APP_REGS;
11363 if (TARGET_ARCH32 && fixed_regs[4] == 2)
11364 fixed_regs[4] = ! TARGET_APP_REGS;
11365 else if (TARGET_CM_EMBMEDANY)
11366 fixed_regs[4] = 1;
11367 else if (fixed_regs[4] == 2)
11368 fixed_regs[4] = 0;
11369 if (TARGET_FLAT)
11371 int regno;
11372 /* Disable leaf functions. */
11373 memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
11374 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
11375 leaf_reg_remap [regno] = regno;
11377 if (TARGET_VIS)
11378 global_regs[SPARC_GSR_REG] = 1;
11381 /* Implement TARGET_PREFERRED_RELOAD_CLASS:
11383 - We can't load constants into FP registers.
11384 - We can't load FP constants into integer registers when soft-float,
11385 because there is no soft-float pattern with a r/F constraint.
11386 - We can't load FP constants into integer registers for TFmode unless
11387 it is 0.0L, because there is no movtf pattern with a r/F constraint.
11388 - Try to reload integer constants (symbolic or otherwise) back into
11389 registers directly, rather than having them dumped to memory. */
11391 static reg_class_t
11392 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
11394 enum machine_mode mode = GET_MODE (x);
11395 if (CONSTANT_P (x))
11397 if (FP_REG_CLASS_P (rclass)
11398 || rclass == GENERAL_OR_FP_REGS
11399 || rclass == GENERAL_OR_EXTRA_FP_REGS
11400 || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
11401 || (mode == TFmode && ! const_zero_operand (x, mode)))
11402 return NO_REGS;
11404 if (GET_MODE_CLASS (mode) == MODE_INT)
11405 return GENERAL_REGS;
11407 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
11409 if (! FP_REG_CLASS_P (rclass)
11410 || !(const_zero_operand (x, mode)
11411 || const_all_ones_operand (x, mode)))
11412 return NO_REGS;
11416 if (TARGET_VIS3
11417 && ! TARGET_ARCH64
11418 && (rclass == EXTRA_FP_REGS
11419 || rclass == GENERAL_OR_EXTRA_FP_REGS))
11421 int regno = true_regnum (x);
11423 if (SPARC_INT_REG_P (regno))
11424 return (rclass == EXTRA_FP_REGS
11425 ? FP_REGS : GENERAL_OR_FP_REGS);
11428 return rclass;
11431 /* Output a wide multiply instruction in V8+ mode. INSN is the instruction,
11432 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
11434 const char *
11435 output_v8plus_mult (rtx insn, rtx *operands, const char *opcode)
11437 char mulstr[32];
11439 gcc_assert (! TARGET_ARCH64);
11441 if (sparc_check_64 (operands[1], insn) <= 0)
11442 output_asm_insn ("srl\t%L1, 0, %L1", operands);
11443 if (which_alternative == 1)
11444 output_asm_insn ("sllx\t%H1, 32, %H1", operands);
11445 if (GET_CODE (operands[2]) == CONST_INT)
11447 if (which_alternative == 1)
11449 output_asm_insn ("or\t%L1, %H1, %H1", operands);
11450 sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
11451 output_asm_insn (mulstr, operands);
11452 return "srlx\t%L0, 32, %H0";
11454 else
11456 output_asm_insn ("sllx\t%H1, 32, %3", operands);
11457 output_asm_insn ("or\t%L1, %3, %3", operands);
11458 sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
11459 output_asm_insn (mulstr, operands);
11460 output_asm_insn ("srlx\t%3, 32, %H0", operands);
11461 return "mov\t%3, %L0";
11464 else if (rtx_equal_p (operands[1], operands[2]))
11466 if (which_alternative == 1)
11468 output_asm_insn ("or\t%L1, %H1, %H1", operands);
11469 sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
11470 output_asm_insn (mulstr, operands);
11471 return "srlx\t%L0, 32, %H0";
11473 else
11475 output_asm_insn ("sllx\t%H1, 32, %3", operands);
11476 output_asm_insn ("or\t%L1, %3, %3", operands);
11477 sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
11478 output_asm_insn (mulstr, operands);
11479 output_asm_insn ("srlx\t%3, 32, %H0", operands);
11480 return "mov\t%3, %L0";
11483 if (sparc_check_64 (operands[2], insn) <= 0)
11484 output_asm_insn ("srl\t%L2, 0, %L2", operands);
11485 if (which_alternative == 1)
11487 output_asm_insn ("or\t%L1, %H1, %H1", operands);
11488 output_asm_insn ("sllx\t%H2, 32, %L1", operands);
11489 output_asm_insn ("or\t%L2, %L1, %L1", operands);
11490 sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
11491 output_asm_insn (mulstr, operands);
11492 return "srlx\t%L0, 32, %H0";
11494 else
11496 output_asm_insn ("sllx\t%H1, 32, %3", operands);
11497 output_asm_insn ("sllx\t%H2, 32, %4", operands);
11498 output_asm_insn ("or\t%L1, %3, %3", operands);
11499 output_asm_insn ("or\t%L2, %4, %4", operands);
11500 sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
11501 output_asm_insn (mulstr, operands);
11502 output_asm_insn ("srlx\t%3, 32, %H0", operands);
11503 return "mov\t%3, %L0";
11507 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
11508 all fields of TARGET to ELT by means of the VIS2 BSHUFFLE insn. MODE
11509 and INNER_MODE are the modes describing TARGET. */
11511 static void
11512 vector_init_bshuffle (rtx target, rtx elt, enum machine_mode mode,
11513 enum machine_mode inner_mode)
11515 rtx t1, final_insn;
11516 int bmask;
11518 t1 = gen_reg_rtx (mode);
11520 elt = convert_modes (SImode, inner_mode, elt, true);
11521 emit_move_insn (gen_lowpart(SImode, t1), elt);
11523 switch (mode)
11525 case V2SImode:
11526 final_insn = gen_bshufflev2si_vis (target, t1, t1);
11527 bmask = 0x45674567;
11528 break;
11529 case V4HImode:
11530 final_insn = gen_bshufflev4hi_vis (target, t1, t1);
11531 bmask = 0x67676767;
11532 break;
11533 case V8QImode:
11534 final_insn = gen_bshufflev8qi_vis (target, t1, t1);
11535 bmask = 0x77777777;
11536 break;
11537 default:
11538 gcc_unreachable ();
11541 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), CONST0_RTX (SImode),
11542 force_reg (SImode, GEN_INT (bmask))));
11543 emit_insn (final_insn);
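/* The bmask constants used above index bytes of the 16-byte t1:t1 pair:
   the element was moved into the low word of T1 (bytes 4-7 on this
   big-endian target), so 0x45674567 replicates that word for V2SImode,
   0x67676767 replicates halfword 6-7 for V4HImode, and 0x77777777
   replicates byte 7 for V8QImode.  */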
11546 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
11547 all fields of TARGET to ELT in V8QI by means of the VIS FPMERGE insn. */
11549 static void
11550 vector_init_fpmerge (rtx target, rtx elt)
11552 rtx t1, t2, t2_low, t3, t3_low;
11554 t1 = gen_reg_rtx (V4QImode);
11555 elt = convert_modes (SImode, QImode, elt, true);
11556 emit_move_insn (gen_lowpart (SImode, t1), elt);
11558 t2 = gen_reg_rtx (V8QImode);
11559 t2_low = gen_lowpart (V4QImode, t2);
11560 emit_insn (gen_fpmerge_vis (t2, t1, t1));
11562 t3 = gen_reg_rtx (V8QImode);
11563 t3_low = gen_lowpart (V4QImode, t3);
11564 emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));
11566 emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
11569 /* Subroutine of sparc_expand_vector_init. Emit code to initialize
11570 all fields of TARGET to ELT in V4HI by means of the VIS FALIGNDATA insn. */
11572 static void
11573 vector_init_faligndata (rtx target, rtx elt)
11575 rtx t1 = gen_reg_rtx (V4HImode);
11576 int i;
11578 elt = convert_modes (SImode, HImode, elt, true);
11579 emit_move_insn (gen_lowpart (SImode, t1), elt);
11581 emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
11582 force_reg (SImode, GEN_INT (6)),
11583 const0_rtx));
11585 for (i = 0; i < 4; i++)
11586 emit_insn (gen_faligndatav4hi_vis (target, t1, target));
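/* The alignaddr above is emitted only to set the GSR alignment field
   to 6: each faligndata then extracts bytes 6-13 of the T1:TARGET pair,
   i.e. it shifts the element (sitting in bytes 6-7 of T1) in from the
   left while moving the previous contents of TARGET down by one
   halfword, so four iterations replicate ELT into all four fields.  */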
/* Emit code to initialize TARGET to values for individual fields VALS.  */

void
sparc_expand_vector_init (rtx target, rtx vals)
{
  const enum machine_mode mode = GET_MODE (target);
  const enum machine_mode inner_mode = GET_MODE_INNER (mode);
  const int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  bool all_same;
  rtx mem;

  all_same = true;
  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
    {
      if (GET_MODE_SIZE (inner_mode) == 4)
        {
          emit_move_insn (gen_lowpart (SImode, target),
                          gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
          return;
        }
      else if (GET_MODE_SIZE (inner_mode) == 8)
        {
          emit_move_insn (gen_lowpart (DImode, target),
                          gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
          return;
        }
    }
  else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
           && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
    {
      emit_move_insn (gen_highpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
      emit_move_insn (gen_lowpart (word_mode, target),
                      gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
      return;
    }

  if (all_same && GET_MODE_SIZE (mode) == 8)
    {
      if (TARGET_VIS2)
        {
          vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
          return;
        }
      if (mode == V8QImode)
        {
          vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
          return;
        }
      if (mode == V4HImode)
        {
          vector_init_faligndata (target, XVECEXP (vals, 0, 0));
          return;
        }
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
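
/* Illustrative note (not in the original sources): the strategies are
   tried from cheapest to most general.  An all-constant vector becomes
   a single CONST_VECTOR move; a single-element or two-word vector is
   handled with plain word moves; a replicated variable element in an
   8-byte vector uses one of the VIS helpers above; and arbitrary
   variable contents fall back to building the vector in a stack
   temporary one element at a time.  */
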
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}
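
/* Illustrative note (not in the original sources): returning a class
   here tells reload to stage the move through a scratch register of
   that class; e.g. a QImode load into an FP register goes through a
   GENERAL_REGS scratch because the FPU has no byte loads.  Returning
   NO_REGS with SRI->icode set instead delegates the whole move to the
   named reload pattern.  */
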
/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE.  OPERANDS[1] is the operator of the condition.  */

bool
sparc_expand_conditional_move (enum machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  enum machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
    return false;

  if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  cmp_mode = GET_MODE (XEXP (cmp, 0));
  rc = GET_CODE (cmp);

  dst = operands[0];
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
        dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
        rc = reverse_condition_maybe_unordered (rc);
      else
        rc = reverse_condition (rc);
    }

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (VOIDmode, dst,
                          gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}
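
/* Illustrative note (not in the original sources): the expander
   canonicalizes the conditional move so that only one value ever has
   to be moved conditionally; the other is first copied into DST
   unconditionally, reversing the condition when needed.  A DImode
   compare of a register against zero can then map onto the V9
   register-conditional movr instructions directly; everything else
   goes through a condition-code register and movcc/fmovcc.  */
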
/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
   into OPERANDS[0] in MODE, depending on the outcome of the comparison of
   OPERANDS[4] and OPERANDS[5].  OPERANDS[3] is the operator of the condition.
   FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
   code to be used for the condition mask.  */

void
sparc_expand_vcond (enum machine_mode mode, rtx *operands, int ccode, int fcode)
{
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  enum rtx_code code = GET_CODE (operands[3]);

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
  if (code == LT || code == GE)
    {
      rtx t;

      code = swap_condition (code);
      t = cop0; cop0 = cop1; cop1 = t;
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
                         gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
                         fcode);

  cmask = gen_rtx_UNSPEC (DImode,
                          gen_rtvec (2, mask, gsr),
                          ccode);

  bshuf = gen_rtx_UNSPEC (mode,
                          gen_rtvec (3, operands[1], operands[2], gsr),
                          UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (VOIDmode, mask, fcmp));
  emit_insn (gen_rtx_SET (VOIDmode, gsr, cmask));

  emit_insn (gen_rtx_SET (VOIDmode, operands[0], bshuf));
}
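
/* Illustrative note (not in the original sources): this expands to a
   three-insn VIS sequence: a vector compare producing a per-element
   bit mask, a cmask insn translating that mask into a byte-shuffle
   pattern in the GSR, and a final bshuffle that picks each element of
   the result from OPERANDS[1] or OPERANDS[2] accordingly.  LT and GE
   are first rewritten as GT and LE with swapped comparison operands,
   since the VIS compare instructions only cover the latter codes.  */
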
/* On sparc, any mode which naturally allocates into the float
   registers should return 4 here.  */

unsigned int
sparc_regmode_natural_size (enum machine_mode mode)
{
  int size = UNITS_PER_WORD;

  if (TARGET_ARCH64)
    {
      enum mode_class mclass = GET_MODE_CLASS (mode);

      if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
        size = 4;
    }

  return size;
}
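
/* Illustrative note (not in the original sources): on 64-bit this
   reports 4-byte granularity for modes that live in the FP registers,
   matching the fact that the FP register file is organized in 4-byte
   units, while integer modes keep the 8-byte UNITS_PER_WORD
   granularity.  */
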
/* Return TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   For V9 we have to deal with the fact that only the lower 32 floating
   point registers are 32-bit addressable.  */

bool
sparc_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  enum mode_class mclass1, mclass2;
  unsigned short size1, size2;

  if (mode1 == mode2)
    return true;

  mclass1 = GET_MODE_CLASS (mode1);
  mclass2 = GET_MODE_CLASS (mode2);
  if (mclass1 != mclass2)
    return false;

  if (! TARGET_V9)
    return true;

  /* Classes are the same and we are V9 so we have to deal with upper
     vs. lower floating point registers.  If one of the modes is a
     4-byte mode, and the other is not, we have to mark them as not
     tieable because only the lower 32 floating point registers are
     addressable 32-bits at a time.

     We can't just test explicitly for SFmode, otherwise we won't
     cover the vector mode cases properly.  */

  if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
    return true;

  size1 = GET_MODE_SIZE (mode1);
  size2 = GET_MODE_SIZE (mode2);
  if ((size1 > 4 && size2 == 4)
      || (size2 > 4 && size1 == 4))
    return false;

  return true;
}
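
/* Illustrative note (not in the original sources): e.g. SFmode and
   DFmode are not tieable on V9 because an SFmode value can only live
   in %f0-%f31, while a DFmode value may be allocated to the upper
   registers %f32-%f62, which have no single-precision names.  */
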
#include "gt-sparc.h"