/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "gimplify.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "opts.h"
#include "tree-pass.h"
#include "context.h"

/* Processor costs */

struct processor_costs {
  /* Integer load */
  const int int_load;

  /* Integer signed load */
  const int int_sload;

  /* Integer zeroed load */
  const int int_zload;

  /* Float load */
  const int float_load;

  /* fmov, fneg, fabs */
  const int float_move;

  /* fadd, fsub */
  const int float_plusminus;

  /* fcmp */
  const int float_cmp;

  /* fmov, fmovr */
  const int float_cmove;

  /* fmul */
  const int float_mul;

  /* fdivs */
  const int float_div_sf;

  /* fdivd */
  const int float_div_df;

  /* fsqrts */
  const int float_sqrt_sf;

  /* fsqrtd */
  const int float_sqrt_df;

  /* umul/smul */
  const int int_mul;

  /* mulX */
  const int int_mulX;

  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);
     A value of zero indicates that the multiply cost is fixed,
     not variable.  */
  const int int_mul_bit_factor;
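
  /* Worked example (illustrative, not from the original source): with
     int_mul_bit_factor == 2, as on UltraSPARC below, multiplying by a
     positive operand whose highest set bit is bit 11 costs
     int_mul + (11 - 3) / 2, i.e. int_mul plus 4 extra cost units.  */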
  /* udiv/sdiv */
  const int int_div;

  /* divX */
  const int int_divX;

  /* movcc, movr */
  const int int_cmove;

  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs leon3_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (14), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (22), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (35), /* idiv */
  COSTS_N_INSNS (35), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara4_costs = {
  COSTS_N_INSNS (5), /* int load */
  COSTS_N_INSNS (5), /* int signed load */
  COSTS_N_INSNS (5), /* int zeroed load */
  COSTS_N_INSNS (5), /* float load */
  COSTS_N_INSNS (11), /* fmov, fneg, fabs */
  COSTS_N_INSNS (11), /* fadd, fsub */
  COSTS_N_INSNS (11), /* fcmp */
  COSTS_N_INSNS (11), /* fmov, fmovr */
  COSTS_N_INSNS (11), /* fmul */
  COSTS_N_INSNS (24), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (24), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (12), /* imul */
  COSTS_N_INSNS (12), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (50), /* idiv, average of 41 - 60 cycle range */
  COSTS_N_INSNS (35), /* idivX, average of 26 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
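
/* For example, the in register %i0 (hard register 24) is remapped above
   to the out register %o0 (hard register 8), while the entries marked -1
   (the out, global and special registers) are not remapped at all.  */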
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of crtl->uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};
#define sparc_frame_size		cfun->machine->frame_size
#define sparc_apparent_frame_size	cfun->machine->apparent_frame_size
#define sparc_frame_base_reg		cfun->machine->frame_base_reg
#define sparc_frame_base_offset		cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs		cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p		cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p	cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p	cfun->machine->prologue_data_valid_p

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static void sparc_handle_vis_mul8x16 (tree *, int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx, addr_space_t);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   enum machine_mode,
					   secondary_reload_info *);
static enum machine_mode sparc_cstore_mode (enum insn_code icode);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address

/* The value stored by LDSTUB.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0xff

#undef TARGET_CSTORE_MODE
#define TARGET_CSTORE_MODE sparc_cstore_mode
struct gcc_target targetm = TARGET_INITIALIZER;

/* Return the memory reference contained in X if any, zero otherwise.  */

static rtx
mem_ref (rtx x)
{
  if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND)
    x = XEXP (x, 0);

  if (MEM_P (x))
    return x;

  return NULL_RTX;
}
/* We use a machine specific pass to enable workarounds for errata.
   We need to have the (essentially) final form of the insn stream in order
   to properly detect the various hazards.  Therefore, this machine specific
   pass runs as late as possible.  The pass is inserted in the pass pipeline
   at the end of sparc_option_override.  */

static bool
sparc_gate_work_around_errata (void)
{
  /* The only errata we handle are those of the AT697F and UT699.  */
  return sparc_fix_at697f != 0 || sparc_fix_ut699 != 0;
}
static unsigned int
sparc_do_work_around_errata (void)
{
  rtx insn, next;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Now look for specific patterns in the insn stream.  */
  for (insn = get_insns (); insn; insn = next)
    {
      bool insert_nop = false;
      rtx set;

      /* Look into the instruction in a delay slot.  */
      if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
	insn = XVECEXP (PATTERN (insn), 0, 1);

      /* Look for a single-word load into an odd-numbered FP register.  */
      if (sparc_fix_at697f
	  && NONJUMP_INSN_P (insn)
	  && (set = single_set (insn)) != NULL_RTX
	  && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
	  && MEM_P (SET_SRC (set))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) > 31
	  && REGNO (SET_DEST (set)) % 2 != 0)
	{
	  /* The wrong dependency is on the enclosing double register.  */
	  unsigned int x = REGNO (SET_DEST (set)) - 1;
	  unsigned int src1, src2, dest;
	  int code;

	  /* If the insn has a delay slot, then it cannot be problematic.  */
	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  extract_insn (next);
	  code = INSN_CODE (next);

	  switch (code)
	    {
	    case CODE_FOR_adddf3:
	    case CODE_FOR_subdf3:
	    case CODE_FOR_muldf3:
	    case CODE_FOR_divdf3:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      src2 = REGNO (recog_data.operand[2]);
	      if (src1 != src2)
		{
		  /* Case [1-4]:
		       ld [address], %fx+1
		       FPOPd %f{x,y}, %f{y,x}, %f{x,y}  */
		  if ((src1 == x || src2 == x)
		      && (dest == src1 || dest == src2))
		    insert_nop = true;
		}
	      else
		{
		  /* Case 5:
		       ld [address], %fx+1
		       FPOPd %fx, %fx, %fx  */
		  if (src1 == x
		      && dest == src1
		      && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
		    insert_nop = true;
		}
	      break;

	    case CODE_FOR_sqrtdf2:
	      dest = REGNO (recog_data.operand[0]);
	      src1 = REGNO (recog_data.operand[1]);
	      /* Case 6:
		   ld [address], %fx+1
		   fsqrtd %fx, %fx  */
	      if (src1 == x && dest == src1)
		insert_nop = true;
	      break;

	    default:
	      break;
	    }
	}
      /* Look for a single-word load into an integer register.  */
      else if (sparc_fix_ut699
	       && NONJUMP_INSN_P (insn)
	       && (set = single_set (insn)) != NULL_RTX
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) <= 4
	       && mem_ref (SET_SRC (set)) != NULL_RTX
	       && REG_P (SET_DEST (set))
	       && REGNO (SET_DEST (set)) < 32)
	{
	  /* There is no problem if the second memory access has a data
	     dependency on the first single-cycle load.  */
	  rtx x = SET_DEST (set);

	  /* If the insn has a delay slot, then it cannot be problematic.  */
	  next = next_active_insn (insn);
	  if (!next)
	    break;
	  if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
	    continue;

	  /* Look for a second memory access to/from an integer register.  */
	  if ((set = single_set (next)) != NULL_RTX)
	    {
	      rtx src = SET_SRC (set);
	      rtx dest = SET_DEST (set);
	      rtx mem;

	      /* LDD is affected.  */
	      if ((mem = mem_ref (src)) != NULL_RTX
		  && REG_P (dest)
		  && REGNO (dest) < 32
		  && !reg_mentioned_p (x, XEXP (mem, 0)))
		insert_nop = true;

	      /* STD is *not* affected.  */
	      else if ((mem = mem_ref (dest)) != NULL_RTX
		       && GET_MODE_SIZE (GET_MODE (mem)) <= 4
		       && (src == const0_rtx
			   || (REG_P (src)
			       && REGNO (src) < 32
			       && REGNO (src) != REGNO (x)))
		       && !reg_mentioned_p (x, XEXP (mem, 0)))
		insert_nop = true;
	    }
	}
      else
	next = NEXT_INSN (insn);

      if (insert_nop)
	emit_insn_before (gen_nop (), next);
    }

  return 0;
}
namespace {

const pass_data pass_data_work_around_errata =
{
  RTL_PASS, /* type */
  "errata", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_rtl_sharing, /* todo_flags_finish */
};

class pass_work_around_errata : public rtl_opt_pass
{
public:
  pass_work_around_errata (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_work_around_errata, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return sparc_gate_work_around_errata (); }
  unsigned int execute () { return sparc_do_work_around_errata (); }

}; // class pass_work_around_errata

} // anon namespace

rtl_opt_pass *
make_pass_work_around_errata (gcc::context *ctxt)
{
  return new pass_work_around_errata (ctxt);
}
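
/* The pass is instantiated and hooked into the pipeline, right after the
   delayed-branch scheduling pass "dbr", at the end of sparc_option_override
   below.  */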
/* Helpers for TARGET_DEBUG_OPTIONS.  */
static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_CBCOND)
    fprintf (stderr, "CBCOND ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_leon3, PROCESSOR_LEON3 },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the enum processor_type in sparc-opts.h.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
    { "leon", MASK_ISA, MASK_V8|MASK_LEON|MASK_FPU },
    { "leon3", MASK_ISA, MASK_V8|MASK_LEON3|MASK_FPU },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF|MASK_CBCOND },
  };
  const struct cpu_table *cpu;
  unsigned int i;
  int fpu;
  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
	{
	  bool invert;
	  int mask;

	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (! strcmp (q, "all"))
	    mask = MASK_DEBUG_ALL;
	  else if (! strcmp (q, "options"))
	    mask = MASK_DEBUG_OPTIONS;
	  else
	    error ("unknown -mdebug-%s switch", q);

	  if (invert)
	    sparc_debug &= ~mask;
	  else
	    sparc_debug |= mask;
	}
    }
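
  /* For example, -mdebug=all,!options first sets every debug bit and then
     clears the options bit again, since the tokens are processed from left
     to right.  */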
  if (TARGET_DEBUG_OPTIONS)
    {
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);
    }

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
    {
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      fprintf (stderr, "sparc_cpu: %s\n",
	       cpu_table[(int) sparc_cpu].name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);
    }

  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
		   & ~(MASK_FMAF | MASK_VIS3)
#endif
#ifndef HAVE_AS_SPARC4
		   & ~MASK_CBCOND
#endif
#ifndef HAVE_AS_LEON
		   & ~(MASK_LEON | MASK_LEON3)
#endif
		   );

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;
  /* -mvis2 implies -mvis.  */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis.  */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is
     disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit.  */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;

  /* Use the deprecated v8 insns for sparc64 in 32-bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9 and makes no sense in 64-bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32-bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_LEON3:
      sparc_costs = &leon3_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara4_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    }

  if (sparc_memory_model == SMM_DEFAULT)
    {
      /* Choose the memory model for the operating system.  */
      enum sparc_memory_model_type os_default = SUBTARGET_DEFAULT_MEMORY_MODEL;
      if (os_default != SMM_DEFAULT)
	sparc_memory_model = os_default;
      /* Choose the most relaxed model for the processor.  */
      else if (TARGET_V9)
	sparc_memory_model = SMM_RMO;
      else if (TARGET_LEON3)
	sparc_memory_model = SMM_TSO;
      else if (TARGET_LEON)
	sparc_memory_model = SMM_SC;
      else if (TARGET_V8)
	sparc_memory_model = SMM_PSO;
      else
	sparc_memory_model = SMM_SC;
    }
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
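
  /* So, for example, tuning for UltraSPARC yields 2 simultaneous
     prefetches and a 64-byte L1 cache line, UltraSPARC III yields 8 and
     64, and everything else keeps the 3 and 32 defaults.  */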
  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;

  /* We register a machine specific pass to work around errata, if any.
     The pass must be scheduled as late as possible so that we have the
     (essentially) final form of the insn stream to work on.
     Registering the pass must be done at start up.  It's convenient to
     do it here.  */
  opt_pass *errata_pass = make_pass_work_around_errata (g);
  struct register_pass_info insert_pass_work_around_errata =
    {
      errata_pass,		/* pass */
      "dbr",			/* reference_pass_name */
      1,			/* ref_pass_instance_number */
      PASS_POS_INSERT_AFTER	/* pos_op */
    };
  register_pass (&insert_pass_work_around_errata);
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
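
/* These six codes correspond to the V9 branch-on-register conditions
   brz, brnz, brgez, brlz, brlez and brgz (and the matching movr variants),
   all of which compare a register against zero.  */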
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
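
/* Taken together, these three predicates classify an SFmode constant by
   the cheapest way to materialize its bit pattern in an integer register:
   fp_mov_p for a signed 13-bit immediate (one mov), fp_sethi_p for a
   value whose low 10 bits are clear (one sethi), and fp_high_losum_p for
   everything else (a two-insn sethi/or, i.e. high/losum, sequence).  */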
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one, whereas mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partitioning into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
	 always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && (const_zero_operand (operands[1], mode)
	      || const_all_ones_operand (operands[1], mode)))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && ! can_create_pseudo_p ())))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    case TImode:
      {
	rtx high, low;
	/* TImode isn't available in 32-bit mode.  */
	split_double (operands[1], &high, &low);
	emit_insn (gen_movdi (operand_subword (operands[0], 0, 0, TImode),
			      high));
	emit_insn (gen_movdi (operand_subword (operands[0], 1, 0, TImode),
			      low));
      }
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
1752 /* Load OP1, a 32-bit constant, into OP0, a register.
1753 We know it can't be done in one insn when we get
1754 here, the move expander guarantees this. */
1756 static void
1757 sparc_emit_set_const32 (rtx op0, rtx op1)
1759 enum machine_mode mode = GET_MODE (op0);
1760 rtx temp = op0;
1762 if (can_create_pseudo_p ())
1763 temp = gen_reg_rtx (mode);
1765 if (GET_CODE (op1) == CONST_INT)
1767 gcc_assert (!small_int_operand (op1, mode)
1768 && !const_high_operand (op1, mode));
1770 /* Emit them as real moves instead of a HIGH/LO_SUM,
1771 this way CSE can see everything and reuse intermediate
1772 values if it wants. */
1773 emit_insn (gen_rtx_SET (VOIDmode, temp,
1774 GEN_INT (INTVAL (op1)
1775 & ~(HOST_WIDE_INT)0x3ff)));
1777 emit_insn (gen_rtx_SET (VOIDmode,
1778 op0,
1779 gen_rtx_IOR (mode, temp,
1780 GEN_INT (INTVAL (op1) & 0x3ff))));
1782 else
1784 /* A symbol, emit in the traditional way. */
1785 emit_insn (gen_rtx_SET (VOIDmode, temp,
1786 gen_rtx_HIGH (mode, op1)));
1787 emit_insn (gen_rtx_SET (VOIDmode,
1788 op0, gen_rtx_LO_SUM (mode, temp, op1)));
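/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler): the CONST_INT path above splits the value exactly like
   the sethi/or pair it expands to -- sethi materializes bits 31..10 and
   the IOR fills in bits 9..0.  */

#include <stdint.h>

static void
split_const32_sketch (uint32_t val, uint32_t *hi22, uint32_t *lo10)
{
  *hi22 = val & ~(uint32_t) 0x3ff;      /* what the first SET materializes */
  *lo10 = val & 0x3ff;                  /* what the IOR adds back */
}

/* e.g. 0xdeadbeef splits into 0xdeadbc00 | 0x2ef.  */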
1792 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1793 If TEMP is nonzero, we are forbidden to use any other scratch
1794 registers. Otherwise, we are allowed to generate them as needed.
1796 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1797 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1799 void
1800 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1802 rtx temp1, temp2, temp3, temp4, temp5;
1803 rtx ti_temp = 0;
1805 if (temp && GET_MODE (temp) == TImode)
1807 ti_temp = temp;
1808 temp = gen_rtx_REG (DImode, REGNO (temp));
1811 /* SPARC-V9 code-model support. */
1812 switch (sparc_cmodel)
1814 case CM_MEDLOW:
1815 /* The range spanned by all instructions in the object is less
1816 than 2^31 bytes (2GB) and the distance from any instruction
1817 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1818 than 2^31 bytes (2GB).
1820 The executable must be in the low 4TB of the virtual address
1821 space.
1823 sethi %hi(symbol), %temp1
1824 or %temp1, %lo(symbol), %reg */
1825 if (temp)
1826 temp1 = temp; /* op0 is allowed. */
1827 else
1828 temp1 = gen_reg_rtx (DImode);
1830 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1831 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1832 break;
1834 case CM_MEDMID:
1835 /* The range spanned by all instructions in the object is less
1836 than 2^31 bytes (2GB) and the distance from any instruction
1837 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1838 than 2^31 bytes (2GB).
1840 The executable must be in the low 16TB of the virtual address
1841 space.
1843 sethi %h44(symbol), %temp1
1844 or %temp1, %m44(symbol), %temp2
1845 sllx %temp2, 12, %temp3
1846 or %temp3, %l44(symbol), %reg */
1847 if (temp)
1849 temp1 = op0;
1850 temp2 = op0;
1851 temp3 = temp; /* op0 is allowed. */
1853 else
1855 temp1 = gen_reg_rtx (DImode);
1856 temp2 = gen_reg_rtx (DImode);
1857 temp3 = gen_reg_rtx (DImode);
1860 emit_insn (gen_seth44 (temp1, op1));
1861 emit_insn (gen_setm44 (temp2, temp1, op1));
1862 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1863 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1864 emit_insn (gen_setl44 (op0, temp3, op1));
1865 break;
1867 case CM_MEDANY:
1868 /* The range spanned by all instructions in the object is less
1869 than 2^31 bytes (2GB) and the distance from any instruction
1870 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1871 than 2^31 bytes (2GB).
1873 The executable can be placed anywhere in the virtual address
1874 space.
1876 sethi %hh(symbol), %temp1
1877 sethi %lm(symbol), %temp2
1878 or %temp1, %hm(symbol), %temp3
1879 sllx %temp3, 32, %temp4
1880 or %temp4, %temp2, %temp5
1881 or %temp5, %lo(symbol), %reg */
1882 if (temp)
1884 /* It is possible that one of the registers we got for operands[2]
1885 might coincide with that of operands[0] (which is why we made
1886 it TImode). Pick the other one to use as our scratch. */
1887 if (rtx_equal_p (temp, op0))
1889 gcc_assert (ti_temp);
1890 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1892 temp1 = op0;
1893 temp2 = temp; /* op0 is _not_ allowed, see above. */
1894 temp3 = op0;
1895 temp4 = op0;
1896 temp5 = op0;
1898 else
1900 temp1 = gen_reg_rtx (DImode);
1901 temp2 = gen_reg_rtx (DImode);
1902 temp3 = gen_reg_rtx (DImode);
1903 temp4 = gen_reg_rtx (DImode);
1904 temp5 = gen_reg_rtx (DImode);
1907 emit_insn (gen_sethh (temp1, op1));
1908 emit_insn (gen_setlm (temp2, op1));
1909 emit_insn (gen_sethm (temp3, temp1, op1));
1910 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1911 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1912 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1913 gen_rtx_PLUS (DImode, temp4, temp2)));
1914 emit_insn (gen_setlo (op0, temp5, op1));
1915 break;
1917 case CM_EMBMEDANY:
1918 /* Old old old backwards compatibility cruft here.
1919 Essentially it is MEDLOW with a fixed 64-bit
1920 virtual base added to all data segment addresses.
1921 Text-segment stuff is computed like MEDANY; we can't
1922 reuse the code above because the relocation knobs
1923 look different.
1925 Data segment: sethi %hi(symbol), %temp1
1926 add %temp1, EMBMEDANY_BASE_REG, %temp2
1927 or %temp2, %lo(symbol), %reg */
1928 if (data_segment_operand (op1, GET_MODE (op1)))
1930 if (temp)
1932 temp1 = temp; /* op0 is allowed. */
1933 temp2 = op0;
1935 else
1937 temp1 = gen_reg_rtx (DImode);
1938 temp2 = gen_reg_rtx (DImode);
1941 emit_insn (gen_embmedany_sethi (temp1, op1));
1942 emit_insn (gen_embmedany_brsum (temp2, temp1));
1943 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1946 /* Text segment: sethi %uhi(symbol), %temp1
1947 sethi %hi(symbol), %temp2
1948 or %temp1, %ulo(symbol), %temp3
1949 sllx %temp3, 32, %temp4
1950 or %temp4, %temp2, %temp5
1951 or %temp5, %lo(symbol), %reg */
1952 else
1954 if (temp)
1956 /* It is possible that one of the registers we got for operands[2]
1957 might coincide with that of operands[0] (which is why we made
1958 it TImode). Pick the other one to use as our scratch. */
1959 if (rtx_equal_p (temp, op0))
1961 gcc_assert (ti_temp);
1962 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1964 temp1 = op0;
1965 temp2 = temp; /* op0 is _not_ allowed, see above. */
1966 temp3 = op0;
1967 temp4 = op0;
1968 temp5 = op0;
1970 else
1972 temp1 = gen_reg_rtx (DImode);
1973 temp2 = gen_reg_rtx (DImode);
1974 temp3 = gen_reg_rtx (DImode);
1975 temp4 = gen_reg_rtx (DImode);
1976 temp5 = gen_reg_rtx (DImode);
1979 emit_insn (gen_embmedany_textuhi (temp1, op1));
1980 emit_insn (gen_embmedany_texthi (temp2, op1));
1981 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1982 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1983 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1984 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1985 gen_rtx_PLUS (DImode, temp4, temp2)));
1986 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1988 break;
1990 default:
1991 gcc_unreachable ();
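/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler): how the CM_MEDMID sequence above reassembles a 44-bit
   address.  The %h44/%m44/%l44 relocations are modeled as plain shifts
   and masks.  */

#include <stdint.h>

static uint64_t
medmid_reassemble_sketch (uint64_t addr)    /* assumes addr < 2^44 */
{
  uint64_t h44 = (addr >> 22) & 0x3fffff;   /* sethi %h44: bits 43..22 */
  uint64_t m44 = (addr >> 12) & 0x3ff;      /* or %m44:    bits 21..12 */
  uint64_t l44 = addr & 0xfff;              /* or %l44:    bits 11..0  */

  /* sethi leaves h44 in bits 31..10; the sllx by 12 then slides
     everything into place before the final or.  */
  return (((h44 << 10) | m44) << 12) | l44;
}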
1995 #if HOST_BITS_PER_WIDE_INT == 32
1996 static void
1997 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1999 gcc_unreachable ();
2001 #else
2002 /* These avoid problems when cross compiling. If we do not
2003 go through all this hair then the optimizer will see
2004 invalid REG_EQUAL notes or in some cases none at all. */
2005 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
2006 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
2007 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
2008 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
2010 /* The optimizer must not assume anything about exactly
2011 which bits are set for a HIGH; they are unspecified.
2012 Unfortunately this leads to many missed optimizations
2013 during CSE. We mask out the non-HIGH bits so the result
2014 matches a plain movdi, to alleviate this problem. */
2015 static rtx
2016 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
2018 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
2021 static rtx
2022 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
2024 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
2027 static rtx
2028 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
2030 return gen_rtx_IOR (DImode, src, GEN_INT (val));
2033 static rtx
2034 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
2036 return gen_rtx_XOR (DImode, src, GEN_INT (val));
2039 /* Worker routines for 64-bit constant formation on arch64.
2040 One of the key things to do in these emissions is
2041 to create as many temp REGs as possible. This makes it
2042 possible for half-built constants to be reused when
2043 such values are similar to something required later on.
2044 Without doing this, the optimizer cannot see such
2045 opportunities. */
2047 static void sparc_emit_set_const64_quick1 (rtx, rtx,
2048 unsigned HOST_WIDE_INT, int);
2050 static void
2051 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
2052 unsigned HOST_WIDE_INT low_bits, int is_neg)
2054 unsigned HOST_WIDE_INT high_bits;
2056 if (is_neg)
2057 high_bits = (~low_bits) & 0xffffffff;
2058 else
2059 high_bits = low_bits;
2061 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2062 if (!is_neg)
2064 emit_insn (gen_rtx_SET (VOIDmode, op0,
2065 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2067 else
2069 /* If we are XOR'ing with -1, then we should emit a one's complement
2070 instead. This way the combiner will notice logical operations
2071 such as ANDN later on and substitute. */
2072 if ((low_bits & 0x3ff) == 0x3ff)
2074 emit_insn (gen_rtx_SET (VOIDmode, op0,
2075 gen_rtx_NOT (DImode, temp)));
2077 else
2079 emit_insn (gen_rtx_SET (VOIDmode, op0,
2080 gen_safe_XOR64 (temp,
2081 (-(HOST_WIDE_INT)0x400
2082 | (low_bits & 0x3ff)))));
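/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler) of why the is_neg path above is correct: sethi
   zero-extends on V9, so XOR'ing with a sign-extended 13-bit immediate
   both flips the upper word to all-ones and un-complements bits 31..10
   in a single insn.  */

#include <stdint.h>

static uint64_t
quick1_neg_sketch (uint32_t low_bits)
{
  uint64_t temp = (~low_bits) & 0xfffffc00u;           /* sethi %hi(~low) */
  int64_t xk = (int64_t) -0x400 | (low_bits & 0x3ff);  /* sign-extended simm13 */

  /* == 0xffffffff00000000 | low_bits, the intended constant.  */
  return temp ^ (uint64_t) xk;
}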
2087 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
2088 unsigned HOST_WIDE_INT, int);
2090 static void
2091 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
2092 unsigned HOST_WIDE_INT high_bits,
2093 unsigned HOST_WIDE_INT low_immediate,
2094 int shift_count)
2096 rtx temp2 = op0;
2098 if ((high_bits & 0xfffffc00) != 0)
2100 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2101 if ((high_bits & ~0xfffffc00) != 0)
2102 emit_insn (gen_rtx_SET (VOIDmode, op0,
2103 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2104 else
2105 temp2 = temp;
2107 else
2109 emit_insn (gen_safe_SET64 (temp, high_bits));
2110 temp2 = temp;
2113 /* Now shift it up into place. */
2114 emit_insn (gen_rtx_SET (VOIDmode, op0,
2115 gen_rtx_ASHIFT (DImode, temp2,
2116 GEN_INT (shift_count))));
2118 /* If there is a low immediate piece, finish up by
2119 putting that in as well. */
2120 if (low_immediate != 0)
2121 emit_insn (gen_rtx_SET (VOIDmode, op0,
2122 gen_safe_OR64 (op0, low_immediate)));
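/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler): quick2 above is simply 'materialize high_bits, shift
   it into place, optionally OR in a small trailer'.  */

#include <stdint.h>

static uint64_t
quick2_sketch (uint32_t high_bits, uint32_t low_immediate, int shift_count)
{
  uint64_t v = high_bits;       /* sethi[/or], or a single mov if it fits */
  v <<= shift_count;            /* sllx %reg, shift_count, %reg */
  if (low_immediate != 0)
    v |= low_immediate;         /* or %reg, low_immediate, %reg */
  return v;
}

/* e.g. quick2_sketch (0x12345678, 0, 32) == 0x1234567800000000.  */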
2125 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
2126 unsigned HOST_WIDE_INT);
2128 /* Full 64-bit constant decomposition. Even though this is the
2129 'worst' case, we still optimize a few things away. */
2130 static void
2131 sparc_emit_set_const64_longway (rtx op0, rtx temp,
2132 unsigned HOST_WIDE_INT high_bits,
2133 unsigned HOST_WIDE_INT low_bits)
2135 rtx sub_temp = op0;
2137 if (can_create_pseudo_p ())
2138 sub_temp = gen_reg_rtx (DImode);
2140 if ((high_bits & 0xfffffc00) != 0)
2142 emit_insn (gen_safe_HIGH64 (temp, high_bits));
2143 if ((high_bits & ~0xfffffc00) != 0)
2144 emit_insn (gen_rtx_SET (VOIDmode,
2145 sub_temp,
2146 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2147 else
2148 sub_temp = temp;
2150 else
2152 emit_insn (gen_safe_SET64 (temp, high_bits));
2153 sub_temp = temp;
2156 if (can_create_pseudo_p ())
2158 rtx temp2 = gen_reg_rtx (DImode);
2159 rtx temp3 = gen_reg_rtx (DImode);
2160 rtx temp4 = gen_reg_rtx (DImode);
2162 emit_insn (gen_rtx_SET (VOIDmode, temp4,
2163 gen_rtx_ASHIFT (DImode, sub_temp,
2164 GEN_INT (32))));
2166 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
2167 if ((low_bits & ~0xfffffc00) != 0)
2169 emit_insn (gen_rtx_SET (VOIDmode, temp3,
2170 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2171 emit_insn (gen_rtx_SET (VOIDmode, op0,
2172 gen_rtx_PLUS (DImode, temp4, temp3)));
2174 else
2176 emit_insn (gen_rtx_SET (VOIDmode, op0,
2177 gen_rtx_PLUS (DImode, temp4, temp2)));
2180 else
2182 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2183 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2184 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2185 int to_shift = 12;
2187 /* We are in the middle of reload, so this is really
2188 painful. However, we still make an attempt to
2189 avoid emitting truly stupid code. */
2190 if (low1 != const0_rtx)
2192 emit_insn (gen_rtx_SET (VOIDmode, op0,
2193 gen_rtx_ASHIFT (DImode, sub_temp,
2194 GEN_INT (to_shift))));
2195 emit_insn (gen_rtx_SET (VOIDmode, op0,
2196 gen_rtx_IOR (DImode, op0, low1)));
2197 sub_temp = op0;
2198 to_shift = 12;
2200 else
2202 to_shift += 12;
2204 if (low2 != const0_rtx)
2206 emit_insn (gen_rtx_SET (VOIDmode, op0,
2207 gen_rtx_ASHIFT (DImode, sub_temp,
2208 GEN_INT (to_shift))));
2209 emit_insn (gen_rtx_SET (VOIDmode, op0,
2210 gen_rtx_IOR (DImode, op0, low2)));
2211 sub_temp = op0;
2212 to_shift = 8;
2214 else
2216 to_shift += 8;
2218 emit_insn (gen_rtx_SET (VOIDmode, op0,
2219 gen_rtx_ASHIFT (DImode, sub_temp,
2220 GEN_INT (to_shift))));
2221 if (low3 != const0_rtx)
2222 emit_insn (gen_rtx_SET (VOIDmode, op0,
2223 gen_rtx_IOR (DImode, op0, low3)));
2224 /* phew... */
2228 /* Analyze a 64-bit constant for certain properties. */
2229 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2230 unsigned HOST_WIDE_INT,
2231 int *, int *, int *);
2233 static void
2234 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2235 unsigned HOST_WIDE_INT low_bits,
2236 int *hbsp, int *lbsp, int *abbasp)
2238 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2239 int i;
2241 lowest_bit_set = highest_bit_set = -1;
2242 i = 0;
2245 if ((lowest_bit_set == -1)
2246 && ((low_bits >> i) & 1))
2247 lowest_bit_set = i;
2248 if ((highest_bit_set == -1)
2249 && ((high_bits >> (32 - i - 1)) & 1))
2250 highest_bit_set = (64 - i - 1);
2252 while (++i < 32
2253 && ((highest_bit_set == -1)
2254 || (lowest_bit_set == -1)));
2255 if (i == 32)
2257 i = 0;
2260 if ((lowest_bit_set == -1)
2261 && ((high_bits >> i) & 1))
2262 lowest_bit_set = i + 32;
2263 if ((highest_bit_set == -1)
2264 && ((low_bits >> (32 - i - 1)) & 1))
2265 highest_bit_set = 32 - i - 1;
2267 while (++i < 32
2268 && ((highest_bit_set == -1)
2269 || (lowest_bit_set == -1)));
2271 /* If there are no bits set this should have gone out
2272 as one instruction! */
2273 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
2274 all_bits_between_are_set = 1;
2275 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2277 if (i < 32)
2279 if ((low_bits & (1 << i)) != 0)
2280 continue;
2282 else
2284 if ((high_bits & (1 << (i - 32))) != 0)
2285 continue;
2287 all_bits_between_are_set = 0;
2288 break;
2290 *hbsp = highest_bit_set;
2291 *lbsp = lowest_bit_set;
2292 *abbasp = all_bits_between_are_set;
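/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler): what the routine above computes, restated on a single
   64-bit value instead of a high/low pair.  */

#include <stdint.h>

static void
analyze_64bit_constant_sketch (uint64_t v, int *lbsp, int *hbsp, int *abbasp)
{
  int lo = -1, hi = -1, i;

  for (i = 0; i < 64; i++)
    if ((v >> i) & 1)
      {
        if (lo < 0)
          lo = i;
        hi = i;
      }

  *lbsp = lo;
  *hbsp = hi;
  /* All bits between are set <=> v is one contiguous run of ones.  */
  *abbasp = lo >= 0 && (v >> lo) == (~(uint64_t) 0 >> (63 - hi + lo));
}

/* e.g. for v == 0xff0000000000: lo == 40, hi == 47, contiguous.  */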
2295 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2297 static int
2298 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2299 unsigned HOST_WIDE_INT low_bits)
2301 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2303 if (high_bits == 0
2304 || high_bits == 0xffffffff)
2305 return 1;
2307 analyze_64bit_constant (high_bits, low_bits,
2308 &highest_bit_set, &lowest_bit_set,
2309 &all_bits_between_are_set);
2311 if ((highest_bit_set == 63
2312 || lowest_bit_set == 0)
2313 && all_bits_between_are_set != 0)
2314 return 1;
2316 if ((highest_bit_set - lowest_bit_set) < 21)
2317 return 1;
2319 return 0;
2322 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2323 unsigned HOST_WIDE_INT,
2324 int, int);
2326 static unsigned HOST_WIDE_INT
2327 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2328 unsigned HOST_WIDE_INT low_bits,
2329 int lowest_bit_set, int shift)
2331 HOST_WIDE_INT hi, lo;
2333 if (lowest_bit_set < 32)
2335 lo = (low_bits >> lowest_bit_set) << shift;
2336 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2338 else
2340 lo = 0;
2341 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2343 gcc_assert (! (hi & lo));
2344 return (hi | lo);
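/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler): the routine above, re-expressed on one 64-bit value --
   slide the run of set bits down so it starts at bit SHIFT.  With
   shift == 10 the run lines up with sethi's imm22 field, which is why
   the 22-bit case below passes 10.  */

#include <stdint.h>

static uint64_t
create_simple_focus_bits_sketch (uint64_t v, int lowest_bit_set, int shift)
{
  return (v >> lowest_bit_set) << shift;
}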
2347 /* Here we are sure to be arch64 and this is an integer constant
2348 being loaded into a register. Emit the most efficient
2349 insn sequence possible. Detection of all the 1-insn cases
2350 has been done already. */
2351 static void
2352 sparc_emit_set_const64 (rtx op0, rtx op1)
2354 unsigned HOST_WIDE_INT high_bits, low_bits;
2355 int lowest_bit_set, highest_bit_set;
2356 int all_bits_between_are_set;
2357 rtx temp = 0;
2359 /* Sanity check that we know what we are working with. */
2360 gcc_assert (TARGET_ARCH64
2361 && (GET_CODE (op0) == SUBREG
2362 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2364 if (! can_create_pseudo_p ())
2365 temp = op0;
2367 if (GET_CODE (op1) != CONST_INT)
2369 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2370 return;
2373 if (! temp)
2374 temp = gen_reg_rtx (DImode);
2376 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2377 low_bits = (INTVAL (op1) & 0xffffffff);
2379 /* low_bits bits 0 --> 31
2380 high_bits bits 32 --> 63 */
2382 analyze_64bit_constant (high_bits, low_bits,
2383 &highest_bit_set, &lowest_bit_set,
2384 &all_bits_between_are_set);
2386 /* First try for a 2-insn sequence. */
2388 /* These situations are preferred because the optimizer can
2389 * do more things with them:
2390 * 1) mov -1, %reg
2391 * sllx %reg, shift, %reg
2392 * 2) mov -1, %reg
2393 * srlx %reg, shift, %reg
2394 * 3) mov some_small_const, %reg
2395 * sllx %reg, shift, %reg
2397 if (((highest_bit_set == 63
2398 || lowest_bit_set == 0)
2399 && all_bits_between_are_set != 0)
2400 || ((highest_bit_set - lowest_bit_set) < 12))
2402 HOST_WIDE_INT the_const = -1;
2403 int shift = lowest_bit_set;
2405 if ((highest_bit_set != 63
2406 && lowest_bit_set != 0)
2407 || all_bits_between_are_set == 0)
2409 the_const =
2410 create_simple_focus_bits (high_bits, low_bits,
2411 lowest_bit_set, 0);
2413 else if (lowest_bit_set == 0)
2414 shift = -(63 - highest_bit_set);
2416 gcc_assert (SPARC_SIMM13_P (the_const));
2417 gcc_assert (shift != 0);
2419 emit_insn (gen_safe_SET64 (temp, the_const));
2420 if (shift > 0)
2421 emit_insn (gen_rtx_SET (VOIDmode,
2422 op0,
2423 gen_rtx_ASHIFT (DImode,
2424 temp,
2425 GEN_INT (shift))));
2426 else if (shift < 0)
2427 emit_insn (gen_rtx_SET (VOIDmode,
2428 op0,
2429 gen_rtx_LSHIFTRT (DImode,
2430 temp,
2431 GEN_INT (-shift))));
2432 return;
2435 /* Now a range of 22 or fewer bits set somewhere.
2436 * 1) sethi %hi(focus_bits), %reg
2437 * sllx %reg, shift, %reg
2438 * 2) sethi %hi(focus_bits), %reg
2439 * srlx %reg, shift, %reg
2441 if ((highest_bit_set - lowest_bit_set) < 21)
2443 unsigned HOST_WIDE_INT focus_bits =
2444 create_simple_focus_bits (high_bits, low_bits,
2445 lowest_bit_set, 10);
2447 gcc_assert (SPARC_SETHI_P (focus_bits));
2448 gcc_assert (lowest_bit_set != 10);
2450 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2452 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2453 if (lowest_bit_set < 10)
2454 emit_insn (gen_rtx_SET (VOIDmode,
2455 op0,
2456 gen_rtx_LSHIFTRT (DImode, temp,
2457 GEN_INT (10 - lowest_bit_set))));
2458 else if (lowest_bit_set > 10)
2459 emit_insn (gen_rtx_SET (VOIDmode,
2460 op0,
2461 gen_rtx_ASHIFT (DImode, temp,
2462 GEN_INT (lowest_bit_set - 10))));
2463 return;
2466 /* 1) sethi %hi(low_bits), %reg
2467 * or %reg, %lo(low_bits), %reg
2468 * 2) sethi %hi(~low_bits), %reg
2469 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2471 if (high_bits == 0
2472 || high_bits == 0xffffffff)
2474 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2475 (high_bits == 0xffffffff));
2476 return;
2479 /* Now, try 3-insn sequences. */
2481 /* 1) sethi %hi(high_bits), %reg
2482 * or %reg, %lo(high_bits), %reg
2483 * sllx %reg, 32, %reg
2485 if (low_bits == 0)
2487 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2488 return;
2491 /* We may be able to do something quick
2492 when the constant is negated, so try that. */
2493 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2494 (~low_bits) & 0xfffffc00))
2496 /* NOTE: The trailing bits get XOR'd so we need the
2497 non-negated bits, not the negated ones. */
2498 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2500 if ((((~high_bits) & 0xffffffff) == 0
2501 && ((~low_bits) & 0x80000000) == 0)
2502 || (((~high_bits) & 0xffffffff) == 0xffffffff
2503 && ((~low_bits) & 0x80000000) != 0))
2505 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2507 if ((SPARC_SETHI_P (fast_int)
2508 && (~high_bits & 0xffffffff) == 0)
2509 || SPARC_SIMM13_P (fast_int))
2510 emit_insn (gen_safe_SET64 (temp, fast_int));
2511 else
2512 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2514 else
2516 rtx negated_const;
2517 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2518 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2519 sparc_emit_set_const64 (temp, negated_const);
2522 /* If we are XOR'ing with -1, then we should emit a one's complement
2523 instead. This way the combiner will notice logical operations
2524 such as ANDN later on and substitute. */
2525 if (trailing_bits == 0x3ff)
2527 emit_insn (gen_rtx_SET (VOIDmode, op0,
2528 gen_rtx_NOT (DImode, temp)));
2530 else
2532 emit_insn (gen_rtx_SET (VOIDmode,
2533 op0,
2534 gen_safe_XOR64 (temp,
2535 (-0x400 | trailing_bits))));
2537 return;
2540 /* 1) sethi %hi(xxx), %reg
2541 * or %reg, %lo(xxx), %reg
2542 * sllx %reg, yyy, %reg
2544 * ??? This is just a generalized version of the low_bits==0
2545 * thing above, FIXME...
2547 if ((highest_bit_set - lowest_bit_set) < 32)
2549 unsigned HOST_WIDE_INT focus_bits =
2550 create_simple_focus_bits (high_bits, low_bits,
2551 lowest_bit_set, 0);
2553 /* We can't get here in this state. */
2554 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2556 /* So what we know is that the set bits straddle the
2557 middle of the 64-bit word. */
2558 sparc_emit_set_const64_quick2 (op0, temp,
2559 focus_bits, 0,
2560 lowest_bit_set);
2561 return;
2564 /* 1) sethi %hi(high_bits), %reg
2565 * or %reg, %lo(high_bits), %reg
2566 * sllx %reg, 32, %reg
2567 * or %reg, low_bits, %reg
2569 if (SPARC_SIMM13_P(low_bits)
2570 && ((int)low_bits > 0))
2572 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2573 return;
2576 /* The easiest way, when all else fails, is full decomposition. */
2577 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2579 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2581 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2582 return the mode to be used for the comparison. For floating-point,
2583 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2584 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2585 processing is needed. */
2587 enum machine_mode
2588 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2590 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2592 switch (op)
2594 case EQ:
2595 case NE:
2596 case UNORDERED:
2597 case ORDERED:
2598 case UNLT:
2599 case UNLE:
2600 case UNGT:
2601 case UNGE:
2602 case UNEQ:
2603 case LTGT:
2604 return CCFPmode;
2606 case LT:
2607 case LE:
2608 case GT:
2609 case GE:
2610 return CCFPEmode;
2612 default:
2613 gcc_unreachable ();
2616 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2617 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2619 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2620 return CCX_NOOVmode;
2621 else
2622 return CC_NOOVmode;
2624 else
2626 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2627 return CCXmode;
2628 else
2629 return CCmode;
2633 /* Emit the compare insn and return the CC reg for a CODE comparison
2634 with operands X and Y. */
2636 static rtx
2637 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2639 enum machine_mode mode;
2640 rtx cc_reg;
2642 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2643 return x;
2645 mode = SELECT_CC_MODE (code, x, y);
2647 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2648 fcc regs (cse can't tell they're really call clobbered regs and will
2649 remove a duplicate comparison even if there is an intervening function
2650 call - it will then try to reload the cc reg via an int reg which is why
2651 we need the movcc patterns). It is possible to provide the movcc
2652 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2653 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2654 to tell cse that CCFPE mode registers (even pseudos) are call
2655 clobbered. */
2657 /* ??? This is an experiment. Rather than making changes to cse which may
2658 or may not be easy/clean, we do our own cse. This is possible because
2659 we will generate hard registers. Cse knows they're call clobbered (it
2660 doesn't know the same thing about pseudos). If we guess wrong, no big
2661 deal, but if we win, great! */
2663 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2664 #if 1 /* experiment */
2666 int reg;
2667 /* We cycle through the registers to ensure they're all exercised. */
2668 static int next_fcc_reg = 0;
2669 /* Previous x,y for each fcc reg. */
2670 static rtx prev_args[4][2];
2672 /* Scan prev_args for x,y. */
2673 for (reg = 0; reg < 4; reg++)
2674 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2675 break;
2676 if (reg == 4)
2678 reg = next_fcc_reg;
2679 prev_args[reg][0] = x;
2680 prev_args[reg][1] = y;
2681 next_fcc_reg = (next_fcc_reg + 1) & 3;
2683 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2685 #else
2686 cc_reg = gen_reg_rtx (mode);
2687 #endif /* ! experiment */
2688 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2689 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2690 else
2691 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2693 /* We shouldn't get there for TFmode if !TARGET_HARD_QUAD. If we do, this
2694 will only result in an unrecognizable insn so no point in asserting. */
2695 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2697 return cc_reg;
2701 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2704 gen_compare_reg (rtx cmp)
2706 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2709 /* This function is used for v9 only.
2710 DEST is the target of the Scc insn.
2711 CODE is the code for an Scc's comparison.
2712 X and Y are the values we compare.
2714 This function is needed to turn
2716 (set (reg:SI 110)
2717 (gt (reg:CCX 100 %icc)
2718 (const_int 0)))
2719 into
2720 (set (reg:SI 110)
2721 (gt:DI (reg:CCX 100 %icc)
2722 (const_int 0)))
2724 I.e., the instruction recognizer needs to see the mode of the comparison to
2725 find the right instruction. We could use "gt:DI" right in the
2726 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2728 static int
2729 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2731 if (! TARGET_ARCH64
2732 && (GET_MODE (x) == DImode
2733 || GET_MODE (dest) == DImode))
2734 return 0;
2736 /* Try to use the movrCC insns. */
2737 if (TARGET_ARCH64
2738 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2739 && y == const0_rtx
2740 && v9_regcmp_p (compare_code))
2742 rtx op0 = x;
2743 rtx temp;
2745 /* Special case for op0 != 0. This can be done with one instruction if
2746 dest == x. */
2748 if (compare_code == NE
2749 && GET_MODE (dest) == DImode
2750 && rtx_equal_p (op0, dest))
2752 emit_insn (gen_rtx_SET (VOIDmode, dest,
2753 gen_rtx_IF_THEN_ELSE (DImode,
2754 gen_rtx_fmt_ee (compare_code, DImode,
2755 op0, const0_rtx),
2756 const1_rtx,
2757 dest)));
2758 return 1;
2761 if (reg_overlap_mentioned_p (dest, op0))
2763 /* Handle the case where dest == x.
2764 We "early clobber" the result. */
2765 op0 = gen_reg_rtx (GET_MODE (x));
2766 emit_move_insn (op0, x);
2769 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2770 if (GET_MODE (op0) != DImode)
2772 temp = gen_reg_rtx (DImode);
2773 convert_move (temp, op0, 0);
2775 else
2776 temp = op0;
2777 emit_insn (gen_rtx_SET (VOIDmode, dest,
2778 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2779 gen_rtx_fmt_ee (compare_code, DImode,
2780 temp, const0_rtx),
2781 const1_rtx,
2782 dest)));
2783 return 1;
2785 else
2787 x = gen_compare_reg_1 (compare_code, x, y);
2788 y = const0_rtx;
2790 gcc_assert (GET_MODE (x) != CC_NOOVmode
2791 && GET_MODE (x) != CCX_NOOVmode);
2793 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2794 emit_insn (gen_rtx_SET (VOIDmode, dest,
2795 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2796 gen_rtx_fmt_ee (compare_code,
2797 GET_MODE (x), x, y),
2798 const1_rtx, dest)));
2799 return 1;
2804 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2805 without jumps using the addx/subx instructions. */
2807 bool
2808 emit_scc_insn (rtx operands[])
2810 rtx tem;
2811 rtx x;
2812 rtx y;
2813 enum rtx_code code;
2815 /* The quad-word fp compare library routines all return nonzero to indicate
2816 true, which is different from the equivalent libgcc routines, so we must
2817 handle them specially here. */
2818 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2820 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2821 GET_CODE (operands[1]));
2822 operands[2] = XEXP (operands[1], 0);
2823 operands[3] = XEXP (operands[1], 1);
2826 code = GET_CODE (operands[1]);
2827 x = operands[2];
2828 y = operands[3];
2830 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2831 more applications). The exception to this is "reg != 0" which can
2832 be done in one instruction on v9 (so we do it). */
2833 if (code == EQ)
2835 if (GET_MODE (x) == SImode)
2837 rtx pat;
2838 if (TARGET_ARCH64)
2839 pat = gen_seqsidi_special (operands[0], x, y);
2840 else
2841 pat = gen_seqsisi_special (operands[0], x, y);
2842 emit_insn (pat);
2843 return true;
2845 else if (GET_MODE (x) == DImode)
2847 rtx pat = gen_seqdi_special (operands[0], x, y);
2848 emit_insn (pat);
2849 return true;
2853 if (code == NE)
2855 if (GET_MODE (x) == SImode)
2857 rtx pat;
2858 if (TARGET_ARCH64)
2859 pat = gen_snesidi_special (operands[0], x, y);
2860 else
2861 pat = gen_snesisi_special (operands[0], x, y);
2862 emit_insn (pat);
2863 return true;
2865 else if (GET_MODE (x) == DImode)
2867 rtx pat;
2868 if (TARGET_VIS3)
2869 pat = gen_snedi_special_vis3 (operands[0], x, y);
2870 else
2871 pat = gen_snedi_special (operands[0], x, y);
2872 emit_insn (pat);
2873 return true;
2877 if (TARGET_V9
2878 && TARGET_ARCH64
2879 && GET_MODE (x) == DImode
2880 && !(TARGET_VIS3
2881 && (code == GTU || code == LTU))
2882 && gen_v9_scc (operands[0], code, x, y))
2883 return true;
2885 /* We can do LTU and GEU using the addx/subx instructions too. And
2886 for GTU/LEU, if both operands are registers, swap them and fall
2887 back to the easy case. */
2888 if (code == GTU || code == LEU)
2890 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2891 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2893 tem = x;
2894 x = y;
2895 y = tem;
2896 code = swap_condition (code);
2900 if (code == LTU
2901 || (!TARGET_VIS3 && code == GEU))
2903 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2904 gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2905 gen_compare_reg_1 (code, x, y),
2906 const0_rtx)));
2907 return true;
2910 /* All the possibilities to use addx/subx-based sequences have been
2911 exhausted; try for a 3-instruction sequence using v9 conditional
2912 moves. */
2913 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
2914 return true;
2916 /* Nope, do branches. */
2917 return false;
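/* An illustrative, standalone sketch (hypothetical helpers, not used by
   the compiler) of the branch-free addx/subx idiom referred to above;
   the precise patterns live in sparc.md.  'subcc %g0, r, %g0' borrows
   exactly when r != 0, and addx/subx then fold the carry into the
   result.  */

#include <stdint.h>

static uint32_t
scc_ne_sketch (uint32_t x, uint32_t y)
{
  uint32_t r = x - y;
  uint32_t carry = r != 0;   /* subcc %g0, r, %g0: carry set iff r != 0 */
  return carry;              /* addx %g0, 0, dest: copy the carry out */
}

static uint32_t
scc_eq_sketch (uint32_t x, uint32_t y)
{
  uint32_t r = x - y;
  uint32_t carry = r != 0;
  return 1 - carry;          /* subx %g0, -1, dest: 0 - (-1) - carry */
}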
2920 /* Emit a conditional jump insn for the v9 architecture using comparison code
2921 CODE and jump target LABEL.
2922 This function exists to take advantage of the v9 brxx insns. */
2924 static void
2925 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2927 emit_jump_insn (gen_rtx_SET (VOIDmode,
2928 pc_rtx,
2929 gen_rtx_IF_THEN_ELSE (VOIDmode,
2930 gen_rtx_fmt_ee (code, GET_MODE (op0),
2931 op0, const0_rtx),
2932 gen_rtx_LABEL_REF (VOIDmode, label),
2933 pc_rtx)));
2936 /* Emit a conditional jump insn for the UA2011 architecture using
2937 comparison code CODE and jump target LABEL. This function exists
2938 to take advantage of the UA2011 Compare and Branch insns. */
2940 static void
2941 emit_cbcond_insn (enum rtx_code code, rtx op0, rtx op1, rtx label)
2943 rtx if_then_else;
2945 if_then_else = gen_rtx_IF_THEN_ELSE (VOIDmode,
2946 gen_rtx_fmt_ee(code, GET_MODE(op0),
2947 op0, op1),
2948 gen_rtx_LABEL_REF (VOIDmode, label),
2949 pc_rtx);
2951 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, if_then_else));
2954 void
2955 emit_conditional_branch_insn (rtx operands[])
2957 /* The quad-word fp compare library routines all return nonzero to indicate
2958 true, which is different from the equivalent libgcc routines, so we must
2959 handle them specially here. */
2960 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2962 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2963 GET_CODE (operands[0]));
2964 operands[1] = XEXP (operands[0], 0);
2965 operands[2] = XEXP (operands[0], 1);
2968 /* If we can tell early on that the comparison is against a constant
2969 that won't fit in the 5-bit signed immediate field of a cbcond,
2970 use one of the other v9 conditional branch sequences. */
2971 if (TARGET_CBCOND
2972 && GET_CODE (operands[1]) == REG
2973 && (GET_MODE (operands[1]) == SImode
2974 || (TARGET_ARCH64 && GET_MODE (operands[1]) == DImode))
2975 && (GET_CODE (operands[2]) != CONST_INT
2976 || SPARC_SIMM5_P (INTVAL (operands[2]))))
2978 emit_cbcond_insn (GET_CODE (operands[0]), operands[1], operands[2], operands[3]);
2979 return;
2982 if (TARGET_ARCH64 && operands[2] == const0_rtx
2983 && GET_CODE (operands[1]) == REG
2984 && GET_MODE (operands[1]) == DImode)
2986 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2987 return;
2990 operands[1] = gen_compare_reg (operands[0]);
2991 operands[2] = const0_rtx;
2992 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2993 operands[1], operands[2]);
2994 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2995 operands[3]));
2999 /* Generate a DFmode part of a hard TFmode register.
3000 REG is the TFmode hard register, LOW is 1 for the
3001 low 64 bits of the register and 0 otherwise.
3004 gen_df_reg (rtx reg, int low)
3006 int regno = REGNO (reg);
3008 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
3009 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
3010 return gen_rtx_REG (DFmode, regno);
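/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler) of the register-number arithmetic above for big-endian
   SPARC, where the low-order half of a TFmode value lives at the higher
   register number.  A TFmode value spans two DImode integer registers
   on ARCH64 but four 32-bit FP registers, hence the step of 1 vs. 2.  */

#include <stdbool.h>

static int
df_half_regno_sketch (int tf_regno, bool low_half, bool int_reg_arch64)
{
  if (!low_half)
    return tf_regno;                          /* high half: base regno */
  return tf_regno + (int_reg_arch64 ? 1 : 2); /* low half: next DF slot */
}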
3013 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
3014 Unlike normal calls, TFmode operands are passed by reference. It is
3015 assumed that no more than 3 operands are required. */
3017 static void
3018 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
3020 rtx ret_slot = NULL, arg[3], func_sym;
3021 int i;
3023 /* We only expect to be called for conversions, unary, and binary ops. */
3024 gcc_assert (nargs == 2 || nargs == 3);
3026 for (i = 0; i < nargs; ++i)
3028 rtx this_arg = operands[i];
3029 rtx this_slot;
3031 /* TFmode arguments and return values are passed by reference. */
3032 if (GET_MODE (this_arg) == TFmode)
3034 int force_stack_temp;
3036 force_stack_temp = 0;
3037 if (TARGET_BUGGY_QP_LIB && i == 0)
3038 force_stack_temp = 1;
3040 if (GET_CODE (this_arg) == MEM
3041 && ! force_stack_temp)
3043 tree expr = MEM_EXPR (this_arg);
3044 if (expr)
3045 mark_addressable (expr);
3046 this_arg = XEXP (this_arg, 0);
3048 else if (CONSTANT_P (this_arg)
3049 && ! force_stack_temp)
3051 this_slot = force_const_mem (TFmode, this_arg);
3052 this_arg = XEXP (this_slot, 0);
3054 else
3056 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode));
3058 /* Operand 0 is the return value. We'll copy it out later. */
3059 if (i > 0)
3060 emit_move_insn (this_slot, this_arg);
3061 else
3062 ret_slot = this_slot;
3064 this_arg = XEXP (this_slot, 0);
3068 arg[i] = this_arg;
3071 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
3073 if (GET_MODE (operands[0]) == TFmode)
3075 if (nargs == 2)
3076 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
3077 arg[0], GET_MODE (arg[0]),
3078 arg[1], GET_MODE (arg[1]));
3079 else
3080 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
3081 arg[0], GET_MODE (arg[0]),
3082 arg[1], GET_MODE (arg[1]),
3083 arg[2], GET_MODE (arg[2]));
3085 if (ret_slot)
3086 emit_move_insn (operands[0], ret_slot);
3088 else
3090 rtx ret;
3092 gcc_assert (nargs == 2);
3094 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
3095 GET_MODE (operands[0]), 1,
3096 arg[1], GET_MODE (arg[1]));
3098 if (ret != operands[0])
3099 emit_move_insn (operands[0], ret);
3103 /* Expand soft-float TFmode calls to SPARC ABI routines. */
3105 static void
3106 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
3108 const char *func;
3110 switch (code)
3112 case PLUS:
3113 func = "_Qp_add";
3114 break;
3115 case MINUS:
3116 func = "_Qp_sub";
3117 break;
3118 case MULT:
3119 func = "_Qp_mul";
3120 break;
3121 case DIV:
3122 func = "_Qp_div";
3123 break;
3124 default:
3125 gcc_unreachable ();
3128 emit_soft_tfmode_libcall (func, 3, operands);
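/* An illustrative, standalone sketch (not used by the compiler) of the
   calling convention the libcall emitter above relies on: TFmode values
   are passed and returned by reference.  Assuming the standard _Qp_add
   prototype from the 64-bit SPARC ABI, a long double addition becomes
   roughly:  */

extern void _Qp_add (long double *, const long double *, const long double *);

static long double
soft_tf_add_sketch (long double a, long double b)
{
  long double result;
  _Qp_add (&result, &a, &b);   /* operand 0, the return value, goes first */
  return result;
}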
3131 static void
3132 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
3134 const char *func;
3136 gcc_assert (code == SQRT);
3137 func = "_Qp_sqrt";
3139 emit_soft_tfmode_libcall (func, 2, operands);
3142 static void
3143 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
3145 const char *func;
3147 switch (code)
3149 case FLOAT_EXTEND:
3150 switch (GET_MODE (operands[1]))
3152 case SFmode:
3153 func = "_Qp_stoq";
3154 break;
3155 case DFmode:
3156 func = "_Qp_dtoq";
3157 break;
3158 default:
3159 gcc_unreachable ();
3161 break;
3163 case FLOAT_TRUNCATE:
3164 switch (GET_MODE (operands[0]))
3166 case SFmode:
3167 func = "_Qp_qtos";
3168 break;
3169 case DFmode:
3170 func = "_Qp_qtod";
3171 break;
3172 default:
3173 gcc_unreachable ();
3175 break;
3177 case FLOAT:
3178 switch (GET_MODE (operands[1]))
3180 case SImode:
3181 func = "_Qp_itoq";
3182 if (TARGET_ARCH64)
3183 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
3184 break;
3185 case DImode:
3186 func = "_Qp_xtoq";
3187 break;
3188 default:
3189 gcc_unreachable ();
3191 break;
3193 case UNSIGNED_FLOAT:
3194 switch (GET_MODE (operands[1]))
3196 case SImode:
3197 func = "_Qp_uitoq";
3198 if (TARGET_ARCH64)
3199 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
3200 break;
3201 case DImode:
3202 func = "_Qp_uxtoq";
3203 break;
3204 default:
3205 gcc_unreachable ();
3207 break;
3209 case FIX:
3210 switch (GET_MODE (operands[0]))
3212 case SImode:
3213 func = "_Qp_qtoi";
3214 break;
3215 case DImode:
3216 func = "_Qp_qtox";
3217 break;
3218 default:
3219 gcc_unreachable ();
3221 break;
3223 case UNSIGNED_FIX:
3224 switch (GET_MODE (operands[0]))
3226 case SImode:
3227 func = "_Qp_qtoui";
3228 break;
3229 case DImode:
3230 func = "_Qp_qtoux";
3231 break;
3232 default:
3233 gcc_unreachable ();
3235 break;
3237 default:
3238 gcc_unreachable ();
3241 emit_soft_tfmode_libcall (func, 2, operands);
3244 /* Expand a hard-float tfmode operation. All arguments must be in
3245 registers. */
3247 static void
3248 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
3250 rtx op, dest;
3252 if (GET_RTX_CLASS (code) == RTX_UNARY)
3254 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3255 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
3257 else
3259 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
3260 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
3261 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
3262 operands[1], operands[2]);
3265 if (register_operand (operands[0], VOIDmode))
3266 dest = operands[0];
3267 else
3268 dest = gen_reg_rtx (GET_MODE (operands[0]));
3270 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
3272 if (dest != operands[0])
3273 emit_move_insn (operands[0], dest);
3276 void
3277 emit_tfmode_binop (enum rtx_code code, rtx *operands)
3279 if (TARGET_HARD_QUAD)
3280 emit_hard_tfmode_operation (code, operands);
3281 else
3282 emit_soft_tfmode_binop (code, operands);
3285 void
3286 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3288 if (TARGET_HARD_QUAD)
3289 emit_hard_tfmode_operation (code, operands);
3290 else
3291 emit_soft_tfmode_unop (code, operands);
3294 void
3295 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3297 if (TARGET_HARD_QUAD)
3298 emit_hard_tfmode_operation (code, operands);
3299 else
3300 emit_soft_tfmode_cvt (code, operands);
3303 /* Return nonzero if a branch/jump/call instruction will be emitting
3304 a nop into its delay slot. */
3307 empty_delay_slot (rtx insn)
3309 rtx seq;
3311 /* If there is no previous instruction (should not happen), return true. */
3312 if (PREV_INSN (insn) == NULL)
3313 return 1;
3315 seq = NEXT_INSN (PREV_INSN (insn));
3316 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3317 return 0;
3319 return 1;
3322 /* Return nonzero if we should emit a nop after a cbcond instruction.
3323 The cbcond instruction does not have a delay slot; however, there is
3324 a severe performance penalty if a control transfer appears right
3325 after a cbcond. Therefore we emit a nop when we detect this
3326 situation. */
3329 emit_cbcond_nop (rtx insn)
3331 rtx next = next_active_insn (insn);
3333 if (!next)
3334 return 1;
3336 if (NONJUMP_INSN_P (next)
3337 && GET_CODE (PATTERN (next)) == SEQUENCE)
3338 next = XVECEXP (PATTERN (next), 0, 0);
3339 else if (CALL_P (next)
3340 && GET_CODE (PATTERN (next)) == PARALLEL)
3342 rtx delay = XVECEXP (PATTERN (next), 0, 1);
3344 if (GET_CODE (delay) == RETURN)
3346 /* It's a sibling call. Do not emit the nop if we're going
3347 to emit something other than the jump itself as the first
3348 instruction of the sibcall sequence. */
3349 if (sparc_leaf_function_p || TARGET_FLAT)
3350 return 0;
3354 if (NONJUMP_INSN_P (next))
3355 return 0;
3357 return 1;
3360 /* Return nonzero if TRIAL can go into the call delay slot. */
3363 tls_call_delay (rtx trial)
3365 rtx pat;
3367 /* Binutils allows
3368 call __tls_get_addr, %tgd_call (foo)
3369 add %l7, %o0, %o0, %tgd_add (foo)
3370 while Sun as/ld does not. */
3371 if (TARGET_GNU_TLS || !TARGET_TLS)
3372 return 1;
3374 pat = PATTERN (trial);
3376 /* We must reject tgd_add{32|64}, i.e.
3377 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
3378 and tldm_add{32|64}, i.e.
3379 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
3380 for Sun as/ld. */
3381 if (GET_CODE (pat) == SET
3382 && GET_CODE (SET_SRC (pat)) == PLUS)
3384 rtx unspec = XEXP (SET_SRC (pat), 1);
3386 if (GET_CODE (unspec) == UNSPEC
3387 && (XINT (unspec, 1) == UNSPEC_TLSGD
3388 || XINT (unspec, 1) == UNSPEC_TLSLDM))
3389 return 0;
3392 return 1;
3395 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3396 instruction. RETURN_P is true if the v9 variant 'return' is to be
3397 considered in the test too.
3399 TRIAL must be a SET whose destination is a REG appropriate for the
3400 'restore' instruction or, if RETURN_P is true, for the 'return'
3401 instruction. */
3403 static int
3404 eligible_for_restore_insn (rtx trial, bool return_p)
3406 rtx pat = PATTERN (trial);
3407 rtx src = SET_SRC (pat);
3408 bool src_is_freg = false;
3409 rtx src_reg;
3411 /* Since we now can do moves between float and integer registers when
3412 VIS3 is enabled, we have to catch this case. We can allow such
3413 moves when doing a 'return', however. */
3414 src_reg = src;
3415 if (GET_CODE (src_reg) == SUBREG)
3416 src_reg = SUBREG_REG (src_reg);
3417 if (GET_CODE (src_reg) == REG
3418 && SPARC_FP_REG_P (REGNO (src_reg)))
3419 src_is_freg = true;
3421 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3422 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3423 && arith_operand (src, GET_MODE (src))
3424 && ! src_is_freg)
3426 if (TARGET_ARCH64)
3427 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3428 else
3429 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3432 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3433 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3434 && arith_double_operand (src, GET_MODE (src))
3435 && ! src_is_freg)
3436 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3438 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3439 else if (! TARGET_FPU && register_operand (src, SFmode))
3440 return 1;
3442 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3443 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3444 return 1;
3446 /* If we have the 'return' instruction, anything that does not use
3447 local or output registers and can go into a delay slot wins. */
3448 else if (return_p
3449 && TARGET_V9
3450 && !epilogue_renumber (&pat, 1)
3451 && get_attr_in_uncond_branch_delay (trial)
3452 == IN_UNCOND_BRANCH_DELAY_TRUE)
3453 return 1;
3455 /* The 'restore src1,src2,dest' pattern for SImode. */
3456 else if (GET_CODE (src) == PLUS
3457 && register_operand (XEXP (src, 0), SImode)
3458 && arith_operand (XEXP (src, 1), SImode))
3459 return 1;
3461 /* The 'restore src1,src2,dest' pattern for DImode. */
3462 else if (GET_CODE (src) == PLUS
3463 && register_operand (XEXP (src, 0), DImode)
3464 && arith_double_operand (XEXP (src, 1), DImode))
3465 return 1;
3467 /* The 'restore src1,%lo(src2),dest' pattern. */
3468 else if (GET_CODE (src) == LO_SUM
3469 && ! TARGET_CM_MEDMID
3470 && ((register_operand (XEXP (src, 0), SImode)
3471 && immediate_operand (XEXP (src, 1), SImode))
3472 || (TARGET_ARCH64
3473 && register_operand (XEXP (src, 0), DImode)
3474 && immediate_operand (XEXP (src, 1), DImode))))
3475 return 1;
3477 /* The 'restore src,src,dest' pattern. */
3478 else if (GET_CODE (src) == ASHIFT
3479 && (register_operand (XEXP (src, 0), SImode)
3480 || register_operand (XEXP (src, 0), DImode))
3481 && XEXP (src, 1) == const1_rtx)
3482 return 1;
3484 return 0;
3487 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3490 eligible_for_return_delay (rtx trial)
3492 int regno;
3493 rtx pat;
3495 if (! NONJUMP_INSN_P (trial))
3496 return 0;
3498 if (get_attr_length (trial) != 1)
3499 return 0;
3501 /* If the function uses __builtin_eh_return, the eh_return machinery
3502 occupies the delay slot. */
3503 if (crtl->calls_eh_return)
3504 return 0;
3506 /* In the case of a leaf or flat function, anything can go into the slot. */
3507 if (sparc_leaf_function_p || TARGET_FLAT)
3508 return
3509 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
3511 pat = PATTERN (trial);
3512 if (GET_CODE (pat) == PARALLEL)
3514 int i;
3516 if (! TARGET_V9)
3517 return 0;
3518 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3520 rtx expr = XVECEXP (pat, 0, i);
3521 if (GET_CODE (expr) != SET)
3522 return 0;
3523 if (GET_CODE (SET_DEST (expr)) != REG)
3524 return 0;
3525 regno = REGNO (SET_DEST (expr));
3526 if (regno >= 8 && regno < 24)
3527 return 0;
3529 return !epilogue_renumber (&pat, 1)
3530 && (get_attr_in_uncond_branch_delay (trial)
3531 == IN_UNCOND_BRANCH_DELAY_TRUE);
3534 if (GET_CODE (pat) != SET)
3535 return 0;
3537 if (GET_CODE (SET_DEST (pat)) != REG)
3538 return 0;
3540 regno = REGNO (SET_DEST (pat));
3542 /* Otherwise, only operations which can be done in tandem with
3543 a `restore' or `return' insn can go into the delay slot. */
3544 if (regno >= 8 && regno < 24)
3545 return 0;
3547 /* If this instruction sets up a floating point register and we have a return
3548 instruction, it can probably go in. But restore will not work
3549 with FP_REGS. */
3550 if (! SPARC_INT_REG_P (regno))
3551 return (TARGET_V9
3552 && !epilogue_renumber (&pat, 1)
3553 && get_attr_in_uncond_branch_delay (trial)
3554 == IN_UNCOND_BRANCH_DELAY_TRUE);
3556 return eligible_for_restore_insn (trial, true);
3559 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3562 eligible_for_sibcall_delay (rtx trial)
3564 rtx pat;
3566 if (! NONJUMP_INSN_P (trial) || GET_CODE (PATTERN (trial)) != SET)
3567 return 0;
3569 if (get_attr_length (trial) != 1)
3570 return 0;
3572 pat = PATTERN (trial);
3574 if (sparc_leaf_function_p || TARGET_FLAT)
3576 /* If the tail call is done using the call instruction,
3577 we have to restore %o7 in the delay slot. */
3578 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3579 return 0;
3581 /* %g1 is used to build the function address. */
3582 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3583 return 0;
3585 return 1;
3588 /* Otherwise, only operations which can be done in tandem with
3589 a `restore' insn can go into the delay slot. */
3590 if (GET_CODE (SET_DEST (pat)) != REG
3591 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3592 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
3593 return 0;
3595 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3596 in most cases. */
3597 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3598 return 0;
3600 return eligible_for_restore_insn (trial, false);
3603 /* Determine if it's legal to put X into the constant pool. This
3604 is not possible if X contains the address of a symbol that is
3605 not constant (TLS) or not known at final link time (PIC). */
3607 static bool
3608 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3610 switch (GET_CODE (x))
3612 case CONST_INT:
3613 case CONST_DOUBLE:
3614 case CONST_VECTOR:
3615 /* Accept all non-symbolic constants. */
3616 return false;
3618 case LABEL_REF:
3619 /* Labels are OK iff we are non-PIC. */
3620 return flag_pic != 0;
3622 case SYMBOL_REF:
3623 /* 'Naked' TLS symbol references are never OK,
3624 non-TLS symbols are OK iff we are non-PIC. */
3625 if (SYMBOL_REF_TLS_MODEL (x))
3626 return true;
3627 else
3628 return flag_pic != 0;
3630 case CONST:
3631 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3632 case PLUS:
3633 case MINUS:
3634 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3635 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3636 case UNSPEC:
3637 return true;
3638 default:
3639 gcc_unreachable ();
3643 /* Global Offset Table support. */
3644 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3645 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3647 /* Return the SYMBOL_REF for the Global Offset Table. */
3649 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3651 static rtx
3652 sparc_got (void)
3654 if (!sparc_got_symbol)
3655 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3657 return sparc_got_symbol;
3660 /* Ensure that we are not using patterns that are not OK with PIC. */
3663 check_pic (int i)
3665 rtx op;
3667 switch (flag_pic)
3669 case 1:
3670 op = recog_data.operand[i];
3671 gcc_assert (GET_CODE (op) != SYMBOL_REF
3672 && (GET_CODE (op) != CONST
3673 || (GET_CODE (XEXP (op, 0)) == MINUS
3674 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3675 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3676 case 2:
3677 default:
3678 return 1;
3682 /* Return true if X is an address which needs a temporary register when
3683 reloaded while generating PIC code. */
3686 pic_address_needs_scratch (rtx x)
3688 /* An address which is a symbolic operand plus a non-SMALL_INT needs a temp reg. */
3689 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3690 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3691 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3692 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3693 return 1;
3695 return 0;
3698 /* Determine if a given RTX is a valid constant. We already know this
3699 satisfies CONSTANT_P. */
3701 static bool
3702 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3704 switch (GET_CODE (x))
3706 case CONST:
3707 case SYMBOL_REF:
3708 if (sparc_tls_referenced_p (x))
3709 return false;
3710 break;
3712 case CONST_DOUBLE:
3713 if (GET_MODE (x) == VOIDmode)
3714 return true;
3716 /* Floating point constants are generally not OK.
3717 The only exceptions are 0.0 and all-ones in VIS. */
3718 if (TARGET_VIS
3719 && SCALAR_FLOAT_MODE_P (mode)
3720 && (const_zero_operand (x, mode)
3721 || const_all_ones_operand (x, mode)))
3722 return true;
3724 return false;
3726 case CONST_VECTOR:
3727 /* Vector constants are generally not OK.
3728 The only exceptions are 0 and -1 in VIS. */
3729 if (TARGET_VIS
3730 && (const_zero_operand (x, mode)
3731 || const_all_ones_operand (x, mode)))
3732 return true;
3734 return false;
3736 default:
3737 break;
3740 return true;
3743 /* Determine if a given RTX is a valid constant address. */
3745 bool
3746 constant_address_p (rtx x)
3748 switch (GET_CODE (x))
3750 case LABEL_REF:
3751 case CONST_INT:
3752 case HIGH:
3753 return true;
3755 case CONST:
3756 if (flag_pic && pic_address_needs_scratch (x))
3757 return false;
3758 return sparc_legitimate_constant_p (Pmode, x);
3760 case SYMBOL_REF:
3761 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3763 default:
3764 return false;
3768 /* Nonzero if the constant value X is a legitimate general operand
3769 when generating PIC code. It is given that flag_pic is on and
3770 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3772 bool
3773 legitimate_pic_operand_p (rtx x)
3775 if (pic_address_needs_scratch (x))
3776 return false;
3777 if (sparc_tls_referenced_p (x))
3778 return false;
3779 return true;
3782 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3783 (CONST_INT_P (X) \
3784 && INTVAL (X) >= -0x1000 \
3785 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3787 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3788 (CONST_INT_P (X) \
3789 && INTVAL (X) >= -0x1000 \
3790 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
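/* An illustrative, standalone sketch (hypothetical helper, not used by
   the compiler) of the displacement ranges these macros accept.  A
   13-bit signed immediate spans [-0x1000, 0xfff]; the upper bound is
   shrunk by the access size so the last byte still fits, and the OLO10
   variant stops at 0xc00 to leave room for the up-to-0x3ff %lo() part
   it is combined with.  */

#include <stdbool.h>

static bool
offset_ok_sketch (long offset, int mode_size)
{
  return offset >= -0x1000 && offset < 0x1000 - mode_size;
}

/* e.g. offset_ok_sketch (0xff7, 8) holds but offset_ok_sketch (0xff8, 8)
   does not: 0xff8 + 8 would run past the 13-bit window.  */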
3792 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3794 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3795 ordinarily. This changes a bit when generating PIC. */
3797 static bool
3798 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3800 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3802 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3803 rs1 = addr;
3804 else if (GET_CODE (addr) == PLUS)
3806 rs1 = XEXP (addr, 0);
3807 rs2 = XEXP (addr, 1);
3809 /* Canonicalize. REG comes first; if there are no regs,
3810 LO_SUM comes first. */
3811 if (!REG_P (rs1)
3812 && GET_CODE (rs1) != SUBREG
3813 && (REG_P (rs2)
3814 || GET_CODE (rs2) == SUBREG
3815 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3817 rs1 = XEXP (addr, 1);
3818 rs2 = XEXP (addr, 0);
3821 if ((flag_pic == 1
3822 && rs1 == pic_offset_table_rtx
3823 && !REG_P (rs2)
3824 && GET_CODE (rs2) != SUBREG
3825 && GET_CODE (rs2) != LO_SUM
3826 && GET_CODE (rs2) != MEM
3827 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3828 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3829 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3830 || ((REG_P (rs1)
3831 || GET_CODE (rs1) == SUBREG)
3832 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3834 imm1 = rs2;
3835 rs2 = NULL;
3837 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3838 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3840 /* We prohibit REG + REG for TFmode when there are no quad move insns
3841 and we consequently need to split. We do this because REG+REG
3842 is not an offsettable address. If we get the situation in reload
3843 where source and destination of a movtf pattern are both MEMs with
3844 REG+REG address, then only one of them gets converted to an
3845 offsettable address. */
3846 if (mode == TFmode
3847 && ! (TARGET_ARCH64 && TARGET_HARD_QUAD))
3848 return 0;
3850 /* Likewise for TImode, but in all cases. */
3851 if (mode == TImode)
3852 return 0;
3854 /* We prohibit REG + REG on ARCH32 if not optimizing for
3855 DFmode/DImode because then mem_min_alignment is likely to be zero
3856 after reload and the forced split would lack a matching splitter
3857 pattern. */
3858 if (TARGET_ARCH32 && !optimize
3859 && (mode == DFmode || mode == DImode))
3860 return 0;
3862 else if (USE_AS_OFFSETABLE_LO10
3863 && GET_CODE (rs1) == LO_SUM
3864 && TARGET_ARCH64
3865 && ! TARGET_CM_MEDMID
3866 && RTX_OK_FOR_OLO10_P (rs2, mode))
3868 rs2 = NULL;
3869 imm1 = XEXP (rs1, 1);
3870 rs1 = XEXP (rs1, 0);
3871 if (!CONSTANT_P (imm1)
3872 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3873 return 0;
3876 else if (GET_CODE (addr) == LO_SUM)
3878 rs1 = XEXP (addr, 0);
3879 imm1 = XEXP (addr, 1);
3881 if (!CONSTANT_P (imm1)
3882 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3883 return 0;
3885 /* We can't allow TFmode in 32-bit mode, because an offset greater
3886 than the alignment (8) may cause the LO_SUM to overflow. */
3887 if (mode == TFmode && TARGET_ARCH32)
3888 return 0;
3890 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3891 return 1;
3892 else
3893 return 0;
3895 if (GET_CODE (rs1) == SUBREG)
3896 rs1 = SUBREG_REG (rs1);
3897 if (!REG_P (rs1))
3898 return 0;
3900 if (rs2)
3902 if (GET_CODE (rs2) == SUBREG)
3903 rs2 = SUBREG_REG (rs2);
3904 if (!REG_P (rs2))
3905 return 0;
3908 if (strict)
3910 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3911 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3912 return 0;
3914 else
3916 if ((! SPARC_INT_REG_P (REGNO (rs1))
3917 && REGNO (rs1) != FRAME_POINTER_REGNUM
3918 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3919 || (rs2
3920 && (! SPARC_INT_REG_P (REGNO (rs2))
3921 && REGNO (rs2) != FRAME_POINTER_REGNUM
3922 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3923 return 0;
3925 return 1;
3928 /* Return the SYMBOL_REF for the tls_get_addr function. */
3930 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3932 static rtx
3933 sparc_tls_get_addr (void)
3935 if (!sparc_tls_symbol)
3936 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3938 return sparc_tls_symbol;
3941 /* Return the Global Offset Table to be used in TLS mode. */
3943 static rtx
3944 sparc_tls_got (void)
3946 /* In PIC mode, this is just the PIC offset table. */
3947 if (flag_pic)
3949 crtl->uses_pic_offset_table = 1;
3950 return pic_offset_table_rtx;
3953 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3954 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3955 if (TARGET_SUN_TLS && TARGET_ARCH32)
3957 load_got_register ();
3958 return global_offset_table_rtx;
3961 /* In all other cases, we load a new pseudo with the GOT symbol. */
3962 return copy_to_reg (sparc_got ());
3965 /* Return true if X contains a thread-local symbol. */
3967 static bool
3968 sparc_tls_referenced_p (rtx x)
3970 if (!TARGET_HAVE_TLS)
3971 return false;
3973 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3974 x = XEXP (XEXP (x, 0), 0);
3976 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3977 return true;
3979 /* That's all we handle in sparc_legitimize_tls_address for now. */
3980 return false;
3983 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3984 this (thread-local) address. */
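/* For instance, the 32-bit global-dynamic model roughly expands to the
   sequence below (schematic only; the exact relocation syntax is that
   of the tgd_* patterns in sparc.md):

     sethi  %tgd_hi22(sym), %g1
     add    %g1, %tgd_lo10(sym), %g1
     add    %l7, %g1, %o0, %tgd_add(sym)
     call   __tls_get_addr, %tgd_call(sym)
      nop

   with the address of SYM returned in %o0.  */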
3986 static rtx
3987 sparc_legitimize_tls_address (rtx addr)
3989 rtx temp1, temp2, temp3, ret, o0, got, insn;
3991 gcc_assert (can_create_pseudo_p ());
3993 if (GET_CODE (addr) == SYMBOL_REF)
3994 switch (SYMBOL_REF_TLS_MODEL (addr))
3996 case TLS_MODEL_GLOBAL_DYNAMIC:
3997 start_sequence ();
3998 temp1 = gen_reg_rtx (SImode);
3999 temp2 = gen_reg_rtx (SImode);
4000 ret = gen_reg_rtx (Pmode);
4001 o0 = gen_rtx_REG (Pmode, 8);
4002 got = sparc_tls_got ();
4003 emit_insn (gen_tgd_hi22 (temp1, addr));
4004 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
4005 if (TARGET_ARCH32)
4007 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
4008 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
4009 addr, const1_rtx));
4011 else
4013 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
4014 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
4015 addr, const1_rtx));
4017 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4018 insn = get_insns ();
4019 end_sequence ();
4020 emit_libcall_block (insn, ret, o0, addr);
4021 break;
4023 case TLS_MODEL_LOCAL_DYNAMIC:
4024 start_sequence ();
4025 temp1 = gen_reg_rtx (SImode);
4026 temp2 = gen_reg_rtx (SImode);
4027 temp3 = gen_reg_rtx (Pmode);
4028 ret = gen_reg_rtx (Pmode);
4029 o0 = gen_rtx_REG (Pmode, 8);
4030 got = sparc_tls_got ();
4031 emit_insn (gen_tldm_hi22 (temp1));
4032 emit_insn (gen_tldm_lo10 (temp2, temp1));
4033 if (TARGET_ARCH32)
4035 emit_insn (gen_tldm_add32 (o0, got, temp2));
4036 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
4037 const1_rtx));
4039 else
4041 emit_insn (gen_tldm_add64 (o0, got, temp2));
4042 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
4043 const1_rtx));
4045 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
4046 insn = get_insns ();
4047 end_sequence ();
4048 emit_libcall_block (insn, temp3, o0,
4049 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
4050 UNSPEC_TLSLD_BASE));
4051 temp1 = gen_reg_rtx (SImode);
4052 temp2 = gen_reg_rtx (SImode);
4053 emit_insn (gen_tldo_hix22 (temp1, addr));
4054 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
4055 if (TARGET_ARCH32)
4056 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
4057 else
4058 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
4059 break;
4061 case TLS_MODEL_INITIAL_EXEC:
4062 temp1 = gen_reg_rtx (SImode);
4063 temp2 = gen_reg_rtx (SImode);
4064 temp3 = gen_reg_rtx (Pmode);
4065 got = sparc_tls_got ();
4066 emit_insn (gen_tie_hi22 (temp1, addr));
4067 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
4068 if (TARGET_ARCH32)
4069 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
4070 else
4071 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
4072 if (TARGET_SUN_TLS)
4074 ret = gen_reg_rtx (Pmode);
4075 if (TARGET_ARCH32)
4076 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
4077 temp3, addr));
4078 else
4079 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
4080 temp3, addr));
4082 else
4083 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
4084 break;
4086 case TLS_MODEL_LOCAL_EXEC:
4087 temp1 = gen_reg_rtx (Pmode);
4088 temp2 = gen_reg_rtx (Pmode);
4089 if (TARGET_ARCH32)
4091 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
4092 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
4094 else
4096 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
4097 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
4099 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
4100 break;
4102 default:
4103 gcc_unreachable ();
4106 else if (GET_CODE (addr) == CONST)
4108 rtx base, offset;
4110 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
4112 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
4113 offset = XEXP (XEXP (addr, 0), 1);
4115 base = force_operand (base, NULL_RTX);
4116 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
4117 offset = force_reg (Pmode, offset);
4118 ret = gen_rtx_PLUS (Pmode, base, offset);
4121 else
4122 gcc_unreachable (); /* for now ... */
4124 return ret;
4127 /* Legitimize PIC addresses. If the address is already position-independent,
4128 we return ORIG. Newly generated position-independent addresses go into a
4129 reg. This is REG if nonzero, otherwise we allocate register(s) as
4130 necessary. */
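/* Schematically, with flag_pic == 1 a symbol is reached with a single
   GOT load:

     ld  [%l7 + sym], %reg

   while with flag_pic == 2 the GOT slot offset may exceed the 13-bit
   displacement, so it is built in a register first:

     sethi  %hi(sym), %tmp
     or     %tmp, %lo(sym), %tmp
     ld     [%l7 + %tmp], %reg

   (illustrative only; the actual patterns emit GOT-data relocations so
   that the linker may relax the load, see gotdata_op below).  */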
4132 static rtx
4133 sparc_legitimize_pic_address (rtx orig, rtx reg)
4135 bool gotdata_op = false;
4137 if (GET_CODE (orig) == SYMBOL_REF
4138 /* See the comment in sparc_expand_move. */
4139 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
4141 rtx pic_ref, address;
4142 rtx insn;
4144 if (reg == 0)
4146 gcc_assert (can_create_pseudo_p ());
4147 reg = gen_reg_rtx (Pmode);
4150 if (flag_pic == 2)
4152 /* If not during reload, allocate another temp reg here for loading
4153 in the address, so that these instructions can be optimized
4154 properly. */
4155 rtx temp_reg = (! can_create_pseudo_p ()
4156 ? reg : gen_reg_rtx (Pmode));
4158 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
4159 won't get confused into thinking that these two instructions
4160 are loading in the true address of the symbol. If in the
4161 future a PIC rtx exists, that should be used instead. */
4162 if (TARGET_ARCH64)
4164 emit_insn (gen_movdi_high_pic (temp_reg, orig));
4165 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
4167 else
4169 emit_insn (gen_movsi_high_pic (temp_reg, orig));
4170 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
4172 address = temp_reg;
4173 gotdata_op = true;
4175 else
4176 address = orig;
4178 crtl->uses_pic_offset_table = 1;
4179 if (gotdata_op)
4181 if (TARGET_ARCH64)
4182 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
4183 pic_offset_table_rtx,
4184 address, orig));
4185 else
4186 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
4187 pic_offset_table_rtx,
4188 address, orig));
4190 else
4192 pic_ref
4193 = gen_const_mem (Pmode,
4194 gen_rtx_PLUS (Pmode,
4195 pic_offset_table_rtx, address));
4196 insn = emit_move_insn (reg, pic_ref);
4199 /* Put a REG_EQUAL note on this insn, so that it can be optimized
4200 by the loop optimizer. */
4201 set_unique_reg_note (insn, REG_EQUAL, orig);
4202 return reg;
4204 else if (GET_CODE (orig) == CONST)
4206 rtx base, offset;
4208 if (GET_CODE (XEXP (orig, 0)) == PLUS
4209 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
4210 return orig;
4212 if (reg == 0)
4214 gcc_assert (can_create_pseudo_p ());
4215 reg = gen_reg_rtx (Pmode);
4218 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
4219 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
4220 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
4221 base == reg ? NULL_RTX : reg);
4223 if (GET_CODE (offset) == CONST_INT)
4225 if (SMALL_INT (offset))
4226 return plus_constant (Pmode, base, INTVAL (offset));
4227 else if (can_create_pseudo_p ())
4228 offset = force_reg (Pmode, offset);
4229 else
4230 /* If we reach here, then something is seriously wrong. */
4231 gcc_unreachable ();
4233 return gen_rtx_PLUS (Pmode, base, offset);
4235 else if (GET_CODE (orig) == LABEL_REF)
4236 /* ??? We ought to be checking that the register is live instead, in case
4237 it is eliminated. */
4238 crtl->uses_pic_offset_table = 1;
4240 return orig;
4243 /* Try machine-dependent ways of modifying an illegitimate address X
4244 to be legitimate. If we find one, return the new, valid address.
4246 OLDX is the address as it was before break_out_memory_refs was called.
4247 In some cases it is useful to look at this to decide what needs to be done.
4249 MODE is the mode of the operand pointed to by X.
4251 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
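/* For example, (plus (reg) (const_int 0x12345)) does not fit the 13-bit
   immediate field, so the constant is forced into a register, giving a
   legitimate REG+REG address, roughly:

     set  0x12345, %g1          ! sethi/or pair from force_operand
     ld   [%o0 + %g1], %o1      ! REG+REG is always legitimate  */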
4253 static rtx
4254 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4255 enum machine_mode mode)
4257 rtx orig_x = x;
4259 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
4260 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4261 force_operand (XEXP (x, 0), NULL_RTX));
4262 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
4263 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4264 force_operand (XEXP (x, 1), NULL_RTX));
4265 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
4266 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
4267 XEXP (x, 1));
4268 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
4269 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4270 force_operand (XEXP (x, 1), NULL_RTX));
4272 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
4273 return x;
4275 if (sparc_tls_referenced_p (x))
4276 x = sparc_legitimize_tls_address (x);
4277 else if (flag_pic)
4278 x = sparc_legitimize_pic_address (x, NULL_RTX);
4279 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
4280 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
4281 copy_to_mode_reg (Pmode, XEXP (x, 1)));
4282 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
4283 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
4284 copy_to_mode_reg (Pmode, XEXP (x, 0)));
4285 else if (GET_CODE (x) == SYMBOL_REF
4286 || GET_CODE (x) == CONST
4287 || GET_CODE (x) == LABEL_REF)
4288 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
4290 return x;
4293 /* Delegitimize an address that was legitimized by the above function. */
4295 static rtx
4296 sparc_delegitimize_address (rtx x)
4298 x = delegitimize_mem_from_attrs (x);
4300 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
4301 switch (XINT (XEXP (x, 1), 1))
4303 case UNSPEC_MOVE_PIC:
4304 case UNSPEC_TLSLE:
4305 x = XVECEXP (XEXP (x, 1), 0, 0);
4306 gcc_assert (GET_CODE (x) == SYMBOL_REF);
4307 break;
4308 default:
4309 break;
4312 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
4313 if (GET_CODE (x) == MINUS
4314 && REG_P (XEXP (x, 0))
4315 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
4316 && GET_CODE (XEXP (x, 1)) == LO_SUM
4317 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
4318 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
4320 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
4321 gcc_assert (GET_CODE (x) == LABEL_REF);
4324 return x;
4327 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
4328 replace the input X, or the original X if no replacement is called for.
4329 The output parameter *WIN is 1 if the calling macro should goto WIN,
4330 0 if it should not.
4332 For SPARC, we wish to handle addresses by splitting them into
4333 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
4334 This cuts the number of extra insns by one.
4336 Do nothing when generating PIC code and the address is a symbolic
4337 operand or requires a scratch register. */
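/* For example, a bare constant address SYM is rewritten as

     (lo_sum (high sym) sym)

   with the HIGH part pushed onto the reload queue, so the memory
   reference keeps the LO_SUM and the final code is roughly:

     sethi  %hi(sym), %g1
     ld     [%g1 + %lo(sym)], %o0  */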
4339 rtx
4340 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
4341 int opnum, int type,
4342 int ind_levels ATTRIBUTE_UNUSED, int *win)
4344 /* Decompose SImode constants into HIGH+LO_SUM. */
4345 if (CONSTANT_P (x)
4346 && (mode != TFmode || TARGET_ARCH64)
4347 && GET_MODE (x) == SImode
4348 && GET_CODE (x) != LO_SUM
4349 && GET_CODE (x) != HIGH
4350 && sparc_cmodel <= CM_MEDLOW
4351 && !(flag_pic
4352 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
4354 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
4355 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4356 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4357 opnum, (enum reload_type)type);
4358 *win = 1;
4359 return x;
4362 /* We have to recognize what we have already generated above. */
4363 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
4365 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
4366 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
4367 opnum, (enum reload_type)type);
4368 *win = 1;
4369 return x;
4372 *win = 0;
4373 return x;
4376 /* Return true if ADDR (a legitimate address expression)
4377 has an effect that depends on the machine mode it is used for.
4379 In PIC mode,
4381 (mem:HI [%l7+a])
4383 is not equivalent to
4385 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
4387 because [%l7+a+1] is interpreted as the address of (a+1). */
4390 static bool
4391 sparc_mode_dependent_address_p (const_rtx addr,
4392 addr_space_t as ATTRIBUTE_UNUSED)
4394 if (flag_pic && GET_CODE (addr) == PLUS)
4396 rtx op0 = XEXP (addr, 0);
4397 rtx op1 = XEXP (addr, 1);
4398 if (op0 == pic_offset_table_rtx
4399 && symbolic_operand (op1, VOIDmode))
4400 return true;
4403 return false;
4406 #ifdef HAVE_GAS_HIDDEN
4407 # define USE_HIDDEN_LINKONCE 1
4408 #else
4409 # define USE_HIDDEN_LINKONCE 0
4410 #endif
4412 static void
4413 get_pc_thunk_name (char name[32], unsigned int regno)
4415 const char *reg_name = reg_names[regno];
4417 /* Skip the leading '%' as that cannot be used in a
4418 symbol name. */
4419 reg_name += 1;
4421 if (USE_HIDDEN_LINKONCE)
4422 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4423 else
4424 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
4427 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4429 static rtx
4430 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4432 int orig_flag_pic = flag_pic;
4433 rtx insn;
4435 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4436 flag_pic = 0;
4437 if (TARGET_ARCH64)
4438 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4439 else
4440 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4441 flag_pic = orig_flag_pic;
4443 return insn;
4446 /* Emit code to load the GOT register. */
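/* In the common (non-VxWorks) case, the code emitted here is roughly:

     sethi  %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
     call   __sparc_get_pc_thunk.l7
      add   %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk adds %o7, i.e. the PC of the call, into %l7
   (schematic; the +/-4 adjustments account for the distance between
   the sethi and the call).  */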
4448 void
4449 load_got_register (void)
4451 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4452 if (!global_offset_table_rtx)
4453 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4455 if (TARGET_VXWORKS_RTP)
4456 emit_insn (gen_vxworks_load_got ());
4457 else
4459 /* The GOT symbol is subject to a PC-relative relocation so we need a
4460 helper function to add the PC value and thus get the final value. */
4461 if (!got_helper_rtx)
4463 char name[32];
4464 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4465 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4468 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4469 got_helper_rtx,
4470 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4473 /* Need to emit this whether or not we obey regdecls,
4474 since setjmp/longjmp can cause life info to screw up.
4475 ??? In the case where we don't obey regdecls, this is not sufficient
4476 since we may not fall out the bottom. */
4477 emit_use (global_offset_table_rtx);
4480 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4481 address of the call target. */
4483 void
4484 sparc_emit_call_insn (rtx pat, rtx addr)
4486 rtx insn;
4488 insn = emit_call_insn (pat);
4490 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4491 if (TARGET_VXWORKS_RTP
4492 && flag_pic
4493 && GET_CODE (addr) == SYMBOL_REF
4494 && (SYMBOL_REF_DECL (addr)
4495 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4496 : !SYMBOL_REF_LOCAL_P (addr)))
4498 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4499 crtl->uses_pic_offset_table = 1;
4503 /* Return 1 if RTX is a MEM which is known to be aligned to at
4504 least a DESIRED byte boundary. */
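/* For example, in 64-bit mode where SPARC_STACK_BIAS is 2047, the
   address %sp + 2175 really designates offset 128 from the unbiased
   stack pointer, so for DESIRED == 8 the test below checks that
   (2175 - 2047) & 7 == 0 and reports the access as aligned.  */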
4506 int
4507 mem_min_alignment (rtx mem, int desired)
4509 rtx addr, base, offset;
4511 /* If it's not a MEM we can't accept it. */
4512 if (GET_CODE (mem) != MEM)
4513 return 0;
4515 /* Obviously... */
4516 if (!TARGET_UNALIGNED_DOUBLES
4517 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4518 return 1;
4520 /* ??? The rest of the function predates MEM_ALIGN so
4521 there is probably a bit of redundancy. */
4522 addr = XEXP (mem, 0);
4523 base = offset = NULL_RTX;
4524 if (GET_CODE (addr) == PLUS)
4526 if (GET_CODE (XEXP (addr, 0)) == REG)
4528 base = XEXP (addr, 0);
4530 /* What we are saying here is that if the base
4531 REG is aligned properly, the compiler will make
4532 sure any REG based index upon it will be so
4533 as well. */
4534 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4535 offset = XEXP (addr, 1);
4536 else
4537 offset = const0_rtx;
4540 else if (GET_CODE (addr) == REG)
4542 base = addr;
4543 offset = const0_rtx;
4546 if (base != NULL_RTX)
4548 int regno = REGNO (base);
4550 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4552 /* Check if the compiler has recorded some information
4553 about the alignment of the base REG. If reload has
4554 completed, we already matched with proper alignments.
4555 If not running global_alloc, reload might give us
4556 unaligned pointer to local stack though. */
4557 if (((cfun != 0
4558 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4559 || (optimize && reload_completed))
4560 && (INTVAL (offset) & (desired - 1)) == 0)
4561 return 1;
4563 else
4565 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4566 return 1;
4569 else if (! TARGET_UNALIGNED_DOUBLES
4570 || CONSTANT_P (addr)
4571 || GET_CODE (addr) == LO_SUM)
4573 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4574 is true, in which case we can only assume that an access is aligned if
4575 it is to a constant address, or the address involves a LO_SUM. */
4576 return 1;
4579 /* An obviously unaligned address. */
4580 return 0;
4584 /* Vectors to keep interesting information about registers where it can easily
4585 be got. We used to use the actual mode value as the bit number, but there
4586 are more than 32 modes now. Instead we use two tables: one indexed by
4587 hard register number, and one indexed by mode. */
4589 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4590 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4591 mapped into one sparc_mode_class mode. */
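/* For example, DImode is an 8-byte MODE_INT mode, so sparc_init_modes
   below maps it to 1 << D_MODE; whether a given hard register can hold
   a DImode value then reduces to a single bit test, roughly

     (hard_regno_mode_classes[regno] & sparc_mode_class[DImode]) != 0

   which is essentially what the HARD_REGNO_MODE_OK macro does.  */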
4593 enum sparc_mode_class {
4594 H_MODE, S_MODE, D_MODE, T_MODE, O_MODE,
4595 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4596 CC_MODE, CCFP_MODE
4599 /* Modes for single-word and smaller quantities. */
4600 #define S_MODES \
4601 ((1 << (int) H_MODE) | (1 << (int) S_MODE) | (1 << (int) SF_MODE))
4603 /* Modes for double-word and smaller quantities. */
4604 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4606 /* Modes for quad-word and smaller quantities. */
4607 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4609 /* Modes for 8-word and smaller quantities. */
4610 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4612 /* Modes for single-float quantities. */
4613 #define SF_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4615 /* Modes for double-float and smaller quantities. */
4616 #define DF_MODES (SF_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4618 /* Modes for quad-float and smaller quantities. */
4619 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4621 /* Modes for quad-float pairs and smaller quantities. */
4622 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4624 /* Modes for double-float only quantities. */
4625 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4627 /* Modes for quad-float and double-float only quantities. */
4628 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4630 /* Modes for quad-float pairs and double-float only quantities. */
4631 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4633 /* Modes for condition codes. */
4634 #define CC_MODES (1 << (int) CC_MODE)
4635 #define CCFP_MODES (1 << (int) CCFP_MODE)
4637 /* Value is 1 if register/mode pair is acceptable on sparc.
4638 The funny mixture of D and T modes is because integer operations
4639 do not specially operate on tetra quantities, so non-quad-aligned
4640 registers can hold quadword quantities (except %o4 and %i4 because
4641 they cross fixed registers). */
4643 /* This points to either the 32 bit or the 64 bit version. */
4644 const int *hard_regno_mode_classes;
4646 static const int hard_32bit_mode_classes[] = {
4647 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4648 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4649 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4650 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4652 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4653 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4654 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4655 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4657 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4658 and none can hold SFmode/SImode values. */
4659 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4660 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4661 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4662 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4664 /* %fcc[0123] */
4665 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4667 /* %icc, %sfp, %gsr */
4668 CC_MODES, 0, D_MODES
4671 static const int hard_64bit_mode_classes[] = {
4672 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4673 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4674 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4675 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4677 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4678 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4679 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4680 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4682 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4683 and none can hold SFmode/SImode values. */
4684 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4685 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4686 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4687 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4689 /* %fcc[0123] */
4690 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4692 /* %icc, %sfp, %gsr */
4693 CC_MODES, 0, D_MODES
4696 int sparc_mode_class [NUM_MACHINE_MODES];
4698 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4700 static void
4701 sparc_init_modes (void)
4703 int i;
4705 for (i = 0; i < NUM_MACHINE_MODES; i++)
4707 switch (GET_MODE_CLASS (i))
4709 case MODE_INT:
4710 case MODE_PARTIAL_INT:
4711 case MODE_COMPLEX_INT:
4712 if (GET_MODE_SIZE (i) < 4)
4713 sparc_mode_class[i] = 1 << (int) H_MODE;
4714 else if (GET_MODE_SIZE (i) == 4)
4715 sparc_mode_class[i] = 1 << (int) S_MODE;
4716 else if (GET_MODE_SIZE (i) == 8)
4717 sparc_mode_class[i] = 1 << (int) D_MODE;
4718 else if (GET_MODE_SIZE (i) == 16)
4719 sparc_mode_class[i] = 1 << (int) T_MODE;
4720 else if (GET_MODE_SIZE (i) == 32)
4721 sparc_mode_class[i] = 1 << (int) O_MODE;
4722 else
4723 sparc_mode_class[i] = 0;
4724 break;
4725 case MODE_VECTOR_INT:
4726 if (GET_MODE_SIZE (i) == 4)
4727 sparc_mode_class[i] = 1 << (int) SF_MODE;
4728 else if (GET_MODE_SIZE (i) == 8)
4729 sparc_mode_class[i] = 1 << (int) DF_MODE;
4730 else
4731 sparc_mode_class[i] = 0;
4732 break;
4733 case MODE_FLOAT:
4734 case MODE_COMPLEX_FLOAT:
4735 if (GET_MODE_SIZE (i) == 4)
4736 sparc_mode_class[i] = 1 << (int) SF_MODE;
4737 else if (GET_MODE_SIZE (i) == 8)
4738 sparc_mode_class[i] = 1 << (int) DF_MODE;
4739 else if (GET_MODE_SIZE (i) == 16)
4740 sparc_mode_class[i] = 1 << (int) TF_MODE;
4741 else if (GET_MODE_SIZE (i) == 32)
4742 sparc_mode_class[i] = 1 << (int) OF_MODE;
4743 else
4744 sparc_mode_class[i] = 0;
4745 break;
4746 case MODE_CC:
4747 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4748 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4749 else
4750 sparc_mode_class[i] = 1 << (int) CC_MODE;
4751 break;
4752 default:
4753 sparc_mode_class[i] = 0;
4754 break;
4758 if (TARGET_ARCH64)
4759 hard_regno_mode_classes = hard_64bit_mode_classes;
4760 else
4761 hard_regno_mode_classes = hard_32bit_mode_classes;
4763 /* Initialize the array used by REGNO_REG_CLASS. */
4764 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4766 if (i < 16 && TARGET_V8PLUS)
4767 sparc_regno_reg_class[i] = I64_REGS;
4768 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4769 sparc_regno_reg_class[i] = GENERAL_REGS;
4770 else if (i < 64)
4771 sparc_regno_reg_class[i] = FP_REGS;
4772 else if (i < 96)
4773 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4774 else if (i < 100)
4775 sparc_regno_reg_class[i] = FPCC_REGS;
4776 else
4777 sparc_regno_reg_class[i] = NO_REGS;
4781 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4783 static inline bool
4784 save_global_or_fp_reg_p (unsigned int regno,
4785 int leaf_function ATTRIBUTE_UNUSED)
4787 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4790 /* Return whether the return address register (%i7) is needed. */
4792 static inline bool
4793 return_addr_reg_needed_p (int leaf_function)
4795 /* If it is live, for example because of __builtin_return_address (0). */
4796 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4797 return true;
4799 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4800 if (!leaf_function
4801 /* Loading the GOT register clobbers %o7. */
4802 || crtl->uses_pic_offset_table
4803 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4804 return true;
4806 return false;
4809 /* Return whether REGNO, a local or in register, must be saved/restored. */
4811 static bool
4812 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4814 /* General case: call-saved registers live at some point. */
4815 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4816 return true;
4818 /* Frame pointer register (%fp) if needed. */
4819 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4820 return true;
4822 /* Return address register (%i7) if needed. */
4823 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4824 return true;
4826 /* GOT register (%l7) if needed. */
4827 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4828 return true;
4830 /* If the function accesses prior frames, the frame pointer and the return
4831 address of the previous frame must be saved on the stack. */
4832 if (crtl->accesses_prior_frames
4833 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4834 return true;
4836 return false;
4839 /* Compute the frame size required by the function. This function is called
4840 during the reload pass and also by sparc_expand_prologue. */
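/* Schematically, the frame computed below is laid out as follows,
   growing downwards from %fp (sketch only, 32-bit case):

     %fp -> +-------------------------------+
            | local variables (SIZE)        |
            | saved global/FP registers     |
            +-------------------------------+
            | outgoing arguments            |
            | register window save area     |
     %sp -> +-------------------------------+

   with each piece rounded to a double-word boundary and the total
   bumped to SPARC_STACK_ALIGN.  */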
4842 HOST_WIDE_INT
4843 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4845 HOST_WIDE_INT frame_size, apparent_frame_size;
4846 int args_size, n_global_fp_regs = 0;
4847 bool save_local_in_regs_p = false;
4848 unsigned int i;
4850 /* If the function allocates dynamic stack space, the dynamic offset is
4851 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4852 if (leaf_function && !cfun->calls_alloca)
4853 args_size = 0;
4854 else
4855 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4857 /* Calculate space needed for global registers. */
4858 if (TARGET_ARCH64)
4859 for (i = 0; i < 8; i++)
4860 if (save_global_or_fp_reg_p (i, 0))
4861 n_global_fp_regs += 2;
4862 else
4863 for (i = 0; i < 8; i += 2)
4864 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4865 n_global_fp_regs += 2;
4867 /* In the flat window model, find out which local and in registers need to
4868 be saved. We don't reserve space in the current frame for them as they
4869 will be spilled into the register window save area of the caller's frame.
4870 However, as soon as we use this register window save area, we must create
4871 that of the current frame to make it the live one. */
4872 if (TARGET_FLAT)
4873 for (i = 16; i < 32; i++)
4874 if (save_local_or_in_reg_p (i, leaf_function))
4876 save_local_in_regs_p = true;
4877 break;
4880 /* Calculate space needed for FP registers. */
4881 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4882 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4883 n_global_fp_regs += 2;
4885 if (size == 0
4886 && n_global_fp_regs == 0
4887 && args_size == 0
4888 && !save_local_in_regs_p)
4889 frame_size = apparent_frame_size = 0;
4890 else
4892 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4893 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4894 apparent_frame_size += n_global_fp_regs * 4;
4896 /* We need to add the size of the outgoing argument area. */
4897 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4899 /* And that of the register window save area. */
4900 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4902 /* Finally, bump to the appropriate alignment. */
4903 frame_size = SPARC_STACK_ALIGN (frame_size);
4906 /* Set up values for use in prologue and epilogue. */
4907 sparc_frame_size = frame_size;
4908 sparc_apparent_frame_size = apparent_frame_size;
4909 sparc_n_global_fp_regs = n_global_fp_regs;
4910 sparc_save_local_in_regs_p = save_local_in_regs_p;
4912 return frame_size;
4915 /* Implement the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET. */
4917 int
4918 sparc_initial_elimination_offset (int to)
4920 int offset;
4922 if (to == STACK_POINTER_REGNUM)
4923 offset = sparc_compute_frame_size (get_frame_size (), crtl->is_leaf);
4924 else
4925 offset = 0;
4927 offset += SPARC_STACK_BIAS;
4928 return offset;
4931 /* Output any necessary .register pseudo-ops. */
4933 void
4934 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4936 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4937 int i;
4939 if (TARGET_ARCH32)
4940 return;
4942 /* Check if %g[2367] were used without
4943 .register being printed for them already. */
4944 for (i = 2; i < 8; i++)
4946 if (df_regs_ever_live_p (i)
4947 && ! sparc_hard_reg_printed [i])
4949 sparc_hard_reg_printed [i] = 1;
4950 /* %g7 is used as TLS base register, use #ignore
4951 for it instead of #scratch. */
4952 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4953 i == 7 ? "ignore" : "scratch");
4955 if (i == 3) i = 5;
4957 #endif
4960 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4962 #if PROBE_INTERVAL > 4096
4963 #error Cannot use indexed addressing mode for stack probing
4964 #endif
4966 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4967 inclusive. These are offsets from the current stack pointer.
4969 Note that we don't use the REG+REG addressing mode for the probes because
4970 of the stack bias in 64-bit mode. And it doesn't really buy us anything
4971 so the advantage of having a single code path wins here. */
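/* For example, with the default 4096-byte PROBE_INTERVAL, protecting a
   20 KB area starting at FIRST emits probes at FIRST+4096, FIRST+8192,
   FIRST+12288, FIRST+16384 and finally at FIRST+SIZE; the cases below
   merely trade unrolled code size against a run-time loop.  */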
4973 static void
4974 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4976 rtx g1 = gen_rtx_REG (Pmode, 1);
4978 /* See if we have a constant small number of probes to generate. If so,
4979 that's the easy case. */
4980 if (size <= PROBE_INTERVAL)
4982 emit_move_insn (g1, GEN_INT (first));
4983 emit_insn (gen_rtx_SET (VOIDmode, g1,
4984 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4985 emit_stack_probe (plus_constant (Pmode, g1, -size));
4988 /* The run-time loop is made up of 10 insns in the generic case while the
4989 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4990 else if (size <= 5 * PROBE_INTERVAL)
4992 HOST_WIDE_INT i;
4994 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4995 emit_insn (gen_rtx_SET (VOIDmode, g1,
4996 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4997 emit_stack_probe (g1);
4999 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
5000 it exceeds SIZE. If only two probes are needed, this will not
5001 generate any code. Then probe at FIRST + SIZE. */
5002 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
5004 emit_insn (gen_rtx_SET (VOIDmode, g1,
5005 plus_constant (Pmode, g1, -PROBE_INTERVAL)));
5006 emit_stack_probe (g1);
5009 emit_stack_probe (plus_constant (Pmode, g1,
5010 (i - PROBE_INTERVAL) - size));
5013 /* Otherwise, do the same as above, but in a loop. Note that we must be
5014 extra careful with variables wrapping around because we might be at
5015 the very top (or the very bottom) of the address space and we have
5016 to be able to handle this case properly; in particular, we use an
5017 equality test for the loop condition. */
5018 else
5020 HOST_WIDE_INT rounded_size;
5021 rtx g4 = gen_rtx_REG (Pmode, 4);
5023 emit_move_insn (g1, GEN_INT (first));
5026 /* Step 1: round SIZE to the previous multiple of the interval. */
5028 rounded_size = size & -PROBE_INTERVAL;
5029 emit_move_insn (g4, GEN_INT (rounded_size));
5032 /* Step 2: compute initial and final value of the loop counter. */
5034 /* TEST_ADDR = SP + FIRST. */
5035 emit_insn (gen_rtx_SET (VOIDmode, g1,
5036 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
5038 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
5039 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
5042 /* Step 3: the loop
5044 while (TEST_ADDR != LAST_ADDR)
5046 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
5047 probe at TEST_ADDR
5050 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
5051 until it is equal to ROUNDED_SIZE. */
5053 if (TARGET_ARCH64)
5054 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
5055 else
5056 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
5059 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
5060 that SIZE is equal to ROUNDED_SIZE. */
5062 if (size != rounded_size)
5063 emit_stack_probe (plus_constant (Pmode, g4, rounded_size - size));
5066 /* Make sure nothing is scheduled before we are done. */
5067 emit_insn (gen_blockage ());
5070 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
5071 absolute addresses. */
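/* With the default 4096-byte interval, the emitted loop schematically
   reads (label names as generated below):

     .LPSRL0:
             cmp   %g1, %g4          ! TEST_ADDR == LAST_ADDR ?
             be    .LPSRE0           ! be,pn %xcc in 64-bit mode
              add  %g1, -4096, %g1   ! advance in the delay slot
             ba    .LPSRL0
              st   %g0, [%g1+bias]   ! probe in the other delay slot
     .LPSRE0:

   where bias is SPARC_STACK_BIAS.  */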
5073 const char *
5074 output_probe_stack_range (rtx reg1, rtx reg2)
5076 static int labelno = 0;
5077 char loop_lab[32], end_lab[32];
5078 rtx xops[2];
5080 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
5081 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
5083 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
5085 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
5086 xops[0] = reg1;
5087 xops[1] = reg2;
5088 output_asm_insn ("cmp\t%0, %1", xops);
5089 if (TARGET_ARCH64)
5090 fputs ("\tbe,pn\t%xcc,", asm_out_file);
5091 else
5092 fputs ("\tbe\t", asm_out_file);
5093 assemble_name_raw (asm_out_file, end_lab);
5094 fputc ('\n', asm_out_file);
5096 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
5097 xops[1] = GEN_INT (-PROBE_INTERVAL);
5098 output_asm_insn (" add\t%0, %1, %0", xops);
5100 /* Probe at TEST_ADDR and branch. */
5101 if (TARGET_ARCH64)
5102 fputs ("\tba,pt\t%xcc,", asm_out_file);
5103 else
5104 fputs ("\tba\t", asm_out_file);
5105 assemble_name_raw (asm_out_file, loop_lab);
5106 fputc ('\n', asm_out_file);
5107 xops[1] = GEN_INT (SPARC_STACK_BIAS);
5108 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
5110 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
5112 return "";
5115 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
5116 needed. LOW is supposed to be double-word aligned for 32-bit registers.
5117 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
5118 is the action to be performed if SAVE_P returns true and ACTION_FALSE
5119 the action to be performed if it returns false. Return the new offset. */
5121 typedef bool (*sorr_pred_t) (unsigned int, int);
5122 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
5124 static int
5125 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
5126 int offset, int leaf_function, sorr_pred_t save_p,
5127 sorr_act_t action_true, sorr_act_t action_false)
5129 unsigned int i;
5130 rtx mem, insn;
5132 if (TARGET_ARCH64 && high <= 32)
5134 int fp_offset = -1;
5136 for (i = low; i < high; i++)
5138 if (save_p (i, leaf_function))
5140 mem = gen_frame_mem (DImode, plus_constant (Pmode,
5141 base, offset));
5142 if (action_true == SORR_SAVE)
5144 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
5145 RTX_FRAME_RELATED_P (insn) = 1;
5147 else /* action_true == SORR_RESTORE */
5149 /* The frame pointer must be restored last since its old
5150 value may be used as base address for the frame. This
5151 is problematic in 64-bit mode only because of the lack
5152 of double-word load instruction. */
5153 if (i == HARD_FRAME_POINTER_REGNUM)
5154 fp_offset = offset;
5155 else
5156 emit_move_insn (gen_rtx_REG (DImode, i), mem);
5158 offset += 8;
5160 else if (action_false == SORR_ADVANCE)
5161 offset += 8;
5164 if (fp_offset >= 0)
5166 mem = gen_frame_mem (DImode, plus_constant (Pmode, base, fp_offset));
5167 emit_move_insn (hard_frame_pointer_rtx, mem);
5170 else
5172 for (i = low; i < high; i += 2)
5174 bool reg0 = save_p (i, leaf_function);
5175 bool reg1 = save_p (i + 1, leaf_function);
5176 enum machine_mode mode;
5177 int regno;
5179 if (reg0 && reg1)
5181 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
5182 regno = i;
5184 else if (reg0)
5186 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
5187 regno = i;
5189 else if (reg1)
5191 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
5192 regno = i + 1;
5193 offset += 4;
5195 else
5197 if (action_false == SORR_ADVANCE)
5198 offset += 8;
5199 continue;
5202 mem = gen_frame_mem (mode, plus_constant (Pmode, base, offset));
5203 if (action_true == SORR_SAVE)
5205 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
5206 RTX_FRAME_RELATED_P (insn) = 1;
5207 if (mode == DImode)
5209 rtx set1, set2;
5210 mem = gen_frame_mem (SImode, plus_constant (Pmode, base,
5211 offset));
5212 set1 = gen_rtx_SET (VOIDmode, mem,
5213 gen_rtx_REG (SImode, regno));
5214 RTX_FRAME_RELATED_P (set1) = 1;
5215 mem
5216 = gen_frame_mem (SImode, plus_constant (Pmode, base,
5217 offset + 4));
5218 set2 = gen_rtx_SET (VOIDmode, mem,
5219 gen_rtx_REG (SImode, regno + 1));
5220 RTX_FRAME_RELATED_P (set2) = 1;
5221 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5222 gen_rtx_PARALLEL (VOIDmode,
5223 gen_rtvec (2, set1, set2)));
5226 else /* action_true == SORR_RESTORE */
5227 emit_move_insn (gen_rtx_REG (mode, regno), mem);
5229 /* Always preserve double-word alignment. */
5230 offset = (offset + 8) & -8;
5234 return offset;
5237 /* Emit code to adjust BASE to OFFSET. Return the new base. */
5239 static rtx
5240 emit_adjust_base_to_offset (rtx base, int offset)
5242 /* ??? This might be optimized a little as %g1 might already have a
5243 value close enough that a single add insn will do. */
5244 /* ??? Although, all of this is probably only a temporary fix because
5245 if %g1 can hold a function result, then sparc_expand_epilogue will
5246 lose (the result will be clobbered). */
5247 rtx new_base = gen_rtx_REG (Pmode, 1);
5248 emit_move_insn (new_base, GEN_INT (offset));
5249 emit_insn (gen_rtx_SET (VOIDmode,
5250 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
5251 return new_base;
5254 /* Emit code to save/restore call-saved global and FP registers. */
5256 static void
5257 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
5259 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
5261 base = emit_adjust_base_to_offset (base, offset);
5262 offset = 0;
5265 offset
5266 = emit_save_or_restore_regs (0, 8, base, offset, 0,
5267 save_global_or_fp_reg_p, action, SORR_NONE);
5268 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
5269 save_global_or_fp_reg_p, action, SORR_NONE);
5272 /* Emit code to save/restore call-saved local and in registers. */
5274 static void
5275 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
5277 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
5279 base = emit_adjust_base_to_offset (base, offset);
5280 offset = 0;
5283 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
5284 save_local_or_in_reg_p, action, SORR_ADVANCE);
5287 /* Emit a window_save insn. */
5289 static rtx
5290 emit_window_save (rtx increment)
5292 rtx insn = emit_insn (gen_window_save (increment));
5293 RTX_FRAME_RELATED_P (insn) = 1;
5295 /* The incoming return address (%o7) is saved in %i7. */
5296 add_reg_note (insn, REG_CFA_REGISTER,
5297 gen_rtx_SET (VOIDmode,
5298 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
5299 gen_rtx_REG (Pmode,
5300 INCOMING_RETURN_ADDR_REGNUM)));
5302 /* The window save event. */
5303 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
5305 /* The CFA is %fp, the hard frame pointer. */
5306 add_reg_note (insn, REG_CFA_DEF_CFA,
5307 plus_constant (Pmode, hard_frame_pointer_rtx,
5308 INCOMING_FRAME_SP_OFFSET));
5310 return insn;
5313 /* Generate an increment for the stack pointer. */
5315 static rtx
5316 gen_stack_pointer_inc (rtx increment)
5318 return gen_rtx_SET (VOIDmode,
5319 stack_pointer_rtx,
5320 gen_rtx_PLUS (Pmode,
5321 stack_pointer_rtx,
5322 increment));
5325 /* Expand the function prologue. The prologue is responsible for reserving
5326 storage for the frame, saving the call-saved registers and loading the
5327 GOT register if needed. */
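/* For instance, a leaf function with a 4104-byte frame, falling in the
   4096 < size <= 8192 range below, establishes its frame as

     add  %sp, -4096, %sp
     add  %sp, -8, %sp     ! %sp is still the CFA after the first add

   while larger frames go through %g1 and non-leaf functions allocate
   the frame with a `save %sp, -SIZE, %sp' via the window_save pattern
   (schematic only).  */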
5329 void
5330 sparc_expand_prologue (void)
5332 HOST_WIDE_INT size;
5333 rtx insn;
5335 /* Compute a snapshot of crtl->uses_only_leaf_regs. Relying
5336 on the final value of the flag means deferring the prologue/epilogue
5337 expansion until just before the second scheduling pass, which is too
5338 late to emit multiple epilogues or return insns.
5340 Of course we are making the assumption that the value of the flag
5341 will not change between now and its final value. Of the three parts
5342 of the formula, only the last one can reasonably vary. Let's take a
5343 closer look, after assuming that the first two ones are set to true
5344 (otherwise the last value is effectively silenced).
5346 If only_leaf_regs_used returns false, the global predicate will also
5347 be false so the actual frame size calculated below will be positive.
5348 As a consequence, the save_register_window insn will be emitted in
5349 the instruction stream; now this insn explicitly references %fp
5350 which is not a leaf register so only_leaf_regs_used will always
5351 return false subsequently.
5353 If only_leaf_regs_used returns true, we hope that the subsequent
5354 optimization passes won't cause non-leaf registers to pop up. For
5355 example, the regrename pass has special provisions to not rename to
5356 non-leaf registers in a leaf function. */
5357 sparc_leaf_function_p
5358 = optimize > 0 && crtl->is_leaf && only_leaf_regs_used ();
5360 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5362 if (flag_stack_usage_info)
5363 current_function_static_stack_size = size;
5365 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5367 if (crtl->is_leaf && !cfun->calls_alloca)
5369 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
5370 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
5371 size - STACK_CHECK_PROTECT);
5373 else if (size > 0)
5374 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5377 if (size == 0)
5378 ; /* do nothing. */
5379 else if (sparc_leaf_function_p)
5381 rtx size_int_rtx = GEN_INT (-size);
5383 if (size <= 4096)
5384 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5385 else if (size <= 8192)
5387 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5388 RTX_FRAME_RELATED_P (insn) = 1;
5390 /* %sp is still the CFA register. */
5391 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5393 else
5395 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5396 emit_move_insn (size_rtx, size_int_rtx);
5397 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5398 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
5399 gen_stack_pointer_inc (size_int_rtx));
5402 RTX_FRAME_RELATED_P (insn) = 1;
5404 else
5406 rtx size_int_rtx = GEN_INT (-size);
5408 if (size <= 4096)
5409 emit_window_save (size_int_rtx);
5410 else if (size <= 8192)
5412 emit_window_save (GEN_INT (-4096));
5414 /* %sp is not the CFA register anymore. */
5415 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5417 /* Make sure no %fp-based store is issued until after the frame is
5418 established. The offset between the frame pointer and the stack
5419 pointer is calculated relative to the value of the stack pointer
5420 at the end of the function prologue, and moving instructions that
5421 access the stack via the frame pointer between the instructions
5422 that decrement the stack pointer could result in accessing the
5423 register window save area, which is volatile. */
5424 emit_insn (gen_frame_blockage ());
5426 else
5428 rtx size_rtx = gen_rtx_REG (Pmode, 1);
5429 emit_move_insn (size_rtx, size_int_rtx);
5430 emit_window_save (size_rtx);
5434 if (sparc_leaf_function_p)
5436 sparc_frame_base_reg = stack_pointer_rtx;
5437 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5439 else
5441 sparc_frame_base_reg = hard_frame_pointer_rtx;
5442 sparc_frame_base_offset = SPARC_STACK_BIAS;
5445 if (sparc_n_global_fp_regs > 0)
5446 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5447 sparc_frame_base_offset
5448 - sparc_apparent_frame_size,
5449 SORR_SAVE);
5451 /* Load the GOT register if needed. */
5452 if (crtl->uses_pic_offset_table)
5453 load_got_register ();
5455 /* Advertise that the data calculated just above are now valid. */
5456 sparc_prologue_data_valid_p = true;
5459 /* Expand the function prologue. The prologue is responsible for reserving
5460 storage for the frame, saving the call-saved registers and loading the
5461 GOT register if needed. */
5463 void
5464 sparc_flat_expand_prologue (void)
5466 HOST_WIDE_INT size;
5467 rtx insn;
5469 sparc_leaf_function_p = optimize > 0 && crtl->is_leaf;
5471 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5473 if (flag_stack_usage_info)
5474 current_function_static_stack_size = size;
5476 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
5478 if (crtl->is_leaf && !cfun->calls_alloca)
5480 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
5481 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT,
5482 size - STACK_CHECK_PROTECT);
5484 else if (size > 0)
5485 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5488 if (sparc_save_local_in_regs_p)
5489 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5490 SORR_SAVE);
5492 if (size == 0)
5493 ; /* do nothing. */
5494 else
5496 rtx size_int_rtx, size_rtx;
5498 size_rtx = size_int_rtx = GEN_INT (-size);
5500 /* We establish the frame (i.e. decrement the stack pointer) first, even
5501 if we use a frame pointer, because we cannot clobber any call-saved
5502 registers, including the frame pointer, if we haven't created a new
5503 register save area, for the sake of compatibility with the ABI. */
5504 if (size <= 4096)
5505 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5506 else if (size <= 8192 && !frame_pointer_needed)
5508 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5509 RTX_FRAME_RELATED_P (insn) = 1;
5510 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5512 else
5514 size_rtx = gen_rtx_REG (Pmode, 1);
5515 emit_move_insn (size_rtx, size_int_rtx);
5516 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5517 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5518 gen_stack_pointer_inc (size_int_rtx));
5520 RTX_FRAME_RELATED_P (insn) = 1;
5522 /* Ensure nothing is scheduled until after the frame is established. */
5523 emit_insn (gen_blockage ());
5525 if (frame_pointer_needed)
5527 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5528 gen_rtx_MINUS (Pmode,
5529 stack_pointer_rtx,
5530 size_rtx)));
5531 RTX_FRAME_RELATED_P (insn) = 1;
5533 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5534 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5535 plus_constant (Pmode, stack_pointer_rtx,
5536 size)));
5539 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5541 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5542 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5544 insn = emit_move_insn (i7, o7);
5545 RTX_FRAME_RELATED_P (insn) = 1;
5547 add_reg_note (insn, REG_CFA_REGISTER,
5548 gen_rtx_SET (VOIDmode, i7, o7));
5550 /* Prevent this instruction from ever being considered dead,
5551 even if this function has no epilogue. */
5552 emit_use (i7);
5556 if (frame_pointer_needed)
5558 sparc_frame_base_reg = hard_frame_pointer_rtx;
5559 sparc_frame_base_offset = SPARC_STACK_BIAS;
5561 else
5563 sparc_frame_base_reg = stack_pointer_rtx;
5564 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5567 if (sparc_n_global_fp_regs > 0)
5568 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5569 sparc_frame_base_offset
5570 - sparc_apparent_frame_size,
5571 SORR_SAVE);
5573 /* Load the GOT register if needed. */
5574 if (crtl->uses_pic_offset_table)
5575 load_got_register ();
5577 /* Advertise that the data calculated just above are now valid. */
5578 sparc_prologue_data_valid_p = true;
5581 /* This function generates the assembly code for function entry, which boils
5582 down to emitting the necessary .register directives. */
5584 static void
5585 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5587 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5588 if (!TARGET_FLAT)
5589 gcc_assert (sparc_leaf_function_p == crtl->uses_only_leaf_regs);
5591 sparc_output_scratch_registers (file);
5594 /* Expand the function epilogue, either normal or part of a sibcall.
5595 We emit all the instructions except the return or the call. */
5597 void
5598 sparc_expand_epilogue (bool for_eh)
5600 HOST_WIDE_INT size = sparc_frame_size;
5602 if (sparc_n_global_fp_regs > 0)
5603 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5604 sparc_frame_base_offset
5605 - sparc_apparent_frame_size,
5606 SORR_RESTORE);
5608 if (size == 0 || for_eh)
5609 ; /* do nothing. */
5610 else if (sparc_leaf_function_p)
5612 if (size <= 4096)
5613 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
5614 else if (size <= 8192)
5616 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5617 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
5619 else
5621 rtx reg = gen_rtx_REG (Pmode, 1);
5622 emit_move_insn (reg, GEN_INT (size));
5623 emit_insn (gen_stack_pointer_inc (reg));
5628 /* Expand the function epilogue, either normal or part of a sibcall.
5629 We emit all the instructions except the return or the call. */
5631 void
5632 sparc_flat_expand_epilogue (bool for_eh)
5634 HOST_WIDE_INT size = sparc_frame_size;
5636 if (sparc_n_global_fp_regs > 0)
5637 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5638 sparc_frame_base_offset
5639 - sparc_apparent_frame_size,
5640 SORR_RESTORE);
5642 /* If we have a frame pointer, we'll need both to restore it before the
5643 frame is destroyed and use its current value in destroying the frame.
5644 Since we don't have an atomic way to do that in the flat window model,
5645 we save the current value into a temporary register (%g1). */
5646 if (frame_pointer_needed && !for_eh)
5647 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5649 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5650 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5651 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5653 if (sparc_save_local_in_regs_p)
5654 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5655 sparc_frame_base_offset,
5656 SORR_RESTORE);
5658 if (size == 0 || for_eh)
5659 ; /* do nothing. */
5660 else if (frame_pointer_needed)
5662 /* Make sure the frame is destroyed after everything else is done. */
5663 emit_insn (gen_blockage ());
5665 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5667 else
5669 /* Likewise. */
5670 emit_insn (gen_blockage ());
5672 if (size <= 4096)
5673 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
5674 else if (size <= 8192)
5676 emit_insn (gen_stack_pointer_inc (GEN_INT (4096)));
5677 emit_insn (gen_stack_pointer_inc (GEN_INT (size - 4096)));
5679 else
5681 rtx reg = gen_rtx_REG (Pmode, 1);
5682 emit_move_insn (reg, GEN_INT (size));
5683 emit_insn (gen_stack_pointer_inc (reg));
5688 /* Return true if it is appropriate to emit `return' instructions in the
5689 body of a function. */
5691 bool
5692 sparc_can_use_return_insn_p (void)
5694 return sparc_prologue_data_valid_p
5695 && sparc_n_global_fp_regs == 0
5696 && TARGET_FLAT
5697 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
5698 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
5701 /* This function generates the assembly code for function exit. */
5703 static void
5704 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5706 /* If the last two instructions of a function are "call foo; dslot;"
5707 the return address might point to the first instruction in the next
5708 function and we have to output a dummy nop for the sake of sane
5709 backtraces in such cases. This is pointless for sibling calls since
5710 the return address is explicitly adjusted. */
5712 rtx insn, last_real_insn;
5714 insn = get_last_insn ();
5716 last_real_insn = prev_real_insn (insn);
5717 if (last_real_insn
5718 && NONJUMP_INSN_P (last_real_insn)
5719 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5720 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5722 if (last_real_insn
5723 && CALL_P (last_real_insn)
5724 && !SIBLING_CALL_P (last_real_insn))
5725 fputs("\tnop\n", file);
5727 sparc_output_deferred_case_vectors ();
5730 /* Output a 'restore' instruction. */
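/* For example, a delay slot holding (set %o0 (plus %i0 (const_int 5)))
   is folded into the restore, yielding

      restore %i0, 5, %o0

   while a null PAT degenerates into a plain ` restore'.  */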
5732 static void
5733 output_restore (rtx pat)
5735 rtx operands[3];
5737 if (! pat)
5739 fputs ("\t restore\n", asm_out_file);
5740 return;
5743 gcc_assert (GET_CODE (pat) == SET);
5745 operands[0] = SET_DEST (pat);
5746 pat = SET_SRC (pat);
5748 switch (GET_CODE (pat))
5750 case PLUS:
5751 operands[1] = XEXP (pat, 0);
5752 operands[2] = XEXP (pat, 1);
5753 output_asm_insn (" restore %r1, %2, %Y0", operands);
5754 break;
5755 case LO_SUM:
5756 operands[1] = XEXP (pat, 0);
5757 operands[2] = XEXP (pat, 1);
5758 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5759 break;
5760 case ASHIFT:
5761 operands[1] = XEXP (pat, 0);
5762 gcc_assert (XEXP (pat, 1) == const1_rtx);
5763 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5764 break;
5765 default:
5766 operands[1] = pat;
5767 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5768 break;
5772 /* Output a return. */
5774 const char *
5775 output_return (rtx insn)
5777 if (crtl->calls_eh_return)
5779 /* If the function uses __builtin_eh_return, the eh_return
5780 machinery occupies the delay slot. */
5781 gcc_assert (!final_sequence);
5783 if (flag_delayed_branch)
5785 if (!TARGET_FLAT && TARGET_V9)
5786 fputs ("\treturn\t%i7+8\n", asm_out_file);
5787 else
5789 if (!TARGET_FLAT)
5790 fputs ("\trestore\n", asm_out_file);
5792 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5795 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5797 else
5799 if (!TARGET_FLAT)
5800 fputs ("\trestore\n", asm_out_file);
5802 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5803 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5806 else if (sparc_leaf_function_p || TARGET_FLAT)
5808 /* This is a leaf or flat function so we don't have to bother restoring
5809 the register window, which frees us from dealing with the convoluted
5810 semantics of restore/return. We simply output the jump to the
5811 return address and the insn in the delay slot (if any). */
5813 return "jmp\t%%o7+%)%#";
5815 else
5817 /* This is a regular function so we have to restore the register window.
5818 We may have a pending insn for the delay slot, which will be either
5819 combined with the 'restore' instruction or put in the delay slot of
5820 the 'return' instruction. */
5822 if (final_sequence)
5824 rtx delay, pat;
5826 delay = NEXT_INSN (insn);
5827 gcc_assert (delay);
5829 pat = PATTERN (delay);
5831 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5833 epilogue_renumber (&pat, 0);
5834 return "return\t%%i7+%)%#";
5836 else
5838 output_asm_insn ("jmp\t%%i7+%)", NULL);
5839 output_restore (pat);
5840 PATTERN (delay) = gen_blockage ();
5841 INSN_CODE (delay) = -1;
5844 else
5846 /* The delay slot is empty. */
5847 if (TARGET_V9)
5848 return "return\t%%i7+%)\n\t nop";
5849 else if (flag_delayed_branch)
5850 return "jmp\t%%i7+%)\n\t restore";
5851 else
5852 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5856 return "";
5859 /* Output a sibling call. */
5861 const char *
5862 output_sibcall (rtx insn, rtx call_operand)
5864 rtx operands[1];
5866 gcc_assert (flag_delayed_branch);
5868 operands[0] = call_operand;
5870 if (sparc_leaf_function_p || TARGET_FLAT)
5872 /* This is a leaf or flat function so we don't have to bother restoring
5873 the register window. We simply output the jump to the function and
5874 the insn in the delay slot (if any). */
5876 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5878 if (final_sequence)
5879 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5880 operands);
5881 else
5882 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5883 it into branch if possible. */
5884 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5885 operands);
5887 else
5889 /* This is a regular function so we have to restore the register window.
5890 We may have a pending insn for the delay slot, which will be combined
5891 with the 'restore' instruction. */
5893 output_asm_insn ("call\t%a0, 0", operands);
5895 if (final_sequence)
5897 rtx delay = NEXT_INSN (insn);
5898 gcc_assert (delay);
5900 output_restore (PATTERN (delay));
5902 PATTERN (delay) = gen_blockage ();
5903 INSN_CODE (delay) = -1;
5905 else
5906 output_restore (NULL_RTX);
5909 return "";
5912 /* Functions for handling argument passing.
5914 For 32-bit, the first 6 args are normally in registers and the rest are
5915 pushed. Any arg that starts within the first 6 words is at least
5916 partially passed in a register unless its data type forbids.
5918 For 64-bit, the argument registers are laid out as an array of 16 elements
5919 and arguments are added sequentially. The first 6 int args and up to the
5920 first 16 fp args (depending on size) are passed in regs.
5922    Slot   Stack      Integral   Float    Float in structure   Double   Long Double
5923    ----   -----      --------   -----    ------------------   ------   -----------
5924     15   [SP+248]                %f31        %f30,%f31         %d30
5925     14   [SP+240]                %f29        %f28,%f29         %d28        %q28
5926     13   [SP+232]                %f27        %f26,%f27         %d26
5927     12   [SP+224]                %f25        %f24,%f25         %d24        %q24
5928     11   [SP+216]                %f23        %f22,%f23         %d22
5929     10   [SP+208]                %f21        %f20,%f21         %d20        %q20
5930      9   [SP+200]                %f19        %f18,%f19         %d18
5931      8   [SP+192]                %f17        %f16,%f17         %d16        %q16
5932      7   [SP+184]                %f15        %f14,%f15         %d14
5933      6   [SP+176]                %f13        %f12,%f13         %d12        %q12
5934      5   [SP+168]       %o5      %f11        %f10,%f11         %d10
5935      4   [SP+160]       %o4       %f9         %f8,%f9          %d8         %q8
5936      3   [SP+152]       %o3       %f7         %f6,%f7          %d6
5937      2   [SP+144]       %o2       %f5         %f4,%f5          %d4         %q4
5938      1   [SP+136]       %o1       %f3         %f2,%f3          %d2
5939      0   [SP+128]       %o0       %f1         %f0,%f1          %d0         %q0
5941 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5943 Integral arguments are always passed as 64-bit quantities appropriately
5944 extended.
5946 Passing of floating point values is handled as follows.
5947 If a prototype is in scope:
5948 If the value is in a named argument (i.e. not a stdarg function or a
5949 value not part of the `...') then the value is passed in the appropriate
5950 fp reg.
5951 If the value is part of the `...' and is passed in one of the first 6
5952 slots then the value is passed in the appropriate int reg.
5953 If the value is part of the `...' and is not passed in one of the first 6
5954 slots then the value is passed in memory.
5955 If a prototype is not in scope:
5956 If the value is one of the first 6 arguments the value is passed in the
5957 appropriate integer reg and the appropriate fp reg.
5958 If the value is not one of the first 6 arguments the value is passed in
5959 the appropriate fp reg and in memory.
5962 Summary of the calling conventions implemented by GCC on the SPARC:
5964 32-bit ABI:
5965                              size    argument     return value
5967    small integer              <4     int. reg.    int. reg.
5968    word                        4     int. reg.    int. reg.
5969    double word                 8     int. reg.    int. reg.
5971    _Complex small integer     <8     int. reg.    int. reg.
5972    _Complex word               8     int. reg.    int. reg.
5973    _Complex double word       16     memory       int. reg.
5975    vector integer            <=8     int. reg.    FP reg.
5976    vector integer             >8     memory       memory
5978    float                       4     int. reg.    FP reg.
5979    double                      8     int. reg.    FP reg.
5980    long double                16     memory       memory
5982    _Complex float              8     memory       FP reg.
5983    _Complex double            16     memory       FP reg.
5984    _Complex long double       32     memory       FP reg.
5986    vector float              any     memory       memory
5988    aggregate                 any     memory       memory
5992 64-bit ABI:
5993                              size    argument     return value
5995    small integer              <8     int. reg.    int. reg.
5996    word                        8     int. reg.    int. reg.
5997    double word                16     int. reg.    int. reg.
5999    _Complex small integer    <16     int. reg.    int. reg.
6000    _Complex word              16     int. reg.    int. reg.
6001    _Complex double word       32     memory       int. reg.
6003    vector integer           <=16     FP reg.      FP reg.
6004    vector integer         16<s<=32   memory       FP reg.
6005    vector integer            >32     memory       memory
6007    float                       4     FP reg.      FP reg.
6008    double                      8     FP reg.      FP reg.
6009    long double                16     FP reg.      FP reg.
6011    _Complex float              8     FP reg.      FP reg.
6012    _Complex double            16     FP reg.      FP reg.
6013    _Complex long double       32     memory       FP reg.
6015    vector float             <=16     FP reg.      FP reg.
6016    vector float           16<s<=32   memory       FP reg.
6017    vector float              >32     memory       memory
6019    aggregate                <=16     reg.         reg.
6020    aggregate              16<s<=32   memory       reg.
6021    aggregate                 >32     memory       memory
6025 Note #1: complex floating-point types follow the extended SPARC ABIs as
6026 implemented by the Sun compiler.
6028 Note #2: integral vector types follow the scalar floating-point type
6029 conventions to match what is implemented by the Sun VIS SDK.
6031 Note #3: floating-point vector types follow the aggregate types
6032 conventions. */
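#if 0
/* Editor's illustration (hypothetical user code, not part of this file).
   Under the 64-bit table above, a prototyped call such as the one below
   puts A in %o0 (slot 0), B in %d2 (slot 1) and C right-justified in %f5
   (slot 2), while the stack slots [SP+128], [SP+136] and [SP+144] remain
   reserved for the three arguments.  */
extern void f (int a, double b, float c);
void
g (void)
{
  f (1, 2.0, 3.0f);
}
#endif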
6035 /* Maximum number of int regs for args. */
6036 #define SPARC_INT_ARG_MAX 6
6037 /* Maximum number of fp regs for args. */
6038 #define SPARC_FP_ARG_MAX 16
6040 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
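/* Editor's note: a worked example of ROUND_ADVANCE, assuming
   UNITS_PER_WORD == 8 as for TARGET_ARCH64.  The macro yields the number
   of argument words a byte size occupies, rounding up.  */
#if 0
  ROUND_ADVANCE (1);   /* -> 1 word  */
  ROUND_ADVANCE (8);   /* -> 1 word  */
  ROUND_ADVANCE (9);   /* -> 2 words */
  ROUND_ADVANCE (16);  /* -> 2 words */
#endif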
6042 /* Handle the INIT_CUMULATIVE_ARGS macro.
6043 Initialize a variable CUM of type CUMULATIVE_ARGS
6044 for a call to a function whose data type is FNTYPE.
6045 For a library call, FNTYPE is 0. */
6047 void
6048 init_cumulative_args (struct sparc_args *cum, tree fntype,
6049 rtx libname ATTRIBUTE_UNUSED,
6050 tree fndecl ATTRIBUTE_UNUSED)
6052 cum->words = 0;
6053 cum->prototype_p = fntype && prototype_p (fntype);
6054 cum->libcall_p = fntype == 0;
6057 /* Handle promotion of pointer and integer arguments. */
6059 static enum machine_mode
6060 sparc_promote_function_mode (const_tree type,
6061 enum machine_mode mode,
6062 int *punsignedp,
6063 const_tree fntype ATTRIBUTE_UNUSED,
6064 int for_return ATTRIBUTE_UNUSED)
6066 if (type != NULL_TREE && POINTER_TYPE_P (type))
6068 *punsignedp = POINTERS_EXTEND_UNSIGNED;
6069 return Pmode;
6072 /* Integral arguments are passed as full words, as per the ABI. */
6073 if (GET_MODE_CLASS (mode) == MODE_INT
6074 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6075 return word_mode;
6077 return mode;
6080 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
6082 static bool
6083 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
6085 return TARGET_ARCH64 ? true : false;
6088 /* Scan the record type TYPE and return the following predicates:
6089 - INTREGS_P: the record contains at least one field or sub-field
6090 that is eligible for promotion in integer registers.
6091 - FPREGS_P: the record contains at least one field or sub-field
6092 that is eligible for promotion in floating-point registers.
6093 - PACKED_P: the record contains at least one field that is packed.
6095 Sub-fields are not taken into account for the PACKED_P predicate. */
6097 static void
6098 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
6099 int *packed_p)
6101 tree field;
6103 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6105 if (TREE_CODE (field) == FIELD_DECL)
6107 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6108 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
6109 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6110 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6111 && TARGET_FPU)
6112 *fpregs_p = 1;
6113 else
6114 *intregs_p = 1;
6116 if (packed_p && DECL_PACKED (field))
6117 *packed_p = 1;
6122 /* Compute the slot number to pass an argument in.
6123 Return the slot number or -1 if passing on the stack.
6125 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6126 the preceding args and about the function being called.
6127 MODE is the argument's machine mode.
6128 TYPE is the data type of the argument (as a tree).
6129 This is null for libcalls where that information may
6130 not be available.
6131 NAMED is nonzero if this argument is a named parameter
6132 (otherwise it is an extra parameter matching an ellipsis).
6133 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
6134 *PREGNO records the register number to use if scalar type.
6135 *PPADDING records the amount of padding needed in words. */
6137 static int
6138 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
6139 const_tree type, bool named, bool incoming_p,
6140 int *pregno, int *ppadding)
6142 int regbase = (incoming_p
6143 ? SPARC_INCOMING_INT_ARG_FIRST
6144 : SPARC_OUTGOING_INT_ARG_FIRST);
6145 int slotno = cum->words;
6146 enum mode_class mclass;
6147 int regno;
6149 *ppadding = 0;
6151 if (type && TREE_ADDRESSABLE (type))
6152 return -1;
6154 if (TARGET_ARCH32
6155 && mode == BLKmode
6156 && type
6157 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
6158 return -1;
6160 /* For SPARC64, objects requiring 16-byte alignment get it. */
6161 if (TARGET_ARCH64
6162 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
6163 && (slotno & 1) != 0)
6164 slotno++, *ppadding = 1;
6166 mclass = GET_MODE_CLASS (mode);
6167 if (type && TREE_CODE (type) == VECTOR_TYPE)
6169 /* Vector types deserve special treatment because they are
6170 polymorphic wrt their mode, depending upon whether VIS
6171 instructions are enabled. */
6172 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
6174 /* The SPARC port defines no floating-point vector modes. */
6175 gcc_assert (mode == BLKmode);
6177 else
6179 /* Integral vector types should either have a vector
6180 mode or an integral mode, because we are guaranteed
6181 by pass_by_reference that their size is not greater
6182 than 16 bytes and TImode is 16-byte wide. */
6183 gcc_assert (mode != BLKmode);
6185 /* Vector integers are handled like floats according to
6186 the Sun VIS SDK. */
6187 mclass = MODE_FLOAT;
6191 switch (mclass)
6193 case MODE_FLOAT:
6194 case MODE_COMPLEX_FLOAT:
6195 case MODE_VECTOR_INT:
6196 if (TARGET_ARCH64 && TARGET_FPU && named)
6198 if (slotno >= SPARC_FP_ARG_MAX)
6199 return -1;
6200 regno = SPARC_FP_ARG_FIRST + slotno * 2;
6201 /* Arguments filling only a single FP register are
6202 right-justified in the outer double FP register. */
6203 if (GET_MODE_SIZE (mode) <= 4)
6204 regno++;
6205 break;
6207 /* fallthrough */
6209 case MODE_INT:
6210 case MODE_COMPLEX_INT:
6211 if (slotno >= SPARC_INT_ARG_MAX)
6212 return -1;
6213 regno = regbase + slotno;
6214 break;
6216 case MODE_RANDOM:
6217 if (mode == VOIDmode)
6218 /* MODE is VOIDmode when generating the actual call. */
6219 return -1;
6221 gcc_assert (mode == BLKmode);
6223 if (TARGET_ARCH32
6224 || !type
6225 || (TREE_CODE (type) != VECTOR_TYPE
6226 && TREE_CODE (type) != RECORD_TYPE))
6228 if (slotno >= SPARC_INT_ARG_MAX)
6229 return -1;
6230 regno = regbase + slotno;
6232 else /* TARGET_ARCH64 && type */
6234 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
6236 /* First see what kinds of registers we would need. */
6237 if (TREE_CODE (type) == VECTOR_TYPE)
6238 fpregs_p = 1;
6239 else
6240 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
6242 /* The ABI obviously doesn't specify how packed structures
6243 are passed. These are defined to be passed in int regs
6244 if possible, otherwise memory. */
6245 if (packed_p || !named)
6246 fpregs_p = 0, intregs_p = 1;
6248 /* If all arg slots are filled, then must pass on stack. */
6249 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
6250 return -1;
6252 /* If there are only int args and all int arg slots are filled,
6253 then must pass on stack. */
6254 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
6255 return -1;
6257 /* Note that even if all int arg slots are filled, fp members may
6258 still be passed in regs if such regs are available.
6259 *PREGNO isn't set because there may be more than one, it's up
6260 to the caller to compute them. */
6261 return slotno;
6263 break;
6265 default:
6266 gcc_unreachable ();
6269 *pregno = regno;
6270 return slotno;
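/* Editor's note -- a worked example for function_arg_slotno, assuming
   TARGET_ARCH64 and TARGET_FPU: for a named double argument with
   cum->words == 2, slot number 2 is returned and *PREGNO is set to
   SPARC_FP_ARG_FIRST + 4, i.e. %d4 in the table above; being wider
   than 4 bytes, the value is not right-justified into the odd
   register.  */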
6273 /* Handle recursive register counting for structure field layout. */
6275 struct function_arg_record_value_parms
6277 rtx ret; /* return expression being built. */
6278 int slotno; /* slot number of the argument. */
6279 int named; /* whether the argument is named. */
6280 int regbase; /* regno of the base register. */
6281 int stack; /* 1 if part of the argument is on the stack. */
6282 int intoffset; /* offset of the first pending integer field. */
6283 unsigned int nregs; /* number of words passed in registers. */
6286 static void function_arg_record_value_3
6287 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
6288 static void function_arg_record_value_2
6289 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
6290 static void function_arg_record_value_1
6291 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
6292 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
6293 static rtx function_arg_union_value (int, enum machine_mode, int, int);
6295 /* A subroutine of function_arg_record_value. Traverse the structure
6296 recursively and determine how many registers will be required. */
6298 static void
6299 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
6300 struct function_arg_record_value_parms *parms,
6301 bool packed_p)
6303 tree field;
6305 /* We need to compute how many registers are needed so we can
6306 allocate the PARALLEL but before we can do that we need to know
6307 whether there are any packed fields. The ABI obviously doesn't
6308 specify how structures are passed in this case, so they are
6309 defined to be passed in int regs if possible, otherwise memory,
6310 regardless of whether there are fp values present. */
6312 if (! packed_p)
6313 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6315 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6317 packed_p = true;
6318 break;
6322 /* Compute how many registers we need. */
6323 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6325 if (TREE_CODE (field) == FIELD_DECL)
6327 HOST_WIDE_INT bitpos = startbitpos;
6329 if (DECL_SIZE (field) != 0)
6331 if (integer_zerop (DECL_SIZE (field)))
6332 continue;
6334 if (host_integerp (bit_position (field), 1))
6335 bitpos += int_bit_position (field);
6338 /* ??? FIXME: else assume zero offset. */
6340 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6341 function_arg_record_value_1 (TREE_TYPE (field),
6342 bitpos,
6343 parms,
6344 packed_p);
6345 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6346 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6347 && TARGET_FPU
6348 && parms->named
6349 && ! packed_p)
6351 if (parms->intoffset != -1)
6353 unsigned int startbit, endbit;
6354 int intslots, this_slotno;
6356 startbit = parms->intoffset & -BITS_PER_WORD;
6357 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6359 intslots = (endbit - startbit) / BITS_PER_WORD;
6360 this_slotno = parms->slotno + parms->intoffset
6361 / BITS_PER_WORD;
6363 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6365 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6366 /* We need to pass this field on the stack. */
6367 parms->stack = 1;
6370 parms->nregs += intslots;
6371 parms->intoffset = -1;
6374 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
6375 If it wasn't true we wouldn't be here. */
6376 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6377 && DECL_MODE (field) == BLKmode)
6378 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6379 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6380 parms->nregs += 2;
6381 else
6382 parms->nregs += 1;
6384 else
6386 if (parms->intoffset == -1)
6387 parms->intoffset = bitpos;
6393 /* A subroutine of function_arg_record_value. Assign the bits of the
6394 structure between parms->intoffset and bitpos to integer registers. */
6396 static void
6397 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
6398 struct function_arg_record_value_parms *parms)
6400 enum machine_mode mode;
6401 unsigned int regno;
6402 unsigned int startbit, endbit;
6403 int this_slotno, intslots, intoffset;
6404 rtx reg;
6406 if (parms->intoffset == -1)
6407 return;
6409 intoffset = parms->intoffset;
6410 parms->intoffset = -1;
6412 startbit = intoffset & -BITS_PER_WORD;
6413 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6414 intslots = (endbit - startbit) / BITS_PER_WORD;
6415 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
6417 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
6418 if (intslots <= 0)
6419 return;
6421 /* If this is the trailing part of a word, only load that much into
6422 the register. Otherwise load the whole register. Note that in
6423 the latter case we may pick up unwanted bits. It's not a problem
6424 at the moment, but we may wish to revisit this. */
6426 if (intoffset % BITS_PER_WORD != 0)
6427 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
6428 MODE_INT);
6429 else
6430 mode = word_mode;
6432 intoffset /= BITS_PER_UNIT;
6433 do
6435 regno = parms->regbase + this_slotno;
6436 reg = gen_rtx_REG (mode, regno);
6437 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6438 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
6440 this_slotno += 1;
6441 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
6442 mode = word_mode;
6443 parms->nregs += 1;
6444 intslots -= 1;
6446 while (intslots > 0);
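/* Editor's worked example for the loop above (64-bit words, an
   assumption spelled out here rather than taken from the source): for
   'struct { float f; char c; }' the pending integer part begins at bit
   32, so a single SImode piece is emitted at byte offset 4 -- the
   trailing half of slot 0's register -- while F itself has already
   been assigned to a floating-point register.  */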
6449 /* A subroutine of function_arg_record_value. Traverse the structure
6450 recursively and assign bits to floating point registers. Track which
6451 bits in between need integer registers; invoke function_arg_record_value_3
6452 to make that happen. */
6454 static void
6455 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
6456 struct function_arg_record_value_parms *parms,
6457 bool packed_p)
6459 tree field;
6461 if (! packed_p)
6462 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6464 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6466 packed_p = true;
6467 break;
6471 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6473 if (TREE_CODE (field) == FIELD_DECL)
6475 HOST_WIDE_INT bitpos = startbitpos;
6477 if (DECL_SIZE (field) != 0)
6479 if (integer_zerop (DECL_SIZE (field)))
6480 continue;
6482 if (host_integerp (bit_position (field), 1))
6483 bitpos += int_bit_position (field);
6486 /* ??? FIXME: else assume zero offset. */
6488 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6489 function_arg_record_value_2 (TREE_TYPE (field),
6490 bitpos,
6491 parms,
6492 packed_p);
6493 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6494 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6495 && TARGET_FPU
6496 && parms->named
6497 && ! packed_p)
6499 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
6500 int regno, nregs, pos;
6501 enum machine_mode mode = DECL_MODE (field);
6502 rtx reg;
6504 function_arg_record_value_3 (bitpos, parms);
6506 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6507 && mode == BLKmode)
6509 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6510 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6512 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6514 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6515 nregs = 2;
6517 else
6518 nregs = 1;
6520 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6521 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6522 regno++;
6523 reg = gen_rtx_REG (mode, regno);
6524 pos = bitpos / BITS_PER_UNIT;
6525 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6526 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6527 parms->nregs += 1;
6528 while (--nregs > 0)
6530 regno += GET_MODE_SIZE (mode) / 4;
6531 reg = gen_rtx_REG (mode, regno);
6532 pos += GET_MODE_SIZE (mode);
6533 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6534 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6535 parms->nregs += 1;
6538 else
6540 if (parms->intoffset == -1)
6541 parms->intoffset = bitpos;
6547 /* Used by function_arg and sparc_function_value_1 to implement the complex
6548 conventions of the 64-bit ABI for passing and returning structures.
6549 Return an expression valid as a return value for the FUNCTION_ARG
6550 and TARGET_FUNCTION_VALUE.
6552 TYPE is the data type of the argument (as a tree).
6553 This is null for libcalls where that information may
6554 not be available.
6555 MODE is the argument's machine mode.
6556 SLOTNO is the index number of the argument's slot in the parameter array.
6557 NAMED is nonzero if this argument is a named parameter
6558 (otherwise it is an extra parameter matching an ellipsis).
6559 REGBASE is the regno of the base register for the parameter array. */
6561 static rtx
6562 function_arg_record_value (const_tree type, enum machine_mode mode,
6563 int slotno, int named, int regbase)
6565 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6566 struct function_arg_record_value_parms parms;
6567 unsigned int nregs;
6569 parms.ret = NULL_RTX;
6570 parms.slotno = slotno;
6571 parms.named = named;
6572 parms.regbase = regbase;
6573 parms.stack = 0;
6575 /* Compute how many registers we need. */
6576 parms.nregs = 0;
6577 parms.intoffset = 0;
6578 function_arg_record_value_1 (type, 0, &parms, false);
6580 /* Take into account pending integer fields. */
6581 if (parms.intoffset != -1)
6583 unsigned int startbit, endbit;
6584 int intslots, this_slotno;
6586 startbit = parms.intoffset & -BITS_PER_WORD;
6587 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6588 intslots = (endbit - startbit) / BITS_PER_WORD;
6589 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
6591 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6593 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6594 /* We need to pass this field on the stack. */
6595 parms.stack = 1;
6598 parms.nregs += intslots;
6600 nregs = parms.nregs;
6602 /* Allocate the vector and handle some annoying special cases. */
6603 if (nregs == 0)
6605 /* ??? Empty structure has no value? Duh? */
6606 if (typesize <= 0)
6608 /* Though there's nothing really to store, return a word register
6609 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6610 leads to breakage due to the fact that there are zero bytes to
6611 load. */
6612 return gen_rtx_REG (mode, regbase);
6614 else
6616 /* ??? C++ has structures with no fields, and yet a size. Give up
6617 for now and pass everything back in integer registers. */
6618 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6620 if (nregs + slotno > SPARC_INT_ARG_MAX)
6621 nregs = SPARC_INT_ARG_MAX - slotno;
6623 gcc_assert (nregs != 0);
6625 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6627 /* If at least one field must be passed on the stack, generate
6628 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6629 also be passed on the stack. We can't do much better because the
6630 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6631 of structures for which the fields passed exclusively in registers
6632 are not at the beginning of the structure. */
6633 if (parms.stack)
6634 XVECEXP (parms.ret, 0, 0)
6635 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6637 /* Fill in the entries. */
6638 parms.nregs = 0;
6639 parms.intoffset = 0;
6640 function_arg_record_value_2 (type, 0, &parms, false);
6641 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6643 gcc_assert (parms.nregs == nregs);
6645 return parms.ret;
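/* Editor's sketch (assumed shape, not verified compiler output): for
   'struct { double d; long l; }' passed in slot 0 under the 64-bit
   ABI, function_arg_record_value builds roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the FP field travels in a float register and the integer field
   in the following integer argument slot.  */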
6648 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6649 of the 64-bit ABI for passing and returning unions.
6650 Return an expression valid as a return value for the FUNCTION_ARG
6651 and TARGET_FUNCTION_VALUE.
6653 SIZE is the size in bytes of the union.
6654 MODE is the argument's machine mode.
6655 REGNO is the hard register the union will be passed in. */
6657 static rtx
6658 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6659 int regno)
6661 int nwords = ROUND_ADVANCE (size), i;
6662 rtx regs;
6664 /* See comment in previous function for empty structures. */
6665 if (nwords == 0)
6666 return gen_rtx_REG (mode, regno);
6668 if (slotno == SPARC_INT_ARG_MAX - 1)
6669 nwords = 1;
6671 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6673 for (i = 0; i < nwords; i++)
6675 /* Unions are passed left-justified. */
6676 XVECEXP (regs, 0, i)
6677 = gen_rtx_EXPR_LIST (VOIDmode,
6678 gen_rtx_REG (word_mode, regno),
6679 GEN_INT (UNITS_PER_WORD * i));
6680 regno++;
6683 return regs;
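/* Editor's sketch (assumed shape, outgoing case): a 16-byte union
   passed in slot 0 yields two word-mode pieces, left-justified:

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])  */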
6686 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6687 for passing and returning large (BLKmode) vectors.
6688 Return an expression valid as a return value for the FUNCTION_ARG
6689 and TARGET_FUNCTION_VALUE.
6691 SIZE is the size in bytes of the vector (at least 8 bytes).
6692 REGNO is the FP hard register the vector will be passed in. */
6694 static rtx
6695 function_arg_vector_value (int size, int regno)
6697 int i, nregs = size / 8;
6698 rtx regs;
6700 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6702 for (i = 0; i < nregs; i++)
6704 XVECEXP (regs, 0, i)
6705 = gen_rtx_EXPR_LIST (VOIDmode,
6706 gen_rtx_REG (DImode, regno + 2*i),
6707 GEN_INT (i*8));
6710 return regs;
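/* Editor's sketch (assumed shape): a 16-byte BLKmode vector starting
   at %f0 is split into two DImode FP pieces:

     (parallel [(expr_list (reg:DI %f0) (const_int 0))
                (expr_list (reg:DI %f2) (const_int 8))])  */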
6713 /* Determine where to put an argument to a function.
6714 Value is zero to push the argument on the stack,
6715 or a hard register in which to store the argument.
6717 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6718 the preceding args and about the function being called.
6719 MODE is the argument's machine mode.
6720 TYPE is the data type of the argument (as a tree).
6721 This is null for libcalls where that information may
6722 not be available.
6723 NAMED is true if this argument is a named parameter
6724 (otherwise it is an extra parameter matching an ellipsis).
6725 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6726 TARGET_FUNCTION_INCOMING_ARG. */
6728 static rtx
6729 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6730 const_tree type, bool named, bool incoming_p)
6732 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6734 int regbase = (incoming_p
6735 ? SPARC_INCOMING_INT_ARG_FIRST
6736 : SPARC_OUTGOING_INT_ARG_FIRST);
6737 int slotno, regno, padding;
6738 enum mode_class mclass = GET_MODE_CLASS (mode);
6740 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6741 &regno, &padding);
6742 if (slotno == -1)
6743 return 0;
6745 /* Vector types deserve special treatment because they are polymorphic wrt
6746 their mode, depending upon whether VIS instructions are enabled. */
6747 if (type && TREE_CODE (type) == VECTOR_TYPE)
6749 HOST_WIDE_INT size = int_size_in_bytes (type);
6750 gcc_assert ((TARGET_ARCH32 && size <= 8)
6751 || (TARGET_ARCH64 && size <= 16));
6753 if (mode == BLKmode)
6754 return function_arg_vector_value (size,
6755 SPARC_FP_ARG_FIRST + 2*slotno);
6756 else
6757 mclass = MODE_FLOAT;
6760 if (TARGET_ARCH32)
6761 return gen_rtx_REG (mode, regno);
6763 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6764 and are promoted to registers if possible. */
6765 if (type && TREE_CODE (type) == RECORD_TYPE)
6767 HOST_WIDE_INT size = int_size_in_bytes (type);
6768 gcc_assert (size <= 16);
6770 return function_arg_record_value (type, mode, slotno, named, regbase);
6773 /* Unions up to 16 bytes in size are passed in integer registers. */
6774 else if (type && TREE_CODE (type) == UNION_TYPE)
6776 HOST_WIDE_INT size = int_size_in_bytes (type);
6777 gcc_assert (size <= 16);
6779 return function_arg_union_value (size, mode, slotno, regno);
6782 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6783 but also have the slot allocated for them.
6784 If no prototype is in scope fp values in register slots get passed
6785 in two places, either fp regs and int regs or fp regs and memory. */
6786 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6787 && SPARC_FP_REG_P (regno))
6789 rtx reg = gen_rtx_REG (mode, regno);
6790 if (cum->prototype_p || cum->libcall_p)
6792 /* "* 2" because fp reg numbers are recorded in 4 byte
6793 quantities. */
6794 #if 0
6795 /* ??? This will cause the value to be passed in the fp reg and
6796 in the stack. When a prototype exists we want to pass the
6797 value in the reg but reserve space on the stack. That's an
6798 optimization, and is deferred [for a bit]. */
6799 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6800 return gen_rtx_PARALLEL (mode,
6801 gen_rtvec (2,
6802 gen_rtx_EXPR_LIST (VOIDmode,
6803 NULL_RTX, const0_rtx),
6804 gen_rtx_EXPR_LIST (VOIDmode,
6805 reg, const0_rtx)));
6806 else
6807 #else
6808 /* ??? It seems that passing back a register even when past
6809 the area declared by REG_PARM_STACK_SPACE will allocate
6810 space appropriately, and will not copy the data onto the
6811 stack, exactly as we desire.
6813 This is due to locate_and_pad_parm being called in
6814 expand_call whenever reg_parm_stack_space > 0, which
6815 while beneficial to our example here, would seem to be
6816 in error from what had been intended. Ho hum... -- r~ */
6817 #endif
6818 return reg;
6820 else
6822 rtx v0, v1;
6824 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6826 int intreg;
6828 /* On incoming, we don't need to know that the value
6829 is passed in %f0 and %i0, and it confuses other parts
6830 causing needless spillage even on the simplest cases. */
6831 if (incoming_p)
6832 return reg;
6834 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6835 + (regno - SPARC_FP_ARG_FIRST) / 2);
6837 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6838 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6839 const0_rtx);
6840 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6842 else
6844 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6845 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6846 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6851 /* All other aggregate types are passed in an integer register in a mode
6852 corresponding to the size of the type. */
6853 else if (type && AGGREGATE_TYPE_P (type))
6855 HOST_WIDE_INT size = int_size_in_bytes (type);
6856 gcc_assert (size <= 16);
6858 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6861 return gen_rtx_REG (mode, regno);
6864 /* Handle the TARGET_FUNCTION_ARG target hook. */
6866 static rtx
6867 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6868 const_tree type, bool named)
6870 return sparc_function_arg_1 (cum, mode, type, named, false);
6873 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6875 static rtx
6876 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6877 const_tree type, bool named)
6879 return sparc_function_arg_1 (cum, mode, type, named, true);
6882 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
6884 static unsigned int
6885 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6887 return ((TARGET_ARCH64
6888 && (GET_MODE_ALIGNMENT (mode) == 128
6889 || (type && TYPE_ALIGN (type) == 128)))
6890 ? 128
6891 : PARM_BOUNDARY);
6894 /* For an arg passed partly in registers and partly in memory,
6895 this is the number of bytes of registers used.
6896 For args passed entirely in registers or entirely in memory, zero.
6898 Any arg that starts in the first 6 regs but won't entirely fit in them
6899 needs partial registers on v8. On v9, structures with integer
6900 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6901 values that begin in the last fp reg [where "last fp reg" varies with the
6902 mode] will be split between that reg and memory. */
6904 static int
6905 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6906 tree type, bool named)
6908 int slotno, regno, padding;
6910 /* We pass false for incoming_p here; it doesn't matter. */
6911 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6912 false, &regno, &padding);
6914 if (slotno == -1)
6915 return 0;
6917 if (TARGET_ARCH32)
6919 if ((slotno + (mode == BLKmode
6920 ? ROUND_ADVANCE (int_size_in_bytes (type))
6921 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6922 > SPARC_INT_ARG_MAX)
6923 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6925 else
6927 /* We are guaranteed by pass_by_reference that the size of the
6928 argument is not greater than 16 bytes, so we only need to return
6929 one word if the argument is partially passed in registers. */
6931 if (type && AGGREGATE_TYPE_P (type))
6933 int size = int_size_in_bytes (type);
6935 if (size > UNITS_PER_WORD
6936 && slotno == SPARC_INT_ARG_MAX - 1)
6937 return UNITS_PER_WORD;
6939 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6940 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6941 && ! (TARGET_FPU && named)))
6943 /* The complex types are passed as packed types. */
6944 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6945 && slotno == SPARC_INT_ARG_MAX - 1)
6946 return UNITS_PER_WORD;
6948 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6950 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6951 > SPARC_FP_ARG_MAX)
6952 return UNITS_PER_WORD;
6956 return 0;
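/* Editor's worked example, 32-bit ABI: an 8-byte DImode argument that
   starts in slot 5 overflows the six register slots, since
   ROUND_ADVANCE (8) == 2 with 4-byte words, so sparc_arg_partial_bytes
   returns (6 - 5) * 4 = 4 bytes in registers, with the remaining
   4 bytes on the stack.  */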
6959 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6960 Specify whether to pass the argument by reference. */
6962 static bool
6963 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6964 enum machine_mode mode, const_tree type,
6965 bool named ATTRIBUTE_UNUSED)
6967 if (TARGET_ARCH32)
6968 /* Original SPARC 32-bit ABI says that structures and unions,
6969 and quad-precision floats are passed by reference. For Pascal,
6970 also pass arrays by reference. All other base types are passed
6971 in registers.
6973 Extended ABI (as implemented by the Sun compiler) says that all
6974 complex floats are passed by reference. Pass complex integers
6975 in registers up to 8 bytes. More generally, enforce the 2-word
6976 cap for passing arguments in registers.
6978 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6979 integers are passed like floats of the same size, that is in
6980 registers up to 8 bytes. Pass all vector floats by reference
6981 like structure and unions. */
6982 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6983 || mode == SCmode
6984 /* Catch CDImode, TFmode, DCmode and TCmode. */
6985 || GET_MODE_SIZE (mode) > 8
6986 || (type
6987 && TREE_CODE (type) == VECTOR_TYPE
6988 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6989 else
6990 /* Original SPARC 64-bit ABI says that structures and unions
6991 smaller than 16 bytes are passed in registers, as well as
6992 all other base types.
6994 Extended ABI (as implemented by the Sun compiler) says that
6995 complex floats are passed in registers up to 16 bytes. Pass
6996 all complex integers in registers up to 16 bytes. More generally,
6997 enforce the 2-word cap for passing arguments in registers.
6999 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7000 integers are passed like floats of the same size, that is in
7001 registers (up to 16 bytes). Pass all vector floats like structure
7002 and unions. */
7003 return ((type
7004 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
7005 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
7006 /* Catch CTImode and TCmode. */
7007 || GET_MODE_SIZE (mode) > 16);
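#if 0
/* Editor's illustration (hypothetical user code, assuming a 128-bit
   long double): under the rules above, on 32-bit P1 (aggregate),
   P2 (quad-precision float) and all complex floats go by reference,
   while P3 (_Complex int, 8 bytes) stays in registers; on 64-bit only
   objects wider than 16 bytes, such as P4, go by reference.  */
struct s8  { char c[8];  };
struct s24 { char c[24]; };
extern void h (struct s8 p1, long double p2, int _Complex p3,
               struct s24 p4);
#endif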
7010 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
7011 Update the data in CUM to advance over an argument
7012 of mode MODE and data type TYPE.
7013 TYPE is null for libcalls where that information may not be available. */
7015 static void
7016 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
7017 const_tree type, bool named)
7019 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
7020 int regno, padding;
7022 /* We pass false for incoming_p here; it doesn't matter. */
7023 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
7025 /* If argument requires leading padding, add it. */
7026 cum->words += padding;
7028 if (TARGET_ARCH32)
7030 cum->words += (mode != BLKmode
7031 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7032 : ROUND_ADVANCE (int_size_in_bytes (type)));
7034 else
7036 if (type && AGGREGATE_TYPE_P (type))
7038 int size = int_size_in_bytes (type);
7040 if (size <= 8)
7041 ++cum->words;
7042 else if (size <= 16)
7043 cum->words += 2;
7044 else /* passed by reference */
7045 ++cum->words;
7047 else
7049 cum->words += (mode != BLKmode
7050 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7051 : ROUND_ADVANCE (int_size_in_bytes (type)));
7056 /* Handle the FUNCTION_ARG_PADDING macro.
7057 For the 64 bit ABI structs are always stored left shifted in their
7058 argument slot. */
7060 enum direction
7061 function_arg_padding (enum machine_mode mode, const_tree type)
7063 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
7064 return upward;
7066 /* Fall back to the default. */
7067 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
7070 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
7071 Specify whether to return the return value in memory. */
7073 static bool
7074 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
7076 if (TARGET_ARCH32)
7077 /* Original SPARC 32-bit ABI says that structures and unions,
7078 and quad-precision floats are returned in memory. All other
7079 base types are returned in registers.
7081 Extended ABI (as implemented by the Sun compiler) says that
7082 all complex floats are returned in registers (8 FP registers
7083 at most for '_Complex long double'). Return all complex integers
7084 in registers (4 at most for '_Complex long long').
7086 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7087 integers are returned like floats of the same size, that is in
7088 registers up to 8 bytes and in memory otherwise. Return all
7089 vector floats in memory like structure and unions; note that
7090 they always have BLKmode like the latter. */
7091 return (TYPE_MODE (type) == BLKmode
7092 || TYPE_MODE (type) == TFmode
7093 || (TREE_CODE (type) == VECTOR_TYPE
7094 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
7095 else
7096 /* Original SPARC 64-bit ABI says that structures and unions
7097 smaller than 32 bytes are returned in registers, as well as
7098 all other base types.
7100 Extended ABI (as implemented by the Sun compiler) says that all
7101 complex floats are returned in registers (8 FP registers at most
7102 for '_Complex long double'). Return all complex integers in
7103 registers (4 at most for '_Complex TItype').
7105 Vector ABI (as implemented by the Sun VIS SDK) says that vector
7106 integers are returned like floats of the same size, that is in
7107 registers. Return all vector floats like structure and unions;
7108 note that they always have BLKmode like the latter. */
7109 return (TYPE_MODE (type) == BLKmode
7110 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
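/* Editor's worked example of the rules above: a 12-byte
   'struct { char c[12]; }' has BLKmode, so it is returned in memory
   under the 32-bit ABI but in registers under the 64-bit ABI, where
   only BLKmode objects larger than 32 bytes take the memory path;
   a quad-precision float (TFmode) is likewise returned in memory
   only on 32-bit.  */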
7113 /* Handle the TARGET_STRUCT_VALUE target hook.
7114 Return where to find the structure return value address. */
7116 static rtx
7117 sparc_struct_value_rtx (tree fndecl, int incoming)
7119 if (TARGET_ARCH64)
7120 return 0;
7121 else
7123 rtx mem;
7125 if (incoming)
7126 mem = gen_frame_mem (Pmode, plus_constant (Pmode, frame_pointer_rtx,
7127 STRUCT_VALUE_OFFSET));
7128 else
7129 mem = gen_frame_mem (Pmode, plus_constant (Pmode, stack_pointer_rtx,
7130 STRUCT_VALUE_OFFSET));
7132 /* Only follow the SPARC ABI for fixed-size structure returns.
7133 Variable size structure returns are handled per the normal
7134 procedures in GCC. This is enabled by -mstd-struct-return. */
7135 if (incoming == 2
7136 && sparc_std_struct_return
7137 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
7138 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
7140 /* We must check and adjust the return address, as it is
7141 optional as to whether the return object is really
7142 provided. */
7143 rtx ret_reg = gen_rtx_REG (Pmode, 31);
7144 rtx scratch = gen_reg_rtx (SImode);
7145 rtx endlab = gen_label_rtx ();
7147 /* Calculate the return object size */
7148 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
7149 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
7150 /* Construct a temporary return value */
7151 rtx temp_val
7152 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
7154 /* Implement SPARC 32-bit psABI callee return struct checking:
7156 Fetch the instruction where we will return to and see if
7157 it's an unimp instruction (the most significant 10 bits
7158 will be zero). */
7159 emit_move_insn (scratch, gen_rtx_MEM (SImode,
7160 plus_constant (Pmode,
7161 ret_reg, 8)));
7162 /* Assume the size is valid and pre-adjust */
7163 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
7164 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
7165 0, endlab);
7166 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
7167 /* Write the address of the memory pointed to by temp_val into
7168 the memory pointed to by mem */
7169 emit_move_insn (mem, XEXP (temp_val, 0));
7170 emit_label (endlab);
7173 return mem;
7177 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
7178 For v9, function return values are subject to the same rules as arguments,
7179 except that up to 32 bytes may be returned in registers. */
7181 static rtx
7182 sparc_function_value_1 (const_tree type, enum machine_mode mode,
7183 bool outgoing)
7185 /* Beware that the two values are swapped here wrt function_arg. */
7186 int regbase = (outgoing
7187 ? SPARC_INCOMING_INT_ARG_FIRST
7188 : SPARC_OUTGOING_INT_ARG_FIRST);
7189 enum mode_class mclass = GET_MODE_CLASS (mode);
7190 int regno;
7192 /* Vector types deserve special treatment because they are polymorphic wrt
7193 their mode, depending upon whether VIS instructions are enabled. */
7194 if (type && TREE_CODE (type) == VECTOR_TYPE)
7196 HOST_WIDE_INT size = int_size_in_bytes (type);
7197 gcc_assert ((TARGET_ARCH32 && size <= 8)
7198 || (TARGET_ARCH64 && size <= 32));
7200 if (mode == BLKmode)
7201 return function_arg_vector_value (size,
7202 SPARC_FP_ARG_FIRST);
7203 else
7204 mclass = MODE_FLOAT;
7207 if (TARGET_ARCH64 && type)
7209 /* Structures up to 32 bytes in size are returned in registers. */
7210 if (TREE_CODE (type) == RECORD_TYPE)
7212 HOST_WIDE_INT size = int_size_in_bytes (type);
7213 gcc_assert (size <= 32);
7215 return function_arg_record_value (type, mode, 0, 1, regbase);
7218 /* Unions up to 32 bytes in size are returned in integer registers. */
7219 else if (TREE_CODE (type) == UNION_TYPE)
7221 HOST_WIDE_INT size = int_size_in_bytes (type);
7222 gcc_assert (size <= 32);
7224 return function_arg_union_value (size, mode, 0, regbase);
7227 /* Objects that require it are returned in FP registers. */
7228 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
7231 /* All other aggregate types are returned in an integer register in a
7232 mode corresponding to the size of the type. */
7233 else if (AGGREGATE_TYPE_P (type))
7237 HOST_WIDE_INT size = int_size_in_bytes (type);
7238 gcc_assert (size <= 32);
7240 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7242 /* ??? We probably should have made the same ABI change in
7243 3.4.0 as the one we made for unions. The latter was
7244 required by the SCD though, while the former is not
7245 specified, so we favored compatibility and efficiency.
7247 Now we're stuck for aggregates larger than 16 bytes,
7248 because OImode vanished in the meantime. Let's not
7249 try to be unduly clever, and simply follow the ABI
7250 for unions in that case. */
7251 if (mode == BLKmode)
7252 return function_arg_union_value (size, mode, 0, regbase);
7253 else
7254 mclass = MODE_INT;
7257 /* We should only have pointer and integer types at this point. This
7258 must match sparc_promote_function_mode. */
7259 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7260 mode = word_mode;
7263 /* We should only have pointer and integer types at this point. This must
7264 match sparc_promote_function_mode. */
7265 else if (TARGET_ARCH32
7266 && mclass == MODE_INT
7267 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
7268 mode = word_mode;
7270 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
7271 regno = SPARC_FP_ARG_FIRST;
7272 else
7273 regno = regbase;
7275 return gen_rtx_REG (mode, regno);
7278 /* Handle TARGET_FUNCTION_VALUE.
7279 On the SPARC, the value is found in the first "output" register, but the
7280 called function leaves it in the first "input" register. */
7282 static rtx
7283 sparc_function_value (const_tree valtype,
7284 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
7285 bool outgoing)
7287 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
7290 /* Handle TARGET_LIBCALL_VALUE. */
7292 static rtx
7293 sparc_libcall_value (enum machine_mode mode,
7294 const_rtx fun ATTRIBUTE_UNUSED)
7296 return sparc_function_value_1 (NULL_TREE, mode, false);
7299 /* Handle FUNCTION_VALUE_REGNO_P.
7300 On the SPARC, the first "output" reg is used for integer values, and the
7301 first floating point register is used for floating point values. */
7303 static bool
7304 sparc_function_value_regno_p (const unsigned int regno)
7306 return (regno == 8 || regno == 32);
7309 /* Do what is necessary for `va_start'. We look at the current function
7310 to determine if stdarg or varargs is used and return the address of
7311 the first unnamed parameter. */
7313 static rtx
7314 sparc_builtin_saveregs (void)
7316 int first_reg = crtl->args.info.words;
7317 rtx address;
7318 int regno;
7320 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
7321 emit_move_insn (gen_rtx_MEM (word_mode,
7322 gen_rtx_PLUS (Pmode,
7323 frame_pointer_rtx,
7324 GEN_INT (FIRST_PARM_OFFSET (0)
7325 + (UNITS_PER_WORD
7326 * regno)))),
7327 gen_rtx_REG (word_mode,
7328 SPARC_INCOMING_INT_ARG_FIRST + regno));
7330 address = gen_rtx_PLUS (Pmode,
7331 frame_pointer_rtx,
7332 GEN_INT (FIRST_PARM_OFFSET (0)
7333 + UNITS_PER_WORD * first_reg));
7335 return address;
7338 /* Implement `va_start' for stdarg. */
7340 static void
7341 sparc_va_start (tree valist, rtx nextarg)
7343 nextarg = expand_builtin_saveregs ();
7344 std_expand_builtin_va_start (valist, nextarg);
7347 /* Implement `va_arg' for stdarg. */
7349 static tree
7350 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
7351 gimple_seq *post_p)
7353 HOST_WIDE_INT size, rsize, align;
7354 tree addr, incr;
7355 bool indirect;
7356 tree ptrtype = build_pointer_type (type);
7358 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
7360 indirect = true;
7361 size = rsize = UNITS_PER_WORD;
7362 align = 0;
7364 else
7366 indirect = false;
7367 size = int_size_in_bytes (type);
7368 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7369 align = 0;
7371 if (TARGET_ARCH64)
7373 /* For SPARC64, objects requiring 16-byte alignment get it. */
7374 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
7375 align = 2 * UNITS_PER_WORD;
7377 /* SPARC-V9 ABI states that structures up to 16 bytes in size
7378 are left-justified in their slots. */
7379 if (AGGREGATE_TYPE_P (type))
7381 if (size == 0)
7382 size = rsize = UNITS_PER_WORD;
7383 else
7384 size = rsize;
7389 incr = valist;
7390 if (align)
7392 incr = fold_build_pointer_plus_hwi (incr, align - 1);
7393 incr = fold_convert (sizetype, incr);
7394 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
7395 size_int (-align));
7396 incr = fold_convert (ptr_type_node, incr);
7399 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
7400 addr = incr;
7402 if (BYTES_BIG_ENDIAN && size < rsize)
7403 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
7405 if (indirect)
7407 addr = fold_convert (build_pointer_type (ptrtype), addr);
7408 addr = build_va_arg_indirect_ref (addr);
7411 /* If the address isn't aligned properly for the type, we need a temporary.
7412 FIXME: This is inefficient, usually we can do this in registers. */
7413 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
7415 tree tmp = create_tmp_var (type, "va_arg_tmp");
7416 tree dest_addr = build_fold_addr_expr (tmp);
7417 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
7418 3, dest_addr, addr, size_int (rsize));
7419 TREE_ADDRESSABLE (tmp) = 1;
7420 gimplify_and_add (copy, pre_p);
7421 addr = dest_addr;
7424 else
7425 addr = fold_convert (ptrtype, addr);
7427 incr = fold_build_pointer_plus_hwi (incr, rsize);
7428 gimplify_assign (valist, incr, post_p);
7430 return build_va_arg_indirect_ref (addr);
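#if 0
/* Editor's illustration (hypothetical user code, standard C): on
   SPARC64 each va_arg below advances the va_list pointer by one
   8-byte slot, matching the rsize computation above; a 16-byte type
   such as a quad-precision long double would first realign the
   pointer to 16 bytes.  */
#include <stdarg.h>
static double
sum_doubles (int n, ...)
{
  va_list ap;
  double s = 0.0;
  int i;
  va_start (ap, n);
  for (i = 0; i < n; i++)
    s += va_arg (ap, double);
  va_end (ap);
  return s;
}
#endif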
7433 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
7434 Specify whether the vector mode is supported by the hardware. */
7436 static bool
7437 sparc_vector_mode_supported_p (enum machine_mode mode)
7439 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
7442 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
7444 static enum machine_mode
7445 sparc_preferred_simd_mode (enum machine_mode mode)
7447 if (TARGET_VIS)
7448 switch (mode)
7450 case SImode:
7451 return V2SImode;
7452 case HImode:
7453 return V4HImode;
7454 case QImode:
7455 return V8QImode;
7457 default:;
7460 return word_mode;
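/* Editor's note: with VIS enabled the vectorizer is thus steered
   towards the 8-byte VIS modes -- SImode elements become V2SImode and
   QImode elements V8QImode; without VIS the hook returns word_mode,
   which effectively leaves such loops scalar.  */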
7463 /* Return the string to output an unconditional branch to LABEL, which is
7464 the operand number of the label.
7466 DEST is the destination insn (i.e. the label), INSN is the source. */
7468 const char *
7469 output_ubranch (rtx dest, rtx insn)
7471 static char string[64];
7472 bool v9_form = false;
7473 int delta;
7474 char *p;
7476 /* Even if we are trying to use cbcond for this, evaluate
7477 whether we can use V9 branches as our backup plan. */
7479 delta = 5000000;
7480 if (INSN_ADDRESSES_SET_P ())
7481 delta = (INSN_ADDRESSES (INSN_UID (dest))
7482 - INSN_ADDRESSES (INSN_UID (insn)));
7484 /* Leave some instructions for "slop". */
7485 if (TARGET_V9 && delta >= -260000 && delta < 260000)
7486 v9_form = true;
7488 if (TARGET_CBCOND)
7490 bool emit_nop = emit_cbcond_nop (insn);
7491 bool far = false;
7492 const char *rval;
7494 if (delta < -500 || delta > 500)
7495 far = true;
7497 if (far)
7499 if (v9_form)
7500 rval = "ba,a,pt\t%%xcc, %l0";
7501 else
7502 rval = "b,a\t%l0";
7504 else
7506 if (emit_nop)
7507 rval = "cwbe\t%%g0, %%g0, %l0\n\tnop";
7508 else
7509 rval = "cwbe\t%%g0, %%g0, %l0";
7511 return rval;
7514 if (v9_form)
7515 strcpy (string, "ba%*,pt\t%%xcc, ");
7516 else
7517 strcpy (string, "b%*\t");
7519 p = strchr (string, '\0');
7520 *p++ = '%';
7521 *p++ = 'l';
7522 *p++ = '0';
7523 *p++ = '%';
7524 *p++ = '(';
7525 *p = '\0';
7527 return string;
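/* Editor's sketch of typical results, assuming INSN_ADDRESSES are
   set: a nearby label on V9 yields roughly "ba%*,pt %xcc, %l0%(", a
   pre-V9 target "b%* %l0%(", and with cbcond a short-range branch
   becomes "cwbe %g0, %g0, %l0", an always-true compare-and-branch.  */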
7530 /* Return the string to output a conditional branch to LABEL, which is
7531 the operand number of the label. OP is the conditional expression.
7532 XEXP (OP, 0) is assumed to be a condition code register (integer or
7533 floating point) and its mode specifies what kind of comparison we made.
7535 DEST is the destination insn (i.e. the label), INSN is the source.
7537 REVERSED is nonzero if we should reverse the sense of the comparison.
7539 ANNUL is nonzero if we should generate an annulling branch. */
7541 const char *
7542 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7543 rtx insn)
7545 static char string[64];
7546 enum rtx_code code = GET_CODE (op);
7547 rtx cc_reg = XEXP (op, 0);
7548 enum machine_mode mode = GET_MODE (cc_reg);
7549 const char *labelno, *branch;
7550 int spaces = 8, far;
7551 char *p;
7553 /* v9 branches are limited to +-1MB. If it is too far away,
7554 change
7556 bne,pt %xcc, .LC30
7558 to
7560 be,pn %xcc, .+12
7561 nop
7562 ba .LC30
7564 and
7566 fbne,a,pn %fcc2, .LC29
7568 to
7570 fbe,pt %fcc2, .+16
7571 nop
7572 ba .LC29 */
7574 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7575 if (reversed ^ far)
7577 /* Reversal of FP compares takes care -- an ordered compare
7578 becomes an unordered compare and vice versa. */
7579 if (mode == CCFPmode || mode == CCFPEmode)
7580 code = reverse_condition_maybe_unordered (code);
7581 else
7582 code = reverse_condition (code);
7585 /* Start by writing the branch condition. */
7586 if (mode == CCFPmode || mode == CCFPEmode)
7588 switch (code)
7590 case NE:
7591 branch = "fbne";
7592 break;
7593 case EQ:
7594 branch = "fbe";
7595 break;
7596 case GE:
7597 branch = "fbge";
7598 break;
7599 case GT:
7600 branch = "fbg";
7601 break;
7602 case LE:
7603 branch = "fble";
7604 break;
7605 case LT:
7606 branch = "fbl";
7607 break;
7608 case UNORDERED:
7609 branch = "fbu";
7610 break;
7611 case ORDERED:
7612 branch = "fbo";
7613 break;
7614 case UNGT:
7615 branch = "fbug";
7616 break;
7617 case UNLT:
7618 branch = "fbul";
7619 break;
7620 case UNEQ:
7621 branch = "fbue";
7622 break;
7623 case UNGE:
7624 branch = "fbuge";
7625 break;
7626 case UNLE:
7627 branch = "fbule";
7628 break;
7629 case LTGT:
7630 branch = "fblg";
7631 break;
7633 default:
7634 gcc_unreachable ();
7637 /* ??? !v9: FP branches cannot be preceded by another floating point
7638 insn. Because there is currently no concept of pre-delay slots,
7639 we can fix this only by always emitting a nop before a floating
7640 point branch. */
7642 string[0] = '\0';
7643 if (! TARGET_V9)
7644 strcpy (string, "nop\n\t");
7645 strcat (string, branch);
7647 else
7649 switch (code)
7651 case NE:
7652 branch = "bne";
7653 break;
7654 case EQ:
7655 branch = "be";
7656 break;
7657 case GE:
7658 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7659 branch = "bpos";
7660 else
7661 branch = "bge";
7662 break;
7663 case GT:
7664 branch = "bg";
7665 break;
7666 case LE:
7667 branch = "ble";
7668 break;
7669 case LT:
7670 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7671 branch = "bneg";
7672 else
7673 branch = "bl";
7674 break;
7675 case GEU:
7676 branch = "bgeu";
7677 break;
7678 case GTU:
7679 branch = "bgu";
7680 break;
7681 case LEU:
7682 branch = "bleu";
7683 break;
7684 case LTU:
7685 branch = "blu";
7686 break;
7688 default:
7689 gcc_unreachable ();
7691 strcpy (string, branch);
7693 spaces -= strlen (branch);
7694 p = strchr (string, '\0');
7696 /* Now add the annulling, the label, and a possible nop. */
7697 if (annul && ! far)
7699 strcpy (p, ",a");
7700 p += 2;
7701 spaces -= 2;
7704 if (TARGET_V9)
7706 rtx note;
7707 int v8 = 0;
7709 if (! far && insn && INSN_ADDRESSES_SET_P ())
7711 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7712 - INSN_ADDRESSES (INSN_UID (insn)));
7713 /* Leave some instructions for "slop". */
7714 if (delta < -260000 || delta >= 260000)
7715 v8 = 1;
7718 if (mode == CCFPmode || mode == CCFPEmode)
7720 static char v9_fcc_labelno[] = "%%fccX, ";
7721 /* Set the char indicating the number of the fcc reg to use. */
7722 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7723 labelno = v9_fcc_labelno;
7724 if (v8)
7726 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7727 labelno = "";
7730 else if (mode == CCXmode || mode == CCX_NOOVmode)
7732 labelno = "%%xcc, ";
7733 gcc_assert (! v8);
7735 else
7737 labelno = "%%icc, ";
7738 if (v8)
7739 labelno = "";
7742 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7744 strcpy (p,
7745 ((XINT (note, 0) >= REG_BR_PROB_BASE / 2) ^ far)
7746 ? ",pt" : ",pn");
7747 p += 3;
7748 spaces -= 3;
7751 else
7752 labelno = "";
7754 if (spaces > 0)
7755 *p++ = '\t';
7756 else
7757 *p++ = ' ';
7758 strcpy (p, labelno);
7759 p = strchr (p, '\0');
7760 if (far)
7762 strcpy (p, ".+12\n\t nop\n\tb\t");
7763 /* Skip the next insn if requested or
7764 if we know that it will be a nop. */
7765 if (annul || ! final_sequence)
7766 p[3] = '6';
7767 p += 14;
7769 *p++ = '%';
7770 *p++ = 'l';
7771 *p++ = label + '0';
7772 *p++ = '%';
7773 *p++ = '#';
7774 *p = '\0';
7776 return string;
7779 /* Emit a library call comparison between floating point X and Y.
7780 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7781 Return the new operator to be used in the comparison sequence.
7783 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7784 values as arguments instead of the TFmode registers themselves,
7785 that's why we cannot call emit_float_lib_cmp. */
7787 rtx
7788 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7790 const char *qpfunc;
7791 rtx slot0, slot1, result, tem, tem2, libfunc;
7792 enum machine_mode mode;
7793 enum rtx_code new_comparison;
7795 switch (comparison)
7797 case EQ:
7798 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7799 break;
7801 case NE:
7802 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7803 break;
7805 case GT:
7806 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7807 break;
7809 case GE:
7810 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7811 break;
7813 case LT:
7814 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7815 break;
7817 case LE:
7818 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7819 break;
7821 case ORDERED:
7822 case UNORDERED:
7823 case UNGT:
7824 case UNLT:
7825 case UNEQ:
7826 case UNGE:
7827 case UNLE:
7828 case LTGT:
7829 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7830 break;
7832 default:
7833 gcc_unreachable ();
7836 if (TARGET_ARCH64)
7838 if (MEM_P (x))
7840 tree expr = MEM_EXPR (x);
7841 if (expr)
7842 mark_addressable (expr);
7843 slot0 = x;
7845 else
7847 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
7848 emit_move_insn (slot0, x);
7851 if (MEM_P (y))
7853 tree expr = MEM_EXPR (y);
7854 if (expr)
7855 mark_addressable (expr);
7856 slot1 = y;
7858 else
7860 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode));
7861 emit_move_insn (slot1, y);
7864 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7865 emit_library_call (libfunc, LCT_NORMAL,
7866 DImode, 2,
7867 XEXP (slot0, 0), Pmode,
7868 XEXP (slot1, 0), Pmode);
7869 mode = DImode;
7871 else
7873 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7874 emit_library_call (libfunc, LCT_NORMAL,
7875 SImode, 2,
7876 x, TFmode, y, TFmode);
7877 mode = SImode;
7881 /* Immediately move the result of the libcall into a pseudo
7882 register so reload doesn't clobber the value if it needs
7883 the return register for a spill reg. */
7884 result = gen_reg_rtx (mode);
7885 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7887 switch (comparison)
7889 default:
7890 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7891 case ORDERED:
7892 case UNORDERED:
7893 new_comparison = (comparison == UNORDERED ? EQ : NE);
7894 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
7895 case UNGT:
7896 case UNGE:
7897 new_comparison = (comparison == UNGT ? GT : NE);
7898 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7899 case UNLE:
7900 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7901 case UNLT:
7902 tem = gen_reg_rtx (mode);
7903 if (TARGET_ARCH32)
7904 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7905 else
7906 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7907 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7908 case UNEQ:
7909 case LTGT:
7910 tem = gen_reg_rtx (mode);
7911 if (TARGET_ARCH32)
7912 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7913 else
7914 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7915 tem2 = gen_reg_rtx (mode);
7916 if (TARGET_ARCH32)
7917 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7918 else
7919 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7920 new_comparison = (comparison == UNEQ ? EQ : NE);
7921 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7924 gcc_unreachable ();
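/* A standalone sketch (not compiler code) of the decoding above, assuming
   the Sun convention that _Q_cmp/_Qp_cmp return 0 for equal, 1 for less,
   2 for greater and 3 for unordered; function names are illustrative.  */
#if 0
static int is_unordered (int r) { return r == 3; }        /* UNORDERED */
static int is_ungt (int r) { return r > 1; }              /* UNGT: greater or unordered */
static int is_unge (int r) { return r != 1; }             /* UNGE: not less */
static int is_unle (int r) { return r != 2; }             /* UNLE: not greater */
static int is_unlt (int r) { return (r & 1) != 0; }       /* UNLT: less or unordered */
static int is_uneq (int r) { return ((r + 1) & 2) == 0; } /* UNEQ: equal or unordered */
#endif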
7927 /* Generate an unsigned DImode to FP conversion. This is the same code
7928 optabs would emit if we didn't have TFmode patterns. */
7930 void
7931 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7933 rtx neglab, donelab, i0, i1, f0, in, out;
7935 out = operands[0];
7936 in = force_reg (DImode, operands[1]);
7937 neglab = gen_label_rtx ();
7938 donelab = gen_label_rtx ();
7939 i0 = gen_reg_rtx (DImode);
7940 i1 = gen_reg_rtx (DImode);
7941 f0 = gen_reg_rtx (mode);
7943 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7945 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7946 emit_jump_insn (gen_jump (donelab));
7947 emit_barrier ();
7949 emit_label (neglab);
7951 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7952 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7953 emit_insn (gen_iordi3 (i0, i0, i1));
7954 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7955 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7957 emit_label (donelab);
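/* A standalone sketch of the sequence emitted above, assuming IEEE
   arithmetic and a 64-bit unsigned long long; the function and variable
   names are illustrative only and mirror the RTL, not compiler code.  */
#if 0
static float
floatunsdi_sketch (unsigned long long in)
{
  if ((long long) in >= 0)
    return (float) (long long) in;              /* fits the signed range */
  /* Halve, folding the lost bit back in so rounding stays correct,
     convert, then double.  */
  unsigned long long i0 = (in >> 1) | (in & 1);
  float f0 = (float) (long long) i0;
  return f0 + f0;
}
#endif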
7960 /* Generate an FP to unsigned DImode conversion. This is the same code
7961 optabs would emit if we didn't have TFmode patterns. */
7963 void
7964 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7966 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7968 out = operands[0];
7969 in = force_reg (mode, operands[1]);
7970 neglab = gen_label_rtx ();
7971 donelab = gen_label_rtx ();
7972 i0 = gen_reg_rtx (DImode);
7973 i1 = gen_reg_rtx (DImode);
7974 limit = gen_reg_rtx (mode);
7975 f0 = gen_reg_rtx (mode);
7977 emit_move_insn (limit,
7978 CONST_DOUBLE_FROM_REAL_VALUE (
7979 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7980 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7982 emit_insn (gen_rtx_SET (VOIDmode,
7983 out,
7984 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7985 emit_jump_insn (gen_jump (donelab));
7986 emit_barrier ();
7988 emit_label (neglab);
7990 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7991 emit_insn (gen_rtx_SET (VOIDmode,
7992 i0,
7993 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7994 emit_insn (gen_movdi (i1, const1_rtx));
7995 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7996 emit_insn (gen_xordi3 (out, i0, i1));
7998 emit_label (donelab);
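/* A standalone sketch of the sequence emitted above, assuming IEEE double
   and a 64-bit unsigned result; LIM plays the role of 'limit' (2^63) and
   the names are illustrative only.  */
#if 0
#include <stdint.h>
static uint64_t
fixunsdi_sketch (double in)
{
  const double LIM = 9223372036854775808.0;     /* 2^63 */
  if (in < LIM)
    return (uint64_t) (int64_t) in;             /* fits the signed range */
  /* Bias down by 2^63, convert, then restore the top bit with XOR.  */
  return (uint64_t) (int64_t) (in - LIM) ^ ((uint64_t) 1 << 63);
}
#endif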
8001 /* Return the string to output a compare and branch instruction to DEST.
8002 DEST is the destination insn (i.e. the label), INSN is the source,
8003 and OP is the conditional expression. */
8005 const char *
8006 output_cbcond (rtx op, rtx dest, rtx insn)
8008 enum machine_mode mode = GET_MODE (XEXP (op, 0));
8009 enum rtx_code code = GET_CODE (op);
8010 const char *cond_str, *tmpl;
8011 int far, emit_nop, len;
8012 static char string[64];
8013 char size_char;
8015 /* Compare and Branch is limited to +-2KB. If it is too far away,
8016 change
8018 cxbne X, Y, .LC30
8020 to
8022 cxbe X, Y, .+16
8023 nop
8024 ba,pt xcc, .LC30
8025 nop */
8027 len = get_attr_length (insn);
8029 far = len == 4;
8030 emit_nop = len == 2;
8032 if (far)
8033 code = reverse_condition (code);
8035 size_char = ((mode == SImode) ? 'w' : 'x');
8037 switch (code)
8039 case NE:
8040 cond_str = "ne";
8041 break;
8043 case EQ:
8044 cond_str = "e";
8045 break;
8047 case GE:
8048 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
8049 cond_str = "pos";
8050 else
8051 cond_str = "ge";
8052 break;
8054 case GT:
8055 cond_str = "g";
8056 break;
8058 case LE:
8059 cond_str = "le";
8060 break;
8062 case LT:
8063 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
8064 cond_str = "neg";
8065 else
8066 cond_str = "l";
8067 break;
8069 case GEU:
8070 cond_str = "cc";
8071 break;
8073 case GTU:
8074 cond_str = "gu";
8075 break;
8077 case LEU:
8078 cond_str = "leu";
8079 break;
8081 case LTU:
8082 cond_str = "cs";
8083 break;
8085 default:
8086 gcc_unreachable ();
8089 if (far)
8091 int veryfar = 1, delta;
8093 if (INSN_ADDRESSES_SET_P ())
8095 delta = (INSN_ADDRESSES (INSN_UID (dest))
8096 - INSN_ADDRESSES (INSN_UID (insn)));
8097 /* Leave some instructions for "slop". */
8098 if (delta >= -260000 && delta < 260000)
8099 veryfar = 0;
8102 if (veryfar)
8103 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tb\t%%3\n\tnop";
8104 else
8105 tmpl = "c%cb%s\t%%1, %%2, .+16\n\tnop\n\tba,pt\t%%%%xcc, %%3\n\tnop";
8107 else
8109 if (emit_nop)
8110 tmpl = "c%cb%s\t%%1, %%2, %%3\n\tnop";
8111 else
8112 tmpl = "c%cb%s\t%%1, %%2, %%3";
8115 snprintf (string, sizeof(string), tmpl, size_char, cond_str);
8117 return string;
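/* For illustration, a standalone sketch of the template expansion done by
   the snprintf above: SImode ('w') and EQ ("e") produce the cwbe mnemonic,
   with the %1/%2/%3 operand placeholders filled in later by final.  */
#if 0
#include <stdio.h>
int
main (void)
{
  char buf[64];
  snprintf (buf, sizeof (buf), "c%cb%s\t%%1, %%2, %%3", 'w', "e");
  puts (buf);   /* prints: cwbe	%1, %2, %3 */
  return 0;
}
#endif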
8120 /* Return the string to output a conditional branch to LABEL, testing
8121 register REG. LABEL is the operand number of the label; REG is the
8122 operand number of the reg. OP is the conditional expression. The mode
8123 of REG says what kind of comparison we made.
8125 DEST is the destination insn (i.e. the label), INSN is the source.
8127 REVERSED is nonzero if we should reverse the sense of the comparison.
8129 ANNUL is nonzero if we should generate an annulling branch. */
8131 const char *
8132 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
8133 int annul, rtx insn)
8135 static char string[64];
8136 enum rtx_code code = GET_CODE (op);
8137 enum machine_mode mode = GET_MODE (XEXP (op, 0));
8138 rtx note;
8139 int far;
8140 char *p;
8142 /* Branch-on-register instructions are limited to +-128KB. If it is too far away,
8143 change
8145 brnz,pt %g1, .LC30
8147 to
8149 brz,pn %g1, .+12
8150 nop
8151 ba,pt %xcc, .LC30
8153 and
8155 brgez,a,pn %o1, .LC29
8157 to
8159 brlz,pt %o1, .+16
8160 nop
8161 ba,pt %xcc, .LC29 */
8163 far = get_attr_length (insn) >= 3;
8165 /* If not floating-point or if EQ or NE, we can just reverse the code. */
8166 if (reversed ^ far)
8167 code = reverse_condition (code);
8169 /* Only 64 bit versions of these instructions exist. */
8170 gcc_assert (mode == DImode);
8172 /* Start by writing the branch condition. */
8174 switch (code)
8176 case NE:
8177 strcpy (string, "brnz");
8178 break;
8180 case EQ:
8181 strcpy (string, "brz");
8182 break;
8184 case GE:
8185 strcpy (string, "brgez");
8186 break;
8188 case LT:
8189 strcpy (string, "brlz");
8190 break;
8192 case LE:
8193 strcpy (string, "brlez");
8194 break;
8196 case GT:
8197 strcpy (string, "brgz");
8198 break;
8200 default:
8201 gcc_unreachable ();
8204 p = strchr (string, '\0');
8206 /* Now add the annulling, reg, label, and nop. */
8207 if (annul && ! far)
8209 strcpy (p, ",a");
8210 p += 2;
8213 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
8215 strcpy (p,
8216 ((XINT (note, 0) >= REG_BR_PROB_BASE / 2) ^ far)
8217 ? ",pt" : ",pn");
8218 p += 3;
8221 *p = p < string + 8 ? '\t' : ' ';
8222 p++;
8223 *p++ = '%';
8224 *p++ = '0' + reg;
8225 *p++ = ',';
8226 *p++ = ' ';
8227 if (far)
8229 int veryfar = 1, delta;
8231 if (INSN_ADDRESSES_SET_P ())
8233 delta = (INSN_ADDRESSES (INSN_UID (dest))
8234 - INSN_ADDRESSES (INSN_UID (insn)));
8235 /* Leave some instructions for "slop". */
8236 if (delta >= -260000 && delta < 260000)
8237 veryfar = 0;
8240 strcpy (p, ".+12\n\t nop\n\t");
8241 /* Skip the next insn if requested or
8242 if we know that it will be a nop. */
8243 if (annul || ! final_sequence)
8244 p[3] = '6';
8245 p += 12;
8246 if (veryfar)
8248 strcpy (p, "b\t");
8249 p += 2;
8251 else
8253 strcpy (p, "ba,pt\t%%xcc, ");
8254 p += 13;
8257 *p++ = '%';
8258 *p++ = 'l';
8259 *p++ = '0' + label;
8260 *p++ = '%';
8261 *p++ = '#';
8262 *p = '\0';
8264 return string;
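/* A minimal sketch of the prediction-hint selection used above: once a
   branch has been reversed for the 'far' rewrite, its prediction must be
   flipped as well, hence the XOR (name illustrative).  */
#if 0
static const char *
hint_sketch (int prob_ge_half, int far)
{
  return (prob_ge_half ^ far) ? ",pt" : ",pn";  /* predict taken / not taken */
}
#endif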
8267 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
8268 Such instructions cannot be used in the delay slot of a return insn on v9.
8269 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
8270 */
8272 static int
8273 epilogue_renumber (register rtx *where, int test)
8275 register const char *fmt;
8276 register int i;
8277 register enum rtx_code code;
8279 if (*where == 0)
8280 return 0;
8282 code = GET_CODE (*where);
8284 switch (code)
8286 case REG:
8287 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
8288 return 1;
8289 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
8290 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
8291 case SCRATCH:
8292 case CC0:
8293 case PC:
8294 case CONST_INT:
8295 case CONST_DOUBLE:
8296 return 0;
8298 /* Do not replace the frame pointer with the stack pointer because
8299 it can cause the delayed instruction to load below the stack.
8300 This occurs when instructions like:
8302 (set (reg/i:SI 24 %i0)
8303 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
8304 (const_int -20 [0xffffffec])) 0))
8306 are in the return delay slot. */
8307 case PLUS:
8308 if (GET_CODE (XEXP (*where, 0)) == REG
8309 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
8310 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
8311 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
8312 return 1;
8313 break;
8315 case MEM:
8316 if (SPARC_STACK_BIAS
8317 && GET_CODE (XEXP (*where, 0)) == REG
8318 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
8319 return 1;
8320 break;
8322 default:
8323 break;
8326 fmt = GET_RTX_FORMAT (code);
8328 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8330 if (fmt[i] == 'E')
8332 register int j;
8333 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
8334 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
8335 return 1;
8337 else if (fmt[i] == 'e'
8338 && epilogue_renumber (&(XEXP (*where, i)), test))
8339 return 1;
8341 return 0;
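/* A sketch of the renaming performed above, assuming the usual SPARC
   register numbering: %g0-%g7 = 0-7, %o0-%o7 = 8-15, %l0-%l7 = 16-23 and
   %i0-%i7 = 24-31, so that OUTGOING_REGNO amounts to subtracting 16.  */
#if 0
static int
outgoing_regno_sketch (int regno)
{
  return (regno >= 24 && regno < 32) ? regno - 16 : regno;  /* %iN -> %oN */
}
#endif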
8344 /* Leaf functions and non-leaf functions have different needs. */
8346 static const int
8347 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
8349 static const int
8350 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
8352 static const int *const reg_alloc_orders[] = {
8353 reg_leaf_alloc_order,
8354 reg_nonleaf_alloc_order};
8356 void
8357 order_regs_for_local_alloc (void)
8359 static int last_order_nonleaf = 1;
8361 if (df_regs_ever_live_p (15) != last_order_nonleaf)
8363 last_order_nonleaf = !last_order_nonleaf;
8364 memcpy ((char *) reg_alloc_order,
8365 (const char *) reg_alloc_orders[last_order_nonleaf],
8366 FIRST_PSEUDO_REGISTER * sizeof (int));
8370 /* Return 1 if REG and MEM are legitimate enough to allow the various
8371 mem<-->reg splits to be run. */
8373 int
8374 sparc_splitdi_legitimate (rtx reg, rtx mem)
8376 /* Punt if we are here by mistake. */
8377 gcc_assert (reload_completed);
8379 /* We must have an offsettable memory reference. */
8380 if (! offsettable_memref_p (mem))
8381 return 0;
8383 /* If we have legitimate args for ldd/std, we do not want
8384 the split to happen. */
8385 if ((REGNO (reg) % 2) == 0
8386 && mem_min_alignment (mem, 8))
8387 return 0;
8389 /* Success. */
8390 return 1;
8393 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
8395 int
8396 sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
8398 int regno1, regno2;
8400 if (GET_CODE (reg1) == SUBREG)
8401 reg1 = SUBREG_REG (reg1);
8402 if (GET_CODE (reg1) != REG)
8403 return 0;
8404 regno1 = REGNO (reg1);
8406 if (GET_CODE (reg2) == SUBREG)
8407 reg2 = SUBREG_REG (reg2);
8408 if (GET_CODE (reg2) != REG)
8409 return 0;
8410 regno2 = REGNO (reg2);
8412 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
8413 return 1;
8415 if (TARGET_VIS3)
8417 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
8418 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
8419 return 1;
8422 return 0;
8425 /* Return 1 if x and y are some kind of REG and they refer to
8426 different hard registers. This test is guaranteed to be
8427 run after reload. */
8429 int
8430 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
8432 if (GET_CODE (x) != REG)
8433 return 0;
8434 if (GET_CODE (y) != REG)
8435 return 0;
8436 if (REGNO (x) == REGNO (y))
8437 return 0;
8438 return 1;
8441 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
8442 This makes them candidates for using ldd and std insns.
8444 Note reg1 and reg2 *must* be hard registers. */
8446 int
8447 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
8449 /* We might have been passed a SUBREG. */
8450 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
8451 return 0;
8453 if (REGNO (reg1) % 2 != 0)
8454 return 0;
8456 /* Integer ldd is deprecated in SPARC V9. */
8457 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
8458 return 0;
8460 return (REGNO (reg1) == REGNO (reg2) - 1);
8463 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
8464 an ldd or std insn.
8466 This can only happen when addr1 and addr2, the addresses in mem1
8467 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
8468 addr1 must also be aligned on a 64-bit boundary.
8470 Also, if dependent_reg_rtx is not null, it should not be used to
8471 compute the address for mem1, i.e. we cannot optimize a sequence
8472 like:
8473 ld [%o0], %o0
8474 ld [%o0 + 4], %o1
8475 to
8476 ldd [%o0], %o0
8477 nor:
8478 ld [%g3 + 4], %g3
8479 ld [%g3], %g2
8480 to
8481 ldd [%g3], %g2
8483 But, note that the transformation from:
8484 ld [%g2 + 4], %g3
8485 ld [%g2], %g2
8486 to
8487 ldd [%g2], %g2
8488 is perfectly fine. Thus, the peephole2 patterns always pass us
8489 the destination register of the first load, never the second one.
8491 For stores we don't have a similar problem, so dependent_reg_rtx is
8492 NULL_RTX. */
8494 int
8495 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
8497 rtx addr1, addr2;
8498 unsigned int reg1;
8499 HOST_WIDE_INT offset1;
8501 /* The mems cannot be volatile. */
8502 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
8503 return 0;
8505 /* MEM1 should be aligned on a 64-bit boundary. */
8506 if (MEM_ALIGN (mem1) < 64)
8507 return 0;
8509 addr1 = XEXP (mem1, 0);
8510 addr2 = XEXP (mem2, 0);
8512 /* Extract a register number and offset (if used) from the first addr. */
8513 if (GET_CODE (addr1) == PLUS)
8515 /* If not a REG, return zero. */
8516 if (GET_CODE (XEXP (addr1, 0)) != REG)
8517 return 0;
8518 else
8520 reg1 = REGNO (XEXP (addr1, 0));
8521 /* The offset must be constant! */
8522 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
8523 return 0;
8524 offset1 = INTVAL (XEXP (addr1, 1));
8527 else if (GET_CODE (addr1) != REG)
8528 return 0;
8529 else
8531 reg1 = REGNO (addr1);
8532 /* This was a simple (mem (reg)) expression. Offset is 0. */
8533 offset1 = 0;
8536 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
8537 if (GET_CODE (addr2) != PLUS)
8538 return 0;
8540 if (GET_CODE (XEXP (addr2, 0)) != REG
8541 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
8542 return 0;
8544 if (reg1 != REGNO (XEXP (addr2, 0)))
8545 return 0;
8547 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
8548 return 0;
8550 /* The first offset must be evenly divisible by 8 to ensure the
8551 address is 64 bit aligned. */
8552 if (offset1 % 8 != 0)
8553 return 0;
8555 /* The offset for the second addr must be 4 more than the first addr. */
8556 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
8557 return 0;
8559 /* All the tests passed. addr1 and addr2 are valid for ldd and std
8560 instructions. */
8561 return 1;
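/* A standalone sketch of the address test above, assuming both addresses
   have already been reduced to a (base register, constant offset) pair as
   in the REG and PLUS cases handled by the function.  */
#if 0
static int
mems_pairable_sketch (unsigned reg1, long off1, unsigned reg2, long off2)
{
  return (reg1 == reg2             /* same base register */
          && off1 % 8 == 0         /* first word 64-bit aligned */
          && off2 == off1 + 4);    /* second word immediately after */
}
#endif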
8564 /* Return the widened memory access made of MEM1 and MEM2 in MODE. */
8566 rtx
8567 widen_mem_for_ldd_peep (rtx mem1, rtx mem2, enum machine_mode mode)
8569 rtx x = widen_memory_access (mem1, mode, 0);
8570 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (mem1) && MEM_NOTRAP_P (mem2);
8571 return x;
8574 /* Return 1 if reg is a pseudo, or is the first register in
8575 a hard register pair. This makes it suitable for use in
8576 ldd and std insns. */
8578 int
8579 register_ok_for_ldd (rtx reg)
8581 /* We might have been passed a SUBREG. */
8582 if (!REG_P (reg))
8583 return 0;
8585 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
8586 return (REGNO (reg) % 2 == 0);
8588 return 1;
8591 /* Return 1 if OP, a MEM, has an address which is known to be
8592 aligned to an 8-byte boundary. */
8594 int
8595 memory_ok_for_ldd (rtx op)
8597 /* In 64-bit mode, we assume that the address is word-aligned. */
8598 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
8599 return 0;
8601 if (! can_create_pseudo_p ()
8602 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
8603 return 0;
8605 return 1;
8608 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8610 static bool
8611 sparc_print_operand_punct_valid_p (unsigned char code)
8613 if (code == '#'
8614 || code == '*'
8615 || code == '('
8616 || code == ')'
8617 || code == '_'
8618 || code == '&')
8619 return true;
8621 return false;
8624 /* Implement TARGET_PRINT_OPERAND.
8625 Print operand X (an rtx) in assembler syntax to file FILE.
8626 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8627 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8629 static void
8630 sparc_print_operand (FILE *file, rtx x, int code)
8632 switch (code)
8634 case '#':
8635 /* Output an insn in a delay slot. */
8636 if (final_sequence)
8637 sparc_indent_opcode = 1;
8638 else
8639 fputs ("\n\t nop", file);
8640 return;
8641 case '*':
8642 /* Output an annul flag if there's nothing for the delay slot and we
8643 are optimizing. This is always used with '(' below.
8644 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8645 this is a dbx bug. So, we only do this when optimizing.
8646 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8647 Always emit a nop in case the next instruction is a branch. */
8648 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8649 fputs (",a", file);
8650 return;
8651 case '(':
8652 /* Output a 'nop' if there's nothing for the delay slot and we are
8653 not optimizing. This is always used with '*' above. */
8654 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8655 fputs ("\n\t nop", file);
8656 else if (final_sequence)
8657 sparc_indent_opcode = 1;
8658 return;
8659 case ')':
8660 /* Output the right displacement from the saved PC on function return.
8661 The caller may have placed an "unimp" insn immediately after the call
8662 so we have to account for it. This insn is used in the 32-bit ABI
8663 when calling a function that returns a non-zero-sized structure. The
8664 64-bit ABI doesn't have it. Be careful to have this test be the same
8665 as that for the call. The exception is when sparc_std_struct_return
8666 is enabled, the psABI is followed exactly and the adjustment is made
8667 by the code in sparc_struct_value_rtx. The call emitted is the same
8668 when sparc_std_struct_return is enabled. */
8669 if (!TARGET_ARCH64
8670 && cfun->returns_struct
8671 && !sparc_std_struct_return
8672 && DECL_SIZE (DECL_RESULT (current_function_decl))
8673 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8674 == INTEGER_CST
8675 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8676 fputs ("12", file);
8677 else
8678 fputc ('8', file);
8679 return;
8680 case '_':
8681 /* Output the Embedded Medium/Anywhere code model base register. */
8682 fputs (EMBMEDANY_BASE_REG, file);
8683 return;
8684 case '&':
8685 /* Print some local dynamic TLS name. */
8686 assemble_name (file, get_some_local_dynamic_name ());
8687 return;
8689 case 'Y':
8690 /* Adjust the operand to take into account a RESTORE operation. */
8691 if (GET_CODE (x) == CONST_INT)
8692 break;
8693 else if (GET_CODE (x) != REG)
8694 output_operand_lossage ("invalid %%Y operand");
8695 else if (REGNO (x) < 8)
8696 fputs (reg_names[REGNO (x)], file);
8697 else if (REGNO (x) >= 24 && REGNO (x) < 32)
8698 fputs (reg_names[REGNO (x)-16], file);
8699 else
8700 output_operand_lossage ("invalid %%Y operand");
8701 return;
8702 case 'L':
8703 /* Print out the low order register name of a register pair. */
8704 if (WORDS_BIG_ENDIAN)
8705 fputs (reg_names[REGNO (x)+1], file);
8706 else
8707 fputs (reg_names[REGNO (x)], file);
8708 return;
8709 case 'H':
8710 /* Print out the high order register name of a register pair. */
8711 if (WORDS_BIG_ENDIAN)
8712 fputs (reg_names[REGNO (x)], file);
8713 else
8714 fputs (reg_names[REGNO (x)+1], file);
8715 return;
8716 case 'R':
8717 /* Print out the second register name of a register pair or quad.
8718 I.e., R (%o0) => %o1. */
8719 fputs (reg_names[REGNO (x)+1], file);
8720 return;
8721 case 'S':
8722 /* Print out the third register name of a register quad.
8723 I.e., S (%o0) => %o2. */
8724 fputs (reg_names[REGNO (x)+2], file);
8725 return;
8726 case 'T':
8727 /* Print out the fourth register name of a register quad.
8728 I.e., T (%o0) => %o3. */
8729 fputs (reg_names[REGNO (x)+3], file);
8730 return;
8731 case 'x':
8732 /* Print a condition code register. */
8733 if (REGNO (x) == SPARC_ICC_REG)
8735 /* We don't handle CC[X]_NOOVmode because they're not supposed
8736 to occur here. */
8737 if (GET_MODE (x) == CCmode)
8738 fputs ("%icc", file);
8739 else if (GET_MODE (x) == CCXmode)
8740 fputs ("%xcc", file);
8741 else
8742 gcc_unreachable ();
8744 else
8745 /* %fccN register */
8746 fputs (reg_names[REGNO (x)], file);
8747 return;
8748 case 'm':
8749 /* Print the operand's address only. */
8750 output_address (XEXP (x, 0));
8751 return;
8752 case 'r':
8753 /* In this case we need a register. Use %g0 if the
8754 operand is const0_rtx. */
8755 if (x == const0_rtx
8756 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
8758 fputs ("%g0", file);
8759 return;
8761 else
8762 break;
8764 case 'A':
8765 switch (GET_CODE (x))
8767 case IOR: fputs ("or", file); break;
8768 case AND: fputs ("and", file); break;
8769 case XOR: fputs ("xor", file); break;
8770 default: output_operand_lossage ("invalid %%A operand");
8772 return;
8774 case 'B':
8775 switch (GET_CODE (x))
8777 case IOR: fputs ("orn", file); break;
8778 case AND: fputs ("andn", file); break;
8779 case XOR: fputs ("xnor", file); break;
8780 default: output_operand_lossage ("invalid %%B operand");
8782 return;
8784 /* This is used by the conditional move instructions. */
8785 case 'C':
8787 enum rtx_code rc = GET_CODE (x);
8789 switch (rc)
8791 case NE: fputs ("ne", file); break;
8792 case EQ: fputs ("e", file); break;
8793 case GE: fputs ("ge", file); break;
8794 case GT: fputs ("g", file); break;
8795 case LE: fputs ("le", file); break;
8796 case LT: fputs ("l", file); break;
8797 case GEU: fputs ("geu", file); break;
8798 case GTU: fputs ("gu", file); break;
8799 case LEU: fputs ("leu", file); break;
8800 case LTU: fputs ("lu", file); break;
8801 case LTGT: fputs ("lg", file); break;
8802 case UNORDERED: fputs ("u", file); break;
8803 case ORDERED: fputs ("o", file); break;
8804 case UNLT: fputs ("ul", file); break;
8805 case UNLE: fputs ("ule", file); break;
8806 case UNGT: fputs ("ug", file); break;
8807 case UNGE: fputs ("uge", file); break;
8808 case UNEQ: fputs ("ue", file); break;
8809 default: output_operand_lossage ("invalid %%C operand");
8811 return;
8814 /* These are used by the movr instruction pattern. */
8815 case 'D':
8817 enum rtx_code rc = GET_CODE (x);
8818 switch (rc)
8820 case NE: fputs ("ne", file); break;
8821 case EQ: fputs ("e", file); break;
8822 case GE: fputs ("gez", file); break;
8823 case LT: fputs ("lz", file); break;
8824 case LE: fputs ("lez", file); break;
8825 case GT: fputs ("gz", file); break;
8826 default: output_operand_lossage ("invalid %%D operand");
8828 return;
8831 case 'b':
8833 /* Print a sign-extended character. */
8834 int i = trunc_int_for_mode (INTVAL (x), QImode);
8835 fprintf (file, "%d", i);
8836 return;
8839 case 'f':
8840 /* Operand must be a MEM; write its address. */
8841 if (GET_CODE (x) != MEM)
8842 output_operand_lossage ("invalid %%f operand");
8843 output_address (XEXP (x, 0));
8844 return;
8846 case 's':
8848 /* Print a sign-extended 32-bit value. */
8849 HOST_WIDE_INT i;
8850 if (GET_CODE(x) == CONST_INT)
8851 i = INTVAL (x);
8852 else if (GET_CODE(x) == CONST_DOUBLE)
8853 i = CONST_DOUBLE_LOW (x);
8854 else
8856 output_operand_lossage ("invalid %%s operand");
8857 return;
8859 i = trunc_int_for_mode (i, SImode);
8860 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8861 return;
8864 case 0:
8865 /* Do nothing special. */
8866 break;
8868 default:
8869 /* Undocumented flag. */
8870 output_operand_lossage ("invalid operand output code");
8873 if (GET_CODE (x) == REG)
8874 fputs (reg_names[REGNO (x)], file);
8875 else if (GET_CODE (x) == MEM)
8877 fputc ('[', file);
8878 /* Poor Sun assembler doesn't understand absolute addressing. */
8879 if (CONSTANT_P (XEXP (x, 0)))
8880 fputs ("%g0+", file);
8881 output_address (XEXP (x, 0));
8882 fputc (']', file);
8884 else if (GET_CODE (x) == HIGH)
8886 fputs ("%hi(", file);
8887 output_addr_const (file, XEXP (x, 0));
8888 fputc (')', file);
8890 else if (GET_CODE (x) == LO_SUM)
8892 sparc_print_operand (file, XEXP (x, 0), 0);
8893 if (TARGET_CM_MEDMID)
8894 fputs ("+%l44(", file);
8895 else
8896 fputs ("+%lo(", file);
8897 output_addr_const (file, XEXP (x, 1));
8898 fputc (')', file);
8900 else if (GET_CODE (x) == CONST_DOUBLE
8901 && (GET_MODE (x) == VOIDmode
8902 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8904 if (CONST_DOUBLE_HIGH (x) == 0)
8905 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8906 else if (CONST_DOUBLE_HIGH (x) == -1
8907 && CONST_DOUBLE_LOW (x) < 0)
8908 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8909 else
8910 output_operand_lossage ("long long constant not a valid immediate operand");
8912 else if (GET_CODE (x) == CONST_DOUBLE)
8913 output_operand_lossage ("floating point constant not a valid immediate operand");
8914 else { output_addr_const (file, x); }
8917 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8919 static void
8920 sparc_print_operand_address (FILE *file, rtx x)
8922 register rtx base, index = 0;
8923 int offset = 0;
8924 register rtx addr = x;
8926 if (REG_P (addr))
8927 fputs (reg_names[REGNO (addr)], file);
8928 else if (GET_CODE (addr) == PLUS)
8930 if (CONST_INT_P (XEXP (addr, 0)))
8931 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8932 else if (CONST_INT_P (XEXP (addr, 1)))
8933 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8934 else
8935 base = XEXP (addr, 0), index = XEXP (addr, 1);
8936 if (GET_CODE (base) == LO_SUM)
8938 gcc_assert (USE_AS_OFFSETABLE_LO10
8939 && TARGET_ARCH64
8940 && ! TARGET_CM_MEDMID);
8941 output_operand (XEXP (base, 0), 0);
8942 fputs ("+%lo(", file);
8943 output_address (XEXP (base, 1));
8944 fprintf (file, ")+%d", offset);
8946 else
8948 fputs (reg_names[REGNO (base)], file);
8949 if (index == 0)
8950 fprintf (file, "%+d", offset);
8951 else if (REG_P (index))
8952 fprintf (file, "+%s", reg_names[REGNO (index)]);
8953 else if (GET_CODE (index) == SYMBOL_REF
8954 || GET_CODE (index) == LABEL_REF
8955 || GET_CODE (index) == CONST)
8956 fputc ('+', file), output_addr_const (file, index);
8957 else gcc_unreachable ();
8960 else if (GET_CODE (addr) == MINUS
8961 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8963 output_addr_const (file, XEXP (addr, 0));
8964 fputs ("-(", file);
8965 output_addr_const (file, XEXP (addr, 1));
8966 fputs ("-.)", file);
8968 else if (GET_CODE (addr) == LO_SUM)
8970 output_operand (XEXP (addr, 0), 0);
8971 if (TARGET_CM_MEDMID)
8972 fputs ("+%l44(", file);
8973 else
8974 fputs ("+%lo(", file);
8975 output_address (XEXP (addr, 1));
8976 fputc (')', file);
8978 else if (flag_pic
8979 && GET_CODE (addr) == CONST
8980 && GET_CODE (XEXP (addr, 0)) == MINUS
8981 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8982 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8983 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8985 addr = XEXP (addr, 0);
8986 output_addr_const (file, XEXP (addr, 0));
8987 /* Group the args of the second CONST in parenthesis. */
8988 fputs ("-(", file);
8989 /* Skip past the second CONST--it does nothing for us. */
8990 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8991 /* Close the parenthesis. */
8992 fputc (')', file);
8994 else
8996 output_addr_const (file, addr);
9000 /* Target hook for assembling integer objects. The sparc version has
9001 special handling for aligned DI-mode objects. */
9003 static bool
9004 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
9006 /* ??? We only output .xword's for symbols and only then in environments
9007 where the assembler can handle them. */
9008 if (aligned_p && size == 8
9009 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
9011 if (TARGET_V9)
9013 assemble_integer_with_op ("\t.xword\t", x);
9014 return true;
9016 else
9018 assemble_aligned_integer (4, const0_rtx);
9019 assemble_aligned_integer (4, x);
9020 return true;
9023 return default_assemble_integer (x, size, aligned_p);
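/* A sketch of the non-V9 fallback above, assuming a symbolic 32-bit value:
   the aligned 8-byte object is emitted as a zero high word followed by the
   32-bit address (directive spelling and names are illustrative only).  */
#if 0
#include <stdio.h>
static void
emit_xword_fallback_sketch (FILE *f, const char *sym)
{
  fprintf (f, "\t.word\t0\n");         /* high 32 bits are zero */
  fprintf (f, "\t.word\t%s\n", sym);   /* low 32 bits: the address */
}
#endif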
9026 /* Return the value of a code used in the .proc pseudo-op that says
9027 what kind of result this function returns. For non-C types, we pick
9028 the closest C type. */
9030 #ifndef SHORT_TYPE_SIZE
9031 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
9032 #endif
9034 #ifndef INT_TYPE_SIZE
9035 #define INT_TYPE_SIZE BITS_PER_WORD
9036 #endif
9038 #ifndef LONG_TYPE_SIZE
9039 #define LONG_TYPE_SIZE BITS_PER_WORD
9040 #endif
9042 #ifndef LONG_LONG_TYPE_SIZE
9043 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
9044 #endif
9046 #ifndef FLOAT_TYPE_SIZE
9047 #define FLOAT_TYPE_SIZE BITS_PER_WORD
9048 #endif
9050 #ifndef DOUBLE_TYPE_SIZE
9051 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9052 #endif
9054 #ifndef LONG_DOUBLE_TYPE_SIZE
9055 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
9056 #endif
9058 unsigned long
9059 sparc_type_code (register tree type)
9061 register unsigned long qualifiers = 0;
9062 register unsigned shift;
9064 /* Only the first 30 bits of the qualifier are valid. We must refrain from
9065 setting more, since some assemblers will give an error for this. Also,
9066 we must be careful to avoid shifts of 32 bits or more to avoid getting
9067 unpredictable results. */
9069 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
9071 switch (TREE_CODE (type))
9073 case ERROR_MARK:
9074 return qualifiers;
9076 case ARRAY_TYPE:
9077 qualifiers |= (3 << shift);
9078 break;
9080 case FUNCTION_TYPE:
9081 case METHOD_TYPE:
9082 qualifiers |= (2 << shift);
9083 break;
9085 case POINTER_TYPE:
9086 case REFERENCE_TYPE:
9087 case OFFSET_TYPE:
9088 qualifiers |= (1 << shift);
9089 break;
9091 case RECORD_TYPE:
9092 return (qualifiers | 8);
9094 case UNION_TYPE:
9095 case QUAL_UNION_TYPE:
9096 return (qualifiers | 9);
9098 case ENUMERAL_TYPE:
9099 return (qualifiers | 10);
9101 case VOID_TYPE:
9102 return (qualifiers | 16);
9104 case INTEGER_TYPE:
9105 /* If this is a range type, consider it to be the underlying
9106 type. */
9107 if (TREE_TYPE (type) != 0)
9108 break;
9110 /* Carefully distinguish all the standard types of C,
9111 without messing up if the language is not C. We do this by
9112 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
9113 look at both the names and the above fields, but that's redundant.
9114 Any type whose size is between two C types will be considered
9115 to be the wider of the two types. Also, we do not have a
9116 special code to use for "long long", so anything wider than
9117 long is treated the same. Note that we can't distinguish
9118 between "int" and "long" in this code if they are the same
9119 size, but that's fine, since neither can the assembler. */
9121 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
9122 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
9124 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
9125 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
9127 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
9128 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
9130 else
9131 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
9133 case REAL_TYPE:
9134 /* If this is a range type, consider it to be the underlying
9135 type. */
9136 if (TREE_TYPE (type) != 0)
9137 break;
9139 /* Carefully distinguish all the standard types of C,
9140 without messing up if the language is not C. */
9142 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
9143 return (qualifiers | 6);
9145 else
9146 return (qualifiers | 7);
9148 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
9149 /* ??? We need to distinguish between double and float complex types,
9150 but I don't know how yet because I can't reach this code from
9151 existing front-ends. */
9152 return (qualifiers | 7); /* Who knows? */
9154 case VECTOR_TYPE:
9155 case BOOLEAN_TYPE: /* Boolean truth value type. */
9156 case LANG_TYPE:
9157 case NULLPTR_TYPE:
9158 return qualifiers;
9160 default:
9161 gcc_unreachable (); /* Not a type! */
9165 return qualifiers;
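/* A worked example of the encoding above (values computed by hand): for an
   array of pointers to signed int, the levels are encoded two bits at a
   time starting at bit 6, and the base-type code fills the low bits.  */
#if 0
unsigned long example_code = (3UL << 6)   /* array   at level 0 */
                           | (1UL << 8)   /* pointer at level 1 */
                           | 4;           /* signed int base type */
/* example_code == 0x1c4 */
#endif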
9168 /* Nested function support. */
9170 /* Emit RTL insns to initialize the variable parts of a trampoline.
9171 FNADDR is an RTX for the address of the function's pure code.
9172 CXT is an RTX for the static chain value for the function.
9174 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
9175 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
9176 (to store insns). This is a bit excessive. Perhaps a different
9177 mechanism would be better here.
9179 Emit enough FLUSH insns to synchronize the data and instruction caches. */
9181 static void
9182 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9184 /* SPARC 32-bit trampoline:
9186 sethi %hi(fn), %g1
9187 sethi %hi(static), %g2
9188 jmp %g1+%lo(fn)
9189 or %g2, %lo(static), %g2
9191 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
9192 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
9193 */
9195 emit_move_insn
9196 (adjust_address (m_tramp, SImode, 0),
9197 expand_binop (SImode, ior_optab,
9198 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
9199 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
9200 NULL_RTX, 1, OPTAB_DIRECT));
9202 emit_move_insn
9203 (adjust_address (m_tramp, SImode, 4),
9204 expand_binop (SImode, ior_optab,
9205 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
9206 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
9207 NULL_RTX, 1, OPTAB_DIRECT));
9209 emit_move_insn
9210 (adjust_address (m_tramp, SImode, 8),
9211 expand_binop (SImode, ior_optab,
9212 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
9213 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
9214 NULL_RTX, 1, OPTAB_DIRECT));
9216 emit_move_insn
9217 (adjust_address (m_tramp, SImode, 12),
9218 expand_binop (SImode, ior_optab,
9219 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
9220 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
9221 NULL_RTX, 1, OPTAB_DIRECT));
9223 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
9224 aligned on a 16 byte boundary so one flush clears it all. */
9225 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
9226 if (sparc_cpu != PROCESSOR_ULTRASPARC
9227 && sparc_cpu != PROCESSOR_ULTRASPARC3
9228 && sparc_cpu != PROCESSOR_NIAGARA
9229 && sparc_cpu != PROCESSOR_NIAGARA2
9230 && sparc_cpu != PROCESSOR_NIAGARA3
9231 && sparc_cpu != PROCESSOR_NIAGARA4)
9232 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
9234 /* Call __enable_execute_stack after writing onto the stack to make sure
9235 the stack address is accessible. */
9236 #ifdef HAVE_ENABLE_EXECUTE_STACK
9237 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9238 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
9239 #endif
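/* A standalone sketch of the four instruction words built above, assuming
   a flat uint32_t view of the trampoline and 32-bit fn/cxt addresses; the
   names are illustrative, the constants match the emit_move_insn calls.  */
#if 0
#include <stdint.h>
static void
build_tramp32_sketch (uint32_t *t, uint32_t fn, uint32_t cxt)
{
  t[0] = 0x03000000u | (fn >> 10);      /* sethi %hi(fn), %g1 */
  t[1] = 0x05000000u | (cxt >> 10);     /* sethi %hi(static), %g2 */
  t[2] = 0x81c06000u | (fn & 0x3ff);    /* jmp %g1+%lo(fn) */
  t[3] = 0x8410a000u | (cxt & 0x3ff);   /* or %g2, %lo(static), %g2 */
}
#endif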
9243 /* The 64-bit version is simpler because it makes more sense to load the
9244 values as "immediate" data out of the trampoline. It's also easier since
9245 we can read the PC without clobbering a register. */
9247 static void
9248 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
9250 /* SPARC 64-bit trampoline:
9252 rd %pc, %g1
9253 ldx [%g1+24], %g5
9254 jmp %g5
9255 ldx [%g1+16], %g5
9256 +16 bytes data
9258 */
9259 emit_move_insn (adjust_address (m_tramp, SImode, 0),
9260 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
9261 emit_move_insn (adjust_address (m_tramp, SImode, 4),
9262 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
9263 emit_move_insn (adjust_address (m_tramp, SImode, 8),
9264 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
9265 emit_move_insn (adjust_address (m_tramp, SImode, 12),
9266 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
9267 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
9268 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
9269 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
9271 if (sparc_cpu != PROCESSOR_ULTRASPARC
9272 && sparc_cpu != PROCESSOR_ULTRASPARC3
9273 && sparc_cpu != PROCESSOR_NIAGARA
9274 && sparc_cpu != PROCESSOR_NIAGARA2
9275 && sparc_cpu != PROCESSOR_NIAGARA3
9276 && sparc_cpu != PROCESSOR_NIAGARA4)
9277 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
9279 /* Call __enable_execute_stack after writing onto the stack to make sure
9280 the stack address is accessible. */
9281 #ifdef HAVE_ENABLE_EXECUTE_STACK
9282 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
9283 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
9284 #endif
9287 /* Worker for TARGET_TRAMPOLINE_INIT. */
9289 static void
9290 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9292 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
9293 cxt = force_reg (Pmode, cxt);
9294 if (TARGET_ARCH64)
9295 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
9296 else
9297 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
9300 /* Adjust the cost of a scheduling dependency. Return the new cost of
9301 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
9303 static int
9304 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
9306 enum attr_type insn_type;
9308 if (! recog_memoized (insn))
9309 return 0;
9311 insn_type = get_attr_type (insn);
9313 if (REG_NOTE_KIND (link) == 0)
9315 /* Data dependency; DEP_INSN writes a register that INSN reads some
9316 cycles later. */
9318 /* If a load, then the dependence must be on the memory address;
9319 add an extra "cycle". Note that the cost could be two cycles
9320 if the reg was written late in an instruction group; we cannot tell
9321 here. */
9322 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
9323 return cost + 3;
9325 /* Get the delay only if the address of the store is the dependence. */
9326 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
9328 rtx pat = PATTERN(insn);
9329 rtx dep_pat = PATTERN (dep_insn);
9331 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
9332 return cost; /* This should not happen! */
9334 /* The dependency between the two instructions was on the data that
9335 is being stored. Assume that this implies that the address of the
9336 store is not dependent. */
9337 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
9338 return cost;
9340 return cost + 3; /* An approximation. */
9343 /* A shift instruction cannot receive its data from an instruction
9344 in the same cycle; add a one cycle penalty. */
9345 if (insn_type == TYPE_SHIFT)
9346 return cost + 3; /* Split before cascade into shift. */
9348 else
9350 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
9351 INSN writes some cycles later. */
9353 /* These are only significant for the fpu unit; writing a fp reg before
9354 the fpu has finished with it stalls the processor. */
9356 /* Reusing an integer register causes no problems. */
9357 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
9358 return 0;
9361 return cost;
9364 static int
9365 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
9367 enum attr_type insn_type, dep_type;
9368 rtx pat = PATTERN(insn);
9369 rtx dep_pat = PATTERN (dep_insn);
9371 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
9372 return cost;
9374 insn_type = get_attr_type (insn);
9375 dep_type = get_attr_type (dep_insn);
9377 switch (REG_NOTE_KIND (link))
9379 case 0:
9380 /* Data dependency; DEP_INSN writes a register that INSN reads some
9381 cycles later. */
9383 switch (insn_type)
9385 case TYPE_STORE:
9386 case TYPE_FPSTORE:
9387 /* Get the delay iff the address of the store is the dependence. */
9388 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
9389 return cost;
9391 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
9392 return cost;
9393 return cost + 3;
9395 case TYPE_LOAD:
9396 case TYPE_SLOAD:
9397 case TYPE_FPLOAD:
9398 /* If a load, then the dependence must be on the memory address. If
9399 the addresses aren't equal, then it might be a false dependency. */
9400 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
9402 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
9403 || GET_CODE (SET_DEST (dep_pat)) != MEM
9404 || GET_CODE (SET_SRC (pat)) != MEM
9405 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
9406 XEXP (SET_SRC (pat), 0)))
9407 return cost + 2;
9409 return cost + 8;
9411 break;
9413 case TYPE_BRANCH:
9414 /* Compare to branch latency is 0. There is no benefit from
9415 separating compare and branch. */
9416 if (dep_type == TYPE_COMPARE)
9417 return 0;
9418 /* Floating point compare to branch latency is less than
9419 compare to conditional move. */
9420 if (dep_type == TYPE_FPCMP)
9421 return cost - 1;
9422 break;
9423 default:
9424 break;
9426 break;
9428 case REG_DEP_ANTI:
9429 /* Anti-dependencies only penalize the fpu unit. */
9430 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
9431 return 0;
9432 break;
9434 default:
9435 break;
9438 return cost;
9441 static int
9442 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
9444 switch (sparc_cpu)
9446 case PROCESSOR_SUPERSPARC:
9447 cost = supersparc_adjust_cost (insn, link, dep, cost);
9448 break;
9449 case PROCESSOR_HYPERSPARC:
9450 case PROCESSOR_SPARCLITE86X:
9451 cost = hypersparc_adjust_cost (insn, link, dep, cost);
9452 break;
9453 default:
9454 break;
9456 return cost;
9459 static void
9460 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
9461 int sched_verbose ATTRIBUTE_UNUSED,
9462 int max_ready ATTRIBUTE_UNUSED)
9465 static int
9466 sparc_use_sched_lookahead (void)
9468 if (sparc_cpu == PROCESSOR_NIAGARA
9469 || sparc_cpu == PROCESSOR_NIAGARA2
9470 || sparc_cpu == PROCESSOR_NIAGARA3)
9471 return 0;
9472 if (sparc_cpu == PROCESSOR_NIAGARA4)
9473 return 2;
9474 if (sparc_cpu == PROCESSOR_ULTRASPARC
9475 || sparc_cpu == PROCESSOR_ULTRASPARC3)
9476 return 4;
9477 if ((1 << sparc_cpu) &
9478 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
9479 (1 << PROCESSOR_SPARCLITE86X)))
9480 return 3;
9481 return 0;
9484 static int
9485 sparc_issue_rate (void)
9487 switch (sparc_cpu)
9489 case PROCESSOR_NIAGARA:
9490 case PROCESSOR_NIAGARA2:
9491 case PROCESSOR_NIAGARA3:
9492 default:
9493 return 1;
9494 case PROCESSOR_NIAGARA4:
9495 case PROCESSOR_V9:
9496 /* Assume V9 processors are capable of at least dual-issue. */
9497 return 2;
9498 case PROCESSOR_SUPERSPARC:
9499 return 3;
9500 case PROCESSOR_HYPERSPARC:
9501 case PROCESSOR_SPARCLITE86X:
9502 return 2;
9503 case PROCESSOR_ULTRASPARC:
9504 case PROCESSOR_ULTRASPARC3:
9505 return 4;
9509 static int
9510 set_extends (rtx insn)
9512 register rtx pat = PATTERN (insn);
9514 switch (GET_CODE (SET_SRC (pat)))
9516 /* Load and some shift instructions zero extend. */
9517 case MEM:
9518 case ZERO_EXTEND:
9519 /* sethi clears the high bits */
9520 case HIGH:
9521 /* LO_SUM is used with sethi. sethi clears the high
9522 bits and the values used with lo_sum are positive. */
9523 case LO_SUM:
9524 /* Store flag stores 0 or 1 */
9525 case LT: case LTU:
9526 case GT: case GTU:
9527 case LE: case LEU:
9528 case GE: case GEU:
9529 case EQ:
9530 case NE:
9531 return 1;
9532 case AND:
9534 rtx op0 = XEXP (SET_SRC (pat), 0);
9535 rtx op1 = XEXP (SET_SRC (pat), 1);
9536 if (GET_CODE (op1) == CONST_INT)
9537 return INTVAL (op1) >= 0;
9538 if (GET_CODE (op0) != REG)
9539 return 0;
9540 if (sparc_check_64 (op0, insn) == 1)
9541 return 1;
9542 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9544 case IOR:
9545 case XOR:
9547 rtx op0 = XEXP (SET_SRC (pat), 0);
9548 rtx op1 = XEXP (SET_SRC (pat), 1);
9549 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
9550 return 0;
9551 if (GET_CODE (op1) == CONST_INT)
9552 return INTVAL (op1) >= 0;
9553 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
9555 case LSHIFTRT:
9556 return GET_MODE (SET_SRC (pat)) == SImode;
9557 /* Positive integers leave the high bits zero. */
9558 case CONST_DOUBLE:
9559 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
9560 case CONST_INT:
9561 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
9562 case ASHIFTRT:
9563 case SIGN_EXTEND:
9564 return - (GET_MODE (SET_SRC (pat)) == SImode);
9565 case REG:
9566 return sparc_check_64 (SET_SRC (pat), insn);
9567 default:
9568 return 0;
9572 /* We _ought_ to have only one kind per function, but... */
9573 static GTY(()) rtx sparc_addr_diff_list;
9574 static GTY(()) rtx sparc_addr_list;
9576 void
9577 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
9579 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9580 if (diff)
9581 sparc_addr_diff_list
9582 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
9583 else
9584 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
9587 static void
9588 sparc_output_addr_vec (rtx vec)
9590 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9591 int idx, vlen = XVECLEN (body, 0);
9593 #ifdef ASM_OUTPUT_ADDR_VEC_START
9594 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9595 #endif
9597 #ifdef ASM_OUTPUT_CASE_LABEL
9598 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9599 NEXT_INSN (lab));
9600 #else
9601 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9602 #endif
9604 for (idx = 0; idx < vlen; idx++)
9606 ASM_OUTPUT_ADDR_VEC_ELT
9607 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9610 #ifdef ASM_OUTPUT_ADDR_VEC_END
9611 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9612 #endif
9615 static void
9616 sparc_output_addr_diff_vec (rtx vec)
9618 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9619 rtx base = XEXP (XEXP (body, 0), 0);
9620 int idx, vlen = XVECLEN (body, 1);
9622 #ifdef ASM_OUTPUT_ADDR_VEC_START
9623 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9624 #endif
9626 #ifdef ASM_OUTPUT_CASE_LABEL
9627 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9628 NEXT_INSN (lab));
9629 #else
9630 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9631 #endif
9633 for (idx = 0; idx < vlen; idx++)
9635 ASM_OUTPUT_ADDR_DIFF_ELT
9636 (asm_out_file,
9637 body,
9638 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
9639 CODE_LABEL_NUMBER (base));
9642 #ifdef ASM_OUTPUT_ADDR_VEC_END
9643 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9644 #endif
9647 static void
9648 sparc_output_deferred_case_vectors (void)
9650 rtx t;
9651 int align;
9653 if (sparc_addr_list == NULL_RTX
9654 && sparc_addr_diff_list == NULL_RTX)
9655 return;
9657 /* Align to cache line in the function's code section. */
9658 switch_to_section (current_function_section ());
9660 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9661 if (align > 0)
9662 ASM_OUTPUT_ALIGN (asm_out_file, align);
9664 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
9665 sparc_output_addr_vec (XEXP (t, 0));
9666 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
9667 sparc_output_addr_diff_vec (XEXP (t, 0));
9669 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
9672 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
9673 unknown. Return 1 if the high bits are zero, -1 if the register is
9674 sign extended. */
9675 int
9676 sparc_check_64 (rtx x, rtx insn)
9678 /* If a register is set only once it is safe to ignore insns this
9679 code does not know how to handle. The loop will either recognize
9680 the single set and return the correct value or fail to recognize
9681 it and return 0. */
9682 int set_once = 0;
9683 rtx y = x;
9685 gcc_assert (GET_CODE (x) == REG);
9687 if (GET_MODE (x) == DImode)
9688 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
9690 if (flag_expensive_optimizations
9691 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
9692 set_once = 1;
9694 if (insn == 0)
9696 if (set_once)
9697 insn = get_last_insn_anywhere ();
9698 else
9699 return 0;
9702 while ((insn = PREV_INSN (insn)))
9704 switch (GET_CODE (insn))
9706 case JUMP_INSN:
9707 case NOTE:
9708 break;
9709 case CODE_LABEL:
9710 case CALL_INSN:
9711 default:
9712 if (! set_once)
9713 return 0;
9714 break;
9715 case INSN:
9717 rtx pat = PATTERN (insn);
9718 if (GET_CODE (pat) != SET)
9719 return 0;
9720 if (rtx_equal_p (x, SET_DEST (pat)))
9721 return set_extends (insn);
9722 if (y && rtx_equal_p (y, SET_DEST (pat)))
9723 return set_extends (insn);
9724 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
9725 return 0;
9729 return 0;
9732 /* Output a wide shift instruction in V8+ mode. INSN is the instruction,
9733 OPERANDS are its operands and OPCODE is the mnemonic to be used. */
9735 const char *
9736 output_v8plus_shift (rtx insn, rtx *operands, const char *opcode)
9738 static char asm_code[60];
9740 /* The scratch register is only required when the destination
9741 register is not a 64-bit global or out register. */
9742 if (which_alternative != 2)
9743 operands[3] = operands[0];
9745 /* We can only shift by constants <= 63. */
9746 if (GET_CODE (operands[2]) == CONST_INT)
9747 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
9749 if (GET_CODE (operands[1]) == CONST_INT)
9751 output_asm_insn ("mov\t%1, %3", operands);
9753 else
9755 output_asm_insn ("sllx\t%H1, 32, %3", operands);
9756 if (sparc_check_64 (operands[1], insn) <= 0)
9757 output_asm_insn ("srl\t%L1, 0, %L1", operands);
9758 output_asm_insn ("or\t%L1, %3, %3", operands);
9761 strcpy (asm_code, opcode);
9763 if (which_alternative != 2)
9764 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
9765 else
9766 return
9767 strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
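/* A standalone sketch of the register-pair dance above, assuming %H/%L
   hold the high and low 32-bit halves of the 64-bit input; this mirrors
   the emitted sllx/srl/or sequence, it is not compiler code.  */
#if 0
#include <stdint.h>
static uint64_t
v8plus_shift_sketch (uint32_t hi, uint32_t lo, unsigned count)
{
  uint64_t v = ((uint64_t) hi << 32) | lo;  /* sllx %H, 32; srl %L, 0; or */
  return v << (count & 0x3f);               /* shift by at most 63 */
}
#endif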
9770 /* Output rtl to increment the profiler label LABELNO
9771 for profiling a function entry. */
9773 void
9774 sparc_profile_hook (int labelno)
9776 char buf[32];
9777 rtx lab, fun;
9779 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
9780 if (NO_PROFILE_COUNTERS)
9782 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
9784 else
9786 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9787 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
9788 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
9792 #ifdef TARGET_SOLARIS
9793 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9795 static void
9796 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9797 tree decl ATTRIBUTE_UNUSED)
9799 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
9801 solaris_elf_asm_comdat_section (name, flags, decl);
9802 return;
9805 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9807 if (!(flags & SECTION_DEBUG))
9808 fputs (",#alloc", asm_out_file);
9809 if (flags & SECTION_WRITE)
9810 fputs (",#write", asm_out_file);
9811 if (flags & SECTION_TLS)
9812 fputs (",#tls", asm_out_file);
9813 if (flags & SECTION_CODE)
9814 fputs (",#execinstr", asm_out_file);
9816 /* Sun as only supports #nobits/#progbits since Solaris 10. */
9817 if (HAVE_AS_SPARC_NOBITS)
9819 if (flags & SECTION_BSS)
9820 fputs (",#nobits", asm_out_file);
9821 else
9822 fputs (",#progbits", asm_out_file);
9825 fputc ('\n', asm_out_file);
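/* For illustration, a hypothetical writable TLS section would come out of
   the code above as (assuming an assembler with #nobits/#progbits support):

	.section	".tdata",#alloc,#write,#tls,#progbits  */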
9827 #endif /* TARGET_SOLARIS */
9829 /* We do not allow indirect calls to be optimized into sibling calls.
9831 We cannot use sibling calls when delayed branches are disabled
9832 because they will likely require the call delay slot to be filled.
9834 Also, on SPARC 32-bit we cannot emit a sibling call when the
9835 current function returns a structure. This is because the "unimp
9836 after call" convention would cause the callee to return to the
9837 wrong place. The generic code already disallows cases where the
9838 function being called returns a structure.
9840 It may seem strange how this last case could occur. Usually there
9841 is code after the call which jumps to epilogue code which dumps the
9842 return value into the struct return area. That ought to invalidate
9843 the sibling call right? Well, in the C++ case we can end up passing
9844 the pointer to the struct return area to a constructor (which returns
9845 void) and then nothing else happens. Such a sibling call would look
9846 valid without the added check here.
9848 VxWorks PIC PLT entries require the global pointer to be initialized
9849 on entry. We therefore can't emit sibling calls to them. */
9850 static bool
9851 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9853 return (decl
9854 && flag_delayed_branch
9855 && (TARGET_ARCH64 || ! cfun->returns_struct)
9856 && !(TARGET_VXWORKS_RTP
9857 && flag_pic
9858 && !targetm.binds_local_p (decl)));
9861 /* libfunc renaming. */
9863 static void
9864 sparc_init_libfuncs (void)
9866 if (TARGET_ARCH32)
9868 /* Use the subroutines that Sun's library provides for integer
9869 multiply and divide. The `*' prevents an underscore from
9870 being prepended by the compiler. .umul is a little faster
9871 than .mul. */
9872 set_optab_libfunc (smul_optab, SImode, "*.umul");
9873 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9874 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9875 set_optab_libfunc (smod_optab, SImode, "*.rem");
9876 set_optab_libfunc (umod_optab, SImode, "*.urem");
9878 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
9879 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9880 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9881 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9882 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9883 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9885 /* The TFmode sqrt optab can be defined only if TARGET_FPU, because
9886 with soft-float the SFmode and DFmode sqrt instructions are
9887 absent, so the compiler would notice and try to use the TFmode
9888 sqrt instruction for calls to the builtin function sqrt, and
9889 that fails. */
9890 if (TARGET_FPU)
9891 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9893 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9894 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9895 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9896 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9897 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9898 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9900 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9901 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9902 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9903 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9905 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9906 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9907 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9908 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9910 if (DITF_CONVERSION_LIBFUNCS)
9912 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9913 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9914 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9915 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9918 if (SUN_CONVERSION_LIBFUNCS)
9920 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9921 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9922 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9923 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9926 if (TARGET_ARCH64)
9928 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
9929 do not exist in the library. Make sure the compiler does not
9930 emit calls to them by accident. (It should always use the
9931 hardware instructions.) */
9932 set_optab_libfunc (smul_optab, SImode, 0);
9933 set_optab_libfunc (sdiv_optab, SImode, 0);
9934 set_optab_libfunc (udiv_optab, SImode, 0);
9935 set_optab_libfunc (smod_optab, SImode, 0);
9936 set_optab_libfunc (umod_optab, SImode, 0);
9938 if (SUN_INTEGER_MULTIPLY_64)
9940 set_optab_libfunc (smul_optab, DImode, "__mul64");
9941 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9942 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9943 set_optab_libfunc (smod_optab, DImode, "__rem64");
9944 set_optab_libfunc (umod_optab, DImode, "__urem64");
9947 if (SUN_CONVERSION_LIBFUNCS)
9949 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9950 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9951 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9952 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
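/* Add a SPARC builtin function NAME with insn code CODE and type TYPE,
   returning its decl. */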
9957 static tree def_builtin(const char *name, int code, tree type)
9959 return add_builtin_function(name, type, code, BUILT_IN_MD, NULL,
9960 NULL_TREE);
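/* Like def_builtin, but additionally set TREE_READONLY on the decl so
   calls to the function can be treated as having no side effects. */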
9963 static tree def_builtin_const(const char *name, int code, tree type)
9965 tree t = def_builtin(name, code, type);
9967 if (t)
9968 TREE_READONLY (t) = 1;
9970 return t;
9973 /* Implement the TARGET_INIT_BUILTINS target hook.
9974 Create builtin functions for special SPARC instructions. */
9976 static void
9977 sparc_init_builtins (void)
9979 if (TARGET_VIS)
9980 sparc_vis_init_builtins ();
9983 /* Create builtin functions for VIS 1.0 instructions. */
9985 static void
9986 sparc_vis_init_builtins (void)
9988 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9989 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9990 tree v4hi = build_vector_type (intHI_type_node, 4);
9991 tree v2hi = build_vector_type (intHI_type_node, 2);
9992 tree v2si = build_vector_type (intSI_type_node, 2);
9993 tree v1si = build_vector_type (intSI_type_node, 1);
9995 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9996 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9997 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9998 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9999 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
10000 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
10001 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
10002 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
10003 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
10004 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
10005 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
10006 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
10007 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
10008 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
10009 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
10010 v8qi, v8qi,
10011 intDI_type_node, 0);
10012 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
10013 v8qi, v8qi, 0);
10014 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
10015 v8qi, v8qi, 0);
10016 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
10017 intDI_type_node,
10018 intDI_type_node, 0);
10019 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
10020 intSI_type_node,
10021 intSI_type_node, 0);
10022 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
10023 ptr_type_node,
10024 intSI_type_node, 0);
10025 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
10026 ptr_type_node,
10027 intDI_type_node, 0);
10028 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
10029 ptr_type_node,
10030 ptr_type_node, 0);
10031 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
10032 ptr_type_node,
10033 ptr_type_node, 0);
10034 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
10035 v4hi, v4hi, 0);
10036 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
10037 v2si, v2si, 0);
10038 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
10039 v4hi, v4hi, 0);
10040 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
10041 v2si, v2si, 0);
10042 tree void_ftype_di = build_function_type_list (void_type_node,
10043 intDI_type_node, 0);
10044 tree di_ftype_void = build_function_type_list (intDI_type_node,
10045 void_type_node, 0);
10046 tree void_ftype_si = build_function_type_list (void_type_node,
10047 intSI_type_node, 0);
10048 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
10049 float_type_node,
10050 float_type_node, 0);
10051 tree df_ftype_df_df = build_function_type_list (double_type_node,
10052 double_type_node,
10053 double_type_node, 0);
10055 /* Packing and expanding vectors. */
10056 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
10057 v4qi_ftype_v4hi);
10058 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
10059 v8qi_ftype_v2si_v8qi);
10060 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
10061 v2hi_ftype_v2si);
10062 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
10063 v4hi_ftype_v4qi);
10064 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
10065 v8qi_ftype_v4qi_v4qi);
10067 /* Multiplications. */
10068 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
10069 v4hi_ftype_v4qi_v4hi);
10070 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
10071 v4hi_ftype_v4qi_v2hi);
10072 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
10073 v4hi_ftype_v4qi_v2hi);
10074 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
10075 v4hi_ftype_v8qi_v4hi);
10076 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
10077 v4hi_ftype_v8qi_v4hi);
10078 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
10079 v2si_ftype_v4qi_v2hi);
10080 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
10081 v2si_ftype_v4qi_v2hi);
10083 /* Data aligning. */
10084 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
10085 v4hi_ftype_v4hi_v4hi);
10086 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
10087 v8qi_ftype_v8qi_v8qi);
10088 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
10089 v2si_ftype_v2si_v2si);
10090 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
10091 di_ftype_di_di);
10093 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
10094 void_ftype_di);
10095 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
10096 di_ftype_void);
10098 if (TARGET_ARCH64)
10100 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
10101 ptr_ftype_ptr_di);
10102 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
10103 ptr_ftype_ptr_di);
10105 else
10107 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
10108 ptr_ftype_ptr_si);
10109 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
10110 ptr_ftype_ptr_si);
10113 /* Pixel distance. */
10114 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
10115 di_ftype_v8qi_v8qi_di);
10117 /* Edge handling. */
10118 if (TARGET_ARCH64)
10120 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
10121 di_ftype_ptr_ptr);
10122 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
10123 di_ftype_ptr_ptr);
10124 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
10125 di_ftype_ptr_ptr);
10126 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
10127 di_ftype_ptr_ptr);
10128 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
10129 di_ftype_ptr_ptr);
10130 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
10131 di_ftype_ptr_ptr);
10132 if (TARGET_VIS2)
10134 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
10135 di_ftype_ptr_ptr);
10136 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
10137 di_ftype_ptr_ptr);
10138 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
10139 di_ftype_ptr_ptr);
10140 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
10141 di_ftype_ptr_ptr);
10142 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
10143 di_ftype_ptr_ptr);
10144 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
10145 di_ftype_ptr_ptr);
10148 else
10150 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
10151 si_ftype_ptr_ptr);
10152 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
10153 si_ftype_ptr_ptr);
10154 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
10155 si_ftype_ptr_ptr);
10156 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
10157 si_ftype_ptr_ptr);
10158 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
10159 si_ftype_ptr_ptr);
10160 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
10161 si_ftype_ptr_ptr);
10162 if (TARGET_VIS2)
10164 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
10165 si_ftype_ptr_ptr);
10166 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
10167 si_ftype_ptr_ptr);
10168 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
10169 si_ftype_ptr_ptr);
10170 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
10171 si_ftype_ptr_ptr);
10172 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
10173 si_ftype_ptr_ptr);
10174 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
10175 si_ftype_ptr_ptr);
10179 /* Pixel compare. */
10180 if (TARGET_ARCH64)
10182 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
10183 di_ftype_v4hi_v4hi);
10184 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
10185 di_ftype_v2si_v2si);
10186 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
10187 di_ftype_v4hi_v4hi);
10188 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
10189 di_ftype_v2si_v2si);
10190 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
10191 di_ftype_v4hi_v4hi);
10192 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
10193 di_ftype_v2si_v2si);
10194 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
10195 di_ftype_v4hi_v4hi);
10196 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
10197 di_ftype_v2si_v2si);
10199 else
10201 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
10202 si_ftype_v4hi_v4hi);
10203 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
10204 si_ftype_v2si_v2si);
10205 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
10206 si_ftype_v4hi_v4hi);
10207 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
10208 si_ftype_v2si_v2si);
10209 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
10210 si_ftype_v4hi_v4hi);
10211 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
10212 si_ftype_v2si_v2si);
10213 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
10214 si_ftype_v4hi_v4hi);
10215 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
10216 si_ftype_v2si_v2si);
10219 /* Addition and subtraction. */
10220 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
10221 v4hi_ftype_v4hi_v4hi);
10222 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
10223 v2hi_ftype_v2hi_v2hi);
10224 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
10225 v2si_ftype_v2si_v2si);
10226 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
10227 v1si_ftype_v1si_v1si);
10228 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
10229 v4hi_ftype_v4hi_v4hi);
10230 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
10231 v2hi_ftype_v2hi_v2hi);
10232 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
10233 v2si_ftype_v2si_v2si);
10234 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
10235 v1si_ftype_v1si_v1si);
10237 /* Three-dimensional array addressing. */
10238 if (TARGET_ARCH64)
10240 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
10241 di_ftype_di_di);
10242 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
10243 di_ftype_di_di);
10244 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
10245 di_ftype_di_di);
10247 else
10249 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
10250 si_ftype_si_si);
10251 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
10252 si_ftype_si_si);
10253 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
10254 si_ftype_si_si);
10257 if (TARGET_VIS2)
10259 /* Byte mask and shuffle */
10260 if (TARGET_ARCH64)
10261 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
10262 di_ftype_di_di);
10263 else
10264 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
10265 si_ftype_si_si);
10266 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
10267 v4hi_ftype_v4hi_v4hi);
10268 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
10269 v8qi_ftype_v8qi_v8qi);
10270 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
10271 v2si_ftype_v2si_v2si);
10272 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
10273 di_ftype_di_di);
10276 if (TARGET_VIS3)
10278 if (TARGET_ARCH64)
10280 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
10281 void_ftype_di);
10282 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
10283 void_ftype_di);
10284 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
10285 void_ftype_di);
10287 else
10289 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
10290 void_ftype_si);
10291 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
10292 void_ftype_si);
10293 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
10294 void_ftype_si);
10297 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
10298 v4hi_ftype_v4hi_v4hi);
10300 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
10301 v4hi_ftype_v4hi_v4hi);
10302 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
10303 v4hi_ftype_v4hi_v4hi);
10304 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
10305 v4hi_ftype_v4hi_v4hi);
10306 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
10307 v4hi_ftype_v4hi_v4hi);
10308 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
10309 v2si_ftype_v2si_v2si);
10310 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
10311 v2si_ftype_v2si_v2si);
10312 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
10313 v2si_ftype_v2si_v2si);
10314 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
10315 v2si_ftype_v2si_v2si);
10317 if (TARGET_ARCH64)
10318 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
10319 di_ftype_v8qi_v8qi);
10320 else
10321 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
10322 si_ftype_v8qi_v8qi);
10324 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
10325 v4hi_ftype_v4hi_v4hi);
10326 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
10327 di_ftype_di_di);
10328 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
10329 di_ftype_di_di);
10331 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
10332 v4hi_ftype_v4hi_v4hi);
10333 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
10334 v2hi_ftype_v2hi_v2hi);
10335 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
10336 v4hi_ftype_v4hi_v4hi);
10337 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
10338 v2hi_ftype_v2hi_v2hi);
10339 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
10340 v2si_ftype_v2si_v2si);
10341 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
10342 v1si_ftype_v1si_v1si);
10343 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
10344 v2si_ftype_v2si_v2si);
10345 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
10346 v1si_ftype_v1si_v1si);
10348 if (TARGET_ARCH64)
10350 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
10351 di_ftype_v8qi_v8qi);
10352 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
10353 di_ftype_v8qi_v8qi);
10354 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
10355 di_ftype_v8qi_v8qi);
10356 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
10357 di_ftype_v8qi_v8qi);
10359 else
10361 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
10362 si_ftype_v8qi_v8qi);
10363 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
10364 si_ftype_v8qi_v8qi);
10365 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
10366 si_ftype_v8qi_v8qi);
10367 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
10368 si_ftype_v8qi_v8qi);
10371 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
10372 sf_ftype_sf_sf);
10373 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
10374 df_ftype_df_df);
10375 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
10376 sf_ftype_sf_sf);
10377 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
10378 df_ftype_df_df);
10379 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
10380 sf_ftype_sf_sf);
10381 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
10382 df_ftype_df_df);
10384 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
10385 di_ftype_di_di);
10386 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
10387 di_ftype_di_di);
10388 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
10389 di_ftype_di_di);
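/* A minimal usage sketch for one of the builtins defined above
   (illustrative only; assumes a VIS3-capable target, e.g. -mvis3, and
   GCC's vector_size extension):

       typedef short v4hi __attribute__ ((vector_size (8)));

       v4hi
       saturating_add (v4hi a, v4hi b)
       {
         return __builtin_vis_fpadds16 (a, b);
       }
*/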
10393 /* Implement the TARGET_EXPAND_BUILTIN target hook.
10394 Expand builtin functions for SPARC intrinsics. */
10396 static rtx
10397 sparc_expand_builtin (tree exp, rtx target,
10398 rtx subtarget ATTRIBUTE_UNUSED,
10399 enum machine_mode tmode ATTRIBUTE_UNUSED,
10400 int ignore ATTRIBUTE_UNUSED)
10402 tree arg;
10403 call_expr_arg_iterator iter;
10404 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10405 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
10406 rtx pat, op[4];
10407 int arg_count = 0;
10408 bool nonvoid;
10410 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
10412 if (nonvoid)
10414 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10415 if (!target
10416 || GET_MODE (target) != tmode
10417 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10418 op[0] = gen_reg_rtx (tmode);
10419 else
10420 op[0] = target;
10422 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
10424 const struct insn_operand_data *insn_op;
10425 int idx;
10427 if (arg == error_mark_node)
10428 return NULL_RTX;
10430 arg_count++;
10431 idx = arg_count - !nonvoid;
10432 insn_op = &insn_data[icode].operand[idx];
10433 op[arg_count] = expand_normal (arg);
10435 if (insn_op->mode == V1DImode
10436 && GET_MODE (op[arg_count]) == DImode)
10437 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
10438 else if (insn_op->mode == V1SImode
10439 && GET_MODE (op[arg_count]) == SImode)
10440 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
10442 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
10443 insn_op->mode))
10444 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
10447 switch (arg_count)
10449 case 0:
10450 pat = GEN_FCN (icode) (op[0]);
10451 break;
10452 case 1:
10453 if (nonvoid)
10454 pat = GEN_FCN (icode) (op[0], op[1]);
10455 else
10456 pat = GEN_FCN (icode) (op[1]);
10457 break;
10458 case 2:
10459 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
10460 break;
10461 case 3:
10462 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
10463 break;
10464 default:
10465 gcc_unreachable ();
10468 if (!pat)
10469 return NULL_RTX;
10471 emit_insn (pat);
10473 if (nonvoid)
10474 return op[0];
10475 else
10476 return const0_rtx;
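/* Subroutine of sparc_fold_builtin: compute one element of a VIS
   fmul8x16-style multiply, i.e. (E8 * E16 + 128) / 256 -- the product
   of an unsigned 8-bit value and a 16-bit value, scaled down by 256
   with rounding. For example (illustrative), e8 = 16 and e16 = 40
   give (640 + 128) / 256 = 3, i.e. 2.5 rounded to 3. */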
10479 static int
10480 sparc_vis_mul8x16 (int e8, int e16)
10482 return (e8 * e16 + 128) / 256;
10485 /* Multiply the VECTOR_CSTs CST0 and CST1 as specified by FNCODE and put
10486 the result into the array N_ELTS, whose elements are of INNER_TYPE. */
10488 static void
10489 sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type,
10490 tree cst0, tree cst1)
10492 unsigned i, num = VECTOR_CST_NELTS (cst0);
10493 int scale;
10495 switch (fncode)
10497 case CODE_FOR_fmul8x16_vis:
10498 for (i = 0; i < num; ++i)
10500 int val
10501 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
10502 TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i)));
10503 n_elts[i] = build_int_cst (inner_type, val);
10505 break;
10507 case CODE_FOR_fmul8x16au_vis:
10508 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0));
10510 for (i = 0; i < num; ++i)
10512 int val
10513 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
10514 scale);
10515 n_elts[i] = build_int_cst (inner_type, val);
10517 break;
10519 case CODE_FOR_fmul8x16al_vis:
10520 scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1));
10522 for (i = 0; i < num; ++i)
10524 int val
10525 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)),
10526 scale);
10527 n_elts[i] = build_int_cst (inner_type, val);
10529 break;
10531 default:
10532 gcc_unreachable ();
10536 /* Implement the TARGET_FOLD_BUILTIN target hook.
10537 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
10538 result of the function call is ignored. NULL_TREE is returned if the
10539 function could not be folded. */
10541 static tree
10542 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
10543 tree *args, bool ignore)
10545 tree arg0, arg1, arg2;
10546 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
10547 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
10549 if (ignore)
10551 /* Note that a switch statement instead of the sequence of tests would
10552 be incorrect, as many of the CODE_FOR values could be CODE_FOR_nothing,
10553 and that would yield multiple case labels with identical values. */
10554 if (icode == CODE_FOR_alignaddrsi_vis
10555 || icode == CODE_FOR_alignaddrdi_vis
10556 || icode == CODE_FOR_wrgsr_vis
10557 || icode == CODE_FOR_bmasksi_vis
10558 || icode == CODE_FOR_bmaskdi_vis
10559 || icode == CODE_FOR_cmask8si_vis
10560 || icode == CODE_FOR_cmask8di_vis
10561 || icode == CODE_FOR_cmask16si_vis
10562 || icode == CODE_FOR_cmask16di_vis
10563 || icode == CODE_FOR_cmask32si_vis
10564 || icode == CODE_FOR_cmask32di_vis)
10566 else
10567 return build_zero_cst (rtype);
10570 switch (icode)
10572 case CODE_FOR_fexpand_vis:
10573 arg0 = args[0];
10574 STRIP_NOPS (arg0);
10576 if (TREE_CODE (arg0) == VECTOR_CST)
10578 tree inner_type = TREE_TYPE (rtype);
10579 tree *n_elts;
10580 unsigned i;
10582 n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
10583 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10584 n_elts[i] = build_int_cst (inner_type,
10585 TREE_INT_CST_LOW
10586 (VECTOR_CST_ELT (arg0, i)) << 4);
10587 return build_vector (rtype, n_elts);
10589 break;
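/* For instance (illustrative), folding __builtin_vis_fexpand on the
   constant vector {1, 2, 3, 4} yields {16, 32, 48, 64}: each 8-bit
   element is shifted left by 4 into the 16-bit fixed-point format. */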
10591 case CODE_FOR_fmul8x16_vis:
10592 case CODE_FOR_fmul8x16au_vis:
10593 case CODE_FOR_fmul8x16al_vis:
10594 arg0 = args[0];
10595 arg1 = args[1];
10596 STRIP_NOPS (arg0);
10597 STRIP_NOPS (arg1);
10599 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10601 tree inner_type = TREE_TYPE (rtype);
10602 tree *n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0));
10603 sparc_handle_vis_mul8x16 (n_elts, icode, inner_type, arg0, arg1);
10604 return build_vector (rtype, n_elts);
10606 break;
10608 case CODE_FOR_fpmerge_vis:
10609 arg0 = args[0];
10610 arg1 = args[1];
10611 STRIP_NOPS (arg0);
10612 STRIP_NOPS (arg1);
10614 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10616 tree *n_elts = XALLOCAVEC (tree, 2 * VECTOR_CST_NELTS (arg0));
10617 unsigned i;
10618 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10620 n_elts[2*i] = VECTOR_CST_ELT (arg0, i);
10621 n_elts[2*i+1] = VECTOR_CST_ELT (arg1, i);
10624 return build_vector (rtype, n_elts);
10626 break;
10628 case CODE_FOR_pdist_vis:
10629 arg0 = args[0];
10630 arg1 = args[1];
10631 arg2 = args[2];
10632 STRIP_NOPS (arg0);
10633 STRIP_NOPS (arg1);
10634 STRIP_NOPS (arg2);
10636 if (TREE_CODE (arg0) == VECTOR_CST
10637 && TREE_CODE (arg1) == VECTOR_CST
10638 && TREE_CODE (arg2) == INTEGER_CST)
10640 bool overflow = false;
10641 double_int result = TREE_INT_CST (arg2);
10642 double_int tmp;
10643 unsigned i;
10645 for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
10647 double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i));
10648 double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i));
10650 /* NEG2_OVF is only written below when TMP is negative, so give it
a safe initial value. */
bool neg1_ovf, neg2_ovf = false, add1_ovf, add2_ovf;
10652 tmp = e1.neg_with_overflow (&neg1_ovf);
10653 tmp = e0.add_with_sign (tmp, false, &add1_ovf);
10654 if (tmp.is_negative ())
10655 tmp = tmp.neg_with_overflow (&neg2_ovf);
10657 result = result.add_with_sign (tmp, false, &add2_ovf);
10658 overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
10661 gcc_assert (!overflow);
10663 return build_int_cst_wide (rtype, result.low, result.high);
10666 default:
10667 break;
10670 return NULL_TREE;
10673 /* ??? This duplicates information provided to the compiler by the
10674 ??? scheduler description. Some day, teach genautomata to output
10675 ??? the latencies and then CSE will just use that. */
10677 static bool
10678 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
10679 int *total, bool speed ATTRIBUTE_UNUSED)
10681 enum machine_mode mode = GET_MODE (x);
10682 bool float_mode_p = FLOAT_MODE_P (mode);
10684 switch (code)
10686 case CONST_INT:
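/* Immediates in the signed 13-bit range fit directly in an
   instruction, so they cost nothing extra. */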
10687 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
10689 *total = 0;
10690 return true;
10692 /* FALLTHRU */
10694 case HIGH:
10695 *total = 2;
10696 return true;
10698 case CONST:
10699 case LABEL_REF:
10700 case SYMBOL_REF:
10701 *total = 4;
10702 return true;
10704 case CONST_DOUBLE:
10705 if (GET_MODE (x) == VOIDmode
10706 && ((CONST_DOUBLE_HIGH (x) == 0
10707 && CONST_DOUBLE_LOW (x) < 0x1000)
10708 || (CONST_DOUBLE_HIGH (x) == -1
10709 && CONST_DOUBLE_LOW (x) < 0
10710 && CONST_DOUBLE_LOW (x) >= -0x1000)))
10711 *total = 0;
10712 else
10713 *total = 8;
10714 return true;
10716 case MEM:
10717 /* If outer-code was a sign or zero extension, a cost
10718 of COSTS_N_INSNS (1) was already added in. This is
10719 why we are subtracting it back out. */
10720 if (outer_code == ZERO_EXTEND)
10722 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
10724 else if (outer_code == SIGN_EXTEND)
10726 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
10728 else if (float_mode_p)
10730 *total = sparc_costs->float_load;
10732 else
10734 *total = sparc_costs->int_load;
10737 return true;
10739 case PLUS:
10740 case MINUS:
10741 if (float_mode_p)
10742 *total = sparc_costs->float_plusminus;
10743 else
10744 *total = COSTS_N_INSNS (1);
10745 return false;
10747 case FMA:
10749 rtx sub;
10751 gcc_assert (float_mode_p);
10752 *total = sparc_costs->float_mul;
10754 sub = XEXP (x, 0);
10755 if (GET_CODE (sub) == NEG)
10756 sub = XEXP (sub, 0);
10757 *total += rtx_cost (sub, FMA, 0, speed);
10759 sub = XEXP (x, 2);
10760 if (GET_CODE (sub) == NEG)
10761 sub = XEXP (sub, 0);
10762 *total += rtx_cost (sub, FMA, 2, speed);
10763 return true;
10766 case MULT:
10767 if (float_mode_p)
10768 *total = sparc_costs->float_mul;
10769 else if (! TARGET_HARD_MUL)
10770 *total = COSTS_N_INSNS (25);
10771 else
10773 int bit_cost;
10775 bit_cost = 0;
10776 if (sparc_costs->int_mul_bit_factor)
10778 int nbits;
10780 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
10782 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
10783 for (nbits = 0; value != 0; value &= value - 1)
10784 nbits++;
10786 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10787 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10789 rtx x1 = XEXP (x, 1);
10790 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10791 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10793 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10794 nbits++;
10795 for (; value2 != 0; value2 &= value2 - 1)
10796 nbits++;
10798 else
10799 nbits = 7;
10801 if (nbits < 3)
10802 nbits = 3;
10803 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10804 bit_cost = COSTS_N_INSNS (bit_cost);
10807 if (mode == DImode)
10808 *total = sparc_costs->int_mulX + bit_cost;
10809 else
10810 *total = sparc_costs->int_mul + bit_cost;
10812 return false;
10814 case ASHIFT:
10815 case ASHIFTRT:
10816 case LSHIFTRT:
10817 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10818 return false;
10820 case DIV:
10821 case UDIV:
10822 case MOD:
10823 case UMOD:
10824 if (float_mode_p)
10826 if (mode == DFmode)
10827 *total = sparc_costs->float_div_df;
10828 else
10829 *total = sparc_costs->float_div_sf;
10831 else
10833 if (mode == DImode)
10834 *total = sparc_costs->int_divX;
10835 else
10836 *total = sparc_costs->int_div;
10838 return false;
10840 case NEG:
10841 if (! float_mode_p)
10843 *total = COSTS_N_INSNS (1);
10844 return false;
10846 /* FALLTHRU */
10848 case ABS:
10849 case FLOAT:
10850 case UNSIGNED_FLOAT:
10851 case FIX:
10852 case UNSIGNED_FIX:
10853 case FLOAT_EXTEND:
10854 case FLOAT_TRUNCATE:
10855 *total = sparc_costs->float_move;
10856 return false;
10858 case SQRT:
10859 if (mode == DFmode)
10860 *total = sparc_costs->float_sqrt_df;
10861 else
10862 *total = sparc_costs->float_sqrt_sf;
10863 return false;
10865 case COMPARE:
10866 if (float_mode_p)
10867 *total = sparc_costs->float_cmp;
10868 else
10869 *total = COSTS_N_INSNS (1);
10870 return false;
10872 case IF_THEN_ELSE:
10873 if (float_mode_p)
10874 *total = sparc_costs->float_cmove;
10875 else
10876 *total = sparc_costs->int_cmove;
10877 return false;
10879 case IOR:
10880 /* Handle the NAND vector patterns. */
10881 if (sparc_vector_mode_supported_p (GET_MODE (x))
10882 && GET_CODE (XEXP (x, 0)) == NOT
10883 && GET_CODE (XEXP (x, 1)) == NOT)
10885 *total = COSTS_N_INSNS (1);
10886 return true;
10888 else
10889 return false;
10891 default:
10892 return false;
10896 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10898 static inline bool
10899 general_or_i64_p (reg_class_t rclass)
10901 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10904 /* Implement TARGET_REGISTER_MOVE_COST. */
10906 static int
10907 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10908 reg_class_t from, reg_class_t to)
10910 bool need_memory = false;
10912 if (from == FPCC_REGS || to == FPCC_REGS)
10913 need_memory = true;
10914 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10915 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
10917 if (TARGET_VIS3)
10919 int size = GET_MODE_SIZE (mode);
10920 if (size == 8 || size == 4)
10922 if (! TARGET_ARCH32 || size == 4)
10923 return 4;
10924 else
10925 return 6;
10928 need_memory = true;
10931 if (need_memory)
10933 if (sparc_cpu == PROCESSOR_ULTRASPARC
10934 || sparc_cpu == PROCESSOR_ULTRASPARC3
10935 || sparc_cpu == PROCESSOR_NIAGARA
10936 || sparc_cpu == PROCESSOR_NIAGARA2
10937 || sparc_cpu == PROCESSOR_NIAGARA3
10938 || sparc_cpu == PROCESSOR_NIAGARA4)
10939 return 12;
10941 return 6;
10944 return 2;
10947 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10948 This is achieved by means of a manual dynamic stack space allocation in
10949 the current frame. We make the assumption that SEQ doesn't contain any
10950 function calls, with the possible exception of calls to the GOT helper. */
10952 static void
10953 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10955 /* We must preserve the lowest 16 words for the register save area. */
10956 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10957 /* We really need only 2 words of fresh stack space. */
10958 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10960 rtx slot
10961 = gen_rtx_MEM (word_mode, plus_constant (Pmode, stack_pointer_rtx,
10962 SPARC_STACK_BIAS + offset));
10964 emit_insn (gen_stack_pointer_inc (GEN_INT (-size)));
10965 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10966 if (reg2)
10967 emit_insn (gen_rtx_SET (VOIDmode,
10968 adjust_address (slot, word_mode, UNITS_PER_WORD),
10969 reg2));
10970 emit_insn (seq);
10971 if (reg2)
10972 emit_insn (gen_rtx_SET (VOIDmode,
10973 reg2,
10974 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10975 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10976 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10979 /* Output the assembler code for a thunk function. THUNK_DECL is the
10980 declaration for the thunk function itself, FUNCTION is the decl for
10981 the target function. DELTA is an immediate constant offset to be
10982 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10983 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
10985 static void
10986 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10987 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10988 tree function)
10990 rtx this_rtx, insn, funexp;
10991 unsigned int int_arg_first;
10993 reload_completed = 1;
10994 epilogue_completed = 1;
10996 emit_note (NOTE_INSN_PROLOGUE_END);
10998 if (TARGET_FLAT)
11000 sparc_leaf_function_p = 1;
11002 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
11004 else if (flag_delayed_branch)
11006 /* We will emit a regular sibcall below, so we need to instruct
11007 output_sibcall that we are in a leaf function. */
11008 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 1;
11010 /* This will cause final.c to invoke leaf_renumber_regs so we
11011 must behave as if we were in a not-yet-leafified function. */
11012 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
11014 else
11016 /* We will emit the sibcall manually below, so we will need to
11017 manually spill non-leaf registers. */
11018 sparc_leaf_function_p = crtl->uses_only_leaf_regs = 0;
11020 /* We really are in a leaf function. */
11021 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
11024 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
11025 returns a structure, the structure return pointer is there instead. */
11026 if (TARGET_ARCH64
11027 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11028 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
11029 else
11030 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
11032 /* Add DELTA. When possible use a plain add, otherwise load it into
11033 a register first. */
11034 if (delta)
11036 rtx delta_rtx = GEN_INT (delta);
11038 if (! SPARC_SIMM13_P (delta))
11040 rtx scratch = gen_rtx_REG (Pmode, 1);
11041 emit_move_insn (scratch, delta_rtx);
11042 delta_rtx = scratch;
11045 /* THIS_RTX += DELTA. */
11046 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
11049 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
11050 if (vcall_offset)
11052 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
11053 rtx scratch = gen_rtx_REG (Pmode, 1);
11055 gcc_assert (vcall_offset < 0);
11057 /* SCRATCH = *THIS_RTX. */
11058 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
11060 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
11061 may not have any available scratch register at this point. */
11062 if (SPARC_SIMM13_P (vcall_offset))
11064 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
11065 else if (! fixed_regs[5]
11066 /* The below sequence is made up of at least 2 insns,
11067 while the default method may need only one. */
11068 && vcall_offset < -8192)
11070 rtx scratch2 = gen_rtx_REG (Pmode, 5);
11071 emit_move_insn (scratch2, vcall_offset_rtx);
11072 vcall_offset_rtx = scratch2;
11074 else
11076 rtx increment = GEN_INT (-4096);
11078 /* VCALL_OFFSET is a negative number whose typical range can be
11079 estimated as -32768..0 in 32-bit mode. In almost all cases
11080 it is therefore cheaper to emit multiple add insns than
11081 spilling and loading the constant into a register (at least
11082 6 insns). */
11083 while (! SPARC_SIMM13_P (vcall_offset))
11085 emit_insn (gen_add2_insn (scratch, increment));
11086 vcall_offset += 4096;
11088 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
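/* For example (illustrative): with VCALL_OFFSET == -10000, the loop
   above emits two add insns, leaving a residual offset of
   -10000 + 2*4096 == -1808, which fits in a signed 13-bit
   immediate. */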
11091 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
11092 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
11093 gen_rtx_PLUS (Pmode,
11094 scratch,
11095 vcall_offset_rtx)));
11097 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
11098 emit_insn (gen_add2_insn (this_rtx, scratch));
11101 /* Generate a tail call to the target function. */
11102 if (! TREE_USED (function))
11104 assemble_external (function);
11105 TREE_USED (function) = 1;
11107 funexp = XEXP (DECL_RTL (function), 0);
11109 if (flag_delayed_branch)
11111 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
11112 insn = emit_call_insn (gen_sibcall (funexp));
11113 SIBLING_CALL_P (insn) = 1;
11115 else
11117 /* The hoops we have to jump through in order to generate a sibcall
11118 without using delay slots... */
11119 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
11121 if (flag_pic)
11123 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
11124 start_sequence ();
11125 load_got_register (); /* clobbers %o7 */
11126 scratch = sparc_legitimize_pic_address (funexp, scratch);
11127 seq = get_insns ();
11128 end_sequence ();
11129 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
11131 else if (TARGET_ARCH32)
11133 emit_insn (gen_rtx_SET (VOIDmode,
11134 scratch,
11135 gen_rtx_HIGH (SImode, funexp)));
11136 emit_insn (gen_rtx_SET (VOIDmode,
11137 scratch,
11138 gen_rtx_LO_SUM (SImode, scratch, funexp)));
11140 else /* TARGET_ARCH64 */
11142 switch (sparc_cmodel)
11144 case CM_MEDLOW:
11145 case CM_MEDMID:
11146 /* The destination can serve as a temporary. */
11147 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
11148 break;
11150 case CM_MEDANY:
11151 case CM_EMBMEDANY:
11152 /* The destination cannot serve as a temporary. */
11153 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
11154 start_sequence ();
11155 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
11156 seq = get_insns ();
11157 end_sequence ();
11158 emit_and_preserve (seq, spill_reg, 0);
11159 break;
11161 default:
11162 gcc_unreachable ();
11166 emit_jump_insn (gen_indirect_jump (scratch));
11169 emit_barrier ();
11171 /* Run just enough of rest_of_compilation to get the insns emitted.
11172 There's not really enough bulk here to make other passes such as
11173 instruction scheduling worthwhile. Note that use_thunk calls
11174 assemble_start_function and assemble_end_function. */
11175 insn = get_insns ();
11176 shorten_branches (insn);
11177 final_start_function (insn, file, 1);
11178 final (insn, file, 1);
11179 final_end_function ();
11181 reload_completed = 0;
11182 epilogue_completed = 0;
11185 /* Return true if sparc_output_mi_thunk would be able to output the
11186 assembler code for the thunk function specified by the arguments
11187 it is passed, and false otherwise. */
11188 static bool
11189 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
11190 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
11191 HOST_WIDE_INT vcall_offset,
11192 const_tree function ATTRIBUTE_UNUSED)
11194 /* Bound the loop used in the default method above. */
11195 return (vcall_offset >= -32768 || ! fixed_regs[5]);
11198 /* How to allocate a 'struct machine_function'. */
11200 static struct machine_function *
11201 sparc_init_machine_status (void)
11203 return ggc_alloc_cleared_machine_function ();
11206 /* Locate some local-dynamic symbol still in use by this function
11207 so that we can print its name in local-dynamic base patterns. */
11209 static const char *
11210 get_some_local_dynamic_name (void)
11212 rtx insn;
11214 if (cfun->machine->some_ld_name)
11215 return cfun->machine->some_ld_name;
11217 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11218 if (INSN_P (insn)
11219 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11220 return cfun->machine->some_ld_name;
11222 gcc_unreachable ();
11225 static int
11226 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11228 rtx x = *px;
11230 if (x
11231 && GET_CODE (x) == SYMBOL_REF
11232 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11234 cfun->machine->some_ld_name = XSTR (x, 0);
11235 return 1;
11238 return 0;
11241 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
11242 We need to emit DTP-relative relocations. */
11244 static void
11245 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
11247 switch (size)
11249 case 4:
11250 fputs ("\t.word\t%r_tls_dtpoff32(", file);
11251 break;
11252 case 8:
11253 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
11254 break;
11255 default:
11256 gcc_unreachable ();
11258 output_addr_const (file, x);
11259 fputs (")", file);
11262 /* Do whatever processing is required at the end of a file. */
11264 static void
11265 sparc_file_end (void)
11267 /* If we need to emit the special GOT helper function, do so now. */
11268 if (got_helper_rtx)
11270 const char *name = XSTR (got_helper_rtx, 0);
11271 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
11272 #ifdef DWARF2_UNWIND_INFO
11273 bool do_cfi;
11274 #endif
11276 if (USE_HIDDEN_LINKONCE)
11278 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
11279 get_identifier (name),
11280 build_function_type_list (void_type_node,
11281 NULL_TREE));
11282 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
11283 NULL_TREE, void_type_node);
11284 TREE_PUBLIC (decl) = 1;
11285 TREE_STATIC (decl) = 1;
11286 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
11287 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
11288 DECL_VISIBILITY_SPECIFIED (decl) = 1;
11289 resolve_unique_section (decl, 0, flag_function_sections);
11290 allocate_struct_function (decl, true);
11291 cfun->is_thunk = 1;
11292 current_function_decl = decl;
11293 init_varasm_status ();
11294 assemble_start_function (decl, name);
11296 else
11298 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
11299 switch_to_section (text_section);
11300 if (align > 0)
11301 ASM_OUTPUT_ALIGN (asm_out_file, align);
11302 ASM_OUTPUT_LABEL (asm_out_file, name);
11305 #ifdef DWARF2_UNWIND_INFO
11306 do_cfi = dwarf2out_do_cfi_asm ();
11307 if (do_cfi)
11308 fprintf (asm_out_file, "\t.cfi_startproc\n");
11309 #endif
11310 if (flag_delayed_branch)
11311 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
11312 reg_name, reg_name);
11313 else
11314 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
11315 reg_name, reg_name);
11316 #ifdef DWARF2_UNWIND_INFO
11317 if (do_cfi)
11318 fprintf (asm_out_file, "\t.cfi_endproc\n");
11319 #endif
11322 if (NEED_INDICATE_EXEC_STACK)
11323 file_end_indicate_exec_stack ();
11325 #ifdef TARGET_SOLARIS
11326 solaris_file_end ();
11327 #endif
11330 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11331 /* Implement TARGET_MANGLE_TYPE. */
11333 static const char *
11334 sparc_mangle_type (const_tree type)
11336 if (!TARGET_64BIT
11337 && TYPE_MAIN_VARIANT (type) == long_double_type_node
11338 && TARGET_LONG_DOUBLE_128)
11339 return "g";
11341 /* For all other types, use normal C++ mangling. */
11342 return NULL;
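/* E.g. (illustrative): with the "g" mangling above, 'void f (long double)'
   mangles as _Z1fg rather than the default _Z1fe. */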
11344 #endif
11346 /* Expand a membar instruction for various use cases. Both the LOAD_STORE
11347 and BEFORE_AFTER arguments are of the form X_Y. They are two-bit masks where
11348 bit 0 indicates that X is true, and bit 1 indicates Y is true. */
11350 void
11351 sparc_emit_membar_for_model (enum memmodel model,
11352 int load_store, int before_after)
11354 /* Bits for the MEMBAR mmask field. */
11355 const int LoadLoad = 1;
11356 const int StoreLoad = 2;
11357 const int LoadStore = 4;
11358 const int StoreStore = 8;
11360 int mm = 0, implied = 0;
11362 switch (sparc_memory_model)
11364 case SMM_SC:
11365 /* Sequential Consistency. All memory transactions are immediately
11366 visible in sequential execution order. No barriers needed. */
11367 implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
11368 break;
11370 case SMM_TSO:
11371 /* Total Store Ordering: all memory transactions with store semantics
11372 are followed by an implied StoreStore. */
11373 implied |= StoreStore;
11375 /* If we're not looking for a raw barrier (before+after), then atomic
11376 operations get the benefit of being both load and store. */
11377 if (load_store == 3 && before_after == 1)
11378 implied |= StoreLoad;
11379 /* FALLTHRU */
11381 case SMM_PSO:
11382 /* Partial Store Ordering: all memory transactions with load semantics
11383 are followed by an implied LoadLoad | LoadStore. */
11384 implied |= LoadLoad | LoadStore;
11386 /* If we're not looking for a raw barrier (before+after), then atomic
11387 operations get the benefit of being both load and store. */
11388 if (load_store == 3 && before_after == 2)
11389 implied |= StoreLoad | StoreStore;
11390 /* FALLTHRU */
11392 case SMM_RMO:
11393 /* Relaxed Memory Ordering: no implicit bits. */
11394 break;
11396 default:
11397 gcc_unreachable ();
11400 if (before_after & 1)
11402 if (model == MEMMODEL_RELEASE
11403 || model == MEMMODEL_ACQ_REL
11404 || model == MEMMODEL_SEQ_CST)
11406 if (load_store & 1)
11407 mm |= LoadLoad | StoreLoad;
11408 if (load_store & 2)
11409 mm |= LoadStore | StoreStore;
11412 if (before_after & 2)
11414 if (model == MEMMODEL_ACQUIRE
11415 || model == MEMMODEL_ACQ_REL
11416 || model == MEMMODEL_SEQ_CST)
11418 if (load_store & 1)
11419 mm |= LoadLoad | LoadStore;
11420 if (load_store & 2)
11421 mm |= StoreLoad | StoreStore;
11425 /* Remove the bits implied by the system memory model. */
11426 mm &= ~implied;
11428 /* For raw barriers (before+after), always emit a barrier.
11429 This will become a compile-time barrier if needed. */
11430 if (mm || before_after == 3)
11431 emit_insn (gen_membar (GEN_INT (mm)));
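/* As a worked example (illustrative): for a full SEQ_CST fence,
   i.e. LOAD_STORE == 3 and BEFORE_AFTER == 3, under the TSO memory
   model, MM starts out with all four bits set, TSO implies
   LoadLoad | LoadStore | StoreStore, so only StoreLoad survives and a
   single "membar #StoreLoad" is emitted. */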
11434 /* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing
11435 a 32-bit compare-and-swap on the word containing the byte or half-word. */
11437 static void
11438 sparc_expand_compare_and_swap_12 (rtx bool_result, rtx result, rtx mem,
11439 rtx oldval, rtx newval)
11441 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
11442 rtx addr = gen_reg_rtx (Pmode);
11443 rtx off = gen_reg_rtx (SImode);
11444 rtx oldv = gen_reg_rtx (SImode);
11445 rtx newv = gen_reg_rtx (SImode);
11446 rtx oldvalue = gen_reg_rtx (SImode);
11447 rtx newvalue = gen_reg_rtx (SImode);
11448 rtx res = gen_reg_rtx (SImode);
11449 rtx resv = gen_reg_rtx (SImode);
11450 rtx memsi, val, mask, end_label, loop_label, cc;
11452 emit_insn (gen_rtx_SET (VOIDmode, addr,
11453 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
11455 if (Pmode != SImode)
11456 addr1 = gen_lowpart (SImode, addr1);
11457 emit_insn (gen_rtx_SET (VOIDmode, off,
11458 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
11460 memsi = gen_rtx_MEM (SImode, addr);
11461 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
11462 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
11464 val = copy_to_reg (memsi);
11466 emit_insn (gen_rtx_SET (VOIDmode, off,
11467 gen_rtx_XOR (SImode, off,
11468 GEN_INT (GET_MODE (mem) == QImode
11469 ? 3 : 2))));
11471 emit_insn (gen_rtx_SET (VOIDmode, off,
11472 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
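/* OFF is now the bit position, within the 32-bit word, of the byte or
   half-word being operated on; the XOR above flips the low address
   bits because SPARC is big-endian. */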
11474 if (GET_MODE (mem) == QImode)
11475 mask = force_reg (SImode, GEN_INT (0xff));
11476 else
11477 mask = force_reg (SImode, GEN_INT (0xffff));
11479 emit_insn (gen_rtx_SET (VOIDmode, mask,
11480 gen_rtx_ASHIFT (SImode, mask, off)));
11482 emit_insn (gen_rtx_SET (VOIDmode, val,
11483 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
11484 val)));
11486 oldval = gen_lowpart (SImode, oldval);
11487 emit_insn (gen_rtx_SET (VOIDmode, oldv,
11488 gen_rtx_ASHIFT (SImode, oldval, off)));
11490 newval = gen_lowpart_common (SImode, newval);
11491 emit_insn (gen_rtx_SET (VOIDmode, newv,
11492 gen_rtx_ASHIFT (SImode, newval, off)));
11494 emit_insn (gen_rtx_SET (VOIDmode, oldv,
11495 gen_rtx_AND (SImode, oldv, mask)));
11497 emit_insn (gen_rtx_SET (VOIDmode, newv,
11498 gen_rtx_AND (SImode, newv, mask)));
11500 end_label = gen_label_rtx ();
11501 loop_label = gen_label_rtx ();
11502 emit_label (loop_label);
11504 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
11505 gen_rtx_IOR (SImode, oldv, val)));
11507 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
11508 gen_rtx_IOR (SImode, newv, val)));
11510 emit_move_insn (bool_result, const1_rtx);
11512 emit_insn (gen_atomic_compare_and_swapsi_1 (res, memsi, oldvalue, newvalue));
11514 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
11516 emit_insn (gen_rtx_SET (VOIDmode, resv,
11517 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
11518 res)));
11520 emit_move_insn (bool_result, const0_rtx);
11522 cc = gen_compare_reg_1 (NE, resv, val);
11523 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
11525 /* Use cbranchcc4 to separate the compare and branch! */
11526 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
11527 cc, const0_rtx, loop_label));
11529 emit_label (end_label);
11531 emit_insn (gen_rtx_SET (VOIDmode, res,
11532 gen_rtx_AND (SImode, res, mask)));
11534 emit_insn (gen_rtx_SET (VOIDmode, res,
11535 gen_rtx_LSHIFTRT (SImode, res, off)));
11537 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
11540 /* Expand code to perform a compare-and-swap. */
11542 void
11543 sparc_expand_compare_and_swap (rtx operands[])
11545 rtx bval, retval, mem, oldval, newval;
11546 enum machine_mode mode;
11547 enum memmodel model;
11549 bval = operands[0];
11550 retval = operands[1];
11551 mem = operands[2];
11552 oldval = operands[3];
11553 newval = operands[4];
11554 model = (enum memmodel) INTVAL (operands[6]);
11555 mode = GET_MODE (mem);
11557 sparc_emit_membar_for_model (model, 3, 1);
11559 if (reg_overlap_mentioned_p (retval, oldval))
11560 oldval = copy_to_reg (oldval);
11562 if (mode == QImode || mode == HImode)
11563 sparc_expand_compare_and_swap_12 (bval, retval, mem, oldval, newval);
11564 else
11566 rtx (*gen) (rtx, rtx, rtx, rtx);
11567 rtx x;
11569 if (mode == SImode)
11570 gen = gen_atomic_compare_and_swapsi_1;
11571 else
11572 gen = gen_atomic_compare_and_swapdi_1;
11573 emit_insn (gen (retval, mem, oldval, newval));
11575 x = emit_store_flag (bval, EQ, retval, oldval, mode, 1, 1);
11576 if (x != bval)
11577 convert_move (bval, x, 1);
11580 sparc_emit_membar_for_model (model, 3, 2);

void
sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
{
  rtx t_1, t_2, t_3;

  sel = gen_lowpart (DImode, sel);
  switch (vmode)
    {
    case V2SImode:
      /* inp = xxxxxxxAxxxxxxxB */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ....xxxxxxxAxxx. */
      sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
				 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
      t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
				 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .......B */
      /* t_1 = ...A.... */
      sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
      /* sel = ...A...B */
      sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
      /* sel = AAAABBBB * 4 */
      t_1 = force_reg (SImode, GEN_INT (0x01230123));
      /* sel = { A*4, A*4+1, A*4+2, ... } */
      break;

    case V4HImode:
      /* inp = xxxAxxxBxxxCxxxD */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ..xxxAxxxBxxxCxx */
      /* t_2 = ....xxxAxxxBxxxC */
      /* t_3 = ......xxxAxxxBxx */
      sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
				 GEN_INT (0x07),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
				 GEN_INT (0x0700),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
				 GEN_INT (0x070000),
				 NULL_RTX, 1, OPTAB_DIRECT);
      t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
				 GEN_INT (0x07000000),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .......D */
      /* t_1 = .....C.. */
      /* t_2 = ...B.... */
      /* t_3 = .A...... */
      sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
      t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
      sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
      /* sel = .A.B.C.D */
      sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
      /* sel = AABBCCDD * 2 */
      t_1 = force_reg (SImode, GEN_INT (0x01010101));
      /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
      break;

    case V8QImode:
      /* input = xAxBxCxDxExFxGxH */
      sel = expand_simple_binop (DImode, AND, sel,
				 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
					  | 0x0f0f0f0f),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .A.B.C.D.E.F.G.H */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ..A.B.C.D.E.F.G. */
      sel = expand_simple_binop (DImode, IOR, sel, t_1,
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = .AABBCCDDEEFFGGH */
      sel = expand_simple_binop (DImode, AND, sel,
				 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
					  | 0xff00ff),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = ..AB..CD..EF..GH */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ....AB..CD..EF.. */
      sel = expand_simple_binop (DImode, IOR, sel, t_1,
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = ..ABABCDCDEFEFGH */
      sel = expand_simple_binop (DImode, AND, sel,
				 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* sel = ....ABCD....EFGH */
      t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
				 NULL_RTX, 1, OPTAB_DIRECT);
      /* t_1 = ........ABCD.... */
      sel = gen_lowpart (SImode, sel);
      t_1 = gen_lowpart (SImode, t_1);
      break;

    default:
      gcc_unreachable ();
    }

  /* Always perform the final addition/merge within the bmask insn.  */
  emit_insn (gen_bmasksi_vis (gen_rtx_REG (SImode, 0), sel, t_1));
}
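
/* As a worked example (illustrative only): for V4HImode with selector
   elements { 3, 2, 1, 0 }, the code above narrows the four indices to
   SEL = 0x03020100 (".A.B.C.D"), and the multiply by 0x22 doubles each
   index while duplicating it into the adjacent nibble:

     0x03020100 * 0x22 = 0x66442200.

   The bmask insn then adds 0x01010101, producing the byte-shuffle mask
   0x67452301, i.e. the byte pairs { 6,7, 4,5, 2,3, 0,1 } that reverse
   the four halfwords.  */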

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(crtl->is_leaf && only_leaf_regs_used ());
}
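
/* For instance, a function such as

     void f (int n) { char *p = __builtin_alloca (n); g (p); }

   (g being an arbitrary external function) adjusts %sp at run time, so
   cfun->calls_alloca is set and locals must be addressed off the frame
   pointer instead.  Illustration only.  */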

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}

/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5, then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
	   regno <= SPARC_LAST_V9_FP_REG;
	   regno++)
	fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
	   regno <= SPARC_LAST_V9_FCC_REG;
	   regno++)
	fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
	fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2, then honor it.
     Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs[SPARC_GSR_REG] = 1;
}
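
/* Illustration: with -mno-app-regs, %g2 and %g3 become fixed here
   because their FIXED_REGISTERS entries start out as the placeholder
   value 2 ("decide at this point"); an explicit -ffixed-g2 or
   -fcall-used-g2 on the command line replaces the 2 before we get
   here, so the user's choice is left untouched by the tests above.  */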

/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try to reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  enum machine_mode mode = GET_MODE (x);
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
	  || rclass == GENERAL_OR_FP_REGS
	  || rclass == GENERAL_OR_EXTRA_FP_REGS
	  || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
	  || (mode == TFmode && ! const_zero_operand (x, mode)))
	return NO_REGS;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	return GENERAL_REGS;

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	{
	  if (! FP_REG_CLASS_P (rclass)
	      || !(const_zero_operand (x, mode)
		   || const_all_ones_operand (x, mode)))
	    return NO_REGS;
	}
    }

  if (TARGET_VIS3
      && ! TARGET_ARCH64
      && (rclass == EXTRA_FP_REGS
	  || rclass == GENERAL_OR_EXTRA_FP_REGS))
    {
      int regno = true_regnum (x);

      if (SPARC_INT_REG_P (regno))
	return (rclass == EXTRA_FP_REGS
		? FP_REGS : GENERAL_OR_FP_REGS);
    }

  return rclass;
}
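
/* Example (illustrative): asked to reload the SFmode constant 1.5 into
   FP_REGS, this returns NO_REGS, so the constant is spilled to the
   constant pool and loaded from memory; the SImode constant 42 instead
   yields GENERAL_REGS and is materialized directly in an integer
   register.  */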

/* Output a wide multiply instruction in V8+ mode.  INSN is the instruction,
   OPERANDS are its operands and OPCODE is the mnemonic to be used.  */

const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *opcode)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
	{
	  output_asm_insn ("or\t%L1, %H1, %H1", operands);
	  sprintf (mulstr, "%s\t%%H1, %%2, %%L0", opcode);
	  output_asm_insn (mulstr, operands);
	  return "srlx\t%L0, 32, %H0";
	}
      else
	{
	  output_asm_insn ("sllx\t%H1, 32, %3", operands);
	  output_asm_insn ("or\t%L1, %3, %3", operands);
	  sprintf (mulstr, "%s\t%%3, %%2, %%3", opcode);
	  output_asm_insn (mulstr, operands);
	  output_asm_insn ("srlx\t%3, 32, %H0", operands);
	  return "mov\t%3, %L0";
	}
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
	{
	  output_asm_insn ("or\t%L1, %H1, %H1", operands);
	  sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", opcode);
	  output_asm_insn (mulstr, operands);
	  return "srlx\t%L0, 32, %H0";
	}
      else
	{
	  output_asm_insn ("sllx\t%H1, 32, %3", operands);
	  output_asm_insn ("or\t%L1, %3, %3", operands);
	  sprintf (mulstr, "%s\t%%3, %%3, %%3", opcode);
	  output_asm_insn (mulstr, operands);
	  output_asm_insn ("srlx\t%3, 32, %H0", operands);
	  return "mov\t%3, %L0";
	}
    }

  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", opcode);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", opcode);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}
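
/* For the register/register alternative 0 with OPCODE "mulx", the
   sequence emitted above is (sketch; %3 and %4 are the pattern's
   scratch operands):

     srl   %L1, 0, %L1     ! zero-extend the low words if needed
     srl   %L2, 0, %L2
     sllx  %H1, 32, %3     ! glue each 64-bit value back together
     sllx  %H2, 32, %4
     or    %L1, %3, %3
     or    %L2, %4, %4
     mulx  %3, %4, %3      ! full 64-bit product
     srlx  %3, 32, %H0     ! split it into the result register pair
     mov   %3, %L0  */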

/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT by means of VIS2 BSHUFFLE insn.  MODE
   and INNER_MODE are the modes describing TARGET.  */

static void
vector_init_bshuffle (rtx target, rtx elt, enum machine_mode mode,
		      enum machine_mode inner_mode)
{
  rtx t1, final_insn, sel;
  int bmask;

  t1 = gen_reg_rtx (mode);

  elt = convert_modes (SImode, inner_mode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  switch (mode)
    {
    case V2SImode:
      final_insn = gen_bshufflev2si_vis (target, t1, t1);
      bmask = 0x45674567;
      break;
    case V4HImode:
      final_insn = gen_bshufflev4hi_vis (target, t1, t1);
      bmask = 0x67676767;
      break;
    case V8QImode:
      final_insn = gen_bshufflev8qi_vis (target, t1, t1);
      bmask = 0x77777777;
      break;
    default:
      gcc_unreachable ();
    }

  sel = force_reg (SImode, GEN_INT (bmask));
  emit_insn (gen_bmasksi_vis (gen_rtx_REG (SImode, 0), sel, const0_rtx));
  emit_insn (final_insn);
}
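
/* The bmask constants read as eight 4-bit byte selectors into the
   16-byte concatenation t1:t1.  ELT sits in the low 32 bits of t1,
   i.e. in bytes 4-7 (SPARC is big-endian), so e.g. 0x77777777
   replicates byte 7 (the QImode element) into every byte of TARGET,
   while 0x67676767 replicates the byte pair 6,7 (the HImode element)
   into every halfword.  */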

/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT in V8QI by means of VIS FPMERGE insn.  */

static void
vector_init_fpmerge (rtx target, rtx elt)
{
  rtx t1, t2, t2_low, t3, t3_low;

  t1 = gen_reg_rtx (V4QImode);
  elt = convert_modes (SImode, QImode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  t2 = gen_reg_rtx (V8QImode);
  t2_low = gen_lowpart (V4QImode, t2);
  emit_insn (gen_fpmerge_vis (t2, t1, t1));

  t3 = gen_reg_rtx (V8QImode);
  t3_low = gen_lowpart (V4QImode, t3);
  emit_insn (gen_fpmerge_vis (t3, t2_low, t2_low));

  emit_insn (gen_fpmerge_vis (target, t3_low, t3_low));
}
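
/* FPMERGE interleaves the bytes of its two V4QI sources, so each step
   doubles the number of copies of ELT (written E below, x = don't care):

     t1     = { x, x, x, E }
     t2     = { x, x, x, x, x, x, E, E }	fpmerge (t1, t1)
     t3     = { x, x, x, x, E, E, E, E }	fpmerge (t2_low, t2_low)
     target = { E, E, E, E, E, E, E, E }	fpmerge (t3_low, t3_low)  */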

/* Subroutine of sparc_expand_vector_init.  Emit code to initialize
   all fields of TARGET to ELT in V4HI by means of VIS FALIGNDATA insn.  */

static void
vector_init_faligndata (rtx target, rtx elt)
{
  rtx t1 = gen_reg_rtx (V4HImode);
  int i;

  elt = convert_modes (SImode, HImode, elt, true);
  emit_move_insn (gen_lowpart (SImode, t1), elt);

  emit_insn (gen_alignaddrsi_vis (gen_reg_rtx (SImode),
				  force_reg (SImode, GEN_INT (6)),
				  const0_rtx));

  for (i = 0; i < 4; i++)
    emit_insn (gen_faligndatav4hi_vis (target, t1, target));
}
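
/* The alignaddr sets GSR.align to 6, so each faligndata extracts bytes
   6..13 of the 16-byte concatenation t1:target, i.e. it prepends the
   element held in t1's low halfword and shifts TARGET right by one
   halfword.  Four iterations therefore fill all four halfwords with
   ELT.  (Sketch of the semantics, for illustration.)  */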

/* Emit code to initialize TARGET to values for individual fields VALS.  */

void
sparc_expand_vector_init (rtx target, rtx vals)
{
  const enum machine_mode mode = GET_MODE (target);
  const enum machine_mode inner_mode = GET_MODE_INNER (mode);
  const int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  bool all_same;
  rtx mem;

  all_same = true;
  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
	n_var++;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (mode))
    {
      if (GET_MODE_SIZE (inner_mode) == 4)
	{
	  emit_move_insn (gen_lowpart (SImode, target),
			  gen_lowpart (SImode, XVECEXP (vals, 0, 0)));
	  return;
	}
      else if (GET_MODE_SIZE (inner_mode) == 8)
	{
	  emit_move_insn (gen_lowpart (DImode, target),
			  gen_lowpart (DImode, XVECEXP (vals, 0, 0)));
	  return;
	}
    }
  else if (GET_MODE_SIZE (inner_mode) == GET_MODE_SIZE (word_mode)
	   && GET_MODE_SIZE (mode) == 2 * GET_MODE_SIZE (word_mode))
    {
      emit_move_insn (gen_highpart (word_mode, target),
		      gen_lowpart (word_mode, XVECEXP (vals, 0, 0)));
      emit_move_insn (gen_lowpart (word_mode, target),
		      gen_lowpart (word_mode, XVECEXP (vals, 0, 1)));
      return;
    }

  if (all_same && GET_MODE_SIZE (mode) == 8)
    {
      if (TARGET_VIS2)
	{
	  vector_init_bshuffle (target, XVECEXP (vals, 0, 0), mode, inner_mode);
	  return;
	}
      if (mode == V8QImode)
	{
	  vector_init_fpmerge (target, XVECEXP (vals, 0, 0));
	  return;
	}
      if (mode == V4HImode)
	{
	  vector_init_faligndata (target, XVECEXP (vals, 0, 0));
	  return;
	}
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
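
/* To summarize the dispatch (illustrative): a V4HImode splat of one
   variable short hits the all_same path above and is expanded with
   BSHUFFLE (VIS2) or FALIGNDATA (VIS1), whereas { a, b, c, d } with
   distinct variable elements falls through to the stack temporary,
   storing each element and reloading the vector in one 8-byte move.  */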

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
	  || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
	      && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;

  if (((TARGET_CM_MEDANY
	&& symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
	   && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
	sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
	sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
	 to move 8-byte values in 4-byte pieces.  This only works via
	 FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
	 move between EXTRA_FP_REGS and GENERAL_REGS, we will need
	 an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
	  || ((general_or_i64_p (rclass)
	       || rclass == GENERAL_OR_FP_REGS)
	      && SPARC_FP_REG_P (regno)))
	{
	  sri->extra_cost = 2;
	  return FP_REGS;
	}
    }

  return NO_REGS;
}
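
/* Example (sketch): on a 32-bit VIS3 target, moving a DImode value
   from an integer register pair into %f32 cannot use the fp<-->int
   move instructions directly, because the 4-byte pieces only exist as
   single registers in %f0-%f31; the code above therefore requests an
   FP_REGS intermediate and charges the two extra moves via
   sri->extra_cost.  */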

/* Emit code to conditionally move either OPERANDS[2] or OPERANDS[3] into
   OPERANDS[0] in MODE.  OPERANDS[1] is the operator of the condition.  */

bool
sparc_expand_conditional_move (enum machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  enum machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  if (GET_MODE (XEXP (cmp, 0)) == DImode && !TARGET_ARCH64)
    return false;

  if (GET_MODE (XEXP (cmp, 0)) == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  cmp_mode = GET_MODE (XEXP (cmp, 0));
  rc = GET_CODE (cmp);

  dst = operands[0];
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
	dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
	rc = reverse_condition_maybe_unordered (rc);
      else
	rc = reverse_condition (rc);
    }

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}
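
/* The net effect for, say, "x = (a > b) ? c : d" in SImode is (sketch):

     mov	d, x		! unconditionally move the "else" value
     cmp	a, b
     movg	%icc, c, x	! overwrite with the "then" value

   i.e. only one conditional move is needed because DST is preloaded
   with OPERANDS[3] before the if_then_else is emitted.  */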

/* Emit code to conditionally move a combination of OPERANDS[1] and OPERANDS[2]
   into OPERANDS[0] in MODE, depending on the outcome of the comparison of
   OPERANDS[4] and OPERANDS[5].  OPERANDS[3] is the operator of the condition.
   FCODE is the machine code to be used for OPERANDS[3] and CCODE the machine
   code to be used for the condition mask.  */

void
sparc_expand_vcond (enum machine_mode mode, rtx *operands, int ccode, int fcode)
{
  rtx mask, cop0, cop1, fcmp, cmask, bshuf, gsr;
  enum rtx_code code = GET_CODE (operands[3]);

  mask = gen_reg_rtx (Pmode);
  cop0 = operands[4];
  cop1 = operands[5];
  if (code == LT || code == GE)
    {
      rtx t;

      code = swap_condition (code);
      t = cop0; cop0 = cop1; cop1 = t;
    }

  gsr = gen_rtx_REG (DImode, SPARC_GSR_REG);

  fcmp = gen_rtx_UNSPEC (Pmode,
			 gen_rtvec (1, gen_rtx_fmt_ee (code, mode, cop0, cop1)),
			 fcode);

  cmask = gen_rtx_UNSPEC (DImode,
			  gen_rtvec (2, mask, gsr),
			  ccode);

  bshuf = gen_rtx_UNSPEC (mode,
			  gen_rtvec (3, operands[1], operands[2], gsr),
			  UNSPEC_BSHUFFLE);

  emit_insn (gen_rtx_SET (VOIDmode, mask, fcmp));
  emit_insn (gen_rtx_SET (VOIDmode, gsr, cmask));

  emit_insn (gen_rtx_SET (VOIDmode, operands[0], bshuf));
}

/* On sparc, any mode which naturally allocates into the float
   registers should return 4 here.  */

unsigned int
sparc_regmode_natural_size (enum machine_mode mode)
{
  int size = UNITS_PER_WORD;

  if (TARGET_ARCH64)
    {
      enum mode_class mclass = GET_MODE_CLASS (mode);

      if (mclass == MODE_FLOAT || mclass == MODE_VECTOR_INT)
	size = 4;
    }

  return size;
}
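
/* E.g. on TARGET_ARCH64, DImode answers 8 (UNITS_PER_WORD) while
   DFmode and V8QImode answer 4, reflecting that a double still
   occupies a pair of 4-byte floating-point registers.  */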

/* Return TRUE if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be FALSE for correct output.

   For V9 we have to deal with the fact that only the lower 32 floating
   point registers are 32-bit addressable.  */

bool
sparc_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  enum mode_class mclass1, mclass2;
  unsigned short size1, size2;

  if (mode1 == mode2)
    return true;

  mclass1 = GET_MODE_CLASS (mode1);
  mclass2 = GET_MODE_CLASS (mode2);
  if (mclass1 != mclass2)
    return false;

  if (! TARGET_V9)
    return true;

  /* Classes are the same and we are V9 so we have to deal with upper
     vs. lower floating point registers.  If one of the modes is a
     4-byte mode, and the other is not, we have to mark them as not
     tieable because only the lower 32 floating point registers are
     addressable 32 bits at a time.

     We can't just test explicitly for SFmode, otherwise we won't
     cover the vector mode cases properly.  */

  if (mclass1 != MODE_FLOAT && mclass1 != MODE_VECTOR_INT)
    return true;

  size1 = GET_MODE_SIZE (mode1);
  size2 = GET_MODE_SIZE (mode2);
  if ((size1 > 4 && size2 == 4)
      || (size2 > 4 && size1 == 4))
    return false;

  return true;
}
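
/* For example, SFmode (4 bytes) and DFmode (8 bytes) are not tieable
   on V9: a DFmode pseudo may land in the upper registers %f32-%f62,
   where no 4-byte access exists, so reusing the same hard register
   for an SFmode value would be invalid.  */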

static enum machine_mode
sparc_cstore_mode (enum insn_code icode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 ? DImode : SImode);
}

#include "gt-sparc.h"