/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
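/* Cost table currently in effect.  It defaults to cypress below and is
   re-pointed by sparc_override_options according to the processor selected
   with -mcpu=/-mtune= (see the switch over sparc_cpu there).  */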
const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   anything branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
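/* Rationale for the table above: a leaf function omits the SAVE instruction
   and runs in its caller's register window, so the globals (0-7) and %sp (14)
   map to themselves while the incoming registers %i0-%i5 and %i7 (24-29, 31)
   are renamed to the outgoing registers %o0-%o5 and %o7 (8-13, 15).  A -1
   entry marks a register that may not appear in a leaf candidate.  */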
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
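/* All legitimate memory addresses are treated as equally cheap on SPARC:
   hook_int_rtx_bool_0 simply returns 0 for every address.  */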
/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
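/* Note the asymmetry encoded in sparc_select above: OPT_mcpu_ fills slot 1,
   whose set_tune_p and set_arch_p columns are both 1, while OPT_mtune_ fills
   slot 2, which only affects tuning.  */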
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
      /* Although insns using %y are deprecated, it is a clear win on current
	 ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 2
		      : (sparc_cpu == PROCESSOR_ULTRASPARC3
			 ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_ULTRASPARC3
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 64 : 32));
}
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
	 gap can be different from the object-file gap.  We therefore can't
	 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
	 are absolutely sure that X is in the same segment as the GOT.
	 Unfortunately, the flexibility of linker scripts means that we
	 can't be sure of that in general, so assume that _G_O_T_-relative
	 accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] :
						 NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
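/* As an example of the CONST_INT path above, loading 0x12345678 yields
     sethi %hi(0x12345678), %tmp   ! %tmp = 0x12345400, the upper 22 bits
     or    %tmp, 0x278, %reg       ! merge in the low 10 bits
   since the SPARC %hi/%lo relocations split a 32-bit value 22/10.  */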
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits so the result
   matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */
static void sparc_emit_set_const64_quick1 (rtx, rtx,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
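/* Worked example for the is_neg path: op1 = 0xffffffff12345678 gives
   high_bits = ~0x12345678 & 0xffffffff = 0xedcba987, so we emit
     sethi %hi(0xedcba987), %tmp      ! %tmp = 0xedcba800
     xor   %tmp, -0x400 | 0x278, %reg
   where the sign-extended 13-bit XOR constant both restores the low word
   to 0x12345678 and flips the zero upper word to all ones.  */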
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
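/* This helper covers constants that are one 32-bit piece shifted into
   position: materialize the piece with sethi (plus an optional or), or a
   single mov if it fits, sllx it by SHIFT_COUNT, then optionally or in
   LOW_IMMEDIATE.  */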
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
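/* Bit numbering above: bit 0 is the LSB of LOW_BITS and bit 63 the MSB of
   HIGH_BITS.  The three outputs (highest set bit, lowest set bit, and
   whether every bit in between is set) are exactly what the 2-insn tests
   below key off.  */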
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
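/* The (highest - lowest) < 21 test means the set bits span at most 21
   positions, so once shifted down they fit (conservatively) in the 22-bit
   immediate of a single sethi; sethi plus one shift then realizes the
   promised 2-insn sequence.  */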
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
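/* The result is the run of set bits moved down so that it starts at bit
   SHIFT: right-justified for SHIFT == 0, or pre-positioned for a sethi when
   SHIFT == 10.  The assert checks that the two recombined halves do not
   overlap.  */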
1718 /* Here we are sure to be arch64 and this is an integer constant
1719 being loaded into a register. Emit the most efficient
1720 insn sequence possible. Detection of all the 1-insn cases
1721 has been done already. */
1722 void
1723 sparc_emit_set_const64 (rtx op0, rtx op1)
1725 unsigned HOST_WIDE_INT high_bits, low_bits;
1726 int lowest_bit_set, highest_bit_set;
1727 int all_bits_between_are_set;
1728 rtx temp = 0;
1730 /* Sanity check that we know what we are working with. */
1731 gcc_assert (TARGET_ARCH64
1732 && (GET_CODE (op0) == SUBREG
1733 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1735 if (reload_in_progress || reload_completed)
1736 temp = op0;
1738 if (GET_CODE (op1) != CONST_INT)
1740 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1741 return;
1744 if (! temp)
1745 temp = gen_reg_rtx (DImode);
1747 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1748 low_bits = (INTVAL (op1) & 0xffffffff);
1750 /* low_bits bits 0 --> 31
1751 high_bits bits 32 --> 63 */
1753 analyze_64bit_constant (high_bits, low_bits,
1754 &highest_bit_set, &lowest_bit_set,
1755 &all_bits_between_are_set);
1757 /* First try for a 2-insn sequence. */
1759 /* These situations are preferred because the optimizer can
1760 * do more things with them:
1761 * 1) mov -1, %reg
1762 * sllx %reg, shift, %reg
1763 * 2) mov -1, %reg
1764 * srlx %reg, shift, %reg
1765 * 3) mov some_small_const, %reg
1766 * sllx %reg, shift, %reg
1768 if (((highest_bit_set == 63
1769 || lowest_bit_set == 0)
1770 && all_bits_between_are_set != 0)
1771 || ((highest_bit_set - lowest_bit_set) < 12))
1773 HOST_WIDE_INT the_const = -1;
1774 int shift = lowest_bit_set;
1776 if ((highest_bit_set != 63
1777 && lowest_bit_set != 0)
1778 || all_bits_between_are_set == 0)
1780 the_const =
1781 create_simple_focus_bits (high_bits, low_bits,
1782 lowest_bit_set, 0);
1784 else if (lowest_bit_set == 0)
1785 shift = -(63 - highest_bit_set);
1787 gcc_assert (SPARC_SIMM13_P (the_const));
1788 gcc_assert (shift != 0);
1790 emit_insn (gen_safe_SET64 (temp, the_const));
1791 if (shift > 0)
1792 emit_insn (gen_rtx_SET (VOIDmode,
1793 op0,
1794 gen_rtx_ASHIFT (DImode,
1795 temp,
1796 GEN_INT (shift))));
1797 else if (shift < 0)
1798 emit_insn (gen_rtx_SET (VOIDmode,
1799 op0,
1800 gen_rtx_LSHIFTRT (DImode,
1801 temp,
1802 GEN_INT (-shift))));
1803 return;
1806 /* Now a range of 22 or less bits set somewhere.
1807 * 1) sethi %hi(focus_bits), %reg
1808 * sllx %reg, shift, %reg
1809 * 2) sethi %hi(focus_bits), %reg
1810 * srlx %reg, shift, %reg
1812 if ((highest_bit_set - lowest_bit_set) < 21)
1814 unsigned HOST_WIDE_INT focus_bits =
1815 create_simple_focus_bits (high_bits, low_bits,
1816 lowest_bit_set, 10);
1818 gcc_assert (SPARC_SETHI_P (focus_bits));
1819 gcc_assert (lowest_bit_set != 10);
1821 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1823 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1824 if (lowest_bit_set < 10)
1825 emit_insn (gen_rtx_SET (VOIDmode,
1826 op0,
1827 gen_rtx_LSHIFTRT (DImode, temp,
1828 GEN_INT (10 - lowest_bit_set))));
1829 else if (lowest_bit_set > 10)
1830 emit_insn (gen_rtx_SET (VOIDmode,
1831 op0,
1832 gen_rtx_ASHIFT (DImode, temp,
1833 GEN_INT (lowest_bit_set - 10))));
1834 return;
1837 /* 1) sethi %hi(low_bits), %reg
1838 * or %reg, %lo(low_bits), %reg
1839 * 2) sethi %hi(~low_bits), %reg
1840 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1842 if (high_bits == 0
1843 || high_bits == 0xffffffff)
1845 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1846 (high_bits == 0xffffffff));
1847 return;
1850 /* Now, try 3-insn sequences. */
1852 /* 1) sethi %hi(high_bits), %reg
1853 * or %reg, %lo(high_bits), %reg
1854 * sllx %reg, 32, %reg
1856 if (low_bits == 0)
1858 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1859 return;
1862 /* We may be able to do something quick
1863 when the constant is negated, so try that. */
1864 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1865 (~low_bits) & 0xfffffc00))
1867 /* NOTE: The trailing bits get XOR'd so we need the
1868 non-negated bits, not the negated ones. */
1869 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1871 if ((((~high_bits) & 0xffffffff) == 0
1872 && ((~low_bits) & 0x80000000) == 0)
1873 || (((~high_bits) & 0xffffffff) == 0xffffffff
1874 && ((~low_bits) & 0x80000000) != 0))
1876 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1878 if ((SPARC_SETHI_P (fast_int)
1879 && (~high_bits & 0xffffffff) == 0)
1880 || SPARC_SIMM13_P (fast_int))
1881 emit_insn (gen_safe_SET64 (temp, fast_int));
1882 else
1883 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1885 else
1887 rtx negated_const;
1888 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1889 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1890 sparc_emit_set_const64 (temp, negated_const);
1893 /* If we are XOR'ing with -1, then we should emit a one's complement
1894 instead. This way the combiner will notice logical operations
1895 such as ANDN later on and substitute. */
1896 if (trailing_bits == 0x3ff)
1898 emit_insn (gen_rtx_SET (VOIDmode, op0,
1899 gen_rtx_NOT (DImode, temp)));
1901 else
1903 emit_insn (gen_rtx_SET (VOIDmode,
1904 op0,
1905 gen_safe_XOR64 (temp,
1906 (-0x400 | trailing_bits))));
1908 return;
1911 /* 1) sethi %hi(xxx), %reg
1912 * or %reg, %lo(xxx), %reg
1913 * sllx %reg, yyy, %reg
1915 * ??? This is just a generalized version of the low_bits==0
1916 * thing above, FIXME...
1917 */
1918 if ((highest_bit_set - lowest_bit_set) < 32)
1920 unsigned HOST_WIDE_INT focus_bits =
1921 create_simple_focus_bits (high_bits, low_bits,
1922 lowest_bit_set, 0);
1924 /* We can't get here in this state. */
1925 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1927 /* So what we know is that the set bits straddle the
1928 middle of the 64-bit word. */
1929 sparc_emit_set_const64_quick2 (op0, temp,
1930 focus_bits, 0,
1931 lowest_bit_set);
1932 return;
1935 /* 1) sethi %hi(high_bits), %reg
1936 * or %reg, %lo(high_bits), %reg
1937 * sllx %reg, 32, %reg
1938 * or %reg, low_bits, %reg
1939 */
1940 if (SPARC_SIMM13_P(low_bits)
1941 && ((int)low_bits > 0))
1943 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1944 return;
1947 /* The easiest way, when all else fails, is full decomposition. */
1948 #if 0
1949 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1950 high_bits, low_bits, ~high_bits, ~low_bits);
1951 #endif
1952 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1954 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1956 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1957 return the mode to be used for the comparison. For floating-point,
1958 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1959 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1960 processing is needed. */
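/* Editor's note (not in the original source): CC_NOOV[X]mode records that
   the flags were set by an arithmetic instruction such as addcc or subcc,
   whose overflow bit reflects that operation rather than a comparison
   against zero, so only tests that ignore the overflow flag are valid
   in those modes.  */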
1962 enum machine_mode
1963 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1965 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1967 switch (op)
1969 case EQ:
1970 case NE:
1971 case UNORDERED:
1972 case ORDERED:
1973 case UNLT:
1974 case UNLE:
1975 case UNGT:
1976 case UNGE:
1977 case UNEQ:
1978 case LTGT:
1979 return CCFPmode;
1981 case LT:
1982 case LE:
1983 case GT:
1984 case GE:
1985 return CCFPEmode;
1987 default:
1988 gcc_unreachable ();
1991 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1992 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1994 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1995 return CCX_NOOVmode;
1996 else
1997 return CC_NOOVmode;
1999 else
2001 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2002 return CCXmode;
2003 else
2004 return CCmode;
2008 /* Emit the compare insn and return the CC reg for a CODE comparison. */
2010 rtx
2011 gen_compare_reg (enum rtx_code code)
2013 enum machine_mode mode;
2014 rtx x, y, cc_reg;
2016 if (GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_CC)
2017 return sparc_compare_op0;
2019 x = sparc_compare_op0;
2020 y = sparc_compare_op1;
2021 mode = SELECT_CC_MODE (code, x, y);
2023 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2024 fcc regs (cse can't tell they're really call clobbered regs and will
2025 remove a duplicate comparison even if there is an intervening function
2026 call - it will then try to reload the cc reg via an int reg which is why
2027 we need the movcc patterns). It is possible to provide the movcc
2028 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2029 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2030 to tell cse that CCFPE mode registers (even pseudos) are call
2031 clobbered. */
2033 /* ??? This is an experiment. Rather than making changes to cse which may
2034 or may not be easy/clean, we do our own cse. This is possible because
2035 we will generate hard registers. Cse knows they're call clobbered (it
2036 doesn't know the same thing about pseudos). If we guess wrong, no big
2037 deal, but if we win, great! */
2039 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2040 #if 1 /* experiment */
2042 int reg;
2043 /* We cycle through the registers to ensure they're all exercised. */
2044 static int next_fcc_reg = 0;
2045 /* Previous x,y for each fcc reg. */
2046 static rtx prev_args[4][2];
2048 /* Scan prev_args for x,y. */
2049 for (reg = 0; reg < 4; reg++)
2050 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2051 break;
2052 if (reg == 4)
2054 reg = next_fcc_reg;
2055 prev_args[reg][0] = x;
2056 prev_args[reg][1] = y;
2057 next_fcc_reg = (next_fcc_reg + 1) & 3;
2059 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2061 #else
2062 cc_reg = gen_reg_rtx (mode);
2063 #endif /* ! experiment */
2064 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2065 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2066 else
2067 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2069 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2070 will only result in an unrecognizable insn, so no point in asserting. */
2071 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2073 return cc_reg;
2076 /* Same as above but return the whole compare operator. */
2078 rtx
2079 gen_compare_operator (enum rtx_code code)
2081 rtx cc_reg;
2083 if (GET_MODE (sparc_compare_op0) == TFmode && !TARGET_HARD_QUAD)
2084 code
2085 = sparc_emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, code);
2087 cc_reg = gen_compare_reg (code);
2088 return gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
2091 /* This function is used for v9 only.
2092 CODE is the code for an Scc's comparison.
2093 OPERANDS[0] is the target of the Scc insn.
2094 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2095 been generated yet).
2097 This function is needed to turn
2099 (set (reg:SI 110)
2100 (gt (reg:CCX 100 %icc)
2101 (const_int 0)))
2102 into
2103 (set (reg:SI 110)
2104 (gt:DI (reg:CCX 100 %icc)
2105 (const_int 0)))
2107 I.e.: the instruction recognizer needs to see the mode of the comparison to
2108 find the right instruction. We could use "gt:DI" right in the
2109 define_expand, but leaving it out allows us to handle DI, SI, etc.
2111 We refer to the global sparc compare operands sparc_compare_op0 and
2112 sparc_compare_op1. */
2114 int
2115 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2117 if (! TARGET_ARCH64
2118 && (GET_MODE (sparc_compare_op0) == DImode
2119 || GET_MODE (operands[0]) == DImode))
2120 return 0;
2122 /* Try to use the movrCC insns. */
2123 if (TARGET_ARCH64
2124 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2125 && sparc_compare_op1 == const0_rtx
2126 && v9_regcmp_p (compare_code))
2128 rtx op0 = sparc_compare_op0;
2129 rtx temp;
2131 /* Special case for op0 != 0. This can be done with one instruction if
2132 operands[0] == sparc_compare_op0. */
2134 if (compare_code == NE
2135 && GET_MODE (operands[0]) == DImode
2136 && rtx_equal_p (op0, operands[0]))
2138 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2139 gen_rtx_IF_THEN_ELSE (DImode,
2140 gen_rtx_fmt_ee (compare_code, DImode,
2141 op0, const0_rtx),
2142 const1_rtx,
2143 operands[0])));
2144 return 1;
2147 if (reg_overlap_mentioned_p (operands[0], op0))
2149 /* Handle the case where operands[0] == sparc_compare_op0.
2150 We "early clobber" the result. */
2151 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2152 emit_move_insn (op0, sparc_compare_op0);
2155 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2156 if (GET_MODE (op0) != DImode)
2158 temp = gen_reg_rtx (DImode);
2159 convert_move (temp, op0, 0);
2161 else
2162 temp = op0;
2163 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2164 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2165 gen_rtx_fmt_ee (compare_code, DImode,
2166 temp, const0_rtx),
2167 const1_rtx,
2168 operands[0])));
2169 return 1;
2171 else
2173 operands[1] = gen_compare_reg (compare_code);
2175 switch (GET_MODE (operands[1]))
2177 case CCmode :
2178 case CCXmode :
2179 case CCFPEmode :
2180 case CCFPmode :
2181 break;
2182 default :
2183 gcc_unreachable ();
2185 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2186 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2187 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2188 gen_rtx_fmt_ee (compare_code,
2189 GET_MODE (operands[1]),
2190 operands[1], const0_rtx),
2191 const1_rtx, operands[0])));
2192 return 1;
2196 /* Emit a conditional jump insn for the v9 architecture using comparison code
2197 CODE and jump target LABEL.
2198 This function exists to take advantage of the v9 brxx insns. */
2200 void
2201 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2203 gcc_assert (GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) != MODE_CC);
2204 emit_jump_insn (gen_rtx_SET (VOIDmode,
2205 pc_rtx,
2206 gen_rtx_IF_THEN_ELSE (VOIDmode,
2207 gen_rtx_fmt_ee (code, GET_MODE (op0),
2208 op0, const0_rtx),
2209 gen_rtx_LABEL_REF (VOIDmode, label),
2210 pc_rtx)));
2213 /* Generate a DFmode part of a hard TFmode register.
2214 REG is the TFmode hard register, LOW is 1 for the
2215 low 64 bits of the register and 0 otherwise.
2216 */
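/* Editor's example (not in the original source): SPARC is big-endian
   (WORDS_BIG_ENDIAN is 1), so for a TFmode value in %f4 (regno 36),
   gen_df_reg (reg, 0) returns %f4 and gen_df_reg (reg, 1) returns %f6.  */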
2217 rtx
2218 gen_df_reg (rtx reg, int low)
2220 int regno = REGNO (reg);
2222 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2223 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2224 return gen_rtx_REG (DFmode, regno);
2227 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2228 Unlike normal calls, TFmode operands are passed by reference. It is
2229 assumed that no more than 3 operands are required. */
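/* Editor's example (not in the original source): a TFmode addition is
   emitted as roughly
      _Qp_add (&result, &x, &y);
   i.e. all three TFmode values are passed by address, and the result is
   copied back out of its stack slot after the call.  */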
2231 static void
2232 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2234 rtx ret_slot = NULL, arg[3], func_sym;
2235 int i;
2237 /* We only expect to be called for conversions, unary, and binary ops. */
2238 gcc_assert (nargs == 2 || nargs == 3);
2240 for (i = 0; i < nargs; ++i)
2242 rtx this_arg = operands[i];
2243 rtx this_slot;
2245 /* TFmode arguments and return values are passed by reference. */
2246 if (GET_MODE (this_arg) == TFmode)
2248 int force_stack_temp;
2250 force_stack_temp = 0;
2251 if (TARGET_BUGGY_QP_LIB && i == 0)
2252 force_stack_temp = 1;
2254 if (GET_CODE (this_arg) == MEM
2255 && ! force_stack_temp)
2256 this_arg = XEXP (this_arg, 0);
2257 else if (CONSTANT_P (this_arg)
2258 && ! force_stack_temp)
2260 this_slot = force_const_mem (TFmode, this_arg);
2261 this_arg = XEXP (this_slot, 0);
2263 else
2265 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2267 /* Operand 0 is the return value. We'll copy it out later. */
2268 if (i > 0)
2269 emit_move_insn (this_slot, this_arg);
2270 else
2271 ret_slot = this_slot;
2273 this_arg = XEXP (this_slot, 0);
2277 arg[i] = this_arg;
2280 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2282 if (GET_MODE (operands[0]) == TFmode)
2284 if (nargs == 2)
2285 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2286 arg[0], GET_MODE (arg[0]),
2287 arg[1], GET_MODE (arg[1]));
2288 else
2289 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2290 arg[0], GET_MODE (arg[0]),
2291 arg[1], GET_MODE (arg[1]),
2292 arg[2], GET_MODE (arg[2]));
2294 if (ret_slot)
2295 emit_move_insn (operands[0], ret_slot);
2297 else
2299 rtx ret;
2301 gcc_assert (nargs == 2);
2303 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2304 GET_MODE (operands[0]), 1,
2305 arg[1], GET_MODE (arg[1]));
2307 if (ret != operands[0])
2308 emit_move_insn (operands[0], ret);
2312 /* Expand soft-float TFmode calls to sparc abi routines. */
2314 static void
2315 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2317 const char *func;
2319 switch (code)
2321 case PLUS:
2322 func = "_Qp_add";
2323 break;
2324 case MINUS:
2325 func = "_Qp_sub";
2326 break;
2327 case MULT:
2328 func = "_Qp_mul";
2329 break;
2330 case DIV:
2331 func = "_Qp_div";
2332 break;
2333 default:
2334 gcc_unreachable ();
2337 emit_soft_tfmode_libcall (func, 3, operands);
2340 static void
2341 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2343 const char *func;
2345 gcc_assert (code == SQRT);
2346 func = "_Qp_sqrt";
2348 emit_soft_tfmode_libcall (func, 2, operands);
2351 static void
2352 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2354 const char *func;
2356 switch (code)
2358 case FLOAT_EXTEND:
2359 switch (GET_MODE (operands[1]))
2361 case SFmode:
2362 func = "_Qp_stoq";
2363 break;
2364 case DFmode:
2365 func = "_Qp_dtoq";
2366 break;
2367 default:
2368 gcc_unreachable ();
2370 break;
2372 case FLOAT_TRUNCATE:
2373 switch (GET_MODE (operands[0]))
2375 case SFmode:
2376 func = "_Qp_qtos";
2377 break;
2378 case DFmode:
2379 func = "_Qp_qtod";
2380 break;
2381 default:
2382 gcc_unreachable ();
2384 break;
2386 case FLOAT:
2387 switch (GET_MODE (operands[1]))
2389 case SImode:
2390 func = "_Qp_itoq";
2391 if (TARGET_ARCH64)
2392 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2393 break;
2394 case DImode:
2395 func = "_Qp_xtoq";
2396 break;
2397 default:
2398 gcc_unreachable ();
2400 break;
2402 case UNSIGNED_FLOAT:
2403 switch (GET_MODE (operands[1]))
2405 case SImode:
2406 func = "_Qp_uitoq";
2407 if (TARGET_ARCH64)
2408 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2409 break;
2410 case DImode:
2411 func = "_Qp_uxtoq";
2412 break;
2413 default:
2414 gcc_unreachable ();
2416 break;
2418 case FIX:
2419 switch (GET_MODE (operands[0]))
2421 case SImode:
2422 func = "_Qp_qtoi";
2423 break;
2424 case DImode:
2425 func = "_Qp_qtox";
2426 break;
2427 default:
2428 gcc_unreachable ();
2430 break;
2432 case UNSIGNED_FIX:
2433 switch (GET_MODE (operands[0]))
2435 case SImode:
2436 func = "_Qp_qtoui";
2437 break;
2438 case DImode:
2439 func = "_Qp_qtoux";
2440 break;
2441 default:
2442 gcc_unreachable ();
2444 break;
2446 default:
2447 gcc_unreachable ();
2450 emit_soft_tfmode_libcall (func, 2, operands);
2453 /* Expand a hard-float tfmode operation. All arguments must be in
2454 registers. */
2456 static void
2457 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2459 rtx op, dest;
2461 if (GET_RTX_CLASS (code) == RTX_UNARY)
2463 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2464 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2466 else
2468 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2469 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2470 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2471 operands[1], operands[2]);
2474 if (register_operand (operands[0], VOIDmode))
2475 dest = operands[0];
2476 else
2477 dest = gen_reg_rtx (GET_MODE (operands[0]));
2479 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2481 if (dest != operands[0])
2482 emit_move_insn (operands[0], dest);
2485 void
2486 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2488 if (TARGET_HARD_QUAD)
2489 emit_hard_tfmode_operation (code, operands);
2490 else
2491 emit_soft_tfmode_binop (code, operands);
2494 void
2495 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2497 if (TARGET_HARD_QUAD)
2498 emit_hard_tfmode_operation (code, operands);
2499 else
2500 emit_soft_tfmode_unop (code, operands);
2503 void
2504 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2506 if (TARGET_HARD_QUAD)
2507 emit_hard_tfmode_operation (code, operands);
2508 else
2509 emit_soft_tfmode_cvt (code, operands);
2512 /* Return nonzero if a branch/jump/call instruction will have
2513 a nop emitted into its delay slot. */
2515 int
2516 empty_delay_slot (rtx insn)
2518 rtx seq;
2520 /* If no previous instruction (should not happen), return true. */
2521 if (PREV_INSN (insn) == NULL)
2522 return 1;
2524 seq = NEXT_INSN (PREV_INSN (insn));
2525 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2526 return 0;
2528 return 1;
2531 /* Return nonzero if TRIAL can go into the call delay slot. */
2533 int
2534 tls_call_delay (rtx trial)
2536 rtx pat;
2538 /* Binutils allows
2539 call __tls_get_addr, %tgd_call (foo)
2540 add %l7, %o0, %o0, %tgd_add (foo)
2541 while Sun as/ld does not. */
2542 if (TARGET_GNU_TLS || !TARGET_TLS)
2543 return 1;
2545 pat = PATTERN (trial);
2547 /* We must reject tgd_add{32|64}, i.e.
2548 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2549 and tldm_add{32|64}, i.e.
2550 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2551 for Sun as/ld. */
2552 if (GET_CODE (pat) == SET
2553 && GET_CODE (SET_SRC (pat)) == PLUS)
2555 rtx unspec = XEXP (SET_SRC (pat), 1);
2557 if (GET_CODE (unspec) == UNSPEC
2558 && (XINT (unspec, 1) == UNSPEC_TLSGD
2559 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2560 return 0;
2563 return 1;
2566 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2567 instruction. RETURN_P is true if the v9 variant 'return' is to be
2568 considered in the test too.
2570 TRIAL must be a SET whose destination is a REG appropriate for the
2571 'restore' instruction or, if RETURN_P is true, for the 'return'
2572 instruction. */
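/* Editor's example (not in the original source): a delay candidate such as
   (set (reg:SI %i0) (plus:SI (reg:SI %o0) (reg:SI %o1))) matches the
   'restore src1,src2,dest' pattern and is ultimately emitted as
      restore %o0, %o1, %o0
   with the destination named in the caller's window, since restore
   writes its result after the window shift.  */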
2574 static int
2575 eligible_for_restore_insn (rtx trial, bool return_p)
2577 rtx pat = PATTERN (trial);
2578 rtx src = SET_SRC (pat);
2580 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2581 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2582 && arith_operand (src, GET_MODE (src)))
2584 if (TARGET_ARCH64)
2585 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2586 else
2587 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2590 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2591 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2592 && arith_double_operand (src, GET_MODE (src)))
2593 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2595 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2596 else if (! TARGET_FPU && register_operand (src, SFmode))
2597 return 1;
2599 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2600 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2601 return 1;
2603 /* If we have the 'return' instruction, anything that does not use
2604 local or output registers and can go into a delay slot wins. */
2605 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2606 && (get_attr_in_uncond_branch_delay (trial)
2607 == IN_UNCOND_BRANCH_DELAY_TRUE))
2608 return 1;
2610 /* The 'restore src1,src2,dest' pattern for SImode. */
2611 else if (GET_CODE (src) == PLUS
2612 && register_operand (XEXP (src, 0), SImode)
2613 && arith_operand (XEXP (src, 1), SImode))
2614 return 1;
2616 /* The 'restore src1,src2,dest' pattern for DImode. */
2617 else if (GET_CODE (src) == PLUS
2618 && register_operand (XEXP (src, 0), DImode)
2619 && arith_double_operand (XEXP (src, 1), DImode))
2620 return 1;
2622 /* The 'restore src1,%lo(src2),dest' pattern. */
2623 else if (GET_CODE (src) == LO_SUM
2624 && ! TARGET_CM_MEDMID
2625 && ((register_operand (XEXP (src, 0), SImode)
2626 && immediate_operand (XEXP (src, 1), SImode))
2627 || (TARGET_ARCH64
2628 && register_operand (XEXP (src, 0), DImode)
2629 && immediate_operand (XEXP (src, 1), DImode))))
2630 return 1;
2632 /* The 'restore src,src,dest' pattern. */
2633 else if (GET_CODE (src) == ASHIFT
2634 && (register_operand (XEXP (src, 0), SImode)
2635 || register_operand (XEXP (src, 0), DImode))
2636 && XEXP (src, 1) == const1_rtx)
2637 return 1;
2639 return 0;
2642 /* Return nonzero if TRIAL can go into the function return's
2643 delay slot. */
2645 int
2646 eligible_for_return_delay (rtx trial)
2648 rtx pat;
2650 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2651 return 0;
2653 if (get_attr_length (trial) != 1)
2654 return 0;
2656 /* If there are any call-saved registers, we should scan TRIAL to
2657 check that it does not reference them. For now just make it easy. */
2658 if (num_gfregs)
2659 return 0;
2661 /* If the function uses __builtin_eh_return, the eh_return machinery
2662 occupies the delay slot. */
2663 if (crtl->calls_eh_return)
2664 return 0;
2666 /* In the case of a true leaf function, anything can go into the slot. */
2667 if (sparc_leaf_function_p)
2668 return get_attr_in_uncond_branch_delay (trial)
2669 == IN_UNCOND_BRANCH_DELAY_TRUE;
2671 pat = PATTERN (trial);
2673 /* Otherwise, only operations which can be done in tandem with
2674 a `restore' or `return' insn can go into the delay slot. */
2675 if (GET_CODE (SET_DEST (pat)) != REG
2676 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2677 return 0;
2679 /* If this instruction sets up a floating point register and we have a return
2680 instruction, it can probably go in. But restore will not work
2681 with FP_REGS. */
2682 if (REGNO (SET_DEST (pat)) >= 32)
2683 return (TARGET_V9
2684 && ! epilogue_renumber (&pat, 1)
2685 && (get_attr_in_uncond_branch_delay (trial)
2686 == IN_UNCOND_BRANCH_DELAY_TRUE));
2688 return eligible_for_restore_insn (trial, true);
2691 /* Return nonzero if TRIAL can go into the sibling call's
2692 delay slot. */
2694 int
2695 eligible_for_sibcall_delay (rtx trial)
2697 rtx pat;
2699 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2700 return 0;
2702 if (get_attr_length (trial) != 1)
2703 return 0;
2705 pat = PATTERN (trial);
2707 if (sparc_leaf_function_p)
2709 /* If the tail call is done using the call instruction,
2710 we have to restore %o7 in the delay slot. */
2711 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2712 return 0;
2714 /* %g1 is used to build the function address */
2715 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2716 return 0;
2718 return 1;
2721 /* Otherwise, only operations which can be done in tandem with
2722 a `restore' insn can go into the delay slot. */
2723 if (GET_CODE (SET_DEST (pat)) != REG
2724 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2725 || REGNO (SET_DEST (pat)) >= 32)
2726 return 0;
2728 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2729 in most cases. */
2730 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2731 return 0;
2733 return eligible_for_restore_insn (trial, false);
2736 int
2737 short_branch (int uid1, int uid2)
2739 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2741 /* Leave a few words of "slop". */
2742 if (delta >= -1023 && delta <= 1022)
2743 return 1;
2745 return 0;
2748 /* Return nonzero if REG is not used after INSN.
2749 We assume REG is a reload reg, and therefore does
2750 not live past labels or calls or jumps. */
2751 int
2752 reg_unused_after (rtx reg, rtx insn)
2754 enum rtx_code code, prev_code = UNKNOWN;
2756 while ((insn = NEXT_INSN (insn)))
2758 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2759 return 1;
2761 code = GET_CODE (insn);
2762 if (GET_CODE (insn) == CODE_LABEL)
2763 return 1;
2765 if (INSN_P (insn))
2767 rtx set = single_set (insn);
2768 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2769 if (set && in_src)
2770 return 0;
2771 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2772 return 1;
2773 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2774 return 0;
2776 prev_code = code;
2778 return 1;
2781 /* Determine if it's legal to put X into the constant pool. This
2782 is not possible if X contains the address of a symbol that is
2783 not constant (TLS) or not known at final link time (PIC). */
2785 static bool
2786 sparc_cannot_force_const_mem (rtx x)
2788 switch (GET_CODE (x))
2790 case CONST_INT:
2791 case CONST_DOUBLE:
2792 case CONST_VECTOR:
2793 /* Accept all non-symbolic constants. */
2794 return false;
2796 case LABEL_REF:
2797 /* Labels are OK iff we are non-PIC. */
2798 return flag_pic != 0;
2800 case SYMBOL_REF:
2801 /* 'Naked' TLS symbol references are never OK,
2802 non-TLS symbols are OK iff we are non-PIC. */
2803 if (SYMBOL_REF_TLS_MODEL (x))
2804 return true;
2805 else
2806 return flag_pic != 0;
2808 case CONST:
2809 return sparc_cannot_force_const_mem (XEXP (x, 0));
2810 case PLUS:
2811 case MINUS:
2812 return sparc_cannot_force_const_mem (XEXP (x, 0))
2813 || sparc_cannot_force_const_mem (XEXP (x, 1));
2814 case UNSPEC:
2815 return true;
2816 default:
2817 gcc_unreachable ();
2821 /* PIC support. */
2822 static GTY(()) char pic_helper_symbol_name[256];
2823 static GTY(()) rtx pic_helper_symbol;
2824 static GTY(()) bool pic_helper_emitted_p = false;
2825 static GTY(()) rtx global_offset_table;
2827 /* Ensure that we are not using patterns that are not OK with PIC. */
2829 int
2830 check_pic (int i)
2832 switch (flag_pic)
2834 case 1:
2835 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2836 && (GET_CODE (recog_data.operand[i]) != CONST
2837 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2838 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2839 == global_offset_table)
2840 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2841 == CONST))));
2842 case 2:
2843 default:
2844 return 1;
2848 /* Return true if X is an address which needs a temporary register when
2849 reloaded while generating PIC code. */
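/* Editor's example (not in the original source):
   (const (plus (symbol_ref "x") (const_int 0x12345))) needs a scratch
   register, since 0x12345 does not fit in a 13-bit signed immediate.  */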
2851 int
2852 pic_address_needs_scratch (rtx x)
2854 /* An address which is a symbol plus a non-SMALL_INT offset needs a temp reg. */
2855 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2856 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2857 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2858 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2859 return 1;
2861 return 0;
2864 /* Determine if a given RTX is a valid constant. We already know this
2865 satisfies CONSTANT_P. */
2867 bool
2868 legitimate_constant_p (rtx x)
2870 rtx inner;
2872 switch (GET_CODE (x))
2874 case SYMBOL_REF:
2875 /* TLS symbols are not constant. */
2876 if (SYMBOL_REF_TLS_MODEL (x))
2877 return false;
2878 break;
2880 case CONST:
2881 inner = XEXP (x, 0);
2883 /* Offsets of TLS symbols are never valid.
2884 Discourage CSE from creating them. */
2885 if (GET_CODE (inner) == PLUS
2886 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2887 return false;
2888 break;
2890 case CONST_DOUBLE:
2891 if (GET_MODE (x) == VOIDmode)
2892 return true;
2894 /* Floating point constants are generally not ok.
2895 The only exception is 0.0 in VIS. */
2896 if (TARGET_VIS
2897 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2898 && const_zero_operand (x, GET_MODE (x)))
2899 return true;
2901 return false;
2903 case CONST_VECTOR:
2904 /* Vector constants are generally not ok.
2905 The only exception is 0 in VIS. */
2906 if (TARGET_VIS
2907 && const_zero_operand (x, GET_MODE (x)))
2908 return true;
2910 return false;
2912 default:
2913 break;
2916 return true;
2919 /* Determine if a given RTX is a valid constant address. */
2921 bool
2922 constant_address_p (rtx x)
2924 switch (GET_CODE (x))
2926 case LABEL_REF:
2927 case CONST_INT:
2928 case HIGH:
2929 return true;
2931 case CONST:
2932 if (flag_pic && pic_address_needs_scratch (x))
2933 return false;
2934 return legitimate_constant_p (x);
2936 case SYMBOL_REF:
2937 return !flag_pic && legitimate_constant_p (x);
2939 default:
2940 return false;
2944 /* Nonzero if the constant value X is a legitimate general operand
2945 when generating PIC code. It is given that flag_pic is on and
2946 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2948 bool
2949 legitimate_pic_operand_p (rtx x)
2951 if (pic_address_needs_scratch (x))
2952 return false;
2953 if (SPARC_SYMBOL_REF_TLS_P (x)
2954 || (GET_CODE (x) == CONST
2955 && GET_CODE (XEXP (x, 0)) == PLUS
2956 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2957 return false;
2958 return true;
2961 /* Return nonzero if ADDR is a valid memory address.
2962 STRICT specifies whether strict register checking applies. */
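/* Editor's note (not in the original source): the accepted forms mirror
   the hardware addressing modes, e.g.
      ld [%l0], %o0              reg
      ld [%l0 + %l1], %o0        reg + reg
      ld [%l0 + 8], %o0          reg + simm13
      ld [%l0 + %lo(sym)], %o0   lo_sum  */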
2964 int
2965 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2967 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2969 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2970 rs1 = addr;
2971 else if (GET_CODE (addr) == PLUS)
2973 rs1 = XEXP (addr, 0);
2974 rs2 = XEXP (addr, 1);
2976 /* Canonicalize. REG comes first, if there are no regs,
2977 LO_SUM comes first. */
2978 if (!REG_P (rs1)
2979 && GET_CODE (rs1) != SUBREG
2980 && (REG_P (rs2)
2981 || GET_CODE (rs2) == SUBREG
2982 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2984 rs1 = XEXP (addr, 1);
2985 rs2 = XEXP (addr, 0);
2988 if ((flag_pic == 1
2989 && rs1 == pic_offset_table_rtx
2990 && !REG_P (rs2)
2991 && GET_CODE (rs2) != SUBREG
2992 && GET_CODE (rs2) != LO_SUM
2993 && GET_CODE (rs2) != MEM
2994 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2995 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2996 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2997 || ((REG_P (rs1)
2998 || GET_CODE (rs1) == SUBREG)
2999 && RTX_OK_FOR_OFFSET_P (rs2)))
3001 imm1 = rs2;
3002 rs2 = NULL;
3004 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3005 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3007 /* We prohibit REG + REG for TFmode when there are no quad move insns
3008 and we consequently need to split. We do this because REG+REG
3009 is not an offsettable address. If we get the situation in reload
3010 where source and destination of a movtf pattern are both MEMs with
3011 REG+REG address, then only one of them gets converted to an
3012 offsettable address. */
3013 if (mode == TFmode
3014 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3015 return 0;
3017 /* We prohibit REG + REG on ARCH32 if not optimizing for
3018 DFmode/DImode because then mem_min_alignment is likely to be zero
3019 after reload and the forced split would lack a matching splitter
3020 pattern. */
3021 if (TARGET_ARCH32 && !optimize
3022 && (mode == DFmode || mode == DImode))
3023 return 0;
3025 else if (USE_AS_OFFSETABLE_LO10
3026 && GET_CODE (rs1) == LO_SUM
3027 && TARGET_ARCH64
3028 && ! TARGET_CM_MEDMID
3029 && RTX_OK_FOR_OLO10_P (rs2))
3031 rs2 = NULL;
3032 imm1 = XEXP (rs1, 1);
3033 rs1 = XEXP (rs1, 0);
3034 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3035 return 0;
3038 else if (GET_CODE (addr) == LO_SUM)
3040 rs1 = XEXP (addr, 0);
3041 imm1 = XEXP (addr, 1);
3043 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3044 return 0;
3046 /* We can't allow TFmode in 32-bit mode, because an offset greater
3047 than the alignment (8) may cause the LO_SUM to overflow. */
3048 if (mode == TFmode && TARGET_ARCH32)
3049 return 0;
3051 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3052 return 1;
3053 else
3054 return 0;
3056 if (GET_CODE (rs1) == SUBREG)
3057 rs1 = SUBREG_REG (rs1);
3058 if (!REG_P (rs1))
3059 return 0;
3061 if (rs2)
3063 if (GET_CODE (rs2) == SUBREG)
3064 rs2 = SUBREG_REG (rs2);
3065 if (!REG_P (rs2))
3066 return 0;
3069 if (strict)
3071 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3072 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3073 return 0;
3075 else
3077 if ((REGNO (rs1) >= 32
3078 && REGNO (rs1) != FRAME_POINTER_REGNUM
3079 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3080 || (rs2
3081 && (REGNO (rs2) >= 32
3082 && REGNO (rs2) != FRAME_POINTER_REGNUM
3083 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3084 return 0;
3086 return 1;
3089 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3091 static GTY(()) rtx sparc_tls_symbol;
3093 static rtx
3094 sparc_tls_get_addr (void)
3096 if (!sparc_tls_symbol)
3097 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3099 return sparc_tls_symbol;
3102 static rtx
3103 sparc_tls_got (void)
3105 rtx temp;
3106 if (flag_pic)
3108 crtl->uses_pic_offset_table = 1;
3109 return pic_offset_table_rtx;
3112 if (!global_offset_table)
3113 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3114 temp = gen_reg_rtx (Pmode);
3115 emit_move_insn (temp, global_offset_table);
3116 return temp;
3119 /* Return 1 if *X is a thread-local symbol. */
3121 static int
3122 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3124 return SPARC_SYMBOL_REF_TLS_P (*x);
3127 /* Return 1 if X contains a thread-local symbol. */
3129 bool
3130 sparc_tls_referenced_p (rtx x)
3132 if (!TARGET_HAVE_TLS)
3133 return false;
3135 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3138 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3139 this (thread-local) address. */
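/* Editor's sketch (not in the original source): on 32-bit, the global
   dynamic case below expands to roughly
      sethi %tgd_hi22(sym), %t1
      add %t1, %tgd_lo10(sym), %t2
      add %l7, %t2, %o0, %tgd_add(sym)
      call __tls_get_addr, %tgd_call(sym)
   with %l7 holding the GOT pointer under -fPIC.  */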
3141 rtx
3142 legitimize_tls_address (rtx addr)
3144 rtx temp1, temp2, temp3, ret, o0, got, insn;
3146 gcc_assert (can_create_pseudo_p ());
3148 if (GET_CODE (addr) == SYMBOL_REF)
3149 switch (SYMBOL_REF_TLS_MODEL (addr))
3151 case TLS_MODEL_GLOBAL_DYNAMIC:
3152 start_sequence ();
3153 temp1 = gen_reg_rtx (SImode);
3154 temp2 = gen_reg_rtx (SImode);
3155 ret = gen_reg_rtx (Pmode);
3156 o0 = gen_rtx_REG (Pmode, 8);
3157 got = sparc_tls_got ();
3158 emit_insn (gen_tgd_hi22 (temp1, addr));
3159 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3160 if (TARGET_ARCH32)
3162 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3163 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3164 addr, const1_rtx));
3166 else
3168 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3169 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3170 addr, const1_rtx));
3172 CALL_INSN_FUNCTION_USAGE (insn)
3173 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3174 CALL_INSN_FUNCTION_USAGE (insn));
3175 insn = get_insns ();
3176 end_sequence ();
3177 emit_libcall_block (insn, ret, o0, addr);
3178 break;
3180 case TLS_MODEL_LOCAL_DYNAMIC:
3181 start_sequence ();
3182 temp1 = gen_reg_rtx (SImode);
3183 temp2 = gen_reg_rtx (SImode);
3184 temp3 = gen_reg_rtx (Pmode);
3185 ret = gen_reg_rtx (Pmode);
3186 o0 = gen_rtx_REG (Pmode, 8);
3187 got = sparc_tls_got ();
3188 emit_insn (gen_tldm_hi22 (temp1));
3189 emit_insn (gen_tldm_lo10 (temp2, temp1));
3190 if (TARGET_ARCH32)
3192 emit_insn (gen_tldm_add32 (o0, got, temp2));
3193 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3194 const1_rtx));
3196 else
3198 emit_insn (gen_tldm_add64 (o0, got, temp2));
3199 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3200 const1_rtx));
3202 CALL_INSN_FUNCTION_USAGE (insn)
3203 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3204 CALL_INSN_FUNCTION_USAGE (insn));
3205 insn = get_insns ();
3206 end_sequence ();
3207 emit_libcall_block (insn, temp3, o0,
3208 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3209 UNSPEC_TLSLD_BASE));
3210 temp1 = gen_reg_rtx (SImode);
3211 temp2 = gen_reg_rtx (SImode);
3212 emit_insn (gen_tldo_hix22 (temp1, addr));
3213 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3214 if (TARGET_ARCH32)
3215 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3216 else
3217 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3218 break;
3220 case TLS_MODEL_INITIAL_EXEC:
3221 temp1 = gen_reg_rtx (SImode);
3222 temp2 = gen_reg_rtx (SImode);
3223 temp3 = gen_reg_rtx (Pmode);
3224 got = sparc_tls_got ();
3225 emit_insn (gen_tie_hi22 (temp1, addr));
3226 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3227 if (TARGET_ARCH32)
3228 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3229 else
3230 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3231 if (TARGET_SUN_TLS)
3233 ret = gen_reg_rtx (Pmode);
3234 if (TARGET_ARCH32)
3235 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3236 temp3, addr));
3237 else
3238 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3239 temp3, addr));
3241 else
3242 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3243 break;
3245 case TLS_MODEL_LOCAL_EXEC:
3246 temp1 = gen_reg_rtx (Pmode);
3247 temp2 = gen_reg_rtx (Pmode);
3248 if (TARGET_ARCH32)
3250 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3251 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3253 else
3255 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3256 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3258 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3259 break;
3261 default:
3262 gcc_unreachable ();
3265 else
3266 gcc_unreachable (); /* for now ... */
3268 return ret;
3272 /* Legitimize PIC addresses. If the address is already position-independent,
3273 we return ORIG. Newly generated position-independent addresses go into a
3274 reg. This is REG if nonzero, otherwise we allocate register(s) as
3275 necessary. */
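/* Editor's sketch (not in the original source, GOT relocations elided):
   under flag_pic == 2 a global symbol is loaded roughly as
      sethi %hi(sym), %t
      or %t, %lo(sym), %t
      ld [%l7 + %t], %reg
   i.e. a GOT slot indexed off the PIC register, whereas flag_pic == 1
   can reach the slot with a single 13-bit offset.  */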
3277 rtx
3278 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3279 rtx reg)
3281 if (GET_CODE (orig) == SYMBOL_REF
3282 /* See the comment in sparc_expand_move. */
3283 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3285 rtx pic_ref, address;
3286 rtx insn;
3288 if (reg == 0)
3290 gcc_assert (! reload_in_progress && ! reload_completed);
3291 reg = gen_reg_rtx (Pmode);
3294 if (flag_pic == 2)
3296 /* If not during reload, allocate another temp reg here for loading
3297 in the address, so that these instructions can be optimized
3298 properly. */
3299 rtx temp_reg = ((reload_in_progress || reload_completed)
3300 ? reg : gen_reg_rtx (Pmode));
3302 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3303 won't get confused into thinking that these two instructions
3304 are loading in the true address of the symbol. If in the
3305 future a PIC rtx exists, that should be used instead. */
3306 if (TARGET_ARCH64)
3308 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3309 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3311 else
3313 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3314 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3316 address = temp_reg;
3318 else
3319 address = orig;
3321 pic_ref = gen_const_mem (Pmode,
3322 gen_rtx_PLUS (Pmode,
3323 pic_offset_table_rtx, address));
3324 crtl->uses_pic_offset_table = 1;
3325 insn = emit_move_insn (reg, pic_ref);
3326 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3327 by loop. */
3328 set_unique_reg_note (insn, REG_EQUAL, orig);
3329 return reg;
3331 else if (GET_CODE (orig) == CONST)
3333 rtx base, offset;
3335 if (GET_CODE (XEXP (orig, 0)) == PLUS
3336 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3337 return orig;
3339 if (reg == 0)
3341 gcc_assert (! reload_in_progress && ! reload_completed);
3342 reg = gen_reg_rtx (Pmode);
3345 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3346 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3347 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3348 base == reg ? 0 : reg);
3350 if (GET_CODE (offset) == CONST_INT)
3352 if (SMALL_INT (offset))
3353 return plus_constant (base, INTVAL (offset));
3354 else if (! reload_in_progress && ! reload_completed)
3355 offset = force_reg (Pmode, offset);
3356 else
3357 /* If we reach here, then something is seriously wrong. */
3358 gcc_unreachable ();
3360 return gen_rtx_PLUS (Pmode, base, offset);
3362 else if (GET_CODE (orig) == LABEL_REF)
3363 /* ??? Why do we do this? */
3364 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3365 the register is live instead, in case it is eliminated. */
3366 crtl->uses_pic_offset_table = 1;
3368 return orig;
3371 /* Try machine-dependent ways of modifying an illegitimate address X
3372 to be legitimate. If we find one, return the new, valid address.
3374 OLDX is the address as it was before break_out_memory_refs was called.
3375 In some cases it is useful to look at this to decide what needs to be done.
3377 MODE is the mode of the operand pointed to by X.
3379 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
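/* Editor's example (not in the original source): (plus (reg %l0)
   (const_int 0x12345)) has an offset too large for simm13, so the
   constant is moved into a register and the address becomes the
   REG+REG form [%l0 + %tmp].  */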
3381 rtx
3382 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3383 enum machine_mode mode)
3385 rtx orig_x = x;
3387 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3388 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3389 force_operand (XEXP (x, 0), NULL_RTX));
3390 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3391 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3392 force_operand (XEXP (x, 1), NULL_RTX));
3393 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3394 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3395 XEXP (x, 1));
3396 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3397 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3398 force_operand (XEXP (x, 1), NULL_RTX));
3400 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3401 return x;
3403 if (SPARC_SYMBOL_REF_TLS_P (x))
3404 x = legitimize_tls_address (x);
3405 else if (flag_pic)
3406 x = legitimize_pic_address (x, mode, 0);
3407 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3408 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3409 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3410 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3411 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3412 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3413 else if (GET_CODE (x) == SYMBOL_REF
3414 || GET_CODE (x) == CONST
3415 || GET_CODE (x) == LABEL_REF)
3416 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3417 return x;
3420 /* Emit the special PIC helper function. */
3422 static void
3423 emit_pic_helper (void)
3425 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3426 int align;
3428 switch_to_section (text_section);
3430 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3431 if (align > 0)
3432 ASM_OUTPUT_ALIGN (asm_out_file, align);
3433 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3434 if (flag_delayed_branch)
3435 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3436 pic_name, pic_name);
3437 else
3438 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3439 pic_name, pic_name);
3441 pic_helper_emitted_p = true;
3444 /* Emit code to load the PIC register. */
3446 static void
3447 load_pic_register (bool delay_pic_helper)
3449 int orig_flag_pic = flag_pic;
3451 if (TARGET_VXWORKS_RTP)
3453 emit_insn (gen_vxworks_load_got ());
3454 emit_use (pic_offset_table_rtx);
3455 return;
3458 /* If we haven't initialized the special PIC symbols, do so now. */
3459 if (!pic_helper_symbol_name[0])
3461 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3462 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3463 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3466 /* If we haven't emitted the special PIC helper function, do so now unless
3467 we are requested to delay it. */
3468 if (!delay_pic_helper && !pic_helper_emitted_p)
3469 emit_pic_helper ();
3471 flag_pic = 0;
3472 if (TARGET_ARCH64)
3473 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3474 pic_helper_symbol));
3475 else
3476 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3477 pic_helper_symbol));
3478 flag_pic = orig_flag_pic;
3480 /* Need to emit this whether or not we obey regdecls,
3481 since setjmp/longjmp can cause life info to screw up.
3482 ??? In the case where we don't obey regdecls, this is not sufficient
3483 since we may not fall out the bottom. */
3484 emit_use (pic_offset_table_rtx);
3487 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3488 address of the call target. */
3490 void
3491 sparc_emit_call_insn (rtx pat, rtx addr)
3493 rtx insn;
3495 insn = emit_call_insn (pat);
3497 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3498 if (TARGET_VXWORKS_RTP
3499 && flag_pic
3500 && GET_CODE (addr) == SYMBOL_REF
3501 && (SYMBOL_REF_DECL (addr)
3502 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3503 : !SYMBOL_REF_LOCAL_P (addr)))
3505 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3506 crtl->uses_pic_offset_table = 1;
3510 /* Return 1 if RTX is a MEM which is known to be aligned to at
3511 least a DESIRED byte boundary. */
3513 int
3514 mem_min_alignment (rtx mem, int desired)
3516 rtx addr, base, offset;
3518 /* If it's not a MEM we can't accept it. */
3519 if (GET_CODE (mem) != MEM)
3520 return 0;
3522 /* Obviously... */
3523 if (!TARGET_UNALIGNED_DOUBLES
3524 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3525 return 1;
3527 /* ??? The rest of the function predates MEM_ALIGN so
3528 there is probably a bit of redundancy. */
3529 addr = XEXP (mem, 0);
3530 base = offset = NULL_RTX;
3531 if (GET_CODE (addr) == PLUS)
3533 if (GET_CODE (XEXP (addr, 0)) == REG)
3535 base = XEXP (addr, 0);
3537 /* What we are saying here is that if the base
3538 REG is aligned properly, the compiler will make
3539 sure any REG-based index off of it will be
3540 aligned as well. */
3541 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3542 offset = XEXP (addr, 1);
3543 else
3544 offset = const0_rtx;
3547 else if (GET_CODE (addr) == REG)
3549 base = addr;
3550 offset = const0_rtx;
3553 if (base != NULL_RTX)
3555 int regno = REGNO (base);
3557 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3559 /* Check if the compiler has recorded some information
3560 about the alignment of the base REG. If reload has
3561 completed, we already matched with proper alignments.
3562 If not running global_alloc, reload might give us
3563 an unaligned pointer to the local stack, though. */
3564 if (((cfun != 0
3565 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3566 || (optimize && reload_completed))
3567 && (INTVAL (offset) & (desired - 1)) == 0)
3568 return 1;
3570 else
3572 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3573 return 1;
3576 else if (! TARGET_UNALIGNED_DOUBLES
3577 || CONSTANT_P (addr)
3578 || GET_CODE (addr) == LO_SUM)
3580 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3581 is true, in which case we can only assume that an access is aligned if
3582 it is to a constant address, or the address involves a LO_SUM. */
3583 return 1;
3586 /* An obviously unaligned address. */
3587 return 0;
3591 /* Vectors to keep interesting information about registers where it can easily
3592 be found. We used to use the actual mode value as the bit number, but there
3593 are more than 32 modes now. Instead we use two tables: one indexed by
3594 hard register number, and one indexed by mode. */
3596 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3597 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3598 mapped into one sparc_mode_class mode. */
3600 enum sparc_mode_class {
3601 S_MODE, D_MODE, T_MODE, O_MODE,
3602 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3603 CC_MODE, CCFP_MODE
3606 /* Modes for single-word and smaller quantities. */
3607 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3609 /* Modes for double-word and smaller quantities. */
3610 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3612 /* Modes for quad-word and smaller quantities. */
3613 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3615 /* Modes for 8-word and smaller quantities. */
3616 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3618 /* Modes for single-float quantities. We must allow any single word or
3619 smaller quantity. This is because the fix/float conversion instructions
3620 take integer inputs/outputs from the float registers. */
3621 #define SF_MODES (S_MODES)
3623 /* Modes for double-float and smaller quantities. */
3624 #define DF_MODES (S_MODES | D_MODES)
3626 /* Modes for double-float only quantities. */
3627 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3629 /* Modes for quad-float only quantities. */
3630 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3632 /* Modes for quad-float and smaller quantities. */
3633 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3635 /* Modes for quad-float and double-float quantities. */
3636 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3638 /* Modes for quad-float pair only quantities. */
3639 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3641 /* Modes for quad-float pairs and smaller quantities. */
3642 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3644 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3646 /* Modes for condition codes. */
3647 #define CC_MODES (1 << (int) CC_MODE)
3648 #define CCFP_MODES (1 << (int) CCFP_MODE)
3650 /* Value is 1 if register/mode pair is acceptable on sparc.
3651 The funny mixture of D and T modes is because integer operations
3652 do not specially operate on tetra quantities, so non-quad-aligned
3653 registers can hold quadword quantities (except %o4 and %i4 because
3654 they cross fixed registers). */
3656 /* This points to either the 32 bit or the 64 bit version. */
3657 const int *hard_regno_mode_classes;
3659 static const int hard_32bit_mode_classes[] = {
3660 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3661 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3662 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3663 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3665 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3666 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3667 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3668 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3670 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3671 and none can hold SFmode/SImode values. */
3672 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3673 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3674 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3675 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3677 /* %fcc[0123] */
3678 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3680 /* %icc */
3681 CC_MODES
3684 static const int hard_64bit_mode_classes[] = {
3685 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3686 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3687 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3688 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3690 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3691 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3692 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3693 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3695 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3696 and none can hold SFmode/SImode values. */
3697 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3698 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3699 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3700 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3702 /* %fcc[0123] */
3703 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3705 /* %icc */
3706 CC_MODES
3709 int sparc_mode_class [NUM_MACHINE_MODES];
3711 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3713 static void
3714 sparc_init_modes (void)
3716 int i;
3718 for (i = 0; i < NUM_MACHINE_MODES; i++)
3720 switch (GET_MODE_CLASS (i))
3722 case MODE_INT:
3723 case MODE_PARTIAL_INT:
3724 case MODE_COMPLEX_INT:
3725 if (GET_MODE_SIZE (i) <= 4)
3726 sparc_mode_class[i] = 1 << (int) S_MODE;
3727 else if (GET_MODE_SIZE (i) == 8)
3728 sparc_mode_class[i] = 1 << (int) D_MODE;
3729 else if (GET_MODE_SIZE (i) == 16)
3730 sparc_mode_class[i] = 1 << (int) T_MODE;
3731 else if (GET_MODE_SIZE (i) == 32)
3732 sparc_mode_class[i] = 1 << (int) O_MODE;
3733 else
3734 sparc_mode_class[i] = 0;
3735 break;
3736 case MODE_VECTOR_INT:
3737 if (GET_MODE_SIZE (i) <= 4)
3738 sparc_mode_class[i] = 1 << (int)SF_MODE;
3739 else if (GET_MODE_SIZE (i) == 8)
3740 sparc_mode_class[i] = 1 << (int)DF_MODE;
3741 break;
3742 case MODE_FLOAT:
3743 case MODE_COMPLEX_FLOAT:
3744 if (GET_MODE_SIZE (i) <= 4)
3745 sparc_mode_class[i] = 1 << (int) SF_MODE;
3746 else if (GET_MODE_SIZE (i) == 8)
3747 sparc_mode_class[i] = 1 << (int) DF_MODE;
3748 else if (GET_MODE_SIZE (i) == 16)
3749 sparc_mode_class[i] = 1 << (int) TF_MODE;
3750 else if (GET_MODE_SIZE (i) == 32)
3751 sparc_mode_class[i] = 1 << (int) OF_MODE;
3752 else
3753 sparc_mode_class[i] = 0;
3754 break;
3755 case MODE_CC:
3756 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3757 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3758 else
3759 sparc_mode_class[i] = 1 << (int) CC_MODE;
3760 break;
3761 default:
3762 sparc_mode_class[i] = 0;
3763 break;
3767 if (TARGET_ARCH64)
3768 hard_regno_mode_classes = hard_64bit_mode_classes;
3769 else
3770 hard_regno_mode_classes = hard_32bit_mode_classes;
3772 /* Initialize the array used by REGNO_REG_CLASS. */
3773 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3775 if (i < 16 && TARGET_V8PLUS)
3776 sparc_regno_reg_class[i] = I64_REGS;
3777 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3778 sparc_regno_reg_class[i] = GENERAL_REGS;
3779 else if (i < 64)
3780 sparc_regno_reg_class[i] = FP_REGS;
3781 else if (i < 96)
3782 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3783 else if (i < 100)
3784 sparc_regno_reg_class[i] = FPCC_REGS;
3785 else
3786 sparc_regno_reg_class[i] = NO_REGS;
3790 /* Compute the frame size required by the function. This function is called
3791 during the reload pass and also by sparc_expand_prologue. */
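/* Editor's summary (not in the original source): apparent_fsize covers
   the rounded local variables plus 4 bytes per saved register word;
   actual_fsize additionally covers the rounded outgoing argument area
   and, unless the function is a leaf with no stack-local variables,
   the register window save area.  */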
3793 HOST_WIDE_INT
3794 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3796 int outgoing_args_size = (crtl->outgoing_args_size
3797 + REG_PARM_STACK_SPACE (current_function_decl));
3798 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3799 int i;
3801 if (TARGET_ARCH64)
3803 for (i = 0; i < 8; i++)
3804 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3805 n_regs += 2;
3807 else
3809 for (i = 0; i < 8; i += 2)
3810 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3811 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3812 n_regs += 2;
3815 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3816 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3817 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3818 n_regs += 2;
3820 /* Set up values for use in prologue and epilogue. */
3821 num_gfregs = n_regs;
3823 if (leaf_function_p
3824 && n_regs == 0
3825 && size == 0
3826 && crtl->outgoing_args_size == 0)
3827 actual_fsize = apparent_fsize = 0;
3828 else
3830 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3831 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3832 apparent_fsize += n_regs * 4;
3833 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3836 /* Make sure nothing can clobber our register windows.
3837 If a SAVE must be done, or there is a stack-local variable,
3838 the register window area must be allocated. */
3839 if (! leaf_function_p || size > 0)
3840 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3842 return SPARC_STACK_ALIGN (actual_fsize);
3845 /* Output any necessary .register pseudo-ops. */
3847 void
3848 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3850 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3851 int i;
3853 if (TARGET_ARCH32)
3854 return;
3856 /* Check if %g[2367] were used without
3857 .register being printed for them already. */
3858 for (i = 2; i < 8; i++)
3860 if (df_regs_ever_live_p (i)
3861 && ! sparc_hard_reg_printed [i])
3863 sparc_hard_reg_printed [i] = 1;
3864 /* %g7 is used as the TLS base register; use #ignore
3865 for it instead of #scratch. */
3866 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3867 i == 7 ? "ignore" : "scratch");
3869 if (i == 3) i = 5;
3871 #endif
3874 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3875 as needed. LOW should be double-word aligned for 32-bit registers.
3876 Return the new OFFSET. */
3878 #define SORR_SAVE 0
3879 #define SORR_RESTORE 1
3881 static int
3882 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3884 rtx mem, insn;
3885 int i;
3887 if (TARGET_ARCH64 && high <= 32)
3889 for (i = low; i < high; i++)
3891 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3893 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3894 set_mem_alias_set (mem, sparc_sr_alias_set);
3895 if (action == SORR_SAVE)
3897 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3898 RTX_FRAME_RELATED_P (insn) = 1;
3900 else /* action == SORR_RESTORE */
3901 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3902 offset += 8;
3906 else
3908 for (i = low; i < high; i += 2)
3910 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
3911 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
3912 enum machine_mode mode;
3913 int regno;
3915 if (reg0 && reg1)
3917 mode = i < 32 ? DImode : DFmode;
3918 regno = i;
3920 else if (reg0)
3922 mode = i < 32 ? SImode : SFmode;
3923 regno = i;
3925 else if (reg1)
3927 mode = i < 32 ? SImode : SFmode;
3928 regno = i + 1;
3929 offset += 4;
3931 else
3932 continue;
3934 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3935 set_mem_alias_set (mem, sparc_sr_alias_set);
3936 if (action == SORR_SAVE)
3938 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3939 RTX_FRAME_RELATED_P (insn) = 1;
3941 else /* action == SORR_RESTORE */
3942 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3944 /* Always preserve double-word alignment. */
3945 offset = (offset + 7) & -8;
3949 return offset;
3952 /* Emit code to save or restore call-saved registers, as selected by ACTION. */
3954 static void
3955 emit_save_or_restore_regs (int action)
3957 HOST_WIDE_INT offset;
3958 rtx base;
3960 offset = frame_base_offset - apparent_fsize;
3962 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3964 /* ??? This might be optimized a little as %g1 might already have a
3965 value close enough that a single add insn will do. */
3966 /* ??? Although, all of this is probably only a temporary fix
3967 because if %g1 can hold a function result, then
3968 sparc_expand_epilogue will lose (the result will be
3969 clobbered). */
3970 base = gen_rtx_REG (Pmode, 1);
3971 emit_move_insn (base, GEN_INT (offset));
3972 emit_insn (gen_rtx_SET (VOIDmode,
3973 base,
3974 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3975 offset = 0;
3977 else
3978 base = frame_base_reg;
3980 offset = save_or_restore_regs (0, 8, base, offset, action);
3981 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3984 /* Generate a save_register_window insn. */
3986 static rtx
3987 gen_save_register_window (rtx increment)
3989 if (TARGET_ARCH64)
3990 return gen_save_register_windowdi (increment);
3991 else
3992 return gen_save_register_windowsi (increment);
3995 /* Generate an increment for the stack pointer. */
3997 static rtx
3998 gen_stack_pointer_inc (rtx increment)
4000 return gen_rtx_SET (VOIDmode,
4001 stack_pointer_rtx,
4002 gen_rtx_PLUS (Pmode,
4003 stack_pointer_rtx,
4004 increment));
4007 /* Generate a decrement for the stack pointer. */
4009 static rtx
4010 gen_stack_pointer_dec (rtx decrement)
4012 return gen_rtx_SET (VOIDmode,
4013 stack_pointer_rtx,
4014 gen_rtx_MINUS (Pmode,
4015 stack_pointer_rtx,
4016 decrement));
4019 /* Expand the function prologue. The prologue is responsible for reserving
4020 storage for the frame, saving the call-saved registers and loading the
4021 PIC register if needed. */
4023 void
4024 sparc_expand_prologue (void)
4026 rtx insn;
4027 int i;
4029 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4030 on the final value of the flag means deferring the prologue/epilogue
4031 expansion until just before the second scheduling pass, which is too
4032 late to emit multiple epilogues or return insns.
4034 Of course we are making the assumption that the value of the flag
4035 will not change between now and its final value. Of the three parts
4036 of the formula, only the last one can reasonably vary. Let's take a
4037 closer look, assuming that the first two are true
4038 (otherwise the last one is effectively silenced).
4040 If only_leaf_regs_used returns false, the global predicate will also
4041 be false so the actual frame size calculated below will be positive.
4042 As a consequence, the save_register_window insn will be emitted in
4043 the instruction stream; now this insn explicitly references %fp
4044 which is not a leaf register so only_leaf_regs_used will always
4045 return false subsequently.
4047 If only_leaf_regs_used returns true, we hope that the subsequent
4048 optimization passes won't cause non-leaf registers to pop up. For
4049 example, the regrename pass has special provisions to not rename to
4050 non-leaf registers in a leaf function. */
4051 sparc_leaf_function_p
4052 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4054 /* Need to use actual_fsize, since we are also allocating
4055 space for our callee (and our own register save area). */
4056 actual_fsize
4057 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4059 /* Advertise that the data calculated just above are now valid. */
4060 sparc_prologue_data_valid_p = true;
4062 if (sparc_leaf_function_p)
4064 frame_base_reg = stack_pointer_rtx;
4065 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4067 else
4069 frame_base_reg = hard_frame_pointer_rtx;
4070 frame_base_offset = SPARC_STACK_BIAS;
4073 if (actual_fsize == 0)
4074 /* do nothing. */ ;
4075 else if (sparc_leaf_function_p)
4077 if (actual_fsize <= 4096)
4078 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4079 else if (actual_fsize <= 8192)
4081 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4082 /* %sp is still the CFA register. */
4083 RTX_FRAME_RELATED_P (insn) = 1;
4084 insn
4085 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4087 else
4089 rtx reg = gen_rtx_REG (Pmode, 1);
4090 emit_move_insn (reg, GEN_INT (-actual_fsize));
4091 insn = emit_insn (gen_stack_pointer_inc (reg));
4092 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4093 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4096 RTX_FRAME_RELATED_P (insn) = 1;
4098 else
4100 if (actual_fsize <= 4096)
4101 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4102 else if (actual_fsize <= 8192)
4104 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4105 /* %sp is not the CFA register anymore. */
4106 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4108 else
4110 rtx reg = gen_rtx_REG (Pmode, 1);
4111 emit_move_insn (reg, GEN_INT (-actual_fsize));
4112 insn = emit_insn (gen_save_register_window (reg));
4115 RTX_FRAME_RELATED_P (insn) = 1;
4116 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4117 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4120 if (num_gfregs)
4121 emit_save_or_restore_regs (SORR_SAVE);
4123 /* Load the PIC register if needed. */
4124 if (flag_pic && crtl->uses_pic_offset_table)
4125 load_pic_register (false);
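/* Illustrative sketch, not part of the port: the 4096/8192 thresholds
   above come from the 13-bit signed immediate field of SPARC arithmetic
   instructions, which covers [-4096, 4095].  A frame of at most 4096
   bytes fits one immediate, one of at most 8192 bytes fits two (-4096
   and 4096 - fsize), and anything larger needs a scratch register
   (%g1 above).  A minimal check with a hypothetical fits_simm13
   predicate: */
#if 0
#include <assert.h>

static int
fits_simm13 (long x)
{
  return x >= -4096 && x <= 4095;
}

int
main (void)
{
  assert (fits_simm13 (-4096));		/* one insn when fsize <= 4096 */
  assert (!fits_simm13 (-4097));	/* otherwise split or use %g1 */
  assert (fits_simm13 (4096 - 8192));	/* second insn when fsize <= 8192 */
  return 0;
}
#endif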
4128 /* This function generates the assembly code for function entry, which boils
4129 down to emitting the necessary .register directives. */
4131 static void
4132 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4134 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4135 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4137 sparc_output_scratch_registers (file);
4140 /* Expand the function epilogue, either normal or part of a sibcall.
4141 We emit all the instructions except the return or the call. */
4143 void
4144 sparc_expand_epilogue (void)
4146 if (num_gfregs)
4147 emit_save_or_restore_regs (SORR_RESTORE);
4149 if (actual_fsize == 0)
4150 /* do nothing. */ ;
4151 else if (sparc_leaf_function_p)
4153 if (actual_fsize <= 4096)
4154 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4155 else if (actual_fsize <= 8192)
4157 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4158 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4160 else
4162 rtx reg = gen_rtx_REG (Pmode, 1);
4163 emit_move_insn (reg, GEN_INT (-actual_fsize));
4164 emit_insn (gen_stack_pointer_dec (reg));
4169 /* Return true if it is appropriate to emit `return' instructions in the
4170 body of a function. */
4172 bool
4173 sparc_can_use_return_insn_p (void)
4175 return sparc_prologue_data_valid_p
4176 && (actual_fsize == 0 || !sparc_leaf_function_p);
4179 /* This function generates the assembly code for function exit. */
4181 static void
4182 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4184 /* If code does not drop into the epilogue, we still have to output
4185 a dummy nop for the sake of sane backtraces. Otherwise, if the
4186 last two instructions of a function were "call foo; dslot;" this
4187 can make the return PC of foo (i.e. address of call instruction
4188 plus 8) point to the first instruction in the next function. */
4190 rtx insn, last_real_insn;
4192 insn = get_last_insn ();
4194 last_real_insn = prev_real_insn (insn);
4195 if (last_real_insn
4196 && GET_CODE (last_real_insn) == INSN
4197 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4198 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4200 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4201 fputs("\tnop\n", file);
4203 sparc_output_deferred_case_vectors ();
4206 /* Output a 'restore' instruction. */
4208 static void
4209 output_restore (rtx pat)
4211 rtx operands[3];
4213 if (! pat)
4215 fputs ("\t restore\n", asm_out_file);
4216 return;
4219 gcc_assert (GET_CODE (pat) == SET);
4221 operands[0] = SET_DEST (pat);
4222 pat = SET_SRC (pat);
4224 switch (GET_CODE (pat))
4226 case PLUS:
4227 operands[1] = XEXP (pat, 0);
4228 operands[2] = XEXP (pat, 1);
4229 output_asm_insn (" restore %r1, %2, %Y0", operands);
4230 break;
4231 case LO_SUM:
4232 operands[1] = XEXP (pat, 0);
4233 operands[2] = XEXP (pat, 1);
4234 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4235 break;
4236 case ASHIFT:
4237 operands[1] = XEXP (pat, 0);
4238 gcc_assert (XEXP (pat, 1) == const1_rtx);
4239 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4240 break;
4241 default:
4242 operands[1] = pat;
4243 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4244 break;
4248 /* Output a return. */
4250 const char *
4251 output_return (rtx insn)
4253 if (sparc_leaf_function_p)
4255 /* This is a leaf function so we don't have to bother restoring the
4256 register window, which frees us from dealing with the convoluted
4257 semantics of restore/return. We simply output the jump to the
4258 return address and the insn in the delay slot (if any). */
4260 gcc_assert (! crtl->calls_eh_return);
4262 return "jmp\t%%o7+%)%#";
4264 else
4266 /* This is a regular function so we have to restore the register window.
4267 We may have a pending insn for the delay slot, which will be either
4268 combined with the 'restore' instruction or put in the delay slot of
4269 the 'return' instruction. */
4271 if (crtl->calls_eh_return)
4273 /* If the function uses __builtin_eh_return, the eh_return
4274 machinery occupies the delay slot. */
4275 gcc_assert (! final_sequence);
4277 if (! flag_delayed_branch)
4278 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4280 if (TARGET_V9)
4281 fputs ("\treturn\t%i7+8\n", asm_out_file);
4282 else
4283 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4285 if (flag_delayed_branch)
4286 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4287 else
4288 fputs ("\t nop\n", asm_out_file);
4290 else if (final_sequence)
4292 rtx delay, pat;
4294 delay = NEXT_INSN (insn);
4295 gcc_assert (delay);
4297 pat = PATTERN (delay);
4299 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4301 epilogue_renumber (&pat, 0);
4302 return "return\t%%i7+%)%#";
4304 else
4306 output_asm_insn ("jmp\t%%i7+%)", NULL);
4307 output_restore (pat);
4308 PATTERN (delay) = gen_blockage ();
4309 INSN_CODE (delay) = -1;
4312 else
4314 /* The delay slot is empty. */
4315 if (TARGET_V9)
4316 return "return\t%%i7+%)\n\t nop";
4317 else if (flag_delayed_branch)
4318 return "jmp\t%%i7+%)\n\t restore";
4319 else
4320 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4324 return "";
4327 /* Output a sibling call. */
4329 const char *
4330 output_sibcall (rtx insn, rtx call_operand)
4332 rtx operands[1];
4334 gcc_assert (flag_delayed_branch);
4336 operands[0] = call_operand;
4338 if (sparc_leaf_function_p)
4340 /* This is a leaf function so we don't have to bother restoring the
4341 register window. We simply output the jump to the function and
4342 the insn in the delay slot (if any). */
4344 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4346 if (final_sequence)
4347 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4348 operands);
4349 else
4350 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4351 it into a branch if possible. */
4352 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4353 operands);
4355 else
4357 /* This is a regular function so we have to restore the register window.
4358 We may have a pending insn for the delay slot, which will be combined
4359 with the 'restore' instruction. */
4361 output_asm_insn ("call\t%a0, 0", operands);
4363 if (final_sequence)
4365 rtx delay = NEXT_INSN (insn);
4366 gcc_assert (delay);
4368 output_restore (PATTERN (delay));
4370 PATTERN (delay) = gen_blockage ();
4371 INSN_CODE (delay) = -1;
4373 else
4374 output_restore (NULL_RTX);
4377 return "";
4380 /* Functions for handling argument passing.
4382 For 32-bit, the first 6 args are normally in registers and the rest are
4383 pushed. Any arg that starts within the first 6 words is at least
4384 partially passed in a register unless its data type forbids it.
4386 For 64-bit, the argument registers are laid out as an array of 16 elements
4387 and arguments are added sequentially. The first 6 int args and up to the
4388 first 16 fp args (depending on size) are passed in regs.
4390 Slot Stack Integral Float Float in structure Double Long Double
4391 ---- ----- -------- ----- ------------------ ------ -----------
4392 15 [SP+248] %f31 %f30,%f31 %d30
4393 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4394 13 [SP+232] %f27 %f26,%f27 %d26
4395 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4396 11 [SP+216] %f23 %f22,%f23 %d22
4397 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4398 9 [SP+200] %f19 %f18,%f19 %d18
4399 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4400 7 [SP+184] %f15 %f14,%f15 %d14
4401 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4402 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4403 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4404 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4405 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4406 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4407 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4409 Here SP = %sp with -mno-stack-bias, and %sp+stack_bias otherwise.
4411 Integral arguments are always passed as 64-bit quantities appropriately
4412 extended.
4414 Passing of floating point values is handled as follows.
4415 If a prototype is in scope:
4416 If the value is a named argument (i.e. it is not among the `...'
4417 arguments of a stdarg function) then the value is passed in the
4418 appropriate fp reg.
4419 If the value is part of the `...' and is passed in one of the first 6
4420 slots then the value is passed in the appropriate int reg.
4421 If the value is part of the `...' and is not passed in one of the first 6
4422 slots then the value is passed in memory.
4423 If a prototype is not in scope:
4424 If the value is one of the first 6 arguments the value is passed in the
4425 appropriate integer reg and the appropriate fp reg.
4426 If the value is not one of the first 6 arguments the value is passed in
4427 the appropriate fp reg and in memory.
4430 Summary of the calling conventions implemented by GCC on SPARC:
4432 32-bit ABI:
4433 size argument return value
4435 small integer <4 int. reg. int. reg.
4436 word 4 int. reg. int. reg.
4437 double word 8 int. reg. int. reg.
4439 _Complex small integer <8 int. reg. int. reg.
4440 _Complex word 8 int. reg. int. reg.
4441 _Complex double word 16 memory int. reg.
4443 vector integer <=8 int. reg. FP reg.
4444 vector integer >8 memory memory
4446 float 4 int. reg. FP reg.
4447 double 8 int. reg. FP reg.
4448 long double 16 memory memory
4450 _Complex float 8 memory FP reg.
4451 _Complex double 16 memory FP reg.
4452 _Complex long double 32 memory FP reg.
4454 vector float any memory memory
4456 aggregate any memory memory
4460 64-bit ABI:
4461 size argument return value
4463 small integer <8 int. reg. int. reg.
4464 word 8 int. reg. int. reg.
4465 double word 16 int. reg. int. reg.
4467 _Complex small integer <16 int. reg. int. reg.
4468 _Complex word 16 int. reg. int. reg.
4469 _Complex double word 32 memory int. reg.
4471 vector integer <=16 FP reg. FP reg.
4472 vector integer 16<s<=32 memory FP reg.
4473 vector integer >32 memory memory
4475 float 4 FP reg. FP reg.
4476 double 8 FP reg. FP reg.
4477 long double 16 FP reg. FP reg.
4479 _Complex float 8 FP reg. FP reg.
4480 _Complex double 16 FP reg. FP reg.
4481 _Complex long double 32 memory FP reg.
4483 vector float <=16 FP reg. FP reg.
4484 vector float 16<s<=32 memory FP reg.
4485 vector float >32 memory memory
4487 aggregate <=16 reg. reg.
4488 aggregate 16<s<=32 memory reg.
4489 aggregate >32 memory memory
4493 Note #1: complex floating-point types follow the extended SPARC ABIs as
4494 implemented by the Sun compiler.
4496 Note #2: integral vector types follow the scalar floating-point types
4497 conventions to match what is implemented by the Sun VIS SDK.
4499 Note #3: floating-point vector types follow the aggregate types
4500 conventions. */
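/* Worked example, illustrative only, reading off the 64-bit table above:
   for a prototyped call to

	extern void f (int a, double b, int c);

   A occupies slot 0 and is passed in %o0, B occupies slot 1 and is
   passed in %d2, and C occupies slot 2 and is passed in %o2; each named
   argument simply takes the next 8-byte slot, with floating-point
   values going to the FP registers overlaying that slot. */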
4503 /* Maximum number of int regs for args. */
4504 #define SPARC_INT_ARG_MAX 6
4505 /* Maximum number of fp regs for args. */
4506 #define SPARC_FP_ARG_MAX 16
4508 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
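/* Illustrative sketch, not part of the port: ROUND_ADVANCE converts a
   size in bytes into a number of argument slots, rounding up.  A
   stand-alone copy, assuming hypothetical 8-byte (64-bit) words: */
#if 0
#include <assert.h>

#define WORD 8			/* hypothetical UNITS_PER_WORD */
#define ROUND_ADVANCE_X(SIZE) (((SIZE) + WORD - 1) / WORD)

int
main (void)
{
  assert (ROUND_ADVANCE_X (1) == 1);
  assert (ROUND_ADVANCE_X (8) == 1);
  assert (ROUND_ADVANCE_X (9) == 2);
  assert (ROUND_ADVANCE_X (16) == 2);
  return 0;
}
#endif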
4510 /* Handle the INIT_CUMULATIVE_ARGS macro.
4511 Initialize a variable CUM of type CUMULATIVE_ARGS
4512 for a call to a function whose data type is FNTYPE.
4513 For a library call, FNTYPE is 0. */
4515 void
4516 init_cumulative_args (struct sparc_args *cum, tree fntype,
4517 rtx libname ATTRIBUTE_UNUSED,
4518 tree fndecl ATTRIBUTE_UNUSED)
4520 cum->words = 0;
4521 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4522 cum->libcall_p = fntype == 0;
4525 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4526 When a prototype says `char' or `short', really pass an `int'. */
4528 static bool
4529 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4531 return TARGET_ARCH32 ? true : false;
4534 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4536 static bool
4537 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4539 return TARGET_ARCH64 ? true : false;
4542 /* Scan the record type TYPE and return the following predicates:
4543 - INTREGS_P: the record contains at least one field or sub-field
4544 that is eligible for promotion in integer registers.
4545 - FPREGS_P: the record contains at least one field or sub-field
4546 that is eligible for promotion in floating-point registers.
4547 - PACKED_P: the record contains at least one field that is packed.
4549 Sub-fields are not taken into account for the PACKED_P predicate. */
4551 static void
4552 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4554 tree field;
4556 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4558 if (TREE_CODE (field) == FIELD_DECL)
4560 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4561 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4562 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4563 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4564 && TARGET_FPU)
4565 *fpregs_p = 1;
4566 else
4567 *intregs_p = 1;
4569 if (packed_p && DECL_PACKED (field))
4570 *packed_p = 1;
4575 /* Compute the slot number to pass an argument in.
4576 Return the slot number or -1 if passing on the stack.
4578 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4579 the preceding args and about the function being called.
4580 MODE is the argument's machine mode.
4581 TYPE is the data type of the argument (as a tree).
4582 This is null for libcalls where that information may
4583 not be available.
4584 NAMED is nonzero if this argument is a named parameter
4585 (otherwise it is an extra parameter matching an ellipsis).
4586 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4587 *PREGNO records the register number to use if scalar type.
4588 *PPADDING records the amount of padding needed in words. */
4590 static int
4591 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4592 tree type, int named, int incoming_p,
4593 int *pregno, int *ppadding)
4595 int regbase = (incoming_p
4596 ? SPARC_INCOMING_INT_ARG_FIRST
4597 : SPARC_OUTGOING_INT_ARG_FIRST);
4598 int slotno = cum->words;
4599 enum mode_class mclass;
4600 int regno;
4602 *ppadding = 0;
4604 if (type && TREE_ADDRESSABLE (type))
4605 return -1;
4607 if (TARGET_ARCH32
4608 && mode == BLKmode
4609 && type
4610 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4611 return -1;
4613 /* For SPARC64, objects requiring 16-byte alignment get it. */
4614 if (TARGET_ARCH64
4615 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4616 && (slotno & 1) != 0)
4617 slotno++, *ppadding = 1;
4619 mclass = GET_MODE_CLASS (mode);
4620 if (type && TREE_CODE (type) == VECTOR_TYPE)
4622 /* Vector types deserve special treatment because they are
4623 polymorphic wrt their mode, depending upon whether VIS
4624 instructions are enabled. */
4625 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4627 /* The SPARC port defines no floating-point vector modes. */
4628 gcc_assert (mode == BLKmode);
4630 else
4632 /* Integral vector types should either have a vector
4633 mode or an integral mode, because we are guaranteed
4634 by pass_by_reference that their size is not greater
4635 than 16 bytes and TImode is 16-byte wide. */
4636 gcc_assert (mode != BLKmode);
4638 /* Vector integers are handled like floats according to
4639 the Sun VIS SDK. */
4640 mclass = MODE_FLOAT;
4644 switch (mclass)
4646 case MODE_FLOAT:
4647 case MODE_COMPLEX_FLOAT:
4648 case MODE_VECTOR_INT:
4649 if (TARGET_ARCH64 && TARGET_FPU && named)
4651 if (slotno >= SPARC_FP_ARG_MAX)
4652 return -1;
4653 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4654 /* Arguments filling only one single FP register are
4655 right-justified in the outer double FP register. */
4656 if (GET_MODE_SIZE (mode) <= 4)
4657 regno++;
4658 break;
4660 /* fallthrough */
4662 case MODE_INT:
4663 case MODE_COMPLEX_INT:
4664 if (slotno >= SPARC_INT_ARG_MAX)
4665 return -1;
4666 regno = regbase + slotno;
4667 break;
4669 case MODE_RANDOM:
4670 if (mode == VOIDmode)
4671 /* MODE is VOIDmode when generating the actual call. */
4672 return -1;
4674 gcc_assert (mode == BLKmode);
4676 if (TARGET_ARCH32
4677 || !type
4678 || (TREE_CODE (type) != VECTOR_TYPE
4679 && TREE_CODE (type) != RECORD_TYPE))
4681 if (slotno >= SPARC_INT_ARG_MAX)
4682 return -1;
4683 regno = regbase + slotno;
4685 else /* TARGET_ARCH64 && type */
4687 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4689 /* First see what kinds of registers we would need. */
4690 if (TREE_CODE (type) == VECTOR_TYPE)
4691 fpregs_p = 1;
4692 else
4693 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4695 /* The ABI obviously doesn't specify how packed structures
4696 are passed. These are defined to be passed in int regs
4697 if possible, otherwise memory. */
4698 if (packed_p || !named)
4699 fpregs_p = 0, intregs_p = 1;
4701 /* If all arg slots are filled, then must pass on stack. */
4702 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4703 return -1;
4705 /* If there are only int args and all int arg slots are filled,
4706 then must pass on stack. */
4707 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4708 return -1;
4710 /* Note that even if all int arg slots are filled, fp members may
4711 still be passed in regs if such regs are available.
4712 *PREGNO isn't set because there may be more than one, it's up
4713 to the caller to compute them. */
4714 return slotno;
4716 break;
4718 default:
4719 gcc_unreachable ();
4722 *pregno = regno;
4723 return slotno;
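/* Worked example, illustrative only: a named float in slot 1 gets
   regno = SPARC_FP_ARG_FIRST + 1 * 2 + 1, i.e. %f3, matching the
   argument table earlier: a 4-byte value is right-justified in its
   8-byte slot, hence the odd register.  A double in the same slot
   would get %f2/%f3, i.e. %d2. */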
4726 /* Handle recursive register counting for structure field layout. */
4728 struct function_arg_record_value_parms
4730 rtx ret; /* return expression being built. */
4731 int slotno; /* slot number of the argument. */
4732 int named; /* whether the argument is named. */
4733 int regbase; /* regno of the base register. */
4734 int stack; /* 1 if part of the argument is on the stack. */
4735 int intoffset; /* offset of the first pending integer field. */
4736 unsigned int nregs; /* number of words passed in registers. */
4739 static void function_arg_record_value_3
4740 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4741 static void function_arg_record_value_2
4742 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4743 static void function_arg_record_value_1
4744 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4745 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4746 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4748 /* A subroutine of function_arg_record_value. Traverse the structure
4749 recursively and determine how many registers will be required. */
4751 static void
4752 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4753 struct function_arg_record_value_parms *parms,
4754 bool packed_p)
4756 tree field;
4758 /* We need to compute how many registers are needed so we can
4759 allocate the PARALLEL, but before we can do that we need to know
4760 whether there are any packed fields. The ABI obviously doesn't
4761 specify how structures are passed in this case, so they are
4762 defined to be passed in int regs if possible, otherwise memory,
4763 regardless of whether there are fp values present. */
4765 if (! packed_p)
4766 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4768 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4770 packed_p = true;
4771 break;
4775 /* Compute how many registers we need. */
4776 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4778 if (TREE_CODE (field) == FIELD_DECL)
4780 HOST_WIDE_INT bitpos = startbitpos;
4782 if (DECL_SIZE (field) != 0)
4784 if (integer_zerop (DECL_SIZE (field)))
4785 continue;
4787 if (host_integerp (bit_position (field), 1))
4788 bitpos += int_bit_position (field);
4791 /* ??? FIXME: else assume zero offset. */
4793 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4794 function_arg_record_value_1 (TREE_TYPE (field),
4795 bitpos,
4796 parms,
4797 packed_p);
4798 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4799 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4800 && TARGET_FPU
4801 && parms->named
4802 && ! packed_p)
4804 if (parms->intoffset != -1)
4806 unsigned int startbit, endbit;
4807 int intslots, this_slotno;
4809 startbit = parms->intoffset & -BITS_PER_WORD;
4810 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4812 intslots = (endbit - startbit) / BITS_PER_WORD;
4813 this_slotno = parms->slotno + parms->intoffset
4814 / BITS_PER_WORD;
4816 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4818 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4819 /* We need to pass this field on the stack. */
4820 parms->stack = 1;
4823 parms->nregs += intslots;
4824 parms->intoffset = -1;
4827 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4828 If it weren't true we wouldn't be here. */
4829 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4830 && DECL_MODE (field) == BLKmode)
4831 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4832 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4833 parms->nregs += 2;
4834 else
4835 parms->nregs += 1;
4837 else
4839 if (parms->intoffset == -1)
4840 parms->intoffset = bitpos;
4846 /* A subroutine of function_arg_record_value. Assign the bits of the
4847 structure between parms->intoffset and bitpos to integer registers. */
4849 static void
4850 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4851 struct function_arg_record_value_parms *parms)
4853 enum machine_mode mode;
4854 unsigned int regno;
4855 unsigned int startbit, endbit;
4856 int this_slotno, intslots, intoffset;
4857 rtx reg;
4859 if (parms->intoffset == -1)
4860 return;
4862 intoffset = parms->intoffset;
4863 parms->intoffset = -1;
4865 startbit = intoffset & -BITS_PER_WORD;
4866 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4867 intslots = (endbit - startbit) / BITS_PER_WORD;
4868 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4870 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4871 if (intslots <= 0)
4872 return;
4874 /* If this is the trailing part of a word, only load that much into
4875 the register. Otherwise load the whole register. Note that in
4876 the latter case we may pick up unwanted bits. It's not a problem
4877 at the moment, but we may wish to revisit it. */
4879 if (intoffset % BITS_PER_WORD != 0)
4880 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4881 MODE_INT);
4882 else
4883 mode = word_mode;
4885 intoffset /= BITS_PER_UNIT;
4886 do
4888 regno = parms->regbase + this_slotno;
4889 reg = gen_rtx_REG (mode, regno);
4890 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4891 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4893 this_slotno += 1;
4894 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4895 mode = word_mode;
4896 parms->nregs += 1;
4897 intslots -= 1;
4899 while (intslots > 0);
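/* Illustrative sketch, not part of the port: the
   (x | (UNITS_PER_WORD - 1)) + 1 step above always advances X to the
   start of the following word, even when X is already word-aligned,
   which is exactly what the loop needs after assigning a register to
   the current, possibly partial, word.  With hypothetical 8-byte
   words: */
#if 0
#include <assert.h>

int
main (void)
{
  assert (((0 | 7) + 1) == 8);
  assert (((5 | 7) + 1) == 8);
  assert (((8 | 7) + 1) == 16);	/* already aligned, still advances */
  return 0;
}
#endif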
4902 /* A subroutine of function_arg_record_value. Traverse the structure
4903 recursively and assign bits to floating point registers. Track which
4904 bits in between need integer registers; invoke function_arg_record_value_3
4905 to make that happen. */
4907 static void
4908 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
4909 struct function_arg_record_value_parms *parms,
4910 bool packed_p)
4912 tree field;
4914 if (! packed_p)
4915 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4917 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4919 packed_p = true;
4920 break;
4924 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4926 if (TREE_CODE (field) == FIELD_DECL)
4928 HOST_WIDE_INT bitpos = startbitpos;
4930 if (DECL_SIZE (field) != 0)
4932 if (integer_zerop (DECL_SIZE (field)))
4933 continue;
4935 if (host_integerp (bit_position (field), 1))
4936 bitpos += int_bit_position (field);
4939 /* ??? FIXME: else assume zero offset. */
4941 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4942 function_arg_record_value_2 (TREE_TYPE (field),
4943 bitpos,
4944 parms,
4945 packed_p);
4946 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4947 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4948 && TARGET_FPU
4949 && parms->named
4950 && ! packed_p)
4952 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4953 int regno, nregs, pos;
4954 enum machine_mode mode = DECL_MODE (field);
4955 rtx reg;
4957 function_arg_record_value_3 (bitpos, parms);
4959 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4960 && mode == BLKmode)
4962 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4963 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4965 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4967 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4968 nregs = 2;
4970 else
4971 nregs = 1;
4973 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4974 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4975 regno++;
4976 reg = gen_rtx_REG (mode, regno);
4977 pos = bitpos / BITS_PER_UNIT;
4978 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4979 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4980 parms->nregs += 1;
4981 while (--nregs > 0)
4983 regno += GET_MODE_SIZE (mode) / 4;
4984 reg = gen_rtx_REG (mode, regno);
4985 pos += GET_MODE_SIZE (mode);
4986 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4987 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4988 parms->nregs += 1;
4991 else
4993 if (parms->intoffset == -1)
4994 parms->intoffset = bitpos;
5000 /* Used by function_arg and function_value to implement the complex
5001 conventions of the 64-bit ABI for passing and returning structures.
5002 Return an expression valid as a return value for the two macros
5003 FUNCTION_ARG and FUNCTION_VALUE.
5005 TYPE is the data type of the argument (as a tree).
5006 This is null for libcalls where that information may
5007 not be available.
5008 MODE is the argument's machine mode.
5009 SLOTNO is the index number of the argument's slot in the parameter array.
5010 NAMED is nonzero if this argument is a named parameter
5011 (otherwise it is an extra parameter matching an ellipsis).
5012 REGBASE is the regno of the base register for the parameter array. */
5014 static rtx
5015 function_arg_record_value (const_tree type, enum machine_mode mode,
5016 int slotno, int named, int regbase)
5018 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5019 struct function_arg_record_value_parms parms;
5020 unsigned int nregs;
5022 parms.ret = NULL_RTX;
5023 parms.slotno = slotno;
5024 parms.named = named;
5025 parms.regbase = regbase;
5026 parms.stack = 0;
5028 /* Compute how many registers we need. */
5029 parms.nregs = 0;
5030 parms.intoffset = 0;
5031 function_arg_record_value_1 (type, 0, &parms, false);
5033 /* Take into account pending integer fields. */
5034 if (parms.intoffset != -1)
5036 unsigned int startbit, endbit;
5037 int intslots, this_slotno;
5039 startbit = parms.intoffset & -BITS_PER_WORD;
5040 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5041 intslots = (endbit - startbit) / BITS_PER_WORD;
5042 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5044 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5046 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5047 /* We need to pass this field on the stack. */
5048 parms.stack = 1;
5051 parms.nregs += intslots;
5053 nregs = parms.nregs;
5055 /* Allocate the vector and handle some annoying special cases. */
5056 if (nregs == 0)
5058 /* ??? Empty structure has no value? Duh? */
5059 if (typesize <= 0)
5061 /* Though there's nothing really to store, return a word register
5062 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5063 leads to breakage due to the fact that there are zero bytes to
5064 load. */
5065 return gen_rtx_REG (mode, regbase);
5067 else
5069 /* ??? C++ has structures with no fields, and yet a size. Give up
5070 for now and pass everything back in integer registers. */
5071 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5073 if (nregs + slotno > SPARC_INT_ARG_MAX)
5074 nregs = SPARC_INT_ARG_MAX - slotno;
5076 gcc_assert (nregs != 0);
5078 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5080 /* If at least one field must be passed on the stack, generate
5081 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5082 also be passed on the stack. We can't do much better because the
5083 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5084 of structures for which the fields passed exclusively in registers
5085 are not at the beginning of the structure. */
5086 if (parms.stack)
5087 XVECEXP (parms.ret, 0, 0)
5088 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5090 /* Fill in the entries. */
5091 parms.nregs = 0;
5092 parms.intoffset = 0;
5093 function_arg_record_value_2 (type, 0, &parms, false);
5094 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5096 gcc_assert (parms.nregs == nregs);
5098 return parms.ret;
5101 /* Used by function_arg and function_value to implement the conventions
5102 of the 64-bit ABI for passing and returning unions.
5103 Return an expression valid as a return value for the two macros
5104 FUNCTION_ARG and FUNCTION_VALUE.
5106 SIZE is the size in bytes of the union.
5107 MODE is the argument's machine mode.
5108 SLOTNO is the argument's slot index; REGNO is the hard register the union will be passed in. */
5110 static rtx
5111 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5112 int regno)
5114 int nwords = ROUND_ADVANCE (size), i;
5115 rtx regs;
5117 /* See comment in previous function for empty structures. */
5118 if (nwords == 0)
5119 return gen_rtx_REG (mode, regno);
5121 if (slotno == SPARC_INT_ARG_MAX - 1)
5122 nwords = 1;
5124 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5126 for (i = 0; i < nwords; i++)
5128 /* Unions are passed left-justified. */
5129 XVECEXP (regs, 0, i)
5130 = gen_rtx_EXPR_LIST (VOIDmode,
5131 gen_rtx_REG (word_mode, regno),
5132 GEN_INT (UNITS_PER_WORD * i));
5133 regno++;
5136 return regs;
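/* Illustrative example, not part of the port: a 16-byte union passed
   starting in slot 1 yields nwords == 2 and hence, on 64-bit,

	(parallel [(expr_list (reg:DI %o1) (const_int 0))
		   (expr_list (reg:DI %o2) (const_int 8))])

   i.e. left-justified words in consecutive integer registers; if the
   union started in the last slot, only one word would be promoted. */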
5139 /* Used by function_arg and function_value to implement the conventions
5140 for passing and returning large (BLKmode) vectors.
5141 Return an expression valid as a return value for the two macros
5142 FUNCTION_ARG and FUNCTION_VALUE.
5144 SIZE is the size in bytes of the vector (at least 8 bytes).
5145 REGNO is the FP hard register the vector will be passed in. */
5147 static rtx
5148 function_arg_vector_value (int size, int regno)
5150 int i, nregs = size / 8;
5151 rtx regs;
5153 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5155 for (i = 0; i < nregs; i++)
5157 XVECEXP (regs, 0, i)
5158 = gen_rtx_EXPR_LIST (VOIDmode,
5159 gen_rtx_REG (DImode, regno + 2*i),
5160 GEN_INT (i*8));
5163 return regs;
5166 /* Handle the FUNCTION_ARG macro.
5167 Determine where to put an argument to a function.
5168 Value is zero to push the argument on the stack,
5169 or a hard register in which to store the argument.
5171 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5172 the preceding args and about the function being called.
5173 MODE is the argument's machine mode.
5174 TYPE is the data type of the argument (as a tree).
5175 This is null for libcalls where that information may
5176 not be available.
5177 NAMED is nonzero if this argument is a named parameter
5178 (otherwise it is an extra parameter matching an ellipsis).
5179 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5182 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5183 tree type, int named, int incoming_p)
5185 int regbase = (incoming_p
5186 ? SPARC_INCOMING_INT_ARG_FIRST
5187 : SPARC_OUTGOING_INT_ARG_FIRST);
5188 int slotno, regno, padding;
5189 enum mode_class mclass = GET_MODE_CLASS (mode);
5191 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5192 &regno, &padding);
5193 if (slotno == -1)
5194 return 0;
5196 /* Vector types deserve special treatment because they are polymorphic wrt
5197 their mode, depending upon whether VIS instructions are enabled. */
5198 if (type && TREE_CODE (type) == VECTOR_TYPE)
5200 HOST_WIDE_INT size = int_size_in_bytes (type);
5201 gcc_assert ((TARGET_ARCH32 && size <= 8)
5202 || (TARGET_ARCH64 && size <= 16));
5204 if (mode == BLKmode)
5205 return function_arg_vector_value (size,
5206 SPARC_FP_ARG_FIRST + 2*slotno);
5207 else
5208 mclass = MODE_FLOAT;
5211 if (TARGET_ARCH32)
5212 return gen_rtx_REG (mode, regno);
5214 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5215 and are promoted to registers if possible. */
5216 if (type && TREE_CODE (type) == RECORD_TYPE)
5218 HOST_WIDE_INT size = int_size_in_bytes (type);
5219 gcc_assert (size <= 16);
5221 return function_arg_record_value (type, mode, slotno, named, regbase);
5224 /* Unions up to 16 bytes in size are passed in integer registers. */
5225 else if (type && TREE_CODE (type) == UNION_TYPE)
5227 HOST_WIDE_INT size = int_size_in_bytes (type);
5228 gcc_assert (size <= 16);
5230 return function_arg_union_value (size, mode, slotno, regno);
5233 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5234 but also have the slot allocated for them.
5235 If no prototype is in scope fp values in register slots get passed
5236 in two places, either fp regs and int regs or fp regs and memory. */
5237 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5238 && SPARC_FP_REG_P (regno))
5240 rtx reg = gen_rtx_REG (mode, regno);
5241 if (cum->prototype_p || cum->libcall_p)
5243 /* "* 2" because fp reg numbers are recorded in 4 byte
5244 quantities. */
5245 #if 0
5246 /* ??? This will cause the value to be passed in the fp reg and
5247 in the stack. When a prototype exists we want to pass the
5248 value in the reg but reserve space on the stack. That's an
5249 optimization, and is deferred [for a bit]. */
5250 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5251 return gen_rtx_PARALLEL (mode,
5252 gen_rtvec (2,
5253 gen_rtx_EXPR_LIST (VOIDmode,
5254 NULL_RTX, const0_rtx),
5255 gen_rtx_EXPR_LIST (VOIDmode,
5256 reg, const0_rtx)));
5257 else
5258 #else
5259 /* ??? It seems that passing back a register even when past
5260 the area declared by REG_PARM_STACK_SPACE will allocate
5261 space appropriately, and will not copy the data onto the
5262 stack, exactly as we desire.
5264 This is due to locate_and_pad_parm being called in
5265 expand_call whenever reg_parm_stack_space > 0, which
5266 while beneficial to our example here, would seem to be
5267 in error from what had been intended. Ho hum... -- r~ */
5268 #endif
5269 return reg;
5271 else
5273 rtx v0, v1;
5275 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5277 int intreg;
5279 /* On incoming, we don't need to know that the value
5280 is passed in %f0 and %i0, and it confuses other parts
5281 causing needless spillage even on the simplest cases. */
5282 if (incoming_p)
5283 return reg;
5285 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5286 + (regno - SPARC_FP_ARG_FIRST) / 2);
5288 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5289 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5290 const0_rtx);
5291 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5293 else
5295 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5296 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5297 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5302 /* All other aggregate types are passed in an integer register in a mode
5303 corresponding to the size of the type. */
5304 else if (type && AGGREGATE_TYPE_P (type))
5306 HOST_WIDE_INT size = int_size_in_bytes (type);
5307 gcc_assert (size <= 16);
5309 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5312 return gen_rtx_REG (mode, regno);
5315 /* For an arg passed partly in registers and partly in memory,
5316 this is the number of bytes of registers used.
5317 For args passed entirely in registers or entirely in memory, zero.
5319 Any arg that starts in the first 6 regs but won't entirely fit in them
5320 needs partial registers on v8. On v9, structures with integer
5321 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5322 values that begin in the last fp reg [where "last fp reg" varies with the
5323 mode] will be split between that reg and memory. */
5325 static int
5326 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5327 tree type, bool named)
5329 int slotno, regno, padding;
5331 /* We pass 0 for incoming_p here; it doesn't matter. */
5332 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5334 if (slotno == -1)
5335 return 0;
5337 if (TARGET_ARCH32)
5339 if ((slotno + (mode == BLKmode
5340 ? ROUND_ADVANCE (int_size_in_bytes (type))
5341 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5342 > SPARC_INT_ARG_MAX)
5343 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5345 else
5347 /* We are guaranteed by pass_by_reference that the size of the
5348 argument is not greater than 16 bytes, so we only need to return
5349 one word if the argument is partially passed in registers. */
5351 if (type && AGGREGATE_TYPE_P (type))
5353 int size = int_size_in_bytes (type);
5355 if (size > UNITS_PER_WORD
5356 && slotno == SPARC_INT_ARG_MAX - 1)
5357 return UNITS_PER_WORD;
5359 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5360 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5361 && ! (TARGET_FPU && named)))
5363 /* The complex types are passed as packed types. */
5364 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5365 && slotno == SPARC_INT_ARG_MAX - 1)
5366 return UNITS_PER_WORD;
5368 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5370 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5371 > SPARC_FP_ARG_MAX)
5372 return UNITS_PER_WORD;
5376 return 0;
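/* Worked example, illustrative only, for the 32-bit branch above: a
   DImode argument starting in slot 5 needs slots 5 and 6, but only 6
   slots exist, so one word goes in %o5 and one on the stack.  A
   stand-alone check of the arithmetic, with hypothetical 4-byte words
   and 6 register slots: */
#if 0
#include <assert.h>

int
main (void)
{
  const int units_per_word = 4, int_arg_max = 6;
  int slotno = 5, size_in_words = 2;	/* 8-byte arg in the last slot */

  assert (slotno + size_in_words > int_arg_max);
  assert ((int_arg_max - slotno) * units_per_word == 4); /* bytes in %o5 */
  return 0;
}
#endif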
5379 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5380 Specify whether to pass the argument by reference. */
5382 static bool
5383 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5384 enum machine_mode mode, const_tree type,
5385 bool named ATTRIBUTE_UNUSED)
5387 if (TARGET_ARCH32)
5388 /* Original SPARC 32-bit ABI says that structures and unions,
5389 and quad-precision floats are passed by reference. For Pascal,
5390 also pass arrays by reference. All other base types are passed
5391 in registers.
5393 Extended ABI (as implemented by the Sun compiler) says that all
5394 complex floats are passed by reference. Pass complex integers
5395 in registers up to 8 bytes. More generally, enforce the 2-word
5396 cap for passing arguments in registers.
5398 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5399 integers are passed like floats of the same size, that is in
5400 registers up to 8 bytes. Pass all vector floats by reference
5401 like structure and unions. */
5402 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5403 || mode == SCmode
5404 /* Catch CDImode, TFmode, DCmode and TCmode. */
5405 || GET_MODE_SIZE (mode) > 8
5406 || (type
5407 && TREE_CODE (type) == VECTOR_TYPE
5408 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5409 else
5410 /* Original SPARC 64-bit ABI says that structures and unions
5411 smaller than 16 bytes are passed in registers, as well as
5412 all other base types.
5414 Extended ABI (as implemented by the Sun compiler) says that
5415 complex floats are passed in registers up to 16 bytes. Pass
5416 all complex integers in registers up to 16 bytes. More generally,
5417 enforce the 2-word cap for passing arguments in registers.
5419 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5420 integers are passed like floats of the same size, that is in
5421 registers (up to 16 bytes). Pass all vector floats like structure
5422 and unions. */
5423 return ((type
5424 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5425 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5426 /* Catch CTImode and TCmode. */
5427 || GET_MODE_SIZE (mode) > 16);
5430 /* Handle the FUNCTION_ARG_ADVANCE macro.
5431 Update the data in CUM to advance over an argument
5432 of mode MODE and data type TYPE.
5433 TYPE is null for libcalls where that information may not be available. */
5435 void
5436 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5437 tree type, int named)
5439 int slotno, regno, padding;
5441 /* We pass 0 for incoming_p here; it doesn't matter. */
5442 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5444 /* If the register required leading padding, add it. */
5445 if (slotno != -1)
5446 cum->words += padding;
5448 if (TARGET_ARCH32)
5450 cum->words += (mode != BLKmode
5451 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5452 : ROUND_ADVANCE (int_size_in_bytes (type)));
5454 else
5456 if (type && AGGREGATE_TYPE_P (type))
5458 int size = int_size_in_bytes (type);
5460 if (size <= 8)
5461 ++cum->words;
5462 else if (size <= 16)
5463 cum->words += 2;
5464 else /* passed by reference */
5465 ++cum->words;
5467 else
5469 cum->words += (mode != BLKmode
5470 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5471 : ROUND_ADVANCE (int_size_in_bytes (type)));
5476 /* Handle the FUNCTION_ARG_PADDING macro.
5477 For the 64-bit ABI, structs are always stored left-shifted in their
5478 argument slot. */
5480 enum direction
5481 function_arg_padding (enum machine_mode mode, const_tree type)
5483 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5484 return upward;
5486 /* Fall back to the default. */
5487 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5490 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5491 Specify whether to return the return value in memory. */
5493 static bool
5494 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5496 if (TARGET_ARCH32)
5497 /* Original SPARC 32-bit ABI says that structures and unions,
5498 and quad-precision floats are returned in memory. All other
5499 base types are returned in registers.
5501 Extended ABI (as implemented by the Sun compiler) says that
5502 all complex floats are returned in registers (8 FP registers
5503 at most for '_Complex long double'). Return all complex integers
5504 in registers (4 at most for '_Complex long long').
5506 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5507 integers are returned like floats of the same size, that is in
5508 registers up to 8 bytes and in memory otherwise. Return all
5509 vector floats in memory like structure and unions; note that
5510 they always have BLKmode like the latter. */
5511 return (TYPE_MODE (type) == BLKmode
5512 || TYPE_MODE (type) == TFmode
5513 || (TREE_CODE (type) == VECTOR_TYPE
5514 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5515 else
5516 /* Original SPARC 64-bit ABI says that structures and unions
5517 smaller than 32 bytes are returned in registers, as well as
5518 all other base types.
5520 Extended ABI (as implemented by the Sun compiler) says that all
5521 complex floats are returned in registers (8 FP registers at most
5522 for '_Complex long double'). Return all complex integers in
5523 registers (4 at most for '_Complex TItype').
5525 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5526 integers are returned like floats of the same size, that is in
5527 registers. Return all vector floats like structure and unions;
5528 note that they always have BLKmode like the latter. */
5529 return ((TYPE_MODE (type) == BLKmode
5530 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5533 /* Handle the TARGET_STRUCT_VALUE target hook.
5534 Return where to find the structure return value address. */
5536 static rtx
5537 sparc_struct_value_rtx (tree fndecl, int incoming)
5539 if (TARGET_ARCH64)
5540 return 0;
5541 else
5543 rtx mem;
5545 if (incoming)
5546 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5547 STRUCT_VALUE_OFFSET));
5548 else
5549 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5550 STRUCT_VALUE_OFFSET));
5552 /* Only follow the SPARC ABI for fixed-size structure returns.
5553 Variable-size structure returns are handled by the normal
5554 procedures in GCC. This is enabled by -mstd-struct-return. */
5555 if (incoming == 2
5556 && sparc_std_struct_return
5557 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5558 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5560 /* We must check and adjust the return address, since it is
5561 optional whether the return object is actually
5562 provided. */
5563 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5564 rtx scratch = gen_reg_rtx (SImode);
5565 rtx endlab = gen_label_rtx ();
5567 /* Calculate the return object size. */
5568 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5569 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5570 /* Construct a temporary return value. */
5571 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5573 /* Implement the SPARC 32-bit psABI callee return struct checking
5574 requirements:
5576 Fetch the instruction where we will return to and see if
5577 it's an unimp instruction (the most significant 10 bits
5578 will be zero). */
5579 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5580 plus_constant (ret_rtx, 8)));
5581 /* Assume the size is valid and pre-adjust */
5582 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5583 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5584 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5585 /* Assign the stack temporary:
5586 write the address of the memory pointed to by temp_val into
5587 the memory pointed to by mem. */
5588 emit_move_insn (mem, XEXP (temp_val, 0));
5589 emit_label (endlab);
5592 set_mem_alias_set (mem, struct_value_alias_set);
5593 return mem;
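/* Illustrative sketch, not part of the port: under the 32-bit psABI
   convention checked above, a caller of a struct-returning function
   may plant an "unimp <size>" word right after the call's delay slot
   (most significant 10 bits zero, size in the low bits), e.g.

	call	f		! %o7 = address of the call
	 nop			! delay slot
	unimp	12		! sizeof (struct) for a 12-byte result
	...			! execution resumes at %o7 + 12

   The callee loads the word at the normal return point %i7 + 8,
   compares it against the expected size, and returns 4 bytes further
   when it matches, which is what the pre-adjust/sub pair above
   implements. */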
5597 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5598 For v9, function return values are subject to the same rules as arguments,
5599 except that up to 32 bytes may be returned in registers. */
5602 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5604 /* Beware that the two values are swapped here wrt function_arg. */
5605 int regbase = (incoming_p
5606 ? SPARC_OUTGOING_INT_ARG_FIRST
5607 : SPARC_INCOMING_INT_ARG_FIRST);
5608 enum mode_class mclass = GET_MODE_CLASS (mode);
5609 int regno;
5611 /* Vector types deserve special treatment because they are polymorphic wrt
5612 their mode, depending upon whether VIS instructions are enabled. */
5613 if (type && TREE_CODE (type) == VECTOR_TYPE)
5615 HOST_WIDE_INT size = int_size_in_bytes (type);
5616 gcc_assert ((TARGET_ARCH32 && size <= 8)
5617 || (TARGET_ARCH64 && size <= 32));
5619 if (mode == BLKmode)
5620 return function_arg_vector_value (size,
5621 SPARC_FP_ARG_FIRST);
5622 else
5623 mclass = MODE_FLOAT;
5626 if (TARGET_ARCH64 && type)
5628 /* Structures up to 32 bytes in size are returned in registers. */
5629 if (TREE_CODE (type) == RECORD_TYPE)
5631 HOST_WIDE_INT size = int_size_in_bytes (type);
5632 gcc_assert (size <= 32);
5634 return function_arg_record_value (type, mode, 0, 1, regbase);
5637 /* Unions up to 32 bytes in size are returned in integer registers. */
5638 else if (TREE_CODE (type) == UNION_TYPE)
5640 HOST_WIDE_INT size = int_size_in_bytes (type);
5641 gcc_assert (size <= 32);
5643 return function_arg_union_value (size, mode, 0, regbase);
5646 /* Objects that require it are returned in FP registers. */
5647 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5648 ;
5650 /* All other aggregate types are returned in an integer register in a
5651 mode corresponding to the size of the type. */
5652 else if (AGGREGATE_TYPE_P (type))
5656 HOST_WIDE_INT size = int_size_in_bytes (type);
5657 gcc_assert (size <= 32);
5659 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5661 /* ??? We probably should have made the same ABI change in
5662 3.4.0 as the one we made for unions. The latter was
5663 required by the SCD though, while the former is not
5664 specified, so we favored compatibility and efficiency.
5666 Now we're stuck for aggregates larger than 16 bytes,
5667 because OImode vanished in the meantime. Let's not
5668 try to be unduly clever, and simply follow the ABI
5669 for unions in that case. */
5670 if (mode == BLKmode)
5671 return function_arg_union_value (size, mode, 0, regbase);
5672 else
5673 mclass = MODE_INT;
5676 /* This must match PROMOTE_FUNCTION_MODE. */
5677 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5678 mode = word_mode;
5681 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5682 regno = SPARC_FP_ARG_FIRST;
5683 else
5684 regno = regbase;
5686 return gen_rtx_REG (mode, regno);
5689 /* Do what is necessary for `va_start'. We look at the current function
5690 to determine if stdarg or varargs is used and return the address of
5691 the first unnamed parameter. */
5693 static rtx
5694 sparc_builtin_saveregs (void)
5696 int first_reg = crtl->args.info.words;
5697 rtx address;
5698 int regno;
5700 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5701 emit_move_insn (gen_rtx_MEM (word_mode,
5702 gen_rtx_PLUS (Pmode,
5703 frame_pointer_rtx,
5704 GEN_INT (FIRST_PARM_OFFSET (0)
5705 + (UNITS_PER_WORD
5706 * regno)))),
5707 gen_rtx_REG (word_mode,
5708 SPARC_INCOMING_INT_ARG_FIRST + regno));
5710 address = gen_rtx_PLUS (Pmode,
5711 frame_pointer_rtx,
5712 GEN_INT (FIRST_PARM_OFFSET (0)
5713 + UNITS_PER_WORD * first_reg));
5715 return address;
5718 /* Implement `va_start' for stdarg. */
5720 static void
5721 sparc_va_start (tree valist, rtx nextarg)
5723 nextarg = expand_builtin_saveregs ();
5724 std_expand_builtin_va_start (valist, nextarg);
5727 /* Implement `va_arg' for stdarg. */
5729 static tree
5730 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5731 gimple_seq *post_p)
5733 HOST_WIDE_INT size, rsize, align;
5734 tree addr, incr;
5735 bool indirect;
5736 tree ptrtype = build_pointer_type (type);
5738 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5740 indirect = true;
5741 size = rsize = UNITS_PER_WORD;
5742 align = 0;
5744 else
5746 indirect = false;
5747 size = int_size_in_bytes (type);
5748 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5749 align = 0;
5751 if (TARGET_ARCH64)
5753 /* For SPARC64, objects requiring 16-byte alignment get it. */
5754 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5755 align = 2 * UNITS_PER_WORD;
5757 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5758 are left-justified in their slots. */
5759 if (AGGREGATE_TYPE_P (type))
5761 if (size == 0)
5762 size = rsize = UNITS_PER_WORD;
5763 else
5764 size = rsize;
5769 incr = valist;
5770 if (align)
5772 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5773 size_int (align - 1));
5774 incr = fold_convert (sizetype, incr);
5775 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5776 size_int (-align));
5777 incr = fold_convert (ptr_type_node, incr);
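/* A worked example (added for illustration): with ALIGN == 16 and
   INCR == 40, the folds above compute (40 + 15) & -16 == 48, i.e. the
   argument pointer is rounded up to the next 16-byte boundary.  */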
5780 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5781 addr = incr;
5783 if (BYTES_BIG_ENDIAN && size < rsize)
5784 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5785 size_int (rsize - size));
5787 if (indirect)
5789 addr = fold_convert (build_pointer_type (ptrtype), addr);
5790 addr = build_va_arg_indirect_ref (addr);
5793 /* If the address isn't aligned properly for the type, we need a temporary.
5794 FIXME: This is inefficient; usually we could do this in registers. */
5795 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5797 tree tmp = create_tmp_var (type, "va_arg_tmp");
5798 tree dest_addr = build_fold_addr_expr (tmp);
5799 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5800 3, dest_addr, addr, size_int (rsize));
5801 TREE_ADDRESSABLE (tmp) = 1;
5802 gimplify_and_add (copy, pre_p);
5803 addr = dest_addr;
5806 else
5807 addr = fold_convert (ptrtype, addr);
5809 incr
5810 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5811 gimplify_assign (valist, incr, post_p);
5813 return build_va_arg_indirect_ref (addr);
5816 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5817 Specify whether the vector mode is supported by the hardware. */
5819 static bool
5820 sparc_vector_mode_supported_p (enum machine_mode mode)
5822 return TARGET_VIS && VECTOR_MODE_P (mode);
5825 /* Return the string to output an unconditional branch to LABEL, which is
5826 the operand number of the label.
5828 DEST is the destination insn (i.e. the label), INSN is the source. */
5830 const char *
5831 output_ubranch (rtx dest, int label, rtx insn)
5833 static char string[64];
5834 bool v9_form = false;
5835 char *p;
5837 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5839 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5840 - INSN_ADDRESSES (INSN_UID (insn)));
5841 /* Leave some instructions for "slop". */
5842 if (delta >= -260000 && delta < 260000)
5843 v9_form = true;
5846 if (v9_form)
5847 strcpy (string, "ba%*,pt\t%%xcc, ");
5848 else
5849 strcpy (string, "b%*\t");
5851 p = strchr (string, '\0');
5852 *p++ = '%';
5853 *p++ = 'l';
5854 *p++ = '0' + label;
5855 *p++ = '%';
5856 *p++ = '(';
5857 *p = '\0';
5859 return string;
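/* For illustration (not in the original source): with LABEL == 0 and an
   in-range target, the v9 form above is the template
   "ba%*,pt\t%%xcc, %l0%(", which final typically renders as
   "ba,pt  %xcc, .LC12", with '%(' adding a "nop" when nothing was found
   for the delay slot.  */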
5862 /* Return the string to output a conditional branch to LABEL, which is
5863 the operand number of the label. OP is the conditional expression.
5864 XEXP (OP, 0) is assumed to be a condition code register (integer or
5865 floating point) and its mode specifies what kind of comparison we made.
5867 DEST is the destination insn (i.e. the label), INSN is the source.
5869 REVERSED is nonzero if we should reverse the sense of the comparison.
5871 ANNUL is nonzero if we should generate an annulling branch. */
5873 const char *
5874 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5875 rtx insn)
5877 static char string[64];
5878 enum rtx_code code = GET_CODE (op);
5879 rtx cc_reg = XEXP (op, 0);
5880 enum machine_mode mode = GET_MODE (cc_reg);
5881 const char *labelno, *branch;
5882 int spaces = 8, far;
5883 char *p;
5885 /* v9 branches are limited to +-1MB.  If it is too far away,
5886 change
5888 bne,pt %xcc, .LC30
5890 to
5892 be,pn %xcc, .+12
5893 nop
5894 ba .LC30
5896 and
5898 fbne,a,pn %fcc2, .LC29
5900 to
5902 fbe,pt %fcc2, .+16
5903 nop
5904 ba .LC29  */
5906 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5907 if (reversed ^ far)
5909 /* Reversal of FP compares takes care of itself -- an ordered compare
5910 becomes an unordered compare and vice versa. */
5911 if (mode == CCFPmode || mode == CCFPEmode)
5912 code = reverse_condition_maybe_unordered (code);
5913 else
5914 code = reverse_condition (code);
5917 /* Start by writing the branch condition. */
5918 if (mode == CCFPmode || mode == CCFPEmode)
5920 switch (code)
5922 case NE:
5923 branch = "fbne";
5924 break;
5925 case EQ:
5926 branch = "fbe";
5927 break;
5928 case GE:
5929 branch = "fbge";
5930 break;
5931 case GT:
5932 branch = "fbg";
5933 break;
5934 case LE:
5935 branch = "fble";
5936 break;
5937 case LT:
5938 branch = "fbl";
5939 break;
5940 case UNORDERED:
5941 branch = "fbu";
5942 break;
5943 case ORDERED:
5944 branch = "fbo";
5945 break;
5946 case UNGT:
5947 branch = "fbug";
5948 break;
5949 case UNLT:
5950 branch = "fbul";
5951 break;
5952 case UNEQ:
5953 branch = "fbue";
5954 break;
5955 case UNGE:
5956 branch = "fbuge";
5957 break;
5958 case UNLE:
5959 branch = "fbule";
5960 break;
5961 case LTGT:
5962 branch = "fblg";
5963 break;
5965 default:
5966 gcc_unreachable ();
5969 /* ??? !v9: FP branches cannot be preceded by another floating point
5970 insn. Because there is currently no concept of pre-delay slots,
5971 we can fix this only by always emitting a nop before a floating
5972 point branch. */
5974 string[0] = '\0';
5975 if (! TARGET_V9)
5976 strcpy (string, "nop\n\t");
5977 strcat (string, branch);
5979 else
5981 switch (code)
5983 case NE:
5984 branch = "bne";
5985 break;
5986 case EQ:
5987 branch = "be";
5988 break;
5989 case GE:
5990 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5991 branch = "bpos";
5992 else
5993 branch = "bge";
5994 break;
5995 case GT:
5996 branch = "bg";
5997 break;
5998 case LE:
5999 branch = "ble";
6000 break;
6001 case LT:
6002 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6003 branch = "bneg";
6004 else
6005 branch = "bl";
6006 break;
6007 case GEU:
6008 branch = "bgeu";
6009 break;
6010 case GTU:
6011 branch = "bgu";
6012 break;
6013 case LEU:
6014 branch = "bleu";
6015 break;
6016 case LTU:
6017 branch = "blu";
6018 break;
6020 default:
6021 gcc_unreachable ();
6023 strcpy (string, branch);
6025 spaces -= strlen (branch);
6026 p = strchr (string, '\0');
6028 /* Now add the annulling, the label, and a possible nop. */
6029 if (annul && ! far)
6031 strcpy (p, ",a");
6032 p += 2;
6033 spaces -= 2;
6036 if (TARGET_V9)
6038 rtx note;
6039 int v8 = 0;
6041 if (! far && insn && INSN_ADDRESSES_SET_P ())
6043 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6044 - INSN_ADDRESSES (INSN_UID (insn)));
6045 /* Leave some instructions for "slop". */
6046 if (delta < -260000 || delta >= 260000)
6047 v8 = 1;
6050 if (mode == CCFPmode || mode == CCFPEmode)
6052 static char v9_fcc_labelno[] = "%%fccX, ";
6053 /* Set the char indicating the number of the fcc reg to use. */
6054 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6055 labelno = v9_fcc_labelno;
6056 if (v8)
6058 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6059 labelno = "";
6062 else if (mode == CCXmode || mode == CCX_NOOVmode)
6064 labelno = "%%xcc, ";
6065 gcc_assert (! v8);
6067 else
6069 labelno = "%%icc, ";
6070 if (v8)
6071 labelno = "";
6074 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6076 strcpy (p,
6077 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6078 ? ",pt" : ",pn");
6079 p += 3;
6080 spaces -= 3;
6083 else
6084 labelno = "";
6086 if (spaces > 0)
6087 *p++ = '\t';
6088 else
6089 *p++ = ' ';
6090 strcpy (p, labelno);
6091 p = strchr (p, '\0');
6092 if (far)
6094 strcpy (p, ".+12\n\t nop\n\tb\t");
6095 /* Skip the next insn if requested or
6096 if we know that it will be a nop. */
6097 if (annul || ! final_sequence)
6098 p[3] = '6';
6099 p += 14;
6101 *p++ = '%';
6102 *p++ = 'l';
6103 *p++ = label + '0';
6104 *p++ = '%';
6105 *p++ = '#';
6106 *p = '\0';
6108 return string;
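/* For illustration (not in the original source): for OP == (ne (reg:CCX)
   (const_int 0)), ANNUL set, LABEL == 1, an in-range target and a
   REG_BR_PROB note predicting the branch taken, the code above builds
   "bne,a,pt\t%%xcc, %l1%#", which final renders as something like
   "bne,a,pt  %xcc, .LC30".  */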
6111 /* Emit a library call comparison between floating point X and Y.
6112 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6113 Return the new operator to be used in the comparison sequence.
6115 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6116 values as arguments instead of the TFmode registers themselves;
6117 that is why we cannot call emit_float_lib_cmp. */
6119 enum rtx_code
6120 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6122 const char *qpfunc;
6123 rtx slot0, slot1, result, tem, tem2;
6124 enum machine_mode mode;
6125 enum rtx_code new_comparison;
6127 switch (comparison)
6129 case EQ:
6130 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6131 break;
6133 case NE:
6134 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6135 break;
6137 case GT:
6138 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6139 break;
6141 case GE:
6142 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6143 break;
6145 case LT:
6146 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6147 break;
6149 case LE:
6150 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6151 break;
6153 case ORDERED:
6154 case UNORDERED:
6155 case UNGT:
6156 case UNLT:
6157 case UNEQ:
6158 case UNGE:
6159 case UNLE:
6160 case LTGT:
6161 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6162 break;
6164 default:
6165 gcc_unreachable ();
6168 if (TARGET_ARCH64)
6170 if (MEM_P (x))
6171 slot0 = x;
6172 else
6174 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6175 emit_move_insn (slot0, x);
6178 if (MEM_P (y))
6179 slot1 = y;
6180 else
6182 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6183 emit_move_insn (slot1, y);
6186 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6187 DImode, 2,
6188 XEXP (slot0, 0), Pmode,
6189 XEXP (slot1, 0), Pmode);
6190 mode = DImode;
6192 else
6194 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6195 SImode, 2,
6196 x, TFmode, y, TFmode);
6197 mode = SImode;
6201 /* Immediately move the result of the libcall into a pseudo
6202 register so reload doesn't clobber the value if it needs
6203 the return register for a spill reg. */
6204 result = gen_reg_rtx (mode);
6205 emit_move_insn (result, hard_libcall_value (mode));
6207 switch (comparison)
6209 default:
6210 new_comparison = NE;
6211 emit_cmp_insn (result, const0_rtx, new_comparison, NULL_RTX, mode, 0);
6212 break;
6213 case ORDERED:
6214 case UNORDERED:
6215 new_comparison = (comparison == UNORDERED ? EQ : NE);
6216 emit_cmp_insn (result, GEN_INT(3), new_comparison, NULL_RTX, mode, 0);
6217 break;
6218 case UNGT:
6219 case UNGE:
6220 new_comparison = (comparison == UNGT ? GT : NE);
6221 emit_cmp_insn (result, const1_rtx, new_comparison, NULL_RTX, mode, 0);
6222 break;
6223 case UNLE:
6224 new_comparison = NE;
6225 emit_cmp_insn (result, const2_rtx, new_comparison, NULL_RTX, mode, 0);
6226 break;
6227 case UNLT:
6228 tem = gen_reg_rtx (mode);
6229 if (TARGET_ARCH32)
6230 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6231 else
6232 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6233 new_comparison = NE;
6234 emit_cmp_insn (tem, const0_rtx, new_comparison, NULL_RTX, mode, 0);
6235 break;
6236 case UNEQ:
6237 case LTGT:
6238 tem = gen_reg_rtx (mode);
6239 if (TARGET_ARCH32)
6240 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6241 else
6242 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6243 tem2 = gen_reg_rtx (mode);
6244 if (TARGET_ARCH32)
6245 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6246 else
6247 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6248 new_comparison = (comparison == UNEQ ? EQ : NE);
6249 emit_cmp_insn (tem2, const0_rtx, new_comparison, NULL_RTX, mode, 0);
6250 break;
6253 return new_comparison;
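/* The decoding above relies on the return-value convention of _Q_cmp /
   _Qp_cmp (spelled out here for illustration): 0 for equal, 1 for less,
   2 for greater and 3 for unordered.  Hence UNLT tests
   (result & 1) != 0, i.e. result in {1, 3}, and UNEQ tests
   ((result + 1) & 2) == 0, i.e. result in {0, 3}.  */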
6256 /* Generate an unsigned DImode to FP conversion. This is the same code
6257 optabs would emit if we didn't have TFmode patterns. */
6259 void
6260 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6262 rtx neglab, donelab, i0, i1, f0, in, out;
6264 out = operands[0];
6265 in = force_reg (DImode, operands[1]);
6266 neglab = gen_label_rtx ();
6267 donelab = gen_label_rtx ();
6268 i0 = gen_reg_rtx (DImode);
6269 i1 = gen_reg_rtx (DImode);
6270 f0 = gen_reg_rtx (mode);
6272 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6274 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6275 emit_jump_insn (gen_jump (donelab));
6276 emit_barrier ();
6278 emit_label (neglab);
6280 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6281 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6282 emit_insn (gen_iordi3 (i0, i0, i1));
6283 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6284 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6286 emit_label (donelab);
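/* In C terms, the negative path above computes (a sketch added for
   illustration):  f0 = (FLOAT) ((in >> 1) | (in & 1)); out = f0 + f0;
   or-ing the shifted-out bit back in (the "sticky bit") keeps the
   rounding of the final result correct even though IN is halved
   first.  */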
6289 /* Generate an FP to unsigned DImode conversion. This is the same code
6290 optabs would emit if we didn't have TFmode patterns. */
6292 void
6293 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6295 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6297 out = operands[0];
6298 in = force_reg (mode, operands[1]);
6299 neglab = gen_label_rtx ();
6300 donelab = gen_label_rtx ();
6301 i0 = gen_reg_rtx (DImode);
6302 i1 = gen_reg_rtx (DImode);
6303 limit = gen_reg_rtx (mode);
6304 f0 = gen_reg_rtx (mode);
6306 emit_move_insn (limit,
6307 CONST_DOUBLE_FROM_REAL_VALUE (
6308 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6309 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6311 emit_insn (gen_rtx_SET (VOIDmode,
6312 out,
6313 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6314 emit_jump_insn (gen_jump (donelab));
6315 emit_barrier ();
6317 emit_label (neglab);
6319 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6320 emit_insn (gen_rtx_SET (VOIDmode,
6321 i0,
6322 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6323 emit_insn (gen_movdi (i1, const1_rtx));
6324 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6325 emit_insn (gen_xordi3 (out, i0, i1));
6327 emit_label (donelab);
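/* In C terms (a sketch added for illustration), the expansion above is:

     out = in < 0x1p63 ? (DI) in : (DI) (in - 0x1p63) ^ ((DI) 1 << 63);

   i.e. values too large for a signed fix are biased down by 2^63 before
   the conversion and the sign bit is patched up afterwards.  */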
6330 /* Return the string to output a conditional branch to LABEL, testing
6331 register REG. LABEL is the operand number of the label; REG is the
6332 operand number of the reg. OP is the conditional expression. The mode
6333 of REG says what kind of comparison we made.
6335 DEST is the destination insn (i.e. the label), INSN is the source.
6337 REVERSED is nonzero if we should reverse the sense of the comparison.
6339 ANNUL is nonzero if we should generate an annulling branch. */
6341 const char *
6342 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6343 int annul, rtx insn)
6345 static char string[64];
6346 enum rtx_code code = GET_CODE (op);
6347 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6348 rtx note;
6349 int far;
6350 char *p;
6352 /* Branches on registers are limited to +-128KB.  If it is too far away,
6353 change
6355 brnz,pt %g1, .LC30
6357 to
6359 brz,pn %g1, .+12
6360 nop
6361 ba,pt %xcc, .LC30
6363 and
6365 brgez,a,pn %o1, .LC29
6367 to
6369 brlz,pt %o1, .+16
6370 nop
6371 ba,pt %xcc, .LC29  */
6373 far = get_attr_length (insn) >= 3;
6375 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6376 if (reversed ^ far)
6377 code = reverse_condition (code);
6379 /* Only 64 bit versions of these instructions exist. */
6380 gcc_assert (mode == DImode);
6382 /* Start by writing the branch condition. */
6384 switch (code)
6386 case NE:
6387 strcpy (string, "brnz");
6388 break;
6390 case EQ:
6391 strcpy (string, "brz");
6392 break;
6394 case GE:
6395 strcpy (string, "brgez");
6396 break;
6398 case LT:
6399 strcpy (string, "brlz");
6400 break;
6402 case LE:
6403 strcpy (string, "brlez");
6404 break;
6406 case GT:
6407 strcpy (string, "brgz");
6408 break;
6410 default:
6411 gcc_unreachable ();
6414 p = strchr (string, '\0');
6416 /* Now add the annulling, reg, label, and nop. */
6417 if (annul && ! far)
6419 strcpy (p, ",a");
6420 p += 2;
6423 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6425 strcpy (p,
6426 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6427 ? ",pt" : ",pn");
6428 p += 3;
6431 *p = p < string + 8 ? '\t' : ' ';
6432 p++;
6433 *p++ = '%';
6434 *p++ = '0' + reg;
6435 *p++ = ',';
6436 *p++ = ' ';
6437 if (far)
6439 int veryfar = 1, delta;
6441 if (INSN_ADDRESSES_SET_P ())
6443 delta = (INSN_ADDRESSES (INSN_UID (dest))
6444 - INSN_ADDRESSES (INSN_UID (insn)));
6445 /* Leave some instructions for "slop". */
6446 if (delta >= -260000 && delta < 260000)
6447 veryfar = 0;
6450 strcpy (p, ".+12\n\t nop\n\t");
6451 /* Skip the next insn if requested or
6452 if we know that it will be a nop. */
6453 if (annul || ! final_sequence)
6454 p[3] = '6';
6455 p += 12;
6456 if (veryfar)
6458 strcpy (p, "b\t");
6459 p += 2;
6461 else
6463 strcpy (p, "ba,pt\t%%xcc, ");
6464 p += 13;
6467 *p++ = '%';
6468 *p++ = 'l';
6469 *p++ = '0' + label;
6470 *p++ = '%';
6471 *p++ = '#';
6472 *p = '\0';
6474 return string;
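/* For illustration (not in the original source): for OP == (ne (reg:DI)
   (const_int 0)) with REG == 1, LABEL == 0, an in-range target and a
   taken prediction, the code above yields "brnz,pt\t%1, %l0%#", which
   final renders as something like "brnz,pt  %o1, .LC30".  */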
6477 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6478 Such instructions cannot be used in the delay slot of the return insn on V9.
6479 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6480 */
6482 static int
6483 epilogue_renumber (register rtx *where, int test)
6485 register const char *fmt;
6486 register int i;
6487 register enum rtx_code code;
6489 if (*where == 0)
6490 return 0;
6492 code = GET_CODE (*where);
6494 switch (code)
6496 case REG:
6497 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6498 return 1;
6499 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6500 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6501 case SCRATCH:
6502 case CC0:
6503 case PC:
6504 case CONST_INT:
6505 case CONST_DOUBLE:
6506 return 0;
6508 /* Do not replace the frame pointer with the stack pointer because
6509 it can cause the delayed instruction to load below the stack.
6510 This occurs when instructions like:
6512 (set (reg/i:SI 24 %i0)
6513 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6514 (const_int -20 [0xffffffec])) 0))
6516 are in the return delayed slot. */
6517 case PLUS:
6518 if (GET_CODE (XEXP (*where, 0)) == REG
6519 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6520 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6521 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6522 return 1;
6523 break;
6525 case MEM:
6526 if (SPARC_STACK_BIAS
6527 && GET_CODE (XEXP (*where, 0)) == REG
6528 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6529 return 1;
6530 break;
6532 default:
6533 break;
6536 fmt = GET_RTX_FORMAT (code);
6538 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6540 if (fmt[i] == 'E')
6542 register int j;
6543 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6544 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6545 return 1;
6547 else if (fmt[i] == 'e'
6548 && epilogue_renumber (&(XEXP (*where, i)), test))
6549 return 1;
6551 return 0;
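/* Example (added for illustration): with TEST == 0 the walk above turns
   (set (reg:SI 24 %i0) (reg:SI 25 %i1)) into
   (set (reg:SI 8 %o0) (reg:SI 9 %o1)), since OUTGOING_REGNO maps the
   incoming registers 24..31 (%i0-%i7) onto 8..15 (%o0-%o7); any
   occurrence of %l0-%l7 or %o0-%o7 makes it return 1 instead.  */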
6554 /* Leaf functions and non-leaf functions have different needs. */
6556 static const int
6557 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6559 static const int
6560 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6562 static const int *const reg_alloc_orders[] = {
6563 reg_leaf_alloc_order,
6564 reg_nonleaf_alloc_order};
6566 void
6567 order_regs_for_local_alloc (void)
6569 static int last_order_nonleaf = 1;
6571 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6573 last_order_nonleaf = !last_order_nonleaf;
6574 memcpy ((char *) reg_alloc_order,
6575 (const char *) reg_alloc_orders[last_order_nonleaf],
6576 FIRST_PSEUDO_REGISTER * sizeof (int));
6580 /* Return 1 if REG and MEM are legitimate enough to allow the various
6581 mem<-->reg splits to be run. */
6583 int
6584 sparc_splitdi_legitimate (rtx reg, rtx mem)
6586 /* Punt if we are here by mistake. */
6587 gcc_assert (reload_completed);
6589 /* We must have an offsettable memory reference. */
6590 if (! offsettable_memref_p (mem))
6591 return 0;
6593 /* If we have legitimate args for ldd/std, we do not want
6594 the split to happen. */
6595 if ((REGNO (reg) % 2) == 0
6596 && mem_min_alignment (mem, 8))
6597 return 0;
6599 /* Success. */
6600 return 1;
6603 /* Return 1 if x and y are some kind of REG and they refer to
6604 different hard registers. This test is guaranteed to be
6605 run after reload. */
6607 int
6608 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6610 if (GET_CODE (x) != REG)
6611 return 0;
6612 if (GET_CODE (y) != REG)
6613 return 0;
6614 if (REGNO (x) == REGNO (y))
6615 return 0;
6616 return 1;
6619 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6620 This makes them candidates for using ldd and std insns.
6622 Note reg1 and reg2 *must* be hard registers. */
6624 int
6625 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6627 /* We might have been passed a SUBREG. */
6628 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6629 return 0;
6631 if (REGNO (reg1) % 2 != 0)
6632 return 0;
6634 /* Integer ldd is deprecated in SPARC V9. */
6635 if (TARGET_V9 && REGNO (reg1) < 32)
6636 return 0;
6638 return (REGNO (reg1) == REGNO (reg2) - 1);
6641 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6642 an ldd or std insn.
6644 This can only happen when addr1 and addr2, the addresses in mem1
6645 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6646 addr1 must also be aligned on a 64-bit boundary.
6648 Also, if dependent_reg_rtx is not null, it must not be used to
6649 compute the address for mem1, i.e. we cannot optimize a sequence
6650 like:
6651 ld [%o0], %o0
6652 ld [%o0 + 4], %o1
6653 to
6654 ldd [%o0], %o0
6655 nor:
6656 ld [%g3 + 4], %g3
6657 ld [%g3], %g2
6658 to
6659 ldd [%g3], %g2
6661 But, note that the transformation from:
6662 ld [%g2 + 4], %g3
6663 ld [%g2], %g2
6664 to
6665 ldd [%g2], %g2
6666 is perfectly fine. Thus, the peephole2 patterns always pass us
6667 the destination register of the first load, never the second one.
6669 For stores we don't have a similar problem, so dependent_reg_rtx is
6670 NULL_RTX. */
6672 int
6673 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6675 rtx addr1, addr2;
6676 unsigned int reg1;
6677 HOST_WIDE_INT offset1;
6679 /* The mems cannot be volatile. */
6680 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6681 return 0;
6683 /* MEM1 should be aligned on a 64-bit boundary. */
6684 if (MEM_ALIGN (mem1) < 64)
6685 return 0;
6687 addr1 = XEXP (mem1, 0);
6688 addr2 = XEXP (mem2, 0);
6690 /* Extract a register number and offset (if used) from the first addr. */
6691 if (GET_CODE (addr1) == PLUS)
6693 /* If not a REG, return zero. */
6694 if (GET_CODE (XEXP (addr1, 0)) != REG)
6695 return 0;
6696 else
6698 reg1 = REGNO (XEXP (addr1, 0));
6699 /* The offset must be constant! */
6700 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6701 return 0;
6702 offset1 = INTVAL (XEXP (addr1, 1));
6705 else if (GET_CODE (addr1) != REG)
6706 return 0;
6707 else
6709 reg1 = REGNO (addr1);
6710 /* This was a simple (mem (reg)) expression. Offset is 0. */
6711 offset1 = 0;
6714 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6715 if (GET_CODE (addr2) != PLUS)
6716 return 0;
6718 if (GET_CODE (XEXP (addr2, 0)) != REG
6719 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6720 return 0;
6722 if (reg1 != REGNO (XEXP (addr2, 0)))
6723 return 0;
6725 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6726 return 0;
6728 /* The first offset must be evenly divisible by 8 to ensure the
6729 address is 64-bit aligned. */
6730 if (offset1 % 8 != 0)
6731 return 0;
6733 /* The offset for the second addr must be 4 more than the first addr. */
6734 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6735 return 0;
6737 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6738 instructions. */
6739 return 1;
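/* Example (added for illustration): [%o0 + 8] and [%o0 + 12] pass all
   of the checks above, whereas the pair [%o0 + 4] / [%o0 + 8] is
   rejected because the first offset is not a multiple of 8.  */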
6742 /* Return 1 if reg is a pseudo, or is the first register in
6743 a hard register pair. This makes it suitable for use in
6744 ldd and std insns. */
6746 int
6747 register_ok_for_ldd (rtx reg)
6749 /* We might have been passed a SUBREG. */
6750 if (!REG_P (reg))
6751 return 0;
6753 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6754 return (REGNO (reg) % 2 == 0);
6756 return 1;
6759 /* Return 1 if OP is a memory whose address is known to be
6760 aligned to 8-byte boundary, or a pseudo during reload.
6761 This makes it suitable for use in ldd and std insns. */
6763 int
6764 memory_ok_for_ldd (rtx op)
6766 if (MEM_P (op))
6768 /* In 64-bit mode, we assume that the address is word-aligned. */
6769 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
6770 return 0;
6772 if ((reload_in_progress || reload_completed)
6773 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
6774 return 0;
6776 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
6778 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
6779 return 0;
6781 else
6782 return 0;
6784 return 1;
6787 /* Print operand X (an rtx) in assembler syntax to file FILE.
6788 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6789 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6791 void
6792 print_operand (FILE *file, rtx x, int code)
6794 switch (code)
6796 case '#':
6797 /* Output an insn in a delay slot. */
6798 if (final_sequence)
6799 sparc_indent_opcode = 1;
6800 else
6801 fputs ("\n\t nop", file);
6802 return;
6803 case '*':
6804 /* Output an annul flag if there's nothing for the delay slot and we
6805 are optimizing. This is always used with '(' below.
6806 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6807 this is a dbx bug. So, we only do this when optimizing.
6808 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6809 Always emit a nop in case the next instruction is a branch. */
6810 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6811 fputs (",a", file);
6812 return;
6813 case '(':
6814 /* Output a 'nop' if there's nothing for the delay slot and we are
6815 not optimizing. This is always used with '*' above. */
6816 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6817 fputs ("\n\t nop", file);
6818 else if (final_sequence)
6819 sparc_indent_opcode = 1;
6820 return;
6821 case ')':
6822 /* Output the right displacement from the saved PC on function return.
6823 The caller may have placed an "unimp" insn immediately after the call
6824 so we have to account for it. This insn is used in the 32-bit ABI
6825 when calling a function that returns a non zero-sized structure. The
6826 64-bit ABI doesn't have it. Be careful to have this test be the same
6827 as that used on the call. The exception here is that when
6828 sparc_std_struct_return is enabled, the psABI is followed exactly
6829 and the adjustment is made by the code in sparc_struct_value_rtx.
6830 The call emitted is the same when sparc_std_struct_return is
6831 present. */
6832 if (! TARGET_ARCH64
6833 && cfun->returns_struct
6834 && ! sparc_std_struct_return
6835 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6836 == INTEGER_CST)
6837 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6838 fputs ("12", file);
6839 else
6840 fputc ('8', file);
6841 return;
6842 case '_':
6843 /* Output the Embedded Medium/Anywhere code model base register. */
6844 fputs (EMBMEDANY_BASE_REG, file);
6845 return;
6846 case '&':
6847 /* Print some local dynamic TLS name. */
6848 assemble_name (file, get_some_local_dynamic_name ());
6849 return;
6851 case 'Y':
6852 /* Adjust the operand to take into account a RESTORE operation. */
6853 if (GET_CODE (x) == CONST_INT)
6854 break;
6855 else if (GET_CODE (x) != REG)
6856 output_operand_lossage ("invalid %%Y operand");
6857 else if (REGNO (x) < 8)
6858 fputs (reg_names[REGNO (x)], file);
6859 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6860 fputs (reg_names[REGNO (x)-16], file);
6861 else
6862 output_operand_lossage ("invalid %%Y operand");
6863 return;
6864 case 'L':
6865 /* Print out the low order register name of a register pair. */
6866 if (WORDS_BIG_ENDIAN)
6867 fputs (reg_names[REGNO (x)+1], file);
6868 else
6869 fputs (reg_names[REGNO (x)], file);
6870 return;
6871 case 'H':
6872 /* Print out the high order register name of a register pair. */
6873 if (WORDS_BIG_ENDIAN)
6874 fputs (reg_names[REGNO (x)], file);
6875 else
6876 fputs (reg_names[REGNO (x)+1], file);
6877 return;
6878 case 'R':
6879 /* Print out the second register name of a register pair or quad.
6880 I.e., R (%o0) => %o1. */
6881 fputs (reg_names[REGNO (x)+1], file);
6882 return;
6883 case 'S':
6884 /* Print out the third register name of a register quad.
6885 I.e., S (%o0) => %o2. */
6886 fputs (reg_names[REGNO (x)+2], file);
6887 return;
6888 case 'T':
6889 /* Print out the fourth register name of a register quad.
6890 I.e., T (%o0) => %o3. */
6891 fputs (reg_names[REGNO (x)+3], file);
6892 return;
6893 case 'x':
6894 /* Print a condition code register. */
6895 if (REGNO (x) == SPARC_ICC_REG)
6897 /* We don't handle CC[X]_NOOVmode because they're not supposed
6898 to occur here. */
6899 if (GET_MODE (x) == CCmode)
6900 fputs ("%icc", file);
6901 else if (GET_MODE (x) == CCXmode)
6902 fputs ("%xcc", file);
6903 else
6904 gcc_unreachable ();
6906 else
6907 /* %fccN register */
6908 fputs (reg_names[REGNO (x)], file);
6909 return;
6910 case 'm':
6911 /* Print the operand's address only. */
6912 output_address (XEXP (x, 0));
6913 return;
6914 case 'r':
6915 /* In this case we need a register. Use %g0 if the
6916 operand is const0_rtx. */
6917 if (x == const0_rtx
6918 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6920 fputs ("%g0", file);
6921 return;
6923 else
6924 break;
6926 case 'A':
6927 switch (GET_CODE (x))
6929 case IOR: fputs ("or", file); break;
6930 case AND: fputs ("and", file); break;
6931 case XOR: fputs ("xor", file); break;
6932 default: output_operand_lossage ("invalid %%A operand");
6934 return;
6936 case 'B':
6937 switch (GET_CODE (x))
6939 case IOR: fputs ("orn", file); break;
6940 case AND: fputs ("andn", file); break;
6941 case XOR: fputs ("xnor", file); break;
6942 default: output_operand_lossage ("invalid %%B operand");
6944 return;
6946 /* These are used by the conditional move instructions. */
6947 case 'c' :
6948 case 'C':
6950 enum rtx_code rc = GET_CODE (x);
6952 if (code == 'c')
6954 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6955 if (mode == CCFPmode || mode == CCFPEmode)
6956 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6957 else
6958 rc = reverse_condition (GET_CODE (x));
6960 switch (rc)
6962 case NE: fputs ("ne", file); break;
6963 case EQ: fputs ("e", file); break;
6964 case GE: fputs ("ge", file); break;
6965 case GT: fputs ("g", file); break;
6966 case LE: fputs ("le", file); break;
6967 case LT: fputs ("l", file); break;
6968 case GEU: fputs ("geu", file); break;
6969 case GTU: fputs ("gu", file); break;
6970 case LEU: fputs ("leu", file); break;
6971 case LTU: fputs ("lu", file); break;
6972 case LTGT: fputs ("lg", file); break;
6973 case UNORDERED: fputs ("u", file); break;
6974 case ORDERED: fputs ("o", file); break;
6975 case UNLT: fputs ("ul", file); break;
6976 case UNLE: fputs ("ule", file); break;
6977 case UNGT: fputs ("ug", file); break;
6978 case UNGE: fputs ("uge", file); break;
6979 case UNEQ: fputs ("ue", file); break;
6980 default: output_operand_lossage (code == 'c'
6981 ? "invalid %%c operand"
6982 : "invalid %%C operand");
6984 return;
6987 /* These are used by the movr instruction pattern. */
6988 case 'd':
6989 case 'D':
6991 enum rtx_code rc = (code == 'd'
6992 ? reverse_condition (GET_CODE (x))
6993 : GET_CODE (x));
6994 switch (rc)
6996 case NE: fputs ("ne", file); break;
6997 case EQ: fputs ("e", file); break;
6998 case GE: fputs ("gez", file); break;
6999 case LT: fputs ("lz", file); break;
7000 case LE: fputs ("lez", file); break;
7001 case GT: fputs ("gz", file); break;
7002 default: output_operand_lossage (code == 'd'
7003 ? "invalid %%d operand"
7004 : "invalid %%D operand");
7006 return;
7009 case 'b':
7011 /* Print a sign-extended character. */
7012 int i = trunc_int_for_mode (INTVAL (x), QImode);
7013 fprintf (file, "%d", i);
7014 return;
7017 case 'f':
7018 /* Operand must be a MEM; write its address. */
7019 if (GET_CODE (x) != MEM)
7020 output_operand_lossage ("invalid %%f operand");
7021 output_address (XEXP (x, 0));
7022 return;
7024 case 's':
7026 /* Print a sign-extended 32-bit value. */
7027 HOST_WIDE_INT i;
7028 if (GET_CODE(x) == CONST_INT)
7029 i = INTVAL (x);
7030 else if (GET_CODE(x) == CONST_DOUBLE)
7031 i = CONST_DOUBLE_LOW (x);
7032 else
7034 output_operand_lossage ("invalid %%s operand");
7035 return;
7037 i = trunc_int_for_mode (i, SImode);
7038 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7039 return;
7042 case 0:
7043 /* Do nothing special. */
7044 break;
7046 default:
7047 /* Undocumented flag. */
7048 output_operand_lossage ("invalid operand output code");
7051 if (GET_CODE (x) == REG)
7052 fputs (reg_names[REGNO (x)], file);
7053 else if (GET_CODE (x) == MEM)
7055 fputc ('[', file);
7056 /* Poor Sun assembler doesn't understand absolute addressing. */
7057 if (CONSTANT_P (XEXP (x, 0)))
7058 fputs ("%g0+", file);
7059 output_address (XEXP (x, 0));
7060 fputc (']', file);
7062 else if (GET_CODE (x) == HIGH)
7064 fputs ("%hi(", file);
7065 output_addr_const (file, XEXP (x, 0));
7066 fputc (')', file);
7068 else if (GET_CODE (x) == LO_SUM)
7070 print_operand (file, XEXP (x, 0), 0);
7071 if (TARGET_CM_MEDMID)
7072 fputs ("+%l44(", file);
7073 else
7074 fputs ("+%lo(", file);
7075 output_addr_const (file, XEXP (x, 1));
7076 fputc (')', file);
7078 else if (GET_CODE (x) == CONST_DOUBLE
7079 && (GET_MODE (x) == VOIDmode
7080 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7082 if (CONST_DOUBLE_HIGH (x) == 0)
7083 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7084 else if (CONST_DOUBLE_HIGH (x) == -1
7085 && CONST_DOUBLE_LOW (x) < 0)
7086 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7087 else
7088 output_operand_lossage ("long long constant not a valid immediate operand");
7090 else if (GET_CODE (x) == CONST_DOUBLE)
7091 output_operand_lossage ("floating point constant not a valid immediate operand");
7092 else { output_addr_const (file, x); }
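/* A few worked examples (added for illustration): "%r2" prints "%g0"
   when operand 2 is const0_rtx; for a DImode value in the register pair
   %o0/%o1 (WORDS_BIG_ENDIAN), "%H0" prints "%o0" and "%L0" prints
   "%o1"; "%m0" prints only the address of a MEM operand, without the
   surrounding brackets.  */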
7095 /* Target hook for assembling integer objects. The sparc version has
7096 special handling for aligned DI-mode objects. */
7098 static bool
7099 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7101 /* ??? We only output .xword's for symbols and only then in environments
7102 where the assembler can handle them. */
7103 if (aligned_p && size == 8
7104 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7106 if (TARGET_V9)
7108 assemble_integer_with_op ("\t.xword\t", x);
7109 return true;
7111 else
7113 assemble_aligned_integer (4, const0_rtx);
7114 assemble_aligned_integer (4, x);
7115 return true;
7118 return default_assemble_integer (x, size, aligned_p);
7121 /* Return the value of a code used in the .proc pseudo-op that says
7122 what kind of result this function returns. For non-C types, we pick
7123 the closest C type. */
7125 #ifndef SHORT_TYPE_SIZE
7126 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7127 #endif
7129 #ifndef INT_TYPE_SIZE
7130 #define INT_TYPE_SIZE BITS_PER_WORD
7131 #endif
7133 #ifndef LONG_TYPE_SIZE
7134 #define LONG_TYPE_SIZE BITS_PER_WORD
7135 #endif
7137 #ifndef LONG_LONG_TYPE_SIZE
7138 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7139 #endif
7141 #ifndef FLOAT_TYPE_SIZE
7142 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7143 #endif
7145 #ifndef DOUBLE_TYPE_SIZE
7146 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7147 #endif
7149 #ifndef LONG_DOUBLE_TYPE_SIZE
7150 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7151 #endif
7153 unsigned long
7154 sparc_type_code (register tree type)
7156 register unsigned long qualifiers = 0;
7157 register unsigned shift;
7159 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7160 setting more, since some assemblers will give an error for this. Also,
7161 we must be careful to avoid shifts of 32 bits or more, since those
7162 give unpredictable results. */
7164 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7166 switch (TREE_CODE (type))
7168 case ERROR_MARK:
7169 return qualifiers;
7171 case ARRAY_TYPE:
7172 qualifiers |= (3 << shift);
7173 break;
7175 case FUNCTION_TYPE:
7176 case METHOD_TYPE:
7177 qualifiers |= (2 << shift);
7178 break;
7180 case POINTER_TYPE:
7181 case REFERENCE_TYPE:
7182 case OFFSET_TYPE:
7183 qualifiers |= (1 << shift);
7184 break;
7186 case RECORD_TYPE:
7187 return (qualifiers | 8);
7189 case UNION_TYPE:
7190 case QUAL_UNION_TYPE:
7191 return (qualifiers | 9);
7193 case ENUMERAL_TYPE:
7194 return (qualifiers | 10);
7196 case VOID_TYPE:
7197 return (qualifiers | 16);
7199 case INTEGER_TYPE:
7200 /* If this is a range type, consider it to be the underlying
7201 type. */
7202 if (TREE_TYPE (type) != 0)
7203 break;
7205 /* Carefully distinguish all the standard types of C,
7206 without messing up if the language is not C. We do this by
7207 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7208 look at both the names and the above fields, but that's redundant.
7209 Any type whose size is between two C types will be considered
7210 to be the wider of the two types. Also, we do not have a
7211 special code to use for "long long", so anything wider than
7212 long is treated the same. Note that we can't distinguish
7213 between "int" and "long" in this code if they are the same
7214 size, but that's fine, since neither can the assembler. */
7216 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7217 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7219 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7220 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7222 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7223 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7225 else
7226 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7228 case REAL_TYPE:
7229 /* If this is a range type, consider it to be the underlying
7230 type. */
7231 if (TREE_TYPE (type) != 0)
7232 break;
7234 /* Carefully distinguish all the standard types of C,
7235 without messing up if the language is not C. */
7237 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7238 return (qualifiers | 6);
7240 else
7241 return (qualifiers | 7);
7243 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7244 /* ??? We need to distinguish between double and float complex types,
7245 but I don't know how yet because I can't reach this code from
7246 existing front-ends. */
7247 return (qualifiers | 7); /* Who knows? */
7249 case VECTOR_TYPE:
7250 case BOOLEAN_TYPE: /* Boolean truth value type. */
7251 case LANG_TYPE: /* ? */
7252 return qualifiers;
7254 default:
7255 gcc_unreachable (); /* Not a type! */
7259 return qualifiers;
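/* Worked examples (added for illustration): for plain "unsigned int"
   the loop returns 14 on its first iteration; for "int *" the first
   iteration adds the pointer qualifier (1 << 6) and the second returns
   (1 << 6) | 4 == 0x44; for "int **" the result is
   (1 << 6) | (1 << 8) | 4 == 0x144.  */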
7262 /* Nested function support. */
7264 /* Emit RTL insns to initialize the variable parts of a trampoline.
7265 FNADDR is an RTX for the address of the function's pure code.
7266 CXT is an RTX for the static chain value for the function.
7268 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7269 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7270 (to store insns). This is a bit excessive. Perhaps a different
7271 mechanism would be better here.
7273 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7275 void
7276 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7278 /* SPARC 32-bit trampoline:
7280 sethi %hi(fn), %g1
7281 sethi %hi(static), %g2
7282 jmp %g1+%lo(fn)
7283 or %g2, %lo(static), %g2
7285 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7286 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7289 emit_move_insn
7290 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7291 expand_binop (SImode, ior_optab,
7292 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7293 size_int (10), 0, 1),
7294 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7295 NULL_RTX, 1, OPTAB_DIRECT));
7297 emit_move_insn
7298 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7299 expand_binop (SImode, ior_optab,
7300 expand_shift (RSHIFT_EXPR, SImode, cxt,
7301 size_int (10), 0, 1),
7302 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7303 NULL_RTX, 1, OPTAB_DIRECT));
7305 emit_move_insn
7306 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7307 expand_binop (SImode, ior_optab,
7308 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7309 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7310 NULL_RTX, 1, OPTAB_DIRECT));
7312 emit_move_insn
7313 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7314 expand_binop (SImode, ior_optab,
7315 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7316 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7317 NULL_RTX, 1, OPTAB_DIRECT));
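/* A worked example (added for illustration): for FNADDR == 0x12345678
   the first word is 0x03000000 | (0x12345678 >> 10) == 0x03048d15,
   i.e. "sethi %hi(0x12345678), %g1", and the third word is
   0x81c06000 | (0x12345678 & 0x3ff) == 0x81c06278, i.e.
   "jmp %g1 + 0x278".  */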
7319 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7320 aligned on a 16 byte boundary so one flush clears it all. */
7321 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7322 if (sparc_cpu != PROCESSOR_ULTRASPARC
7323 && sparc_cpu != PROCESSOR_ULTRASPARC3
7324 && sparc_cpu != PROCESSOR_NIAGARA
7325 && sparc_cpu != PROCESSOR_NIAGARA2)
7326 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7327 plus_constant (tramp, 8)))));
7329 /* Call __enable_execute_stack after writing onto the stack to make sure
7330 the stack address is accessible. */
7331 #ifdef ENABLE_EXECUTE_STACK
7332 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7333 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7334 #endif
7338 /* The 64-bit version is simpler because it makes more sense to load the
7339 values as "immediate" data out of the trampoline. It's also easier since
7340 we can read the PC without clobbering a register. */
7342 void
7343 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7345 /* SPARC 64-bit trampoline:
7347 rd %pc, %g1
7348 ldx [%g1+24], %g5
7349 jmp %g5
7350 ldx [%g1+16], %g5
7351 +16 bytes data
7354 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7355 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7356 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7357 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7358 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7359 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7360 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7361 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7362 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7363 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7364 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7366 if (sparc_cpu != PROCESSOR_ULTRASPARC
7367 && sparc_cpu != PROCESSOR_ULTRASPARC3
7368 && sparc_cpu != PROCESSOR_NIAGARA
7369 && sparc_cpu != PROCESSOR_NIAGARA2)
7370 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7372 /* Call __enable_execute_stack after writing onto the stack to make sure
7373 the stack address is accessible. */
7374 #ifdef ENABLE_EXECUTE_STACK
7375 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7376 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7377 #endif
7380 /* Adjust the cost of a scheduling dependency. Return the new cost of
7381 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7383 static int
7384 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7386 enum attr_type insn_type;
7388 if (! recog_memoized (insn))
7389 return 0;
7391 insn_type = get_attr_type (insn);
7393 if (REG_NOTE_KIND (link) == 0)
7395 /* Data dependency; DEP_INSN writes a register that INSN reads some
7396 cycles later. */
7398 /* If a load, then the dependence must be on the memory address;
7399 add an extra "cycle". Note that the cost could be two cycles
7400 if the reg was written late in an instruction group; we cannot tell
7401 here. */
7402 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7403 return cost + 3;
7405 /* Get the delay only if the address of the store is the dependence. */
7406 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7408 rtx pat = PATTERN(insn);
7409 rtx dep_pat = PATTERN (dep_insn);
7411 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7412 return cost; /* This should not happen! */
7414 /* The dependency between the two instructions was on the data that
7415 is being stored. Assume that this implies that the address of the
7416 store is not dependent. */
7417 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7418 return cost;
7420 return cost + 3; /* An approximation. */
7423 /* A shift instruction cannot receive its data from an instruction
7424 in the same cycle; add a one cycle penalty. */
7425 if (insn_type == TYPE_SHIFT)
7426 return cost + 3; /* Split before cascade into shift. */
7428 else
7430 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7431 INSN writes some cycles later. */
7433 /* These are only significant for the fpu unit; writing a fp reg before
7434 the fpu has finished with it stalls the processor. */
7436 /* Reusing an integer register causes no problems. */
7437 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7438 return 0;
7441 return cost;
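/* Example (added for illustration): for "add %o1, %o2, %o3" followed by
   "ld [%o3], %o4", the data dependency feeds the load's address, so the
   TYPE_LOAD case above charges cost + 3.  */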
7444 static int
7445 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7447 enum attr_type insn_type, dep_type;
7448 rtx pat = PATTERN(insn);
7449 rtx dep_pat = PATTERN (dep_insn);
7451 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7452 return cost;
7454 insn_type = get_attr_type (insn);
7455 dep_type = get_attr_type (dep_insn);
7457 switch (REG_NOTE_KIND (link))
7459 case 0:
7460 /* Data dependency; DEP_INSN writes a register that INSN reads some
7461 cycles later. */
7463 switch (insn_type)
7465 case TYPE_STORE:
7466 case TYPE_FPSTORE:
7467 /* Get the delay iff the address of the store is the dependence. */
7468 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7469 return cost;
7471 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7472 return cost;
7473 return cost + 3;
7475 case TYPE_LOAD:
7476 case TYPE_SLOAD:
7477 case TYPE_FPLOAD:
7478 /* If a load, then the dependence must be on the memory address. If
7479 the addresses aren't equal, then it might be a false dependency. */
7480 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7482 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7483 || GET_CODE (SET_DEST (dep_pat)) != MEM
7484 || GET_CODE (SET_SRC (pat)) != MEM
7485 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7486 XEXP (SET_SRC (pat), 0)))
7487 return cost + 2;
7489 return cost + 8;
7491 break;
7493 case TYPE_BRANCH:
7494 /* Compare to branch latency is 0. There is no benefit from
7495 separating compare and branch. */
7496 if (dep_type == TYPE_COMPARE)
7497 return 0;
7498 /* Floating point compare to branch latency is less than
7499 compare to conditional move. */
7500 if (dep_type == TYPE_FPCMP)
7501 return cost - 1;
7502 break;
7503 default:
7504 break;
7506 break;
7508 case REG_DEP_ANTI:
7509 /* Anti-dependencies only penalize the fpu unit. */
7510 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7511 return 0;
7512 break;
7514 default:
7515 break;
7518 return cost;
7521 static int
7522 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7524 switch (sparc_cpu)
7526 case PROCESSOR_SUPERSPARC:
7527 cost = supersparc_adjust_cost (insn, link, dep, cost);
7528 break;
7529 case PROCESSOR_HYPERSPARC:
7530 case PROCESSOR_SPARCLITE86X:
7531 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7532 break;
7533 default:
7534 break;
7536 return cost;
7539 static void
7540 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7541 int sched_verbose ATTRIBUTE_UNUSED,
7542 int max_ready ATTRIBUTE_UNUSED)
7546 static int
7547 sparc_use_sched_lookahead (void)
7549 if (sparc_cpu == PROCESSOR_NIAGARA
7550 || sparc_cpu == PROCESSOR_NIAGARA2)
7551 return 0;
7552 if (sparc_cpu == PROCESSOR_ULTRASPARC
7553 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7554 return 4;
7555 if ((1 << sparc_cpu) &
7556 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7557 (1 << PROCESSOR_SPARCLITE86X)))
7558 return 3;
7559 return 0;
7562 static int
7563 sparc_issue_rate (void)
7565 switch (sparc_cpu)
7567 case PROCESSOR_NIAGARA:
7568 case PROCESSOR_NIAGARA2:
7569 default:
7570 return 1;
7571 case PROCESSOR_V9:
7572 /* Assume V9 processors are capable of at least dual-issue. */
7573 return 2;
7574 case PROCESSOR_SUPERSPARC:
7575 return 3;
7576 case PROCESSOR_HYPERSPARC:
7577 case PROCESSOR_SPARCLITE86X:
7578 return 2;
7579 case PROCESSOR_ULTRASPARC:
7580 case PROCESSOR_ULTRASPARC3:
7581 return 4;
7585 static int
7586 set_extends (rtx insn)
7588 register rtx pat = PATTERN (insn);
7590 switch (GET_CODE (SET_SRC (pat)))
7592 /* Load and some shift instructions zero extend. */
7593 case MEM:
7594 case ZERO_EXTEND:
7595 /* sethi clears the high bits. */
7596 case HIGH:
7597 /* LO_SUM is used with sethi; sethi clears the high
7598 bits, and the values used with lo_sum are positive. */
7599 case LO_SUM:
7600 /* Store flag stores 0 or 1. */
7601 case LT: case LTU:
7602 case GT: case GTU:
7603 case LE: case LEU:
7604 case GE: case GEU:
7605 case EQ:
7606 case NE:
7607 return 1;
7608 case AND:
7610 rtx op0 = XEXP (SET_SRC (pat), 0);
7611 rtx op1 = XEXP (SET_SRC (pat), 1);
7612 if (GET_CODE (op1) == CONST_INT)
7613 return INTVAL (op1) >= 0;
7614 if (GET_CODE (op0) != REG)
7615 return 0;
7616 if (sparc_check_64 (op0, insn) == 1)
7617 return 1;
7618 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7620 case IOR:
7621 case XOR:
7623 rtx op0 = XEXP (SET_SRC (pat), 0);
7624 rtx op1 = XEXP (SET_SRC (pat), 1);
7625 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7626 return 0;
7627 if (GET_CODE (op1) == CONST_INT)
7628 return INTVAL (op1) >= 0;
7629 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7631 case LSHIFTRT:
7632 return GET_MODE (SET_SRC (pat)) == SImode;
7633 /* Positive integers leave the high bits zero. */
7634 case CONST_DOUBLE:
7635 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7636 case CONST_INT:
7637 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7638 case ASHIFTRT:
7639 case SIGN_EXTEND:
7640 return - (GET_MODE (SET_SRC (pat)) == SImode);
7641 case REG:
7642 return sparc_check_64 (SET_SRC (pat), insn);
7643 default:
7644 return 0;
7648 /* We _ought_ to have only one kind per function, but... */
7649 static GTY(()) rtx sparc_addr_diff_list;
7650 static GTY(()) rtx sparc_addr_list;
7652 void
7653 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7655 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7656 if (diff)
7657 sparc_addr_diff_list
7658 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7659 else
7660 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7663 static void
7664 sparc_output_addr_vec (rtx vec)
7666 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7667 int idx, vlen = XVECLEN (body, 0);
7669 #ifdef ASM_OUTPUT_ADDR_VEC_START
7670 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7671 #endif
7673 #ifdef ASM_OUTPUT_CASE_LABEL
7674 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7675 NEXT_INSN (lab));
7676 #else
7677 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7678 #endif
7680 for (idx = 0; idx < vlen; idx++)
7682 ASM_OUTPUT_ADDR_VEC_ELT
7683 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7686 #ifdef ASM_OUTPUT_ADDR_VEC_END
7687 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7688 #endif
7691 static void
7692 sparc_output_addr_diff_vec (rtx vec)
7694 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7695 rtx base = XEXP (XEXP (body, 0), 0);
7696 int idx, vlen = XVECLEN (body, 1);
7698 #ifdef ASM_OUTPUT_ADDR_VEC_START
7699 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7700 #endif
7702 #ifdef ASM_OUTPUT_CASE_LABEL
7703 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7704 NEXT_INSN (lab));
7705 #else
7706 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7707 #endif
7709 for (idx = 0; idx < vlen; idx++)
7711 ASM_OUTPUT_ADDR_DIFF_ELT
7712 (asm_out_file,
7713 body,
7714 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7715 CODE_LABEL_NUMBER (base));
7718 #ifdef ASM_OUTPUT_ADDR_VEC_END
7719 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7720 #endif
7723 static void
7724 sparc_output_deferred_case_vectors (void)
7726 rtx t;
7727 int align;
7729 if (sparc_addr_list == NULL_RTX
7730 && sparc_addr_diff_list == NULL_RTX)
7731 return;
7733 /* Align to cache line in the function's code section. */
7734 switch_to_section (current_function_section ());
7736 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7737 if (align > 0)
7738 ASM_OUTPUT_ALIGN (asm_out_file, align);
7740 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7741 sparc_output_addr_vec (XEXP (t, 0));
7742 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7743 sparc_output_addr_diff_vec (XEXP (t, 0));
7745 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7748 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7749 unknown. Return 1 if the high bits are zero, -1 if the register is
7750 sign extended. */
7751 int
7752 sparc_check_64 (rtx x, rtx insn)
7754 /* If a register is set only once it is safe to ignore insns this
7755 code does not know how to handle. The loop will either recognize
7756 the single set and return the correct value or fail to recognize
7757 it and return 0. */
7758 int set_once = 0;
7759 rtx y = x;
7761 gcc_assert (GET_CODE (x) == REG);
7763 if (GET_MODE (x) == DImode)
7764 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7766 if (flag_expensive_optimizations
7767 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7768 set_once = 1;
7770 if (insn == 0)
7772 if (set_once)
7773 insn = get_last_insn_anywhere ();
7774 else
7775 return 0;
7778 while ((insn = PREV_INSN (insn)))
7780 switch (GET_CODE (insn))
7782 case JUMP_INSN:
7783 case NOTE:
7784 break;
7785 case CODE_LABEL:
7786 case CALL_INSN:
7787 default:
7788 if (! set_once)
7789 return 0;
7790 break;
7791 case INSN:
7793 rtx pat = PATTERN (insn);
7794 if (GET_CODE (pat) != SET)
7795 return 0;
7796 if (rtx_equal_p (x, SET_DEST (pat)))
7797 return set_extends (insn);
7798 if (y && rtx_equal_p (y, SET_DEST (pat)))
7799 return set_extends (insn);
7800 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7801 return 0;
7805 return 0;
7808 /* Returns assembly code to perform a DImode shift using
7809 a 64-bit global or out register on SPARC-V8+. */
7810 const char *
7811 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7813 static char asm_code[60];
7815 /* The scratch register is only required when the destination
7816 register is not a 64-bit global or out register. */
7817 if (which_alternative != 2)
7818 operands[3] = operands[0];
7820 /* We can only shift by constants <= 63. */
7821 if (GET_CODE (operands[2]) == CONST_INT)
7822 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7824 if (GET_CODE (operands[1]) == CONST_INT)
7826 output_asm_insn ("mov\t%1, %3", operands);
7828 else
7830 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7831 if (sparc_check_64 (operands[1], insn) <= 0)
7832 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7833 output_asm_insn ("or\t%L1, %3, %3", operands);
7836 strcpy (asm_code, opcode);
7838 if (which_alternative != 2)
7839 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7840 else
7841 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
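/* Example (added for illustration, assuming the DImode operand lives in
   the pair %o0/%o1, a constant shift count of 3, OPCODE == "sllx" and
   sparc_check_64 unable to prove the low word clean):

     sllx  %o0, 32, %o0   ! move the high word into position
     srl   %o1, 0, %o1    ! zero-extend the low word
     or    %o1, %o0, %o0  ! build the full 64-bit value
     sllx  %o0, 3, %o1    ! the shift proper
     srlx  %o1, 32, %o0   ! split the result back into the pair  */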
7844 /* Output rtl to increment the profiler label LABELNO
7845 for profiling a function entry. */
7847 void
7848 sparc_profile_hook (int labelno)
7850 char buf[32];
7851 rtx lab, fun;
7853 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7854 if (NO_PROFILE_COUNTERS)
7856 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7858 else
7860 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7861 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7862 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7866 #ifdef OBJECT_FORMAT_ELF
7867 static void
7868 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7869 tree decl)
7871 if (flags & SECTION_MERGE)
7873 /* entsize cannot be expressed in this section attributes
7874 encoding style. */
7875 default_elf_asm_named_section (name, flags, decl);
7876 return;
7879 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7881 if (!(flags & SECTION_DEBUG))
7882 fputs (",#alloc", asm_out_file);
7883 if (flags & SECTION_WRITE)
7884 fputs (",#write", asm_out_file);
7885 if (flags & SECTION_TLS)
7886 fputs (",#tls", asm_out_file);
7887 if (flags & SECTION_CODE)
7888 fputs (",#execinstr", asm_out_file);
7890 /* ??? Handle SECTION_BSS. */
7892 fputc ('\n', asm_out_file);
7894 #endif /* OBJECT_FORMAT_ELF */
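/* Illustrative output of the function above (the section name is
   hypothetical): a writable, allocated data section is emitted as

	.section	".mydata",#alloc,#write

   using the Solaris-style #attribute flags rather than the GNU "aw"
   flag string, while SECTION_MERGE sections fall back to
   default_elf_asm_named_section.  */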
7896 /* We do not allow indirect calls to be optimized into sibling calls.
7898 We cannot use sibling calls when delayed branches are disabled
7899 because they will likely require the call delay slot to be filled.
7901 Also, on SPARC 32-bit we cannot emit a sibling call when the
7902 current function returns a structure. This is because the "unimp
7903 after call" convention would cause the callee to return to the
7904 wrong place. The generic code already disallows cases where the
7905 function being called returns a structure.
7907 It may seem strange that this last case could occur. Usually there
7908 is code after the call which jumps to epilogue code that dumps the
7909 return value into the struct return area. That ought to invalidate
7910 the sibling call, right? Well, in the C++ case we can end up passing
7911 the pointer to the struct return area to a constructor (which returns
7912 void), after which nothing else happens. Such a sibling call would
7913 look valid without the added check here.
7915 VxWorks PIC PLT entries require the global pointer to be initialized
7916 on entry. We therefore can't emit sibling calls to them. */
7917 static bool
7918 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7920 return (decl
7921 && flag_delayed_branch
7922 && (TARGET_ARCH64 || ! cfun->returns_struct)
7923 && !(TARGET_VXWORKS_RTP
7924 && flag_pic
7925 && !targetm.binds_local_p (decl)));
7928 /* libfunc renaming. */
7929 #include "config/gofast.h"
7931 static void
7932 sparc_init_libfuncs (void)
7934 if (TARGET_ARCH32)
7936 /* Use the subroutines that Sun's library provides for integer
7937 multiply and divide. The `*' prevents an underscore from
7938 being prepended by the compiler. .umul is a little faster
7939 than .mul. */
7940 set_optab_libfunc (smul_optab, SImode, "*.umul");
7941 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7942 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7943 set_optab_libfunc (smod_optab, SImode, "*.rem");
7944 set_optab_libfunc (umod_optab, SImode, "*.urem");
7946 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
7947 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7948 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7949 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7950 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7951 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7953 /* We can define the TFmode sqrt optab only if TARGET_FPU. With
7954 soft-float, the SFmode and DFmode sqrt instructions are absent,
7955 so the compiler would notice and try to use the TFmode sqrt
7956 instruction for calls to the builtin function sqrt, which
7957 fails. */
7958 if (TARGET_FPU)
7959 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7961 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7962 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7963 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7964 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7965 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7966 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7968 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7969 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7970 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7971 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7973 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7974 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7975 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7976 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7978 if (DITF_CONVERSION_LIBFUNCS)
7980 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7981 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7982 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7983 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7986 if (SUN_CONVERSION_LIBFUNCS)
7988 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7989 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7990 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7991 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7994 if (TARGET_ARCH64)
7996 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7997 do not exist in the library. Make sure the compiler does not
7998 emit calls to them by accident. (It should always use the
7999 hardware instructions.) */
8000 set_optab_libfunc (smul_optab, SImode, 0);
8001 set_optab_libfunc (sdiv_optab, SImode, 0);
8002 set_optab_libfunc (udiv_optab, SImode, 0);
8003 set_optab_libfunc (smod_optab, SImode, 0);
8004 set_optab_libfunc (umod_optab, SImode, 0);
8006 if (SUN_INTEGER_MULTIPLY_64)
8008 set_optab_libfunc (smul_optab, DImode, "__mul64");
8009 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8010 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8011 set_optab_libfunc (smod_optab, DImode, "__rem64");
8012 set_optab_libfunc (umod_optab, DImode, "__urem64");
8015 if (SUN_CONVERSION_LIBFUNCS)
8017 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8018 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8019 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8020 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8024 gofast_maybe_init_libfuncs ();
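/* Consequence of the mappings above (an illustrative sketch): on
   TARGET_ARCH32,

	long double mul (long double x, long double y)
	{
	  return x * y;
	}

   is lowered to a call to _Q_mul, and TFmode comparisons to the
   _Q_f* routines, as required by the SPARC 32-bit ABI.  */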
8027 #define def_builtin(NAME, CODE, TYPE) \
8028 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8029 NULL_TREE)
8031 /* Implement the TARGET_INIT_BUILTINS target hook.
8032 Create builtin functions for special SPARC instructions. */
8034 static void
8035 sparc_init_builtins (void)
8037 if (TARGET_VIS)
8038 sparc_vis_init_builtins ();
8041 /* Create builtin functions for VIS 1.0 instructions. */
8043 static void
8044 sparc_vis_init_builtins (void)
8046 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8047 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8048 tree v4hi = build_vector_type (intHI_type_node, 4);
8049 tree v2hi = build_vector_type (intHI_type_node, 2);
8050 tree v2si = build_vector_type (intSI_type_node, 2);
8052 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8053 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8054 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8055 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8056 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8057 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8058 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8059 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8060 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8061 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8062 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8063 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8064 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8065 v8qi, v8qi,
8066 intDI_type_node, 0);
8067 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8068 intDI_type_node,
8069 intDI_type_node, 0);
8070 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8071 ptr_type_node,
8072 intSI_type_node, 0);
8073 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8074 ptr_type_node,
8075 intDI_type_node, 0);
8077 /* Packing and expanding vectors. */
8078 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8079 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8080 v8qi_ftype_v2si_v8qi);
8081 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8082 v2hi_ftype_v2si);
8083 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8084 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8085 v8qi_ftype_v4qi_v4qi);
8087 /* Multiplications. */
8088 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8089 v4hi_ftype_v4qi_v4hi);
8090 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8091 v4hi_ftype_v4qi_v2hi);
8092 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8093 v4hi_ftype_v4qi_v2hi);
8094 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8095 v4hi_ftype_v8qi_v4hi);
8096 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8097 v4hi_ftype_v8qi_v4hi);
8098 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8099 v2si_ftype_v4qi_v2hi);
8100 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8101 v2si_ftype_v4qi_v2hi);
8103 /* Data aligning. */
8104 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8105 v4hi_ftype_v4hi_v4hi);
8106 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8107 v8qi_ftype_v8qi_v8qi);
8108 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8109 v2si_ftype_v2si_v2si);
8110 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8111 di_ftype_di_di);
8112 if (TARGET_ARCH64)
8113 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8114 ptr_ftype_ptr_di);
8115 else
8116 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8117 ptr_ftype_ptr_si);
8119 /* Pixel distance. */
8120 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8121 di_ftype_v8qi_v8qi_di);
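/* Example use of one of the builtins above (a sketch; the vector
   typedefs are assumptions chosen to match the types built here):

	typedef short vis_v4hi __attribute__ ((vector_size (8)));
	typedef unsigned char vis_v4qi __attribute__ ((vector_size (4)));

	vis_v4qi pack16 (vis_v4hi x)
	{
	  return __builtin_vis_fpack16 (x);
	}

   should compile down to an fpack16 instruction when VIS is
   enabled.  */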
8124 /* Implement the TARGET_EXPAND_BUILTIN target hook.
8125 Expand builtin functions for SPARC intrinsics. */
8127 static rtx
8128 sparc_expand_builtin (tree exp, rtx target,
8129 rtx subtarget ATTRIBUTE_UNUSED,
8130 enum machine_mode tmode ATTRIBUTE_UNUSED,
8131 int ignore ATTRIBUTE_UNUSED)
8133 tree arg;
8134 call_expr_arg_iterator iter;
8135 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8136 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8137 rtx pat, op[4];
8138 enum machine_mode mode[4];
8139 int arg_count = 0;
8141 mode[0] = insn_data[icode].operand[0].mode;
8142 if (!target
8143 || GET_MODE (target) != mode[0]
8144 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8145 op[0] = gen_reg_rtx (mode[0]);
8146 else
8147 op[0] = target;
8149 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8151 arg_count++;
8152 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8153 op[arg_count] = expand_normal (arg);
8155 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8156 mode[arg_count]))
8157 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8160 switch (arg_count)
8162 case 1:
8163 pat = GEN_FCN (icode) (op[0], op[1]);
8164 break;
8165 case 2:
8166 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8167 break;
8168 case 3:
8169 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8170 break;
8171 default:
8172 gcc_unreachable ();
8175 if (!pat)
8176 return NULL_RTX;
8178 emit_insn (pat);
8180 return op[0];
8183 static int
8184 sparc_vis_mul8x16 (int e8, int e16)
8186 return (e8 * e16 + 128) / 256;
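/* Worked example: E8 == 128 and E16 == 200 give
   (128 * 200 + 128) / 256 == 25728 / 256 == 100, i.e. E16 scaled
   by 128/256 with a rounding bias of 128 added before the
   truncating division.  */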
8189 /* Multiply the vector elements in ELTS0 by the elements in ELTS1, as
8190 specified by FNCODE. All elements in the ELTS0 and ELTS1 lists must be integer
8191 constants. A tree list with the results of the multiplications is returned,
8192 and each element in the list is of INNER_TYPE. */
8194 static tree
8195 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8197 tree n_elts = NULL_TREE;
8198 int scale;
8200 switch (fncode)
8202 case CODE_FOR_fmul8x16_vis:
8203 for (; elts0 && elts1;
8204 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8206 int val
8207 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8208 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8209 n_elts = tree_cons (NULL_TREE,
8210 build_int_cst (inner_type, val),
8211 n_elts);
8213 break;
8215 case CODE_FOR_fmul8x16au_vis:
8216 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8218 for (; elts0; elts0 = TREE_CHAIN (elts0))
8220 int val
8221 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8222 scale);
8223 n_elts = tree_cons (NULL_TREE,
8224 build_int_cst (inner_type, val),
8225 n_elts);
8227 break;
8229 case CODE_FOR_fmul8x16al_vis:
8230 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8232 for (; elts0; elts0 = TREE_CHAIN (elts0))
8234 int val
8235 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8236 scale);
8237 n_elts = tree_cons (NULL_TREE,
8238 build_int_cst (inner_type, val),
8239 n_elts);
8241 break;
8243 default:
8244 gcc_unreachable ();
8247 return nreverse (n_elts);
8250 /* Implement the TARGET_FOLD_BUILTIN target hook.
8251 Fold builtin functions for SPARC intrinsics. If IGNORE is true, the
8252 result of the function call is ignored. NULL_TREE is returned if the
8253 function could not be folded. */
8255 static tree
8256 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8258 tree arg0, arg1, arg2;
8259 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8260 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8262 if (ignore
8263 && icode != CODE_FOR_alignaddrsi_vis
8264 && icode != CODE_FOR_alignaddrdi_vis)
8265 return fold_convert (rtype, integer_zero_node);
8267 switch (icode)
8269 case CODE_FOR_fexpand_vis:
8270 arg0 = TREE_VALUE (arglist);
8271 STRIP_NOPS (arg0);
8273 if (TREE_CODE (arg0) == VECTOR_CST)
8275 tree inner_type = TREE_TYPE (rtype);
8276 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8277 tree n_elts = NULL_TREE;
8279 for (; elts; elts = TREE_CHAIN (elts))
8281 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8282 n_elts = tree_cons (NULL_TREE,
8283 build_int_cst (inner_type, val),
8284 n_elts);
8286 return build_vector (rtype, nreverse (n_elts));
8288 break;
8290 case CODE_FOR_fmul8x16_vis:
8291 case CODE_FOR_fmul8x16au_vis:
8292 case CODE_FOR_fmul8x16al_vis:
8293 arg0 = TREE_VALUE (arglist);
8294 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8295 STRIP_NOPS (arg0);
8296 STRIP_NOPS (arg1);
8298 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8300 tree inner_type = TREE_TYPE (rtype);
8301 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8302 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8303 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8304 elts1);
8306 return build_vector (rtype, n_elts);
8308 break;
8310 case CODE_FOR_fpmerge_vis:
8311 arg0 = TREE_VALUE (arglist);
8312 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8313 STRIP_NOPS (arg0);
8314 STRIP_NOPS (arg1);
8316 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8318 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8319 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8320 tree n_elts = NULL_TREE;
8322 for (; elts0 && elts1;
8323 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8325 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8326 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8329 return build_vector (rtype, nreverse (n_elts));
8331 break;
8333 case CODE_FOR_pdist_vis:
8334 arg0 = TREE_VALUE (arglist);
8335 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8336 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8337 STRIP_NOPS (arg0);
8338 STRIP_NOPS (arg1);
8339 STRIP_NOPS (arg2);
8341 if (TREE_CODE (arg0) == VECTOR_CST
8342 && TREE_CODE (arg1) == VECTOR_CST
8343 && TREE_CODE (arg2) == INTEGER_CST)
8345 int overflow = 0;
8346 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8347 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8348 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8349 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8351 for (; elts0 && elts1;
8352 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8354 unsigned HOST_WIDE_INT
8355 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8356 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8357 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8358 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8360 unsigned HOST_WIDE_INT l;
8361 HOST_WIDE_INT h;
8363 overflow |= neg_double (low1, high1, &l, &h);
8364 overflow |= add_double (low0, high0, l, h, &l, &h);
8365 if (h < 0)
8366 overflow |= neg_double (l, h, &l, &h);
8368 overflow |= add_double (low, high, l, h, &low, &high);
8371 gcc_assert (overflow == 0);
8373 return build_int_cst_wide (rtype, low, high);
8376 default:
8377 break;
8380 return NULL_TREE;
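/* Folding example (illustrative): a VECTOR_CST argument {1, 2, 3, 4}
   to __builtin_vis_fexpand folds at compile time to the v4hi
   constant {16, 32, 48, 64}, matching the << 4 in the fexpand case
   above.  */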
8383 /* ??? This duplicates information provided to the compiler by the
8384 ??? scheduler description. Some day, teach genautomata to output
8385 ??? the latencies and then CSE will just use that. */
8387 static bool
8388 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8389 bool speed ATTRIBUTE_UNUSED)
8391 enum machine_mode mode = GET_MODE (x);
8392 bool float_mode_p = FLOAT_MODE_P (mode);
8394 switch (code)
8396 case CONST_INT:
8397 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8399 *total = 0;
8400 return true;
8402 /* FALLTHRU */
8404 case HIGH:
8405 *total = 2;
8406 return true;
8408 case CONST:
8409 case LABEL_REF:
8410 case SYMBOL_REF:
8411 *total = 4;
8412 return true;
8414 case CONST_DOUBLE:
8415 if (GET_MODE (x) == VOIDmode
8416 && ((CONST_DOUBLE_HIGH (x) == 0
8417 && CONST_DOUBLE_LOW (x) < 0x1000)
8418 || (CONST_DOUBLE_HIGH (x) == -1
8419 && CONST_DOUBLE_LOW (x) < 0
8420 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8421 *total = 0;
8422 else
8423 *total = 8;
8424 return true;
8426 case MEM:
8427 /* If the outer code was a sign or zero extension, a cost
8428 of COSTS_N_INSNS (1) was already added in; this is
8429 why we subtract it back out here. */
8430 if (outer_code == ZERO_EXTEND)
8432 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8434 else if (outer_code == SIGN_EXTEND)
8436 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8438 else if (float_mode_p)
8440 *total = sparc_costs->float_load;
8442 else
8444 *total = sparc_costs->int_load;
8447 return true;
8449 case PLUS:
8450 case MINUS:
8451 if (float_mode_p)
8452 *total = sparc_costs->float_plusminus;
8453 else
8454 *total = COSTS_N_INSNS (1);
8455 return false;
8457 case MULT:
8458 if (float_mode_p)
8459 *total = sparc_costs->float_mul;
8460 else if (! TARGET_HARD_MUL)
8461 *total = COSTS_N_INSNS (25);
8462 else
8464 int bit_cost;
8466 bit_cost = 0;
8467 if (sparc_costs->int_mul_bit_factor)
8469 int nbits;
8471 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8473 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8474 for (nbits = 0; value != 0; value &= value - 1)
8475 nbits++;
8477 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8478 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8480 rtx x1 = XEXP (x, 1);
8481 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8482 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8484 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8485 nbits++;
8486 for (; value2 != 0; value2 &= value2 - 1)
8487 nbits++;
8489 else
8490 nbits = 7;
8492 if (nbits < 3)
8493 nbits = 3;
8494 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8495 bit_cost = COSTS_N_INSNS (bit_cost);
8498 if (mode == DImode)
8499 *total = sparc_costs->int_mulX + bit_cost;
8500 else
8501 *total = sparc_costs->int_mul + bit_cost;
8503 return false;
8505 case ASHIFT:
8506 case ASHIFTRT:
8507 case LSHIFTRT:
8508 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8509 return false;
8511 case DIV:
8512 case UDIV:
8513 case MOD:
8514 case UMOD:
8515 if (float_mode_p)
8517 if (mode == DFmode)
8518 *total = sparc_costs->float_div_df;
8519 else
8520 *total = sparc_costs->float_div_sf;
8522 else
8524 if (mode == DImode)
8525 *total = sparc_costs->int_divX;
8526 else
8527 *total = sparc_costs->int_div;
8529 return false;
8531 case NEG:
8532 if (! float_mode_p)
8534 *total = COSTS_N_INSNS (1);
8535 return false;
8537 /* FALLTHRU */
8539 case ABS:
8540 case FLOAT:
8541 case UNSIGNED_FLOAT:
8542 case FIX:
8543 case UNSIGNED_FIX:
8544 case FLOAT_EXTEND:
8545 case FLOAT_TRUNCATE:
8546 *total = sparc_costs->float_move;
8547 return false;
8549 case SQRT:
8550 if (mode == DFmode)
8551 *total = sparc_costs->float_sqrt_df;
8552 else
8553 *total = sparc_costs->float_sqrt_sf;
8554 return false;
8556 case COMPARE:
8557 if (float_mode_p)
8558 *total = sparc_costs->float_cmp;
8559 else
8560 *total = COSTS_N_INSNS (1);
8561 return false;
8563 case IF_THEN_ELSE:
8564 if (float_mode_p)
8565 *total = sparc_costs->float_cmove;
8566 else
8567 *total = sparc_costs->int_cmove;
8568 return false;
8570 case IOR:
8571 /* Handle the NAND vector patterns. */
8572 if (sparc_vector_mode_supported_p (GET_MODE (x))
8573 && GET_CODE (XEXP (x, 0)) == NOT
8574 && GET_CODE (XEXP (x, 1)) == NOT)
8576 *total = COSTS_N_INSNS (1);
8577 return true;
8579 else
8580 return false;
8582 default:
8583 return false;
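/* Worked example for the MULT case (the bit factor is illustrative):
   a multiply by the constant 0xff has 8 bits set, so with an
   int_mul_bit_factor of 2 the added cost is
   COSTS_N_INSNS ((8 - 3) / 2) == COSTS_N_INSNS (2) on top of the
   base int_mul (or int_mulX for DImode) cost.  */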
8587 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8588 This is achieved by means of a manual dynamic stack space allocation in
8589 the current frame. We make the assumption that SEQ doesn't contain any
8590 function calls, with the possible exception of calls to the PIC helper. */
8592 static void
8593 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8595 /* We must preserve the lowest 16 words for the register save area. */
8596 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8597 /* We really need only 2 words of fresh stack space. */
8598 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8600 rtx slot
8601 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8602 SPARC_STACK_BIAS + offset));
8604 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8605 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8606 if (reg2)
8607 emit_insn (gen_rtx_SET (VOIDmode,
8608 adjust_address (slot, word_mode, UNITS_PER_WORD),
8609 reg2));
8610 emit_insn (seq);
8611 if (reg2)
8612 emit_insn (gen_rtx_SET (VOIDmode,
8613 reg2,
8614 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8615 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8616 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
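/* Size example (assuming 8-byte stack alignment in 32-bit mode): on
   TARGET_ARCH32 the slot sits 16 * 4 == 64 bytes past the stack
   pointer and the frame is grown by SPARC_STACK_ALIGN (64 + 8)
   == 72 bytes; both figures double on TARGET_ARCH64.  */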
8619 /* Output the assembler code for a thunk function. THUNK_DECL is the
8620 declaration for the thunk function itself, FUNCTION is the decl for
8621 the target function. DELTA is an immediate constant offset to be
8622 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8623 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8625 static void
8626 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8627 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8628 tree function)
8630 rtx this_rtx, insn, funexp;
8631 unsigned int int_arg_first;
8633 reload_completed = 1;
8634 epilogue_completed = 1;
8636 emit_note (NOTE_INSN_PROLOGUE_END);
8638 if (flag_delayed_branch)
8640 /* We will emit a regular sibcall below, so we need to instruct
8641 output_sibcall that we are in a leaf function. */
8642 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8644 /* This will cause final.c to invoke leaf_renumber_regs, so we
8645 must behave as if we were in a not-yet-leafified function. */
8646 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8648 else
8650 /* We will emit the sibcall manually below, so we will need to
8651 manually spill non-leaf registers. */
8652 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8654 /* We really are in a leaf function. */
8655 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8658 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8659 returns a structure, the structure return pointer is there instead. */
8660 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8661 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8662 else
8663 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8665 /* Add DELTA. When possible use a plain add, otherwise load it into
8666 a register first. */
8667 if (delta)
8669 rtx delta_rtx = GEN_INT (delta);
8671 if (! SPARC_SIMM13_P (delta))
8673 rtx scratch = gen_rtx_REG (Pmode, 1);
8674 emit_move_insn (scratch, delta_rtx);
8675 delta_rtx = scratch;
8678 /* THIS_RTX += DELTA. */
8679 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8682 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8683 if (vcall_offset)
8685 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8686 rtx scratch = gen_rtx_REG (Pmode, 1);
8688 gcc_assert (vcall_offset < 0);
8690 /* SCRATCH = *THIS_RTX. */
8691 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8693 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8694 may not have any available scratch register at this point. */
8695 if (SPARC_SIMM13_P (vcall_offset))
8697 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8698 else if (! fixed_regs[5]
8699 /* The below sequence is made up of at least 2 insns,
8700 while the default method may need only one. */
8701 && vcall_offset < -8192)
8703 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8704 emit_move_insn (scratch2, vcall_offset_rtx);
8705 vcall_offset_rtx = scratch2;
8707 else
8709 rtx increment = GEN_INT (-4096);
8711 /* VCALL_OFFSET is a negative number whose typical range can be
8712 estimated as -32768..0 in 32-bit mode. In almost all cases
8713 it is therefore cheaper to emit multiple add insns than to
8714 spill and load the constant into a register (at least
8715 6 insns). */
8716 while (! SPARC_SIMM13_P (vcall_offset))
8718 emit_insn (gen_add2_insn (scratch, increment));
8719 vcall_offset += 4096;
8721 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8724 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8725 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8726 gen_rtx_PLUS (Pmode,
8727 scratch,
8728 vcall_offset_rtx)));
8730 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8731 emit_insn (gen_add2_insn (this_rtx, scratch));
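/* Worked example for the add loop above: VCALL_OFFSET == -6000 is
   not a SIMM13 but is above -8192, so a single "add %g1, -4096, %g1"
   is emitted and the residual offset of -1904 then fits in the
   SIMM13 displacement of the final load (%g1 being the scratch
   register chosen above).  */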
8734 /* Generate a tail call to the target function. */
8735 if (! TREE_USED (function))
8737 assemble_external (function);
8738 TREE_USED (function) = 1;
8740 funexp = XEXP (DECL_RTL (function), 0);
8742 if (flag_delayed_branch)
8744 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8745 insn = emit_call_insn (gen_sibcall (funexp));
8746 SIBLING_CALL_P (insn) = 1;
8748 else
8750 /* The hoops we have to jump through in order to generate a sibcall
8751 without using delay slots... */
8752 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8754 if (flag_pic)
8756 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8757 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8758 start_sequence ();
8759 /* Delay emitting the PIC helper function because it needs to
8760 change the section and we are emitting assembly code. */
8761 load_pic_register (true); /* clobbers %o7 */
8762 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8763 seq = get_insns ();
8764 end_sequence ();
8765 emit_and_preserve (seq, spill_reg, spill_reg2);
8767 else if (TARGET_ARCH32)
8769 emit_insn (gen_rtx_SET (VOIDmode,
8770 scratch,
8771 gen_rtx_HIGH (SImode, funexp)));
8772 emit_insn (gen_rtx_SET (VOIDmode,
8773 scratch,
8774 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8776 else /* TARGET_ARCH64 */
8778 switch (sparc_cmodel)
8780 case CM_MEDLOW:
8781 case CM_MEDMID:
8782 /* The destination can serve as a temporary. */
8783 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8784 break;
8786 case CM_MEDANY:
8787 case CM_EMBMEDANY:
8788 /* The destination cannot serve as a temporary. */
8789 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8790 start_sequence ();
8791 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8792 seq = get_insns ();
8793 end_sequence ();
8794 emit_and_preserve (seq, spill_reg, 0);
8795 break;
8797 default:
8798 gcc_unreachable ();
8802 emit_jump_insn (gen_indirect_jump (scratch));
8805 emit_barrier ();
8807 /* Run just enough of rest_of_compilation to get the insns emitted.
8808 There's not really enough bulk here to make other passes such as
8809 instruction scheduling worthwhile. Note that use_thunk calls
8810 assemble_start_function and assemble_end_function. */
8811 insn = get_insns ();
8812 insn_locators_alloc ();
8813 shorten_branches (insn);
8814 final_start_function (insn, file, 1);
8815 final (insn, file, 1);
8816 final_end_function ();
8817 free_after_compilation (cfun);
8819 reload_completed = 0;
8820 epilogue_completed = 0;
8823 /* Return true if sparc_output_mi_thunk would be able to output the
8824 assembler code for the thunk function specified by the arguments
8825 it is passed, and false otherwise. */
8826 static bool
8827 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8828 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8829 HOST_WIDE_INT vcall_offset,
8830 const_tree function ATTRIBUTE_UNUSED)
8832 /* Bound the loop used in the default method above. */
8833 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8836 /* How to allocate a 'struct machine_function'. */
8838 static struct machine_function *
8839 sparc_init_machine_status (void)
8841 return GGC_CNEW (struct machine_function);
8844 /* Locate some local-dynamic symbol still in use by this function
8845 so that we can print its name in local-dynamic base patterns. */
8847 static const char *
8848 get_some_local_dynamic_name (void)
8850 rtx insn;
8852 if (cfun->machine->some_ld_name)
8853 return cfun->machine->some_ld_name;
8855 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8856 if (INSN_P (insn)
8857 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8858 return cfun->machine->some_ld_name;
8860 gcc_unreachable ();
8863 static int
8864 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8866 rtx x = *px;
8868 if (x
8869 && GET_CODE (x) == SYMBOL_REF
8870 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8872 cfun->machine->some_ld_name = XSTR (x, 0);
8873 return 1;
8876 return 0;
8879 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8880 This is called from dwarf2out.c to emit call frame instructions
8881 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8882 static void
8883 sparc_dwarf_handle_frame_unspec (const char *label,
8884 rtx pattern ATTRIBUTE_UNUSED,
8885 int index ATTRIBUTE_UNUSED)
8887 gcc_assert (index == UNSPECV_SAVEW);
8888 dwarf2out_window_save (label);
8891 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8892 We need to emit DTP-relative relocations. */
8894 static void
8895 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8897 switch (size)
8899 case 4:
8900 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8901 break;
8902 case 8:
8903 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8904 break;
8905 default:
8906 gcc_unreachable ();
8908 output_addr_const (file, x);
8909 fputs (")", file);
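/* For SIZE == 4 and a symbol sym (the name is hypothetical), the
   directive emitted above is

	.word	%r_tls_dtpoff32(sym)

   while the SIZE == 8 case uses .xword with %r_tls_dtpoff64.  */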
8912 /* Do whatever processing is required at the end of a file. */
8914 static void
8915 sparc_file_end (void)
8917 /* If we haven't emitted the special PIC helper function, do so now. */
8918 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8919 emit_pic_helper ();
8921 if (NEED_INDICATE_EXEC_STACK)
8922 file_end_indicate_exec_stack ();
8925 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8926 /* Implement TARGET_MANGLE_TYPE. */
8928 static const char *
8929 sparc_mangle_type (const_tree type)
8931 if (!TARGET_64BIT
8932 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8933 && TARGET_LONG_DOUBLE_128)
8934 return "g";
8936 /* For all other types, use normal C++ mangling. */
8937 return NULL;
8939 #endif
8941 /* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing
8942 a 32-bit compare-and-swap on the word containing the byte or half-word. */
8944 void
8945 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8947 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8948 rtx addr = gen_reg_rtx (Pmode);
8949 rtx off = gen_reg_rtx (SImode);
8950 rtx oldv = gen_reg_rtx (SImode);
8951 rtx newv = gen_reg_rtx (SImode);
8952 rtx oldvalue = gen_reg_rtx (SImode);
8953 rtx newvalue = gen_reg_rtx (SImode);
8954 rtx res = gen_reg_rtx (SImode);
8955 rtx resv = gen_reg_rtx (SImode);
8956 rtx memsi, val, mask, end_label, loop_label, cc;
8958 emit_insn (gen_rtx_SET (VOIDmode, addr,
8959 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8961 if (Pmode != SImode)
8962 addr1 = gen_lowpart (SImode, addr1);
8963 emit_insn (gen_rtx_SET (VOIDmode, off,
8964 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8966 memsi = gen_rtx_MEM (SImode, addr);
8967 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8968 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8970 val = force_reg (SImode, memsi);
8972 emit_insn (gen_rtx_SET (VOIDmode, off,
8973 gen_rtx_XOR (SImode, off,
8974 GEN_INT (GET_MODE (mem) == QImode
8975 ? 3 : 2))));
8977 emit_insn (gen_rtx_SET (VOIDmode, off,
8978 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8980 if (GET_MODE (mem) == QImode)
8981 mask = force_reg (SImode, GEN_INT (0xff));
8982 else
8983 mask = force_reg (SImode, GEN_INT (0xffff));
8985 emit_insn (gen_rtx_SET (VOIDmode, mask,
8986 gen_rtx_ASHIFT (SImode, mask, off)));
8988 emit_insn (gen_rtx_SET (VOIDmode, val,
8989 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8990 val)));
8992 oldval = gen_lowpart (SImode, oldval);
8993 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8994 gen_rtx_ASHIFT (SImode, oldval, off)));
8996 newval = gen_lowpart_common (SImode, newval);
8997 emit_insn (gen_rtx_SET (VOIDmode, newv,
8998 gen_rtx_ASHIFT (SImode, newval, off)));
9000 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9001 gen_rtx_AND (SImode, oldv, mask)));
9003 emit_insn (gen_rtx_SET (VOIDmode, newv,
9004 gen_rtx_AND (SImode, newv, mask)));
9006 end_label = gen_label_rtx ();
9007 loop_label = gen_label_rtx ();
9008 emit_label (loop_label);
9010 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9011 gen_rtx_IOR (SImode, oldv, val)));
9013 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9014 gen_rtx_IOR (SImode, newv, val)));
9016 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9018 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9020 emit_insn (gen_rtx_SET (VOIDmode, resv,
9021 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9022 res)));
9024 sparc_compare_op0 = resv;
9025 sparc_compare_op1 = val;
9026 cc = gen_compare_reg (NE);
9028 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9030 sparc_compare_op0 = cc;
9031 sparc_compare_op1 = const0_rtx;
9032 emit_jump_insn (gen_bne (loop_label));
9034 emit_label (end_label);
9036 emit_insn (gen_rtx_SET (VOIDmode, res,
9037 gen_rtx_AND (SImode, res, mask)));
9039 emit_insn (gen_rtx_SET (VOIDmode, res,
9040 gen_rtx_LSHIFTRT (SImode, res, off)));
9042 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
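/* Worked example (QImode, big-endian): for a byte at an address A
   with A & 3 == 0, the code above computes ADDR = A & -4,
   OFF = ((A & 3) ^ 3) << 3 == 24 and MASK = 0xff << 24 == 0xff000000,
   selecting the most significant byte of the containing word, which
   is the byte at the lowest address on this big-endian target.  */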
9045 #include "gt-sparc.h"