/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
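
/* Editorial note, not in the original sources: COSTS_N_INSNS (N)
   expresses a cost of N single-cycle instructions in GCC's internal
   cost units, so the cypress table above charges an fdivs the
   equivalent of 37 simple instructions.  A sketch of how consumers
   such as sparc_rtx_costs (declared below) read these numbers,
   assuming the field names from sparc.h (e.g. float_div_sf):

     int cost = sparc_costs->float_div_sf;  // table picked by -mtune
     // a large cost steers the optimizers away from emitting fdivs

   The pointer is re-targeted per CPU in sparc_override_options.  */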
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
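
/* Worked example (editorial note, not in the original sources): in a
   leaf function the register window is not shifted, so incoming
   arguments that would normally live in %i0-%i5 (hard regs 24-29) are
   remapped by the table above to %o0-%o5 (hard regs 8-13), and %i7
   (reg 31) to %o7 (reg 15).  A -1 entry, such as %fp (reg 30) or the
   locals %l0-%l7 (regs 16-23), means that register may not appear in
   a function given the leaf treatment.  */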
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
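
/* Usage sketch (editorial note, not in the original sources): for a
   command line such as

     sparc-elf-gcc -mcpu=ultrasparc -mtune=niagara file.c

   this hook records "ultrasparc" in sparc_select[1].string and
   "niagara" in sparc_select[2].string; sparc_override_options below
   then resolves both strings against cpu_table, the first choosing
   the architecture masks and the second only the tuning.  */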
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;
#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;
  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 2
		      : (sparc_cpu == PROCESSOR_ULTRASPARC3
			 ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_ULTRASPARC3
		       || sparc_cpu == PROCESSOR_NIAGARA
		       || sparc_cpu == PROCESSOR_NIAGARA2)
		      ? 64 : 32));
}
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
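
/* Worked example (editorial note, not in the original sources): the
   SFmode constant 1.5f has the bit pattern 0x3fc00000.  That is too
   big for a signed 13-bit immediate, but its low 10 bits are clear,
   so fp_sethi_p returns nonzero and the value can be built with a
   single "sethi %hi(0x3fc00000), %reg".  A pattern such as
   0x3fc00001 fails both tests, so fp_high_losum_p accepts it and a
   two-insn sethi/or sequence is used instead.  */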
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
	 gap can be different from the object-file gap.  We therefore can't
	 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
	 are absolutely sure that X is in the same segment as the GOT.
	 Unfortunately, the flexibility of linker scripts means that we
	 can't be sure of that in general, so assume that _G_O_T_-relative
	 accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] :
						 NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
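
/* Example of the constant-into-FP-register path above (editorial
   note, not in the original sources): for a nonzero SFmode constant
   destined for an FP register, say (set (reg:SF %f0) (const_double
   1.0)), there is no instruction that materializes the bits in an FP
   register directly, so the constant is spilled to the constant pool
   with force_const_mem and reloaded as an ordinary SFmode memory
   operand; only the all-zero constant can stay, and then only when
   TARGET_VIS provides a register-clearing idiom.  */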
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
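
/* Worked example (editorial note, not in the original sources):
   loading 0x12345678 into %o0 becomes

     sethi %hi(0x12345400), %g1   ! temp = 0x12345678 & ~0x3ff
     or    %g1, 0x278, %o0        ! IOR in the low 10 bits

   with the two pieces emitted as separate SETs of plain constants,
   rather than HIGH/LO_SUM, so CSE can reuse the intermediate.  */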
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
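
/* Worked example (editorial note, not in the original sources): for
   the constant 0xffffffff12345678, quick1 is called with is_neg = 1
   and low_bits = 0x12345678.  high_bits becomes ~low_bits =
   0xedcba987, giving

     sethi %hi(0xedcba800), %temp
     xor   %temp, -392, %reg       ! -392 == -0x400 | 0x278

   where the sign-extending xor both fills in the low 10 bits and
   flips the upper 32 bits to all ones.  */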
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
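
/* Worked example (editorial note, not in the original sources): for
   the constant 0x1234567800000000, quick2 is called with high_bits =
   0x12345678, low_immediate = 0 and shift_count = 32, producing

     sethi %hi(0x12345400), %temp
     or    %temp, 0x278, %reg
     sllx  %reg, 32, %reg

   with a trailing "or %reg, low, %reg" emitted only when
   low_immediate is nonzero.  */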
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }

  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
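
/* Worked example (editorial note, not in the original sources): for
   high_bits = 0 and low_bits = 0x0003fc00 (bits 10-17 set), the
   routine reports lowest_bit_set = 10, highest_bit_set = 17 and
   all_bits_between_are_set = 1.  Since the span is under 21 bits,
   const64_is_2insns below concludes the value needs at most two
   instructions.  */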
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
1714 /* Here we are sure to be arch64 and this is an integer constant
1715 being loaded into a register. Emit the most efficient
1716 insn sequence possible. Detection of all the 1-insn cases
1717 has been done already. */
1718 void
1719 sparc_emit_set_const64 (rtx op0, rtx op1)
1721 unsigned HOST_WIDE_INT high_bits, low_bits;
1722 int lowest_bit_set, highest_bit_set;
1723 int all_bits_between_are_set;
1724 rtx temp = 0;
1726 /* Sanity check that we know what we are working with. */
1727 gcc_assert (TARGET_ARCH64
1728 && (GET_CODE (op0) == SUBREG
1729 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1731 if (reload_in_progress || reload_completed)
1732 temp = op0;
1734 if (GET_CODE (op1) != CONST_INT)
1736 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1737 return;
1740 if (! temp)
1741 temp = gen_reg_rtx (DImode);
1743 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1744 low_bits = (INTVAL (op1) & 0xffffffff);
1746 /* low_bits bits 0 --> 31
1747 high_bits bits 32 --> 63 */
1749 analyze_64bit_constant (high_bits, low_bits,
1750 &highest_bit_set, &lowest_bit_set,
1751 &all_bits_between_are_set);
1753 /* First try for a 2-insn sequence. */
1755 /* These situations are preferred because the optimizer can
1756 * do more things with them:
1757 * 1) mov -1, %reg
1758 * sllx %reg, shift, %reg
1759 * 2) mov -1, %reg
1760 * srlx %reg, shift, %reg
1761 * 3) mov some_small_const, %reg
1762 * sllx %reg, shift, %reg
1764 if (((highest_bit_set == 63
1765 || lowest_bit_set == 0)
1766 && all_bits_between_are_set != 0)
1767 || ((highest_bit_set - lowest_bit_set) < 12))
1769 HOST_WIDE_INT the_const = -1;
1770 int shift = lowest_bit_set;
1772 if ((highest_bit_set != 63
1773 && lowest_bit_set != 0)
1774 || all_bits_between_are_set == 0)
1776 the_const =
1777 create_simple_focus_bits (high_bits, low_bits,
1778 lowest_bit_set, 0);
1780 else if (lowest_bit_set == 0)
1781 shift = -(63 - highest_bit_set);
1783 gcc_assert (SPARC_SIMM13_P (the_const));
1784 gcc_assert (shift != 0);
1786 emit_insn (gen_safe_SET64 (temp, the_const));
1787 if (shift > 0)
1788 emit_insn (gen_rtx_SET (VOIDmode,
1789 op0,
1790 gen_rtx_ASHIFT (DImode,
1791 temp,
1792 GEN_INT (shift))));
1793 else if (shift < 0)
1794 emit_insn (gen_rtx_SET (VOIDmode,
1795 op0,
1796 gen_rtx_LSHIFTRT (DImode,
1797 temp,
1798 GEN_INT (-shift))));
1799 return;
1802 /* Now a range of 22 or less bits set somewhere.
1803 * 1) sethi %hi(focus_bits), %reg
1804 * sllx %reg, shift, %reg
1805 * 2) sethi %hi(focus_bits), %reg
1806 * srlx %reg, shift, %reg
1808 if ((highest_bit_set - lowest_bit_set) < 21)
1810 unsigned HOST_WIDE_INT focus_bits =
1811 create_simple_focus_bits (high_bits, low_bits,
1812 lowest_bit_set, 10);
1814 gcc_assert (SPARC_SETHI_P (focus_bits));
1815 gcc_assert (lowest_bit_set != 10);
1817 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1819 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1820 if (lowest_bit_set < 10)
1821 emit_insn (gen_rtx_SET (VOIDmode,
1822 op0,
1823 gen_rtx_LSHIFTRT (DImode, temp,
1824 GEN_INT (10 - lowest_bit_set))));
1825 else if (lowest_bit_set > 10)
1826 emit_insn (gen_rtx_SET (VOIDmode,
1827 op0,
1828 gen_rtx_ASHIFT (DImode, temp,
1829 GEN_INT (lowest_bit_set - 10))));
1830 return;
1833 /* 1) sethi %hi(low_bits), %reg
1834 * or %reg, %lo(low_bits), %reg
1835 * 2) sethi %hi(~low_bits), %reg
1836 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1838 if (high_bits == 0
1839 || high_bits == 0xffffffff)
1841 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1842 (high_bits == 0xffffffff));
1843 return;
1846 /* Now, try 3-insn sequences. */
1848 /* 1) sethi %hi(high_bits), %reg
1849 * or %reg, %lo(high_bits), %reg
1850 * sllx %reg, 32, %reg
1852 if (low_bits == 0)
1854 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1855 return;
1858 /* We may be able to do something quick
1859 when the constant is negated, so try that. */
1860 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1861 (~low_bits) & 0xfffffc00))
1863 /* NOTE: The trailing bits get XOR'd so we need the
1864 non-negated bits, not the negated ones. */
1865 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1867 if ((((~high_bits) & 0xffffffff) == 0
1868 && ((~low_bits) & 0x80000000) == 0)
1869 || (((~high_bits) & 0xffffffff) == 0xffffffff
1870 && ((~low_bits) & 0x80000000) != 0))
1872 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1874 if ((SPARC_SETHI_P (fast_int)
1875 && (~high_bits & 0xffffffff) == 0)
1876 || SPARC_SIMM13_P (fast_int))
1877 emit_insn (gen_safe_SET64 (temp, fast_int));
1878 else
1879 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1881 else
1883 rtx negated_const;
1884 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1885 (((HOST_WIDE_INT) ((~high_bits) & 0xffffffff)) << 32));
1886 sparc_emit_set_const64 (temp, negated_const);
1889 /* If we are XOR'ing with -1, then we should emit a one's complement
1890 instead. This way the combiner will notice logical operations
1891 such as ANDN later on and substitute. */
1892 if (trailing_bits == 0x3ff)
1894 emit_insn (gen_rtx_SET (VOIDmode, op0,
1895 gen_rtx_NOT (DImode, temp)));
1897 else
1899 emit_insn (gen_rtx_SET (VOIDmode,
1900 op0,
1901 gen_safe_XOR64 (temp,
1902 (-0x400 | trailing_bits))));
1904 return;
1907 /* 1) sethi %hi(xxx), %reg
1908 * or %reg, %lo(xxx), %reg
1909 * sllx %reg, yyy, %reg
1911 * ??? This is just a generalized version of the low_bits==0
1912 * thing above, FIXME...
1914 if ((highest_bit_set - lowest_bit_set) < 32)
1916 unsigned HOST_WIDE_INT focus_bits =
1917 create_simple_focus_bits (high_bits, low_bits,
1918 lowest_bit_set, 0);
1920 /* We can't get here in this state. */
1921 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1923 /* So what we know is that the set bits straddle the
1924 middle of the 64-bit word. */
1925 sparc_emit_set_const64_quick2 (op0, temp,
1926 focus_bits, 0,
1927 lowest_bit_set);
1928 return;
1931 /* 1) sethi %hi(high_bits), %reg
1932 * or %reg, %lo(high_bits), %reg
1933 * sllx %reg, 32, %reg
1934 * or %reg, low_bits, %reg
1936 if (SPARC_SIMM13_P (low_bits)
1937 && ((int) low_bits > 0))
1939 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1940 return;
1943 /* The easiest way, when all else fails, is full decomposition. */
1944 #if 0
1945 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1946 high_bits, low_bits, ~high_bits, ~low_bits);
1947 #endif
1948 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1950 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
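/* Worked illustration of the 2-insn paths above (shown only as a comment,
   not compiled): the constant 0xffffffff00000000 has highest_bit_set == 63
   with all bits down to bit 32 set, so it is synthesized as
	mov	-1, %reg
	sllx	%reg, 32, %reg
   while 0x7ff0000000000000 (a span of 11 bits starting at bit 52) takes the
   create_simple_focus_bits route and becomes
	mov	0x7ff, %reg
	sllx	%reg, 52, %reg  */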
1952 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1953 return the mode to be used for the comparison. For floating-point,
1954 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1955 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1956 processing is needed. */
1958 enum machine_mode
1959 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1961 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1963 switch (op)
1965 case EQ:
1966 case NE:
1967 case UNORDERED:
1968 case ORDERED:
1969 case UNLT:
1970 case UNLE:
1971 case UNGT:
1972 case UNGE:
1973 case UNEQ:
1974 case LTGT:
1975 return CCFPmode;
1977 case LT:
1978 case LE:
1979 case GT:
1980 case GE:
1981 return CCFPEmode;
1983 default:
1984 gcc_unreachable ();
1987 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1988 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1990 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1991 return CCX_NOOVmode;
1992 else
1993 return CC_NOOVmode;
1995 else
1997 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1998 return CCXmode;
1999 else
2000 return CCmode;
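/* For instance (illustrative only): comparing the SImode result of a PLUS
   against zero selects CC_NOOVmode, which tells the backend that the
   condition codes come from an addcc-style instruction whose overflow bit
   must not be relied upon; a plain register comparison gets CCmode, or
   CCXmode for DImode on TARGET_ARCH64.  */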
2004 /* X and Y are two things to compare using CODE. Emit the compare insn and
2005 return the rtx for the cc reg in the proper mode. */
2008 gen_compare_reg (enum rtx_code code)
2010 rtx x = sparc_compare_op0;
2011 rtx y = sparc_compare_op1;
2012 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2013 rtx cc_reg;
2015 if (sparc_compare_emitted != NULL_RTX)
2017 cc_reg = sparc_compare_emitted;
2018 sparc_compare_emitted = NULL_RTX;
2019 return cc_reg;
2022 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2023 fcc regs (cse can't tell they're really call clobbered regs and will
2024 remove a duplicate comparison even if there is an intervening function
2025 call - it will then try to reload the cc reg via an int reg which is why
2026 we need the movcc patterns). It is possible to provide the movcc
2027 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2028 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2029 to tell cse that CCFPE mode registers (even pseudos) are call
2030 clobbered. */
2032 /* ??? This is an experiment. Rather than making changes to cse which may
2033 or may not be easy/clean, we do our own cse. This is possible because
2034 we will generate hard registers. Cse knows they're call clobbered (it
2035 doesn't know the same thing about pseudos). If we guess wrong, no big
2036 deal, but if we win, great! */
2038 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2039 #if 1 /* experiment */
2041 int reg;
2042 /* We cycle through the registers to ensure they're all exercised. */
2043 static int next_fcc_reg = 0;
2044 /* Previous x,y for each fcc reg. */
2045 static rtx prev_args[4][2];
2047 /* Scan prev_args for x,y. */
2048 for (reg = 0; reg < 4; reg++)
2049 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2050 break;
2051 if (reg == 4)
2053 reg = next_fcc_reg;
2054 prev_args[reg][0] = x;
2055 prev_args[reg][1] = y;
2056 next_fcc_reg = (next_fcc_reg + 1) & 3;
2058 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2060 #else
2061 cc_reg = gen_reg_rtx (mode);
2062 #endif /* ! experiment */
2063 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2064 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2065 else
2066 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2068 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2069 gen_rtx_COMPARE (mode, x, y)));
2071 return cc_reg;
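/* Illustration of the home-grown cse above: under TARGET_V9, repeating the
   same float comparison of x,y hits the prev_args cache and reuses the same
   %fcc register, while distinct operand pairs are rotated round-robin
   through %fcc0..%fcc3 by next_fcc_reg.  */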
2074 /* This function is used for v9 only.
2075 CODE is the code for an Scc's comparison.
2076 OPERANDS[0] is the target of the Scc insn.
2077 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2078 been generated yet).
2080 This function is needed to turn
2082 (set (reg:SI 110)
2083 (gt (reg:CCX 100 %icc)
2084 (const_int 0)))
2085 into
2086 (set (reg:SI 110)
2087 (gt:DI (reg:CCX 100 %icc)
2088 (const_int 0)))
2090 I.e., the instruction recognizer needs to see the mode of the comparison to
2091 find the right instruction. We could use "gt:DI" right in the
2092 define_expand, but leaving it out allows us to handle DI, SI, etc.
2094 We refer to the global sparc compare operands sparc_compare_op0 and
2095 sparc_compare_op1. */
2098 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2100 if (! TARGET_ARCH64
2101 && (GET_MODE (sparc_compare_op0) == DImode
2102 || GET_MODE (operands[0]) == DImode))
2103 return 0;
2105 /* Try to use the movrCC insns. */
2106 if (TARGET_ARCH64
2107 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2108 && sparc_compare_op1 == const0_rtx
2109 && v9_regcmp_p (compare_code))
2111 rtx op0 = sparc_compare_op0;
2112 rtx temp;
2114 /* Special case for op0 != 0. This can be done with one instruction if
2115 operands[0] == sparc_compare_op0. */
2117 if (compare_code == NE
2118 && GET_MODE (operands[0]) == DImode
2119 && rtx_equal_p (op0, operands[0]))
2121 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2122 gen_rtx_IF_THEN_ELSE (DImode,
2123 gen_rtx_fmt_ee (compare_code, DImode,
2124 op0, const0_rtx),
2125 const1_rtx,
2126 operands[0])));
2127 return 1;
2130 if (reg_overlap_mentioned_p (operands[0], op0))
2132 /* Handle the case where operands[0] == sparc_compare_op0.
2133 We "early clobber" the result. */
2134 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2135 emit_move_insn (op0, sparc_compare_op0);
2138 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2139 if (GET_MODE (op0) != DImode)
2141 temp = gen_reg_rtx (DImode);
2142 convert_move (temp, op0, 0);
2144 else
2145 temp = op0;
2146 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2147 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2148 gen_rtx_fmt_ee (compare_code, DImode,
2149 temp, const0_rtx),
2150 const1_rtx,
2151 operands[0])));
2152 return 1;
2154 else
2156 operands[1] = gen_compare_reg (compare_code);
2158 switch (GET_MODE (operands[1]))
2160 case CCmode :
2161 case CCXmode :
2162 case CCFPEmode :
2163 case CCFPmode :
2164 break;
2165 default :
2166 gcc_unreachable ();
2168 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2169 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2170 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2171 gen_rtx_fmt_ee (compare_code,
2172 GET_MODE (operands[1]),
2173 operands[1], const0_rtx),
2174 const1_rtx, operands[0])));
2175 return 1;
2179 /* Emit a conditional jump insn for the v9 architecture using comparison code
2180 CODE and jump target LABEL.
2181 This function exists to take advantage of the v9 brxx insns. */
2183 void
2184 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2186 gcc_assert (sparc_compare_emitted == NULL_RTX);
2187 emit_jump_insn (gen_rtx_SET (VOIDmode,
2188 pc_rtx,
2189 gen_rtx_IF_THEN_ELSE (VOIDmode,
2190 gen_rtx_fmt_ee (code, GET_MODE (op0),
2191 op0, const0_rtx),
2192 gen_rtx_LABEL_REF (VOIDmode, label),
2193 pc_rtx)));
2196 /* Generate a DFmode part of a hard TFmode register.
2197 REG is the TFmode hard register, LOW is 1 for the
2198 low 64 bits of the register and 0 otherwise.
2201 gen_df_reg (rtx reg, int low)
2203 int regno = REGNO (reg);
2205 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2206 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2207 return gen_rtx_REG (DFmode, regno);
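/* Example (assuming big-endian word order, as on SPARC): for a TFmode value
   living in %f0 (hard regno 32), gen_df_reg (reg, 0) yields the DFmode
   register at %f0 holding the high 64 bits, and gen_df_reg (reg, 1) the one
   at %f2 (regno 34) holding the low 64 bits; integer registers on
   TARGET_ARCH64 step by 1 instead of 2.  */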
2210 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2211 Unlike normal calls, TFmode operands are passed by reference. It is
2212 assumed that no more than 3 operands are required. */
2214 static void
2215 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2217 rtx ret_slot = NULL, arg[3], func_sym;
2218 int i;
2220 /* We only expect to be called for conversions, unary, and binary ops. */
2221 gcc_assert (nargs == 2 || nargs == 3);
2223 for (i = 0; i < nargs; ++i)
2225 rtx this_arg = operands[i];
2226 rtx this_slot;
2228 /* TFmode arguments and return values are passed by reference. */
2229 if (GET_MODE (this_arg) == TFmode)
2231 int force_stack_temp;
2233 force_stack_temp = 0;
2234 if (TARGET_BUGGY_QP_LIB && i == 0)
2235 force_stack_temp = 1;
2237 if (GET_CODE (this_arg) == MEM
2238 && ! force_stack_temp)
2239 this_arg = XEXP (this_arg, 0);
2240 else if (CONSTANT_P (this_arg)
2241 && ! force_stack_temp)
2243 this_slot = force_const_mem (TFmode, this_arg);
2244 this_arg = XEXP (this_slot, 0);
2246 else
2248 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2250 /* Operand 0 is the return value. We'll copy it out later. */
2251 if (i > 0)
2252 emit_move_insn (this_slot, this_arg);
2253 else
2254 ret_slot = this_slot;
2256 this_arg = XEXP (this_slot, 0);
2260 arg[i] = this_arg;
2263 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2265 if (GET_MODE (operands[0]) == TFmode)
2267 if (nargs == 2)
2268 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2269 arg[0], GET_MODE (arg[0]),
2270 arg[1], GET_MODE (arg[1]));
2271 else
2272 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2273 arg[0], GET_MODE (arg[0]),
2274 arg[1], GET_MODE (arg[1]),
2275 arg[2], GET_MODE (arg[2]));
2277 if (ret_slot)
2278 emit_move_insn (operands[0], ret_slot);
2280 else
2282 rtx ret;
2284 gcc_assert (nargs == 2);
2286 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2287 GET_MODE (operands[0]), 1,
2288 arg[1], GET_MODE (arg[1]));
2290 if (ret != operands[0])
2291 emit_move_insn (operands[0], ret);
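/* Sketch of the resulting call shape, stated here only for illustration:
   a binary operation such as _Qp_add behaves as if declared
	void _Qp_add (long double *result, const long double *a,
		      const long double *b);
   the address of operand 0's stack slot is passed as the first argument
   and the value is copied back out of ret_slot afterwards.  */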
2295 /* Expand soft-float TFmode calls to sparc abi routines. */
2297 static void
2298 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2300 const char *func;
2302 switch (code)
2304 case PLUS:
2305 func = "_Qp_add";
2306 break;
2307 case MINUS:
2308 func = "_Qp_sub";
2309 break;
2310 case MULT:
2311 func = "_Qp_mul";
2312 break;
2313 case DIV:
2314 func = "_Qp_div";
2315 break;
2316 default:
2317 gcc_unreachable ();
2320 emit_soft_tfmode_libcall (func, 3, operands);
2323 static void
2324 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2326 const char *func;
2328 gcc_assert (code == SQRT);
2329 func = "_Qp_sqrt";
2331 emit_soft_tfmode_libcall (func, 2, operands);
2334 static void
2335 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2337 const char *func;
2339 switch (code)
2341 case FLOAT_EXTEND:
2342 switch (GET_MODE (operands[1]))
2344 case SFmode:
2345 func = "_Qp_stoq";
2346 break;
2347 case DFmode:
2348 func = "_Qp_dtoq";
2349 break;
2350 default:
2351 gcc_unreachable ();
2353 break;
2355 case FLOAT_TRUNCATE:
2356 switch (GET_MODE (operands[0]))
2358 case SFmode:
2359 func = "_Qp_qtos";
2360 break;
2361 case DFmode:
2362 func = "_Qp_qtod";
2363 break;
2364 default:
2365 gcc_unreachable ();
2367 break;
2369 case FLOAT:
2370 switch (GET_MODE (operands[1]))
2372 case SImode:
2373 func = "_Qp_itoq";
2374 if (TARGET_ARCH64)
2375 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2376 break;
2377 case DImode:
2378 func = "_Qp_xtoq";
2379 break;
2380 default:
2381 gcc_unreachable ();
2383 break;
2385 case UNSIGNED_FLOAT:
2386 switch (GET_MODE (operands[1]))
2388 case SImode:
2389 func = "_Qp_uitoq";
2390 if (TARGET_ARCH64)
2391 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2392 break;
2393 case DImode:
2394 func = "_Qp_uxtoq";
2395 break;
2396 default:
2397 gcc_unreachable ();
2399 break;
2401 case FIX:
2402 switch (GET_MODE (operands[0]))
2404 case SImode:
2405 func = "_Qp_qtoi";
2406 break;
2407 case DImode:
2408 func = "_Qp_qtox";
2409 break;
2410 default:
2411 gcc_unreachable ();
2413 break;
2415 case UNSIGNED_FIX:
2416 switch (GET_MODE (operands[0]))
2418 case SImode:
2419 func = "_Qp_qtoui";
2420 break;
2421 case DImode:
2422 func = "_Qp_qtoux";
2423 break;
2424 default:
2425 gcc_unreachable ();
2427 break;
2429 default:
2430 gcc_unreachable ();
2433 emit_soft_tfmode_libcall (func, 2, operands);
2436 /* Expand a hard-float tfmode operation. All arguments must be in
2437 registers. */
2439 static void
2440 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2442 rtx op, dest;
2444 if (GET_RTX_CLASS (code) == RTX_UNARY)
2446 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2447 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2449 else
2451 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2452 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2453 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2454 operands[1], operands[2]);
2457 if (register_operand (operands[0], VOIDmode))
2458 dest = operands[0];
2459 else
2460 dest = gen_reg_rtx (GET_MODE (operands[0]));
2462 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2464 if (dest != operands[0])
2465 emit_move_insn (operands[0], dest);
2468 void
2469 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2471 if (TARGET_HARD_QUAD)
2472 emit_hard_tfmode_operation (code, operands);
2473 else
2474 emit_soft_tfmode_binop (code, operands);
2477 void
2478 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2480 if (TARGET_HARD_QUAD)
2481 emit_hard_tfmode_operation (code, operands);
2482 else
2483 emit_soft_tfmode_unop (code, operands);
2486 void
2487 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2489 if (TARGET_HARD_QUAD)
2490 emit_hard_tfmode_operation (code, operands);
2491 else
2492 emit_soft_tfmode_cvt (code, operands);
2495 /* Return nonzero if a branch/jump/call instruction will be emitting
2496 a nop into its delay slot. */
2499 empty_delay_slot (rtx insn)
2501 rtx seq;
2503 /* If no previous instruction (should not happen), return true. */
2504 if (PREV_INSN (insn) == NULL)
2505 return 1;
2507 seq = NEXT_INSN (PREV_INSN (insn));
2508 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2509 return 0;
2511 return 1;
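/* Note, for illustration: once the delay-slot scheduler has filled a slot,
   the branch and its slot insn are wrapped together in a single SEQUENCE
   pattern, which is exactly what the NEXT_INSN (PREV_INSN (insn)) test
   above detects.  */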
2514 /* Return nonzero if TRIAL can go into the call delay slot. */
2517 tls_call_delay (rtx trial)
2519 rtx pat;
2521 /* Binutils allows
2522 call __tls_get_addr, %tgd_call (foo)
2523 add %l7, %o0, %o0, %tgd_add (foo)
2524 while Sun as/ld does not. */
2525 if (TARGET_GNU_TLS || !TARGET_TLS)
2526 return 1;
2528 pat = PATTERN (trial);
2530 /* We must reject tgd_add{32|64}, i.e.
2531 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2532 and tldm_add{32|64}, i.e.
2533 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2534 for Sun as/ld. */
2535 if (GET_CODE (pat) == SET
2536 && GET_CODE (SET_SRC (pat)) == PLUS)
2538 rtx unspec = XEXP (SET_SRC (pat), 1);
2540 if (GET_CODE (unspec) == UNSPEC
2541 && (XINT (unspec, 1) == UNSPEC_TLSGD
2542 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2543 return 0;
2546 return 1;
2549 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2550 instruction. RETURN_P is true if the v9 variant 'return' is to be
2551 considered in the test too.
2553 TRIAL must be a SET whose destination is a REG appropriate for the
2554 'restore' instruction or, if RETURN_P is true, for the 'return'
2555 instruction. */
2557 static int
2558 eligible_for_restore_insn (rtx trial, bool return_p)
2560 rtx pat = PATTERN (trial);
2561 rtx src = SET_SRC (pat);
2563 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2564 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2565 && arith_operand (src, GET_MODE (src)))
2567 if (TARGET_ARCH64)
2568 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2569 else
2570 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2573 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2574 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2575 && arith_double_operand (src, GET_MODE (src)))
2576 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2578 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2579 else if (! TARGET_FPU && register_operand (src, SFmode))
2580 return 1;
2582 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2583 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2584 return 1;
2586 /* If we have the 'return' instruction, anything that does not use
2587 local or output registers and can go into a delay slot wins. */
2588 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2589 && (get_attr_in_uncond_branch_delay (trial)
2590 == IN_UNCOND_BRANCH_DELAY_TRUE))
2591 return 1;
2593 /* The 'restore src1,src2,dest' pattern for SImode. */
2594 else if (GET_CODE (src) == PLUS
2595 && register_operand (XEXP (src, 0), SImode)
2596 && arith_operand (XEXP (src, 1), SImode))
2597 return 1;
2599 /* The 'restore src1,src2,dest' pattern for DImode. */
2600 else if (GET_CODE (src) == PLUS
2601 && register_operand (XEXP (src, 0), DImode)
2602 && arith_double_operand (XEXP (src, 1), DImode))
2603 return 1;
2605 /* The 'restore src1,%lo(src2),dest' pattern. */
2606 else if (GET_CODE (src) == LO_SUM
2607 && ! TARGET_CM_MEDMID
2608 && ((register_operand (XEXP (src, 0), SImode)
2609 && immediate_operand (XEXP (src, 1), SImode))
2610 || (TARGET_ARCH64
2611 && register_operand (XEXP (src, 0), DImode)
2612 && immediate_operand (XEXP (src, 1), DImode))))
2613 return 1;
2615 /* The 'restore src,src,dest' pattern. */
2616 else if (GET_CODE (src) == ASHIFT
2617 && (register_operand (XEXP (src, 0), SImode)
2618 || register_operand (XEXP (src, 0), DImode))
2619 && XEXP (src, 1) == const1_rtx)
2620 return 1;
2622 return 0;
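/* Example of the final ASHIFT case (illustrative): an insn of the form
   (set (reg:SI %i0) (ashift:SI (reg:SI %o1) (const_int 1))) can be merged
   into 'restore %o1, %o1, %i0', since a left shift by 1 is src + src.  */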
2625 /* Return nonzero if TRIAL can go into the function return's
2626 delay slot. */
2629 eligible_for_return_delay (rtx trial)
2631 rtx pat;
2633 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2634 return 0;
2636 if (get_attr_length (trial) != 1)
2637 return 0;
2639 /* If there are any call-saved registers, we should scan TRIAL to check
2640 that it does not reference them. For now just make it easy. */
2641 if (num_gfregs)
2642 return 0;
2644 /* If the function uses __builtin_eh_return, the eh_return machinery
2645 occupies the delay slot. */
2646 if (crtl->calls_eh_return)
2647 return 0;
2649 /* In the case of a true leaf function, anything can go into the slot. */
2650 if (sparc_leaf_function_p)
2651 return get_attr_in_uncond_branch_delay (trial)
2652 == IN_UNCOND_BRANCH_DELAY_TRUE;
2654 pat = PATTERN (trial);
2656 /* Otherwise, only operations which can be done in tandem with
2657 a `restore' or `return' insn can go into the delay slot. */
2658 if (GET_CODE (SET_DEST (pat)) != REG
2659 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2660 return 0;
2662 /* If this instruction sets up a floating-point register and we have a return
2663 instruction, it can probably go in. But restore will not work
2664 with FP_REGS. */
2665 if (REGNO (SET_DEST (pat)) >= 32)
2666 return (TARGET_V9
2667 && ! epilogue_renumber (&pat, 1)
2668 && (get_attr_in_uncond_branch_delay (trial)
2669 == IN_UNCOND_BRANCH_DELAY_TRUE));
2671 return eligible_for_restore_insn (trial, true);
2674 /* Return nonzero if TRIAL can go into the sibling call's
2675 delay slot. */
2678 eligible_for_sibcall_delay (rtx trial)
2680 rtx pat;
2682 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2683 return 0;
2685 if (get_attr_length (trial) != 1)
2686 return 0;
2688 pat = PATTERN (trial);
2690 if (sparc_leaf_function_p)
2692 /* If the tail call is done using the call instruction,
2693 we have to restore %o7 in the delay slot. */
2694 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2695 return 0;
2697 /* %g1 is used to build the function address. */
2698 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2699 return 0;
2701 return 1;
2704 /* Otherwise, only operations which can be done in tandem with
2705 a `restore' insn can go into the delay slot. */
2706 if (GET_CODE (SET_DEST (pat)) != REG
2707 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2708 || REGNO (SET_DEST (pat)) >= 32)
2709 return 0;
2711 /* If it mentions %o7, it can't go in, because the sibcall will clobber it
2712 in most cases. */
2713 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2714 return 0;
2716 return eligible_for_restore_insn (trial, false);
2720 short_branch (int uid1, int uid2)
2722 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2724 /* Leave a few words of "slop". */
2725 if (delta >= -1023 && delta <= 1022)
2726 return 1;
2728 return 0;
2731 /* Return nonzero if REG is not used after INSN.
2732 We assume REG is a reload reg, and therefore does
2733 not live past labels or calls or jumps. */
2735 reg_unused_after (rtx reg, rtx insn)
2737 enum rtx_code code, prev_code = UNKNOWN;
2739 while ((insn = NEXT_INSN (insn)))
2741 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2742 return 1;
2744 code = GET_CODE (insn);
2745 if (GET_CODE (insn) == CODE_LABEL)
2746 return 1;
2748 if (INSN_P (insn))
2750 rtx set = single_set (insn);
2751 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2752 if (set && in_src)
2753 return 0;
2754 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2755 return 1;
2756 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2757 return 0;
2759 prev_code = code;
2761 return 1;
2764 /* Determine if it's legal to put X into the constant pool. This
2765 is not possible if X contains the address of a symbol that is
2766 not constant (TLS) or not known at final link time (PIC). */
2768 static bool
2769 sparc_cannot_force_const_mem (rtx x)
2771 switch (GET_CODE (x))
2773 case CONST_INT:
2774 case CONST_DOUBLE:
2775 case CONST_VECTOR:
2776 /* Accept all non-symbolic constants. */
2777 return false;
2779 case LABEL_REF:
2780 /* Labels are OK iff we are non-PIC. */
2781 return flag_pic != 0;
2783 case SYMBOL_REF:
2784 /* 'Naked' TLS symbol references are never OK,
2785 non-TLS symbols are OK iff we are non-PIC. */
2786 if (SYMBOL_REF_TLS_MODEL (x))
2787 return true;
2788 else
2789 return flag_pic != 0;
2791 case CONST:
2792 return sparc_cannot_force_const_mem (XEXP (x, 0));
2793 case PLUS:
2794 case MINUS:
2795 return sparc_cannot_force_const_mem (XEXP (x, 0))
2796 || sparc_cannot_force_const_mem (XEXP (x, 1));
2797 case UNSPEC:
2798 return true;
2799 default:
2800 gcc_unreachable ();
2804 /* PIC support. */
2805 static GTY(()) char pic_helper_symbol_name[256];
2806 static GTY(()) rtx pic_helper_symbol;
2807 static GTY(()) bool pic_helper_emitted_p = false;
2808 static GTY(()) rtx global_offset_table;
2810 /* Ensure that we are not using patterns that are not OK with PIC. */
2813 check_pic (int i)
2815 switch (flag_pic)
2817 case 1:
2818 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2819 && (GET_CODE (recog_data.operand[i]) != CONST
2820 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2821 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2822 == global_offset_table)
2823 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2824 == CONST))));
2825 case 2:
2826 default:
2827 return 1;
2831 /* Return true if X is an address which needs a temporary register when
2832 reloaded while generating PIC code. */
2835 pic_address_needs_scratch (rtx x)
2837 /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
2838 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2839 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2840 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2841 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2842 return 1;
2844 return 0;
2847 /* Determine if a given RTX is a valid constant. We already know this
2848 satisfies CONSTANT_P. */
2850 bool
2851 legitimate_constant_p (rtx x)
2853 rtx inner;
2855 switch (GET_CODE (x))
2857 case SYMBOL_REF:
2858 /* TLS symbols are not constant. */
2859 if (SYMBOL_REF_TLS_MODEL (x))
2860 return false;
2861 break;
2863 case CONST:
2864 inner = XEXP (x, 0);
2866 /* Offsets of TLS symbols are never valid.
2867 Discourage CSE from creating them. */
2868 if (GET_CODE (inner) == PLUS
2869 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2870 return false;
2871 break;
2873 case CONST_DOUBLE:
2874 if (GET_MODE (x) == VOIDmode)
2875 return true;
2877 /* Floating point constants are generally not ok.
2878 The only exception is 0.0 in VIS. */
2879 if (TARGET_VIS
2880 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2881 && const_zero_operand (x, GET_MODE (x)))
2882 return true;
2884 return false;
2886 case CONST_VECTOR:
2887 /* Vector constants are generally not ok.
2888 The only exception is 0 in VIS. */
2889 if (TARGET_VIS
2890 && const_zero_operand (x, GET_MODE (x)))
2891 return true;
2893 return false;
2895 default:
2896 break;
2899 return true;
2902 /* Determine if a given RTX is a valid constant address. */
2904 bool
2905 constant_address_p (rtx x)
2907 switch (GET_CODE (x))
2909 case LABEL_REF:
2910 case CONST_INT:
2911 case HIGH:
2912 return true;
2914 case CONST:
2915 if (flag_pic && pic_address_needs_scratch (x))
2916 return false;
2917 return legitimate_constant_p (x);
2919 case SYMBOL_REF:
2920 return !flag_pic && legitimate_constant_p (x);
2922 default:
2923 return false;
2927 /* Nonzero if the constant value X is a legitimate general operand
2928 when generating PIC code. It is given that flag_pic is on and
2929 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2931 bool
2932 legitimate_pic_operand_p (rtx x)
2934 if (pic_address_needs_scratch (x))
2935 return false;
2936 if (SPARC_SYMBOL_REF_TLS_P (x)
2937 || (GET_CODE (x) == CONST
2938 && GET_CODE (XEXP (x, 0)) == PLUS
2939 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2940 return false;
2941 return true;
2944 /* Return nonzero if ADDR is a valid memory address.
2945 STRICT specifies whether strict register checking applies. */
2948 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2950 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2952 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2953 rs1 = addr;
2954 else if (GET_CODE (addr) == PLUS)
2956 rs1 = XEXP (addr, 0);
2957 rs2 = XEXP (addr, 1);
2959 /* Canonicalize. REG comes first; if there are no regs,
2960 LO_SUM comes first. */
2961 if (!REG_P (rs1)
2962 && GET_CODE (rs1) != SUBREG
2963 && (REG_P (rs2)
2964 || GET_CODE (rs2) == SUBREG
2965 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2967 rs1 = XEXP (addr, 1);
2968 rs2 = XEXP (addr, 0);
2971 if ((flag_pic == 1
2972 && rs1 == pic_offset_table_rtx
2973 && !REG_P (rs2)
2974 && GET_CODE (rs2) != SUBREG
2975 && GET_CODE (rs2) != LO_SUM
2976 && GET_CODE (rs2) != MEM
2977 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2978 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2979 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2980 || ((REG_P (rs1)
2981 || GET_CODE (rs1) == SUBREG)
2982 && RTX_OK_FOR_OFFSET_P (rs2)))
2984 imm1 = rs2;
2985 rs2 = NULL;
2987 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2988 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2990 /* We prohibit REG + REG for TFmode when there are no quad move insns
2991 and we consequently need to split. We do this because REG+REG
2992 is not an offsettable address. If we get the situation in reload
2993 where source and destination of a movtf pattern are both MEMs with
2994 REG+REG address, then only one of them gets converted to an
2995 offsettable address. */
2996 if (mode == TFmode
2997 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2998 return 0;
3000 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not optimizing,
3001 because mem_min_alignment is then likely to be zero
3002 after reload and the forced split would lack a matching splitter
3003 pattern. */
3004 if (TARGET_ARCH32 && !optimize
3005 && (mode == DFmode || mode == DImode))
3006 return 0;
3008 else if (USE_AS_OFFSETABLE_LO10
3009 && GET_CODE (rs1) == LO_SUM
3010 && TARGET_ARCH64
3011 && ! TARGET_CM_MEDMID
3012 && RTX_OK_FOR_OLO10_P (rs2))
3014 rs2 = NULL;
3015 imm1 = XEXP (rs1, 1);
3016 rs1 = XEXP (rs1, 0);
3017 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3018 return 0;
3021 else if (GET_CODE (addr) == LO_SUM)
3023 rs1 = XEXP (addr, 0);
3024 imm1 = XEXP (addr, 1);
3026 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
3027 return 0;
3029 /* We can't allow TFmode in 32-bit mode, because an offset greater
3030 than the alignment (8) may cause the LO_SUM to overflow. */
3031 if (mode == TFmode && TARGET_ARCH32)
3032 return 0;
3034 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3035 return 1;
3036 else
3037 return 0;
3039 if (GET_CODE (rs1) == SUBREG)
3040 rs1 = SUBREG_REG (rs1);
3041 if (!REG_P (rs1))
3042 return 0;
3044 if (rs2)
3046 if (GET_CODE (rs2) == SUBREG)
3047 rs2 = SUBREG_REG (rs2);
3048 if (!REG_P (rs2))
3049 return 0;
3052 if (strict)
3054 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3055 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3056 return 0;
3058 else
3060 if ((REGNO (rs1) >= 32
3061 && REGNO (rs1) != FRAME_POINTER_REGNUM
3062 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3063 || (rs2
3064 && (REGNO (rs2) >= 32
3065 && REGNO (rs2) != FRAME_POINTER_REGNUM
3066 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3067 return 0;
3069 return 1;
3072 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3074 static GTY(()) rtx sparc_tls_symbol;
3076 static rtx
3077 sparc_tls_get_addr (void)
3079 if (!sparc_tls_symbol)
3080 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3082 return sparc_tls_symbol;
3085 static rtx
3086 sparc_tls_got (void)
3088 rtx temp;
3089 if (flag_pic)
3091 crtl->uses_pic_offset_table = 1;
3092 return pic_offset_table_rtx;
3095 if (!global_offset_table)
3096 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3097 temp = gen_reg_rtx (Pmode);
3098 emit_move_insn (temp, global_offset_table);
3099 return temp;
3102 /* Return 1 if *X is a thread-local symbol. */
3104 static int
3105 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3107 return SPARC_SYMBOL_REF_TLS_P (*x);
3110 /* Return 1 if X contains a thread-local symbol. */
3112 bool
3113 sparc_tls_referenced_p (rtx x)
3115 if (!TARGET_HAVE_TLS)
3116 return false;
3118 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3121 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3122 this (thread-local) address. */
3125 legitimize_tls_address (rtx addr)
3127 rtx temp1, temp2, temp3, ret, o0, got, insn;
3129 gcc_assert (can_create_pseudo_p ());
3131 if (GET_CODE (addr) == SYMBOL_REF)
3132 switch (SYMBOL_REF_TLS_MODEL (addr))
3134 case TLS_MODEL_GLOBAL_DYNAMIC:
3135 start_sequence ();
3136 temp1 = gen_reg_rtx (SImode);
3137 temp2 = gen_reg_rtx (SImode);
3138 ret = gen_reg_rtx (Pmode);
3139 o0 = gen_rtx_REG (Pmode, 8);
3140 got = sparc_tls_got ();
3141 emit_insn (gen_tgd_hi22 (temp1, addr));
3142 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3143 if (TARGET_ARCH32)
3145 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3146 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3147 addr, const1_rtx));
3149 else
3151 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3152 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3153 addr, const1_rtx));
3155 CALL_INSN_FUNCTION_USAGE (insn)
3156 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3157 CALL_INSN_FUNCTION_USAGE (insn));
3158 insn = get_insns ();
3159 end_sequence ();
3160 emit_libcall_block (insn, ret, o0, addr);
3161 break;
3163 case TLS_MODEL_LOCAL_DYNAMIC:
3164 start_sequence ();
3165 temp1 = gen_reg_rtx (SImode);
3166 temp2 = gen_reg_rtx (SImode);
3167 temp3 = gen_reg_rtx (Pmode);
3168 ret = gen_reg_rtx (Pmode);
3169 o0 = gen_rtx_REG (Pmode, 8);
3170 got = sparc_tls_got ();
3171 emit_insn (gen_tldm_hi22 (temp1));
3172 emit_insn (gen_tldm_lo10 (temp2, temp1));
3173 if (TARGET_ARCH32)
3175 emit_insn (gen_tldm_add32 (o0, got, temp2));
3176 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3177 const1_rtx));
3179 else
3181 emit_insn (gen_tldm_add64 (o0, got, temp2));
3182 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3183 const1_rtx));
3185 CALL_INSN_FUNCTION_USAGE (insn)
3186 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3187 CALL_INSN_FUNCTION_USAGE (insn));
3188 insn = get_insns ();
3189 end_sequence ();
3190 emit_libcall_block (insn, temp3, o0,
3191 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3192 UNSPEC_TLSLD_BASE));
3193 temp1 = gen_reg_rtx (SImode);
3194 temp2 = gen_reg_rtx (SImode);
3195 emit_insn (gen_tldo_hix22 (temp1, addr));
3196 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3197 if (TARGET_ARCH32)
3198 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3199 else
3200 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3201 break;
3203 case TLS_MODEL_INITIAL_EXEC:
3204 temp1 = gen_reg_rtx (SImode);
3205 temp2 = gen_reg_rtx (SImode);
3206 temp3 = gen_reg_rtx (Pmode);
3207 got = sparc_tls_got ();
3208 emit_insn (gen_tie_hi22 (temp1, addr));
3209 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3210 if (TARGET_ARCH32)
3211 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3212 else
3213 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3214 if (TARGET_SUN_TLS)
3216 ret = gen_reg_rtx (Pmode);
3217 if (TARGET_ARCH32)
3218 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3219 temp3, addr));
3220 else
3221 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3222 temp3, addr));
3224 else
3225 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3226 break;
3228 case TLS_MODEL_LOCAL_EXEC:
3229 temp1 = gen_reg_rtx (Pmode);
3230 temp2 = gen_reg_rtx (Pmode);
3231 if (TARGET_ARCH32)
3233 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3234 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3236 else
3238 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3239 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3241 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3242 break;
3244 default:
3245 gcc_unreachable ();
3248 else
3249 gcc_unreachable (); /* for now ... */
3251 return ret;
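/* For reference, the global-dynamic sequence constructed above corresponds
   roughly to this 32-bit assembly (%t1/%t2 stand for the pseudos temp1 and
   temp2; shown only as an illustration):
	sethi	%tgd_hi22(sym), %t1
	add	%t1, %tgd_lo10(sym), %t2
	add	%l7, %t2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
   matching the tgd_hi22/tgd_lo10/tgd_add32/tgd_call32 patterns.  */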
3255 /* Legitimize PIC addresses. If the address is already position-independent,
3256 we return ORIG. Newly generated position-independent addresses go into a
3257 reg. This is REG if nonzero, otherwise we allocate register(s) as
3258 necessary. */
3261 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3262 rtx reg)
3264 if (GET_CODE (orig) == SYMBOL_REF
3265 /* See the comment in sparc_expand_move. */
3266 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3268 rtx pic_ref, address;
3269 rtx insn;
3271 if (reg == 0)
3273 gcc_assert (! reload_in_progress && ! reload_completed);
3274 reg = gen_reg_rtx (Pmode);
3277 if (flag_pic == 2)
3279 /* If not during reload, allocate another temp reg here for loading
3280 in the address, so that these instructions can be optimized
3281 properly. */
3282 rtx temp_reg = ((reload_in_progress || reload_completed)
3283 ? reg : gen_reg_rtx (Pmode));
3285 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3286 won't get confused into thinking that these two instructions
3287 are loading in the true address of the symbol. If in the
3288 future a PIC rtx exists, that should be used instead. */
3289 if (TARGET_ARCH64)
3291 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3292 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3294 else
3296 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3297 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3299 address = temp_reg;
3301 else
3302 address = orig;
3304 pic_ref = gen_const_mem (Pmode,
3305 gen_rtx_PLUS (Pmode,
3306 pic_offset_table_rtx, address));
3307 crtl->uses_pic_offset_table = 1;
3308 insn = emit_move_insn (reg, pic_ref);
3309 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3310 by the loop optimizer. */
3311 set_unique_reg_note (insn, REG_EQUAL, orig);
3312 return reg;
3314 else if (GET_CODE (orig) == CONST)
3316 rtx base, offset;
3318 if (GET_CODE (XEXP (orig, 0)) == PLUS
3319 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3320 return orig;
3322 if (reg == 0)
3324 gcc_assert (! reload_in_progress && ! reload_completed);
3325 reg = gen_reg_rtx (Pmode);
3328 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3329 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3330 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3331 base == reg ? 0 : reg);
3333 if (GET_CODE (offset) == CONST_INT)
3335 if (SMALL_INT (offset))
3336 return plus_constant (base, INTVAL (offset));
3337 else if (! reload_in_progress && ! reload_completed)
3338 offset = force_reg (Pmode, offset);
3339 else
3340 /* If we reach here, then something is seriously wrong. */
3341 gcc_unreachable ();
3343 return gen_rtx_PLUS (Pmode, base, offset);
3345 else if (GET_CODE (orig) == LABEL_REF)
3346 /* ??? Why do we do this? */
3347 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3348 the register is live instead, in case it is eliminated. */
3349 crtl->uses_pic_offset_table = 1;
3351 return orig;
3354 /* Try machine-dependent ways of modifying an illegitimate address X
3355 to be legitimate. If we find one, return the new, valid address.
3357 OLDX is the address as it was before break_out_memory_refs was called.
3358 In some cases it is useful to look at this to decide what needs to be done.
3360 MODE is the mode of the operand pointed to by X. */
3363 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3365 rtx orig_x = x;
3367 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3368 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3369 force_operand (XEXP (x, 0), NULL_RTX));
3370 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3371 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3372 force_operand (XEXP (x, 1), NULL_RTX));
3373 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3374 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3375 XEXP (x, 1));
3376 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3377 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3378 force_operand (XEXP (x, 1), NULL_RTX));
3380 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3381 return x;
3383 if (SPARC_SYMBOL_REF_TLS_P (x))
3384 x = legitimize_tls_address (x);
3385 else if (flag_pic)
3386 x = legitimize_pic_address (x, mode, 0);
3387 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3388 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3389 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3390 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3391 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3392 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3393 else if (GET_CODE (x) == SYMBOL_REF
3394 || GET_CODE (x) == CONST
3395 || GET_CODE (x) == LABEL_REF)
3396 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3397 return x;
3400 /* Emit the special PIC helper function. */
3402 static void
3403 emit_pic_helper (void)
3405 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3406 int align;
3408 switch_to_section (text_section);
3410 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3411 if (align > 0)
3412 ASM_OUTPUT_ALIGN (asm_out_file, align);
3413 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3414 if (flag_delayed_branch)
3415 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3416 pic_name, pic_name);
3417 else
3418 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3419 pic_name, pic_name);
3421 pic_helper_emitted_p = true;
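/* With flag_delayed_branch, the helper emitted above therefore reads
	<pic_helper_symbol_name>:
		jmp	%o7+8
		 add	%o7, %l7, %l7
   taking pic_name to be %l7 (the PIC register): it adds the address of the
   call site, left in %o7 by the caller, to the GOT displacement already
   loaded into %l7.  */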
3424 /* Emit code to load the PIC register. */
3426 static void
3427 load_pic_register (bool delay_pic_helper)
3429 int orig_flag_pic = flag_pic;
3431 if (TARGET_VXWORKS_RTP)
3433 emit_insn (gen_vxworks_load_got ());
3434 emit_use (pic_offset_table_rtx);
3435 return;
3438 /* If we haven't initialized the special PIC symbols, do so now. */
3439 if (!pic_helper_symbol_name[0])
3441 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3442 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3443 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3446 /* If we haven't emitted the special PIC helper function, do so now unless
3447 we are requested to delay it. */
3448 if (!delay_pic_helper && !pic_helper_emitted_p)
3449 emit_pic_helper ();
3451 flag_pic = 0;
3452 if (TARGET_ARCH64)
3453 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3454 pic_helper_symbol));
3455 else
3456 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3457 pic_helper_symbol));
3458 flag_pic = orig_flag_pic;
3460 /* Need to emit this whether or not we obey regdecls,
3461 since setjmp/longjmp can cause life info to screw up.
3462 ??? In the case where we don't obey regdecls, this is not sufficient
3463 since we may not fall out the bottom. */
3464 emit_use (pic_offset_table_rtx);
3467 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3468 address of the call target. */
3470 void
3471 sparc_emit_call_insn (rtx pat, rtx addr)
3473 rtx insn;
3475 insn = emit_call_insn (pat);
3477 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3478 if (TARGET_VXWORKS_RTP
3479 && flag_pic
3480 && GET_CODE (addr) == SYMBOL_REF
3481 && (SYMBOL_REF_DECL (addr)
3482 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3483 : !SYMBOL_REF_LOCAL_P (addr)))
3485 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3486 crtl->uses_pic_offset_table = 1;
3490 /* Return 1 if RTX is a MEM which is known to be aligned to at
3491 least a DESIRED byte boundary. */
3494 mem_min_alignment (rtx mem, int desired)
3496 rtx addr, base, offset;
3498 /* If it's not a MEM we can't accept it. */
3499 if (GET_CODE (mem) != MEM)
3500 return 0;
3502 /* Obviously... */
3503 if (!TARGET_UNALIGNED_DOUBLES
3504 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3505 return 1;
3507 /* ??? The rest of the function predates MEM_ALIGN so
3508 there is probably a bit of redundancy. */
3509 addr = XEXP (mem, 0);
3510 base = offset = NULL_RTX;
3511 if (GET_CODE (addr) == PLUS)
3513 if (GET_CODE (XEXP (addr, 0)) == REG)
3515 base = XEXP (addr, 0);
3517 /* What we are saying here is that if the base
3518 REG is aligned properly, the compiler will make
3519 sure any REG based index upon it will be so
3520 as well. */
3521 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3522 offset = XEXP (addr, 1);
3523 else
3524 offset = const0_rtx;
3527 else if (GET_CODE (addr) == REG)
3529 base = addr;
3530 offset = const0_rtx;
3533 if (base != NULL_RTX)
3535 int regno = REGNO (base);
3537 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3539 /* Check if the compiler has recorded some information
3540 about the alignment of the base REG. If reload has
3541 completed, we already matched with proper alignments.
3542 If not running global_alloc, reload might give us
3543 an unaligned pointer to the local stack, though. */
3544 if (((cfun != 0
3545 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3546 || (optimize && reload_completed))
3547 && (INTVAL (offset) & (desired - 1)) == 0)
3548 return 1;
3550 else
3552 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3553 return 1;
3556 else if (! TARGET_UNALIGNED_DOUBLES
3557 || CONSTANT_P (addr)
3558 || GET_CODE (addr) == LO_SUM)
3560 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3561 is true, in which case we can only assume that an access is aligned if
3562 it is to a constant address, or the address involves a LO_SUM. */
3563 return 1;
3566 /* An obviously unaligned address. */
3567 return 0;
3571 /* Vectors to keep interesting information about registers where it can easily
3572 be got. We used to use the actual mode value as the bit number, but there
3573 are more than 32 modes now. Instead we use two tables: one indexed by
3574 hard register number, and one indexed by mode. */
3576 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3577 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3578 mapped into one sparc_mode_class mode. */
3580 enum sparc_mode_class {
3581 S_MODE, D_MODE, T_MODE, O_MODE,
3582 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3583 CC_MODE, CCFP_MODE
3586 /* Modes for single-word and smaller quantities. */
3587 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3589 /* Modes for double-word and smaller quantities. */
3590 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3592 /* Modes for quad-word and smaller quantities. */
3593 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3595 /* Modes for 8-word and smaller quantities. */
3596 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3598 /* Modes for single-float quantities. We must allow any single word or
3599 smaller quantity. This is because the fix/float conversion instructions
3600 take integer inputs/outputs from the float registers. */
3601 #define SF_MODES (S_MODES)
3603 /* Modes for double-float and smaller quantities. */
3604 #define DF_MODES (S_MODES | D_MODES)
3606 /* Modes for double-float only quantities. */
3607 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3609 /* Modes for quad-float only quantities. */
3610 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3612 /* Modes for quad-float and smaller quantities. */
3613 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3615 /* Modes for quad-float and double-float quantities. */
3616 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3618 /* Modes for quad-float pair only quantities. */
3619 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3621 /* Modes for quad-float pairs and smaller quantities. */
3622 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3624 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3626 /* Modes for condition codes. */
3627 #define CC_MODES (1 << (int) CC_MODE)
3628 #define CCFP_MODES (1 << (int) CCFP_MODE)
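/* Worked example of the encoding (for illustration): D_MODES is the bit-OR
   of S_MODE, SF_MODE, D_MODE and DF_MODE, so a table entry of D_MODES below
   means the register can hold any integer or float quantity of at most
   double-word size.  */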
3630 /* Value is 1 if register/mode pair is acceptable on sparc.
3631 The funny mixture of D and T modes is because integer operations
3632 do not specially operate on tetra quantities, so non-quad-aligned
3633 registers can hold quadword quantities (except %o4 and %i4 because
3634 they cross fixed registers). */
3636 /* This points to either the 32 bit or the 64 bit version. */
3637 const int *hard_regno_mode_classes;
3639 static const int hard_32bit_mode_classes[] = {
3640 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3641 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3642 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3643 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3645 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3646 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3647 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3648 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3650 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3651 and none can hold SFmode/SImode values. */
3652 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3653 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3654 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3655 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3657 /* %fcc[0123] */
3658 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3660 /* %icc */
3661 CC_MODES
3664 static const int hard_64bit_mode_classes[] = {
3665 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3666 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3667 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3668 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3670 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3671 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3672 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3673 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3675 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3676 and none can hold SFmode/SImode values. */
3677 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3678 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3679 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3680 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3682 /* %fcc[0123] */
3683 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3685 /* %icc */
3686 CC_MODES
3689 int sparc_mode_class [NUM_MACHINE_MODES];
3691 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3693 static void
3694 sparc_init_modes (void)
3696 int i;
3698 for (i = 0; i < NUM_MACHINE_MODES; i++)
3700 switch (GET_MODE_CLASS (i))
3702 case MODE_INT:
3703 case MODE_PARTIAL_INT:
3704 case MODE_COMPLEX_INT:
3705 if (GET_MODE_SIZE (i) <= 4)
3706 sparc_mode_class[i] = 1 << (int) S_MODE;
3707 else if (GET_MODE_SIZE (i) == 8)
3708 sparc_mode_class[i] = 1 << (int) D_MODE;
3709 else if (GET_MODE_SIZE (i) == 16)
3710 sparc_mode_class[i] = 1 << (int) T_MODE;
3711 else if (GET_MODE_SIZE (i) == 32)
3712 sparc_mode_class[i] = 1 << (int) O_MODE;
3713 else
3714 sparc_mode_class[i] = 0;
3715 break;
3716 case MODE_VECTOR_INT:
3717 if (GET_MODE_SIZE (i) <= 4)
3718 sparc_mode_class[i] = 1 << (int) SF_MODE;
3719 else if (GET_MODE_SIZE (i) == 8)
3720 sparc_mode_class[i] = 1 << (int) DF_MODE;
3721 break;
3722 case MODE_FLOAT:
3723 case MODE_COMPLEX_FLOAT:
3724 if (GET_MODE_SIZE (i) <= 4)
3725 sparc_mode_class[i] = 1 << (int) SF_MODE;
3726 else if (GET_MODE_SIZE (i) == 8)
3727 sparc_mode_class[i] = 1 << (int) DF_MODE;
3728 else if (GET_MODE_SIZE (i) == 16)
3729 sparc_mode_class[i] = 1 << (int) TF_MODE;
3730 else if (GET_MODE_SIZE (i) == 32)
3731 sparc_mode_class[i] = 1 << (int) OF_MODE;
3732 else
3733 sparc_mode_class[i] = 0;
3734 break;
3735 case MODE_CC:
3736 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3737 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3738 else
3739 sparc_mode_class[i] = 1 << (int) CC_MODE;
3740 break;
3741 default:
3742 sparc_mode_class[i] = 0;
3743 break;
3747 if (TARGET_ARCH64)
3748 hard_regno_mode_classes = hard_64bit_mode_classes;
3749 else
3750 hard_regno_mode_classes = hard_32bit_mode_classes;
3752 /* Initialize the array used by REGNO_REG_CLASS. */
3753 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3755 if (i < 16 && TARGET_V8PLUS)
3756 sparc_regno_reg_class[i] = I64_REGS;
3757 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3758 sparc_regno_reg_class[i] = GENERAL_REGS;
3759 else if (i < 64)
3760 sparc_regno_reg_class[i] = FP_REGS;
3761 else if (i < 96)
3762 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3763 else if (i < 100)
3764 sparc_regno_reg_class[i] = FPCC_REGS;
3765 else
3766 sparc_regno_reg_class[i] = NO_REGS;
3770 /* Compute the frame size required by the function. This function is called
3771 during the reload pass and also by sparc_expand_prologue. */
3773 HOST_WIDE_INT
3774 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3776 int outgoing_args_size = (crtl->outgoing_args_size
3777 + REG_PARM_STACK_SPACE (current_function_decl));
3778 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3779 int i;
3781 if (TARGET_ARCH64)
3783 for (i = 0; i < 8; i++)
3784 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3785 n_regs += 2;
3787 else
3789 for (i = 0; i < 8; i += 2)
3790 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3791 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3792 n_regs += 2;
3795 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3796 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3797 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3798 n_regs += 2;
3800 /* Set up values for use in prologue and epilogue. */
3801 num_gfregs = n_regs;
3803 if (leaf_function_p
3804 && n_regs == 0
3805 && size == 0
3806 && crtl->outgoing_args_size == 0)
3807 actual_fsize = apparent_fsize = 0;
3808 else
3810 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3811 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3812 apparent_fsize += n_regs * 4;
3813 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3816 /* Make sure nothing can clobber our register windows.
3817 If a SAVE must be done, or there is a stack-local variable,
3818 the register window area must be allocated. */
3819 if (! leaf_function_p || size > 0)
3820 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3822 return SPARC_STACK_ALIGN (actual_fsize);
3825 /* Output any necessary .register pseudo-ops. */
3827 void
3828 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3830 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3831 int i;
3833 if (TARGET_ARCH32)
3834 return;
3836 /* Check if %g[2367] were used without
3837 .register being printed for them already. */
3838 for (i = 2; i < 8; i++)
3840 if (df_regs_ever_live_p (i)
3841 && ! sparc_hard_reg_printed [i])
3843 sparc_hard_reg_printed [i] = 1;
3844 /* %g7 is used as the TLS base register; use #ignore
3845 for it instead of #scratch. */
3846 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3847 i == 7 ? "ignore" : "scratch");
3849 if (i == 3) i = 5;
3851 #endif
3854 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3855 as needed. LOW should be double-word aligned for 32-bit registers.
3856 Return the new OFFSET. */
3858 #define SORR_SAVE 0
3859 #define SORR_RESTORE 1
3861 static int
3862 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3864 rtx mem, insn;
3865 int i;
3867 if (TARGET_ARCH64 && high <= 32)
3869 for (i = low; i < high; i++)
3871 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3873 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3874 set_mem_alias_set (mem, sparc_sr_alias_set);
3875 if (action == SORR_SAVE)
3877 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3878 RTX_FRAME_RELATED_P (insn) = 1;
3880 else /* action == SORR_RESTORE */
3881 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3882 offset += 8;
3886 else
3888 for (i = low; i < high; i += 2)
3890 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
3891 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
3892 enum machine_mode mode;
3893 int regno;
3895 if (reg0 && reg1)
3897 mode = i < 32 ? DImode : DFmode;
3898 regno = i;
3900 else if (reg0)
3902 mode = i < 32 ? SImode : SFmode;
3903 regno = i;
3905 else if (reg1)
3907 mode = i < 32 ? SImode : SFmode;
3908 regno = i + 1;
3909 offset += 4;
3911 else
3912 continue;
3914 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3915 set_mem_alias_set (mem, sparc_sr_alias_set);
3916 if (action == SORR_SAVE)
3918 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3919 RTX_FRAME_RELATED_P (insn) = 1;
3921 else /* action == SORR_RESTORE */
3922 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3924 /* Always preserve double-word alignment. */
3925 offset = (offset + 7) & -8;
3929 return offset;
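/* Example of the pairing logic above (illustrative): if only %g3 of
   the pair %g2/%g3 is live in 32-bit mode, it is saved as a single
   SImode word at BASE+OFFSET+4, after which OFFSET is rounded back up
   to a multiple of 8 to keep the double-word alignment.  */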
3932 /* Emit code to save or restore call-saved registers, according to ACTION. */
3934 static void
3935 emit_save_or_restore_regs (int action)
3937 HOST_WIDE_INT offset;
3938 rtx base;
3940 offset = frame_base_offset - apparent_fsize;
3942 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3944 /* ??? This might be optimized a little as %g1 might already have a
3945 value close enough that a single add insn will do. */
3946 /* ??? Although, all of this is probably only a temporary fix
3947 because if %g1 can hold a function result, then
3948 sparc_expand_epilogue will lose (the result will be
3949 clobbered). */
3950 base = gen_rtx_REG (Pmode, 1);
3951 emit_move_insn (base, GEN_INT (offset));
3952 emit_insn (gen_rtx_SET (VOIDmode,
3953 base,
3954 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3955 offset = 0;
3957 else
3958 base = frame_base_reg;
3960 offset = save_or_restore_regs (0, 8, base, offset, action);
3961 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3964 /* Generate a save_register_window insn. */
3966 static rtx
3967 gen_save_register_window (rtx increment)
3969 if (TARGET_ARCH64)
3970 return gen_save_register_windowdi (increment);
3971 else
3972 return gen_save_register_windowsi (increment);
3975 /* Generate an increment for the stack pointer. */
3977 static rtx
3978 gen_stack_pointer_inc (rtx increment)
3980 return gen_rtx_SET (VOIDmode,
3981 stack_pointer_rtx,
3982 gen_rtx_PLUS (Pmode,
3983 stack_pointer_rtx,
3984 increment));
3987 /* Generate a decrement for the stack pointer. */
3989 static rtx
3990 gen_stack_pointer_dec (rtx decrement)
3992 return gen_rtx_SET (VOIDmode,
3993 stack_pointer_rtx,
3994 gen_rtx_MINUS (Pmode,
3995 stack_pointer_rtx,
3996 decrement));
3999 /* Expand the function prologue. The prologue is responsible for reserving
4000 storage for the frame, saving the call-saved registers and loading the
4001 PIC register if needed. */
4003 void
4004 sparc_expand_prologue (void)
4006 rtx insn;
4007 int i;
4009 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4010 on the final value of the flag means deferring the prologue/epilogue
4011 expansion until just before the second scheduling pass, which is too
4012 late to emit multiple epilogues or return insns.
4014 Of course we are making the assumption that the value of the flag
4015 will not change between now and its final value. Of the three parts
4016 of the formula, only the last one can reasonably vary. Let's take a
4017 closer look, assuming that the first two are true (otherwise the
4018 last one is effectively silenced).
4020 If only_leaf_regs_used returns false, the global predicate will also
4021 be false so the actual frame size calculated below will be positive.
4022 As a consequence, the save_register_window insn will be emitted in
4023 the instruction stream; now this insn explicitly references %fp
4024 which is not a leaf register so only_leaf_regs_used will always
4025 return false subsequently.
4027 If only_leaf_regs_used returns true, we hope that the subsequent
4028 optimization passes won't cause non-leaf registers to pop up. For
4029 example, the regrename pass has special provisions to not rename to
4030 non-leaf registers in a leaf function. */
4031 sparc_leaf_function_p
4032 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4034 /* Need to use actual_fsize, since we are also allocating
4035 space for our callee (and our own register save area). */
4036 actual_fsize
4037 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4039 /* Advertise that the data calculated just above are now valid. */
4040 sparc_prologue_data_valid_p = true;
4042 if (sparc_leaf_function_p)
4044 frame_base_reg = stack_pointer_rtx;
4045 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4047 else
4049 frame_base_reg = hard_frame_pointer_rtx;
4050 frame_base_offset = SPARC_STACK_BIAS;
4053 if (actual_fsize == 0)
4054 /* do nothing. */ ;
4055 else if (sparc_leaf_function_p)
4057 if (actual_fsize <= 4096)
4058 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4059 else if (actual_fsize <= 8192)
4061 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4062 /* %sp is still the CFA register. */
4063 RTX_FRAME_RELATED_P (insn) = 1;
4064 insn
4065 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4067 else
4069 rtx reg = gen_rtx_REG (Pmode, 1);
4070 emit_move_insn (reg, GEN_INT (-actual_fsize));
4071 insn = emit_insn (gen_stack_pointer_inc (reg));
4072 REG_NOTES (insn) =
4073 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4074 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
4075 REG_NOTES (insn));
4078 RTX_FRAME_RELATED_P (insn) = 1;
4080 else
4082 if (actual_fsize <= 4096)
4083 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4084 else if (actual_fsize <= 8192)
4086 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4087 /* %sp is not the CFA register anymore. */
4088 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4090 else
4092 rtx reg = gen_rtx_REG (Pmode, 1);
4093 emit_move_insn (reg, GEN_INT (-actual_fsize));
4094 insn = emit_insn (gen_save_register_window (reg));
4097 RTX_FRAME_RELATED_P (insn) = 1;
4098 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4099 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4102 if (num_gfregs)
4103 emit_save_or_restore_regs (SORR_SAVE);
4105 /* Load the PIC register if needed. */
4106 if (flag_pic && crtl->uses_pic_offset_table)
4107 load_pic_register (false);
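/* The three cases above produce one of the following prologue shapes
   for a non-leaf function (illustrative assembly):

     actual_fsize <= 4096:	save	%sp, -actual_fsize, %sp

     actual_fsize <= 8192:	save	%sp, -4096, %sp
				add	%sp, 4096-actual_fsize, %sp

     otherwise:			sethi	%hi(-actual_fsize), %g1
				or	%g1, %lo(-actual_fsize), %g1
				save	%sp, %g1, %sp  */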
4110 /* This function generates the assembly code for function entry, which boils
4111 down to emitting the necessary .register directives. */
4113 static void
4114 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4116 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4117 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4119 sparc_output_scratch_registers (file);
4122 /* Expand the function epilogue, either normal or part of a sibcall.
4123 We emit all the instructions except the return or the call. */
4125 void
4126 sparc_expand_epilogue (void)
4128 if (num_gfregs)
4129 emit_save_or_restore_regs (SORR_RESTORE);
4131 if (actual_fsize == 0)
4132 /* do nothing. */ ;
4133 else if (sparc_leaf_function_p)
4135 if (actual_fsize <= 4096)
4136 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4137 else if (actual_fsize <= 8192)
4139 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4140 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4142 else
4144 rtx reg = gen_rtx_REG (Pmode, 1);
4145 emit_move_insn (reg, GEN_INT (-actual_fsize));
4146 emit_insn (gen_stack_pointer_dec (reg));
4151 /* Return true if it is appropriate to emit `return' instructions in the
4152 body of a function. */
4154 bool
4155 sparc_can_use_return_insn_p (void)
4157 return sparc_prologue_data_valid_p
4158 && (actual_fsize == 0 || !sparc_leaf_function_p);
4161 /* This function generates the assembly code for function exit. */
4163 static void
4164 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4166 /* If code does not drop into the epilogue, we still have to output
4167 a dummy nop for the sake of sane backtraces. Otherwise, if the
4168 last two instructions of a function were "call foo; dslot;" this
4169 can make the return PC of foo (i.e. address of call instruction
4170 plus 8) point to the first instruction in the next function. */
4172 rtx insn, last_real_insn;
4174 insn = get_last_insn ();
4176 last_real_insn = prev_real_insn (insn);
4177 if (last_real_insn
4178 && GET_CODE (last_real_insn) == INSN
4179 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4180 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4182 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4183 fputs("\tnop\n", file);
4185 sparc_output_deferred_case_vectors ();
4188 /* Output a 'restore' instruction. */
4190 static void
4191 output_restore (rtx pat)
4193 rtx operands[3];
4195 if (! pat)
4197 fputs ("\t restore\n", asm_out_file);
4198 return;
4201 gcc_assert (GET_CODE (pat) == SET);
4203 operands[0] = SET_DEST (pat);
4204 pat = SET_SRC (pat);
4206 switch (GET_CODE (pat))
4208 case PLUS:
4209 operands[1] = XEXP (pat, 0);
4210 operands[2] = XEXP (pat, 1);
4211 output_asm_insn (" restore %r1, %2, %Y0", operands);
4212 break;
4213 case LO_SUM:
4214 operands[1] = XEXP (pat, 0);
4215 operands[2] = XEXP (pat, 1);
4216 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4217 break;
4218 case ASHIFT:
4219 operands[1] = XEXP (pat, 0);
4220 gcc_assert (XEXP (pat, 1) == const1_rtx);
4221 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4222 break;
4223 default:
4224 operands[1] = pat;
4225 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4226 break;
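/* Examples of the mapping above (illustrative): a delay-slot body
   (set (reg %i0) (plus (reg %i1) (const_int 8))) comes out as
   "restore %i1, 8, %o0", while a plain register copy
   (set (reg %i0) (reg %i1)) hits the default case and comes out as
   "restore %g0, %i1, %o0"; the %Y operand code accounts for the
   window shift performed by the restore.  */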
4230 /* Output a return. */
4232 const char *
4233 output_return (rtx insn)
4235 if (sparc_leaf_function_p)
4237 /* This is a leaf function so we don't have to bother restoring the
4238 register window, which frees us from dealing with the convoluted
4239 semantics of restore/return. We simply output the jump to the
4240 return address and the insn in the delay slot (if any). */
4242 gcc_assert (! crtl->calls_eh_return);
4244 return "jmp\t%%o7+%)%#";
4246 else
4248 /* This is a regular function so we have to restore the register window.
4249 We may have a pending insn for the delay slot, which will be either
4250 combined with the 'restore' instruction or put in the delay slot of
4251 the 'return' instruction. */
4253 if (crtl->calls_eh_return)
4255 /* If the function uses __builtin_eh_return, the eh_return
4256 machinery occupies the delay slot. */
4257 gcc_assert (! final_sequence);
4259 if (! flag_delayed_branch)
4260 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4262 if (TARGET_V9)
4263 fputs ("\treturn\t%i7+8\n", asm_out_file);
4264 else
4265 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4267 if (flag_delayed_branch)
4268 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4269 else
4270 fputs ("\t nop\n", asm_out_file);
4272 else if (final_sequence)
4274 rtx delay, pat;
4276 delay = NEXT_INSN (insn);
4277 gcc_assert (delay);
4279 pat = PATTERN (delay);
4281 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4283 epilogue_renumber (&pat, 0);
4284 return "return\t%%i7+%)%#";
4286 else
4288 output_asm_insn ("jmp\t%%i7+%)", NULL);
4289 output_restore (pat);
4290 PATTERN (delay) = gen_blockage ();
4291 INSN_CODE (delay) = -1;
4294 else
4296 /* The delay slot is empty. */
4297 if (TARGET_V9)
4298 return "return\t%%i7+%)\n\t nop";
4299 else if (flag_delayed_branch)
4300 return "jmp\t%%i7+%)\n\t restore";
4301 else
4302 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4306 return "";
4309 /* Output a sibling call. */
4311 const char *
4312 output_sibcall (rtx insn, rtx call_operand)
4314 rtx operands[1];
4316 gcc_assert (flag_delayed_branch);
4318 operands[0] = call_operand;
4320 if (sparc_leaf_function_p)
4322 /* This is a leaf function so we don't have to bother restoring the
4323 register window. We simply output the jump to the function and
4324 the insn in the delay slot (if any). */
4326 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4328 if (final_sequence)
4329 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4330 operands);
4331 else
4332 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4333 it into a branch if possible. */
4334 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4335 operands);
4337 else
4339 /* This is a regular function so we have to restore the register window.
4340 We may have a pending insn for the delay slot, which will be combined
4341 with the 'restore' instruction. */
4343 output_asm_insn ("call\t%a0, 0", operands);
4345 if (final_sequence)
4347 rtx delay = NEXT_INSN (insn);
4348 gcc_assert (delay);
4350 output_restore (PATTERN (delay));
4352 PATTERN (delay) = gen_blockage ();
4353 INSN_CODE (delay) = -1;
4355 else
4356 output_restore (NULL_RTX);
4359 return "";
4362 /* Functions for handling argument passing.
4364 For 32-bit, the first 6 args are normally in registers and the rest are
4365 pushed. Any arg that starts within the first 6 words is at least
4366 partially passed in a register unless its data type forbids.
4368 For 64-bit, the argument registers are laid out as an array of 16 elements
4369 and arguments are added sequentially. The first 6 int args and up to the
4370 first 16 fp args (depending on size) are passed in regs.
4372    Slot  Stack     Integral  Float  Float in structure  Double  Long Double
4373    ----  -----     --------  -----  ------------------  ------  -----------
4374     15   [SP+248]            %f31   %f30,%f31           %d30
4375     14   [SP+240]            %f29   %f28,%f29           %d28    %q28
4376     13   [SP+232]            %f27   %f26,%f27           %d26
4377     12   [SP+224]            %f25   %f24,%f25           %d24    %q24
4378     11   [SP+216]            %f23   %f22,%f23           %d22
4379     10   [SP+208]            %f21   %f20,%f21           %d20    %q20
4380      9   [SP+200]            %f19   %f18,%f19           %d18
4381      8   [SP+192]            %f17   %f16,%f17           %d16    %q16
4382      7   [SP+184]            %f15   %f14,%f15           %d14
4383      6   [SP+176]            %f13   %f12,%f13           %d12    %q12
4384      5   [SP+168]  %o5       %f11   %f10,%f11           %d10
4385      4   [SP+160]  %o4       %f9    %f8,%f9             %d8     %q8
4386      3   [SP+152]  %o3       %f7    %f6,%f7             %d6
4387      2   [SP+144]  %o2       %f5    %f4,%f5             %d4     %q4
4388      1   [SP+136]  %o1       %f3    %f2,%f3             %d2
4389      0   [SP+128]  %o0       %f1    %f0,%f1             %d0     %q0
4391 Here SP is %sp with -mno-stack-bias, and %sp+stack_bias otherwise.
4393 Integral arguments are always passed as 64-bit quantities appropriately
4394 extended.
4396 Passing of floating point values is handled as follows.
4397 If a prototype is in scope:
4398 If the value is in a named argument (i.e. not a stdarg function or a
4399 value not part of the `...') then the value is passed in the appropriate
4400 fp reg.
4401 If the value is part of the `...' and is passed in one of the first 6
4402 slots then the value is passed in the appropriate int reg.
4403 If the value is part of the `...' and is not passed in one of the first 6
4404 slots then the value is passed in memory.
4405 If a prototype is not in scope:
4406 If the value is one of the first 6 arguments the value is passed in the
4407 appropriate integer reg and the appropriate fp reg.
4408 If the value is not one of the first 6 arguments the value is passed in
4409 the appropriate fp reg and in memory.
4412 Summary of the calling conventions implemented by GCC on SPARC:
4414 32-bit ABI:
4415                              size      argument     return value
4417     small integer            <4        int. reg.    int. reg.
4418     word                      4        int. reg.    int. reg.
4419     double word               8        int. reg.    int. reg.
4421     _Complex small integer   <8        int. reg.    int. reg.
4422     _Complex word             8        int. reg.    int. reg.
4423     _Complex double word     16        memory       int. reg.
4425     vector integer          <=8        int. reg.    FP reg.
4426     vector integer           >8        memory       memory
4428     float                     4        int. reg.    FP reg.
4429     double                    8        int. reg.    FP reg.
4430     long double              16        memory       memory
4432     _Complex float            8        memory       FP reg.
4433     _Complex double          16        memory       FP reg.
4434     _Complex long double     32        memory       FP reg.
4436     vector float            any        memory       memory
4438     aggregate               any        memory       memory
4442 64-bit ABI:
4443                              size      argument     return value
4445     small integer            <8        int. reg.    int. reg.
4446     word                      8        int. reg.    int. reg.
4447     double word              16        int. reg.    int. reg.
4449     _Complex small integer  <16        int. reg.    int. reg.
4450     _Complex word            16        int. reg.    int. reg.
4451     _Complex double word     32        memory       int. reg.
4453     vector integer         <=16        FP reg.      FP reg.
4454     vector integer      16<s<=32       memory       FP reg.
4455     vector integer          >32        memory       memory
4457     float                     4        FP reg.      FP reg.
4458     double                    8        FP reg.      FP reg.
4459     long double              16        FP reg.      FP reg.
4461     _Complex float            8        FP reg.      FP reg.
4462     _Complex double          16        FP reg.      FP reg.
4463     _Complex long double     32        memory       FP reg.
4465     vector float           <=16        FP reg.      FP reg.
4466     vector float        16<s<=32       memory       FP reg.
4467     vector float            >32        memory       memory
4469     aggregate              <=16        reg.         reg.
4470     aggregate           16<s<=32       memory       reg.
4471     aggregate               >32        memory       memory
4475 Note #1: complex floating-point types follow the extended SPARC ABIs as
4476 implemented by the Sun compiler.
4478 Note #2: integral vector types follow the scalar floating-point types
4479 conventions to match what is implemented by the Sun VIS SDK.
4481 Note #3: floating-point vector types follow the aggregate types
4482 conventions. */
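/* As a worked example of the 64-bit conventions (hypothetical
   prototype):

     void f (int a, double b, struct { float x; float y; } c);

   A occupies slot 0 and is passed in %o0 (extended to 64 bits), B
   occupies slot 1 and is passed in %d2, and C occupies slot 2 with
   its two float fields passed in %f4 and %f5, as per the table
   above.  */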
4485 /* Maximum number of int regs for args. */
4486 #define SPARC_INT_ARG_MAX 6
4487 /* Maximum number of fp regs for args. */
4488 #define SPARC_FP_ARG_MAX 16
4490 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4492 /* Handle the INIT_CUMULATIVE_ARGS macro.
4493 Initialize a variable CUM of type CUMULATIVE_ARGS
4494 for a call to a function whose data type is FNTYPE.
4495 For a library call, FNTYPE is 0. */
4497 void
4498 init_cumulative_args (struct sparc_args *cum, tree fntype,
4499 rtx libname ATTRIBUTE_UNUSED,
4500 tree fndecl ATTRIBUTE_UNUSED)
4502 cum->words = 0;
4503 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4504 cum->libcall_p = fntype == 0;
4507 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4508 When a prototype says `char' or `short', really pass an `int'. */
4510 static bool
4511 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4513 return TARGET_ARCH32 ? true : false;
4516 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4518 static bool
4519 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4521 return TARGET_ARCH64 ? true : false;
4524 /* Scan the record type TYPE and return the following predicates:
4525 - INTREGS_P: the record contains at least one field or sub-field
4526 that is eligible for promotion in integer registers.
4527 - FPREGS_P: the record contains at least one field or sub-field
4528 that is eligible for promotion in floating-point registers.
4529 - PACKED_P: the record contains at least one field that is packed.
4531 Sub-fields are not taken into account for the PACKED_P predicate. */
4533 static void
4534 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4536 tree field;
4538 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4540 if (TREE_CODE (field) == FIELD_DECL)
4542 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4543 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4544 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4545 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4546 && TARGET_FPU)
4547 *fpregs_p = 1;
4548 else
4549 *intregs_p = 1;
4551 if (packed_p && DECL_PACKED (field))
4552 *packed_p = 1;
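/* For example (illustrative), scanning

     struct { int i; struct { double d; } s; }

   sets *INTREGS_P for I and, when TARGET_FPU is set, *FPREGS_P for
   the sub-field D; *PACKED_P is only set if a field of the record
   itself, not of a nested record, is declared packed.  */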
4557 /* Compute the slot number to pass an argument in.
4558 Return the slot number or -1 if passing on the stack.
4560 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4561 the preceding args and about the function being called.
4562 MODE is the argument's machine mode.
4563 TYPE is the data type of the argument (as a tree).
4564 This is null for libcalls where that information may
4565 not be available.
4566 NAMED is nonzero if this argument is a named parameter
4567 (otherwise it is an extra parameter matching an ellipsis).
4568 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4569 *PREGNO records the register number to use if scalar type.
4570 *PPADDING records the amount of padding needed in words. */
4572 static int
4573 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4574 tree type, int named, int incoming_p,
4575 int *pregno, int *ppadding)
4577 int regbase = (incoming_p
4578 ? SPARC_INCOMING_INT_ARG_FIRST
4579 : SPARC_OUTGOING_INT_ARG_FIRST);
4580 int slotno = cum->words;
4581 enum mode_class mclass;
4582 int regno;
4584 *ppadding = 0;
4586 if (type && TREE_ADDRESSABLE (type))
4587 return -1;
4589 if (TARGET_ARCH32
4590 && mode == BLKmode
4591 && type
4592 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4593 return -1;
4595 /* For SPARC64, objects requiring 16-byte alignment get it. */
4596 if (TARGET_ARCH64
4597 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4598 && (slotno & 1) != 0)
4599 slotno++, *ppadding = 1;
4601 mclass = GET_MODE_CLASS (mode);
4602 if (type && TREE_CODE (type) == VECTOR_TYPE)
4604 /* Vector types deserve special treatment because they are
4605 polymorphic wrt their mode, depending upon whether VIS
4606 instructions are enabled. */
4607 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4609 /* The SPARC port defines no floating-point vector modes. */
4610 gcc_assert (mode == BLKmode);
4612 else
4614 /* Integral vector types should either have a vector
4615 mode or an integral mode, because we are guaranteed
4616 by pass_by_reference that their size is not greater
4617 than 16 bytes and TImode is 16-byte wide. */
4618 gcc_assert (mode != BLKmode);
4620 /* Vector integers are handled like floats according to
4621 the Sun VIS SDK. */
4622 mclass = MODE_FLOAT;
4626 switch (mclass)
4628 case MODE_FLOAT:
4629 case MODE_COMPLEX_FLOAT:
4630 case MODE_VECTOR_INT:
4631 if (TARGET_ARCH64 && TARGET_FPU && named)
4633 if (slotno >= SPARC_FP_ARG_MAX)
4634 return -1;
4635 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4636 /* Arguments filling only a single FP register are
4637 right-justified in the outer double FP register. */
4638 if (GET_MODE_SIZE (mode) <= 4)
4639 regno++;
4640 break;
4642 /* fallthrough */
4644 case MODE_INT:
4645 case MODE_COMPLEX_INT:
4646 if (slotno >= SPARC_INT_ARG_MAX)
4647 return -1;
4648 regno = regbase + slotno;
4649 break;
4651 case MODE_RANDOM:
4652 if (mode == VOIDmode)
4653 /* MODE is VOIDmode when generating the actual call. */
4654 return -1;
4656 gcc_assert (mode == BLKmode);
4658 if (TARGET_ARCH32
4659 || !type
4660 || (TREE_CODE (type) != VECTOR_TYPE
4661 && TREE_CODE (type) != RECORD_TYPE))
4663 if (slotno >= SPARC_INT_ARG_MAX)
4664 return -1;
4665 regno = regbase + slotno;
4667 else /* TARGET_ARCH64 && type */
4669 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4671 /* First see what kinds of registers we would need. */
4672 if (TREE_CODE (type) == VECTOR_TYPE)
4673 fpregs_p = 1;
4674 else
4675 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4677 /* The ABI obviously doesn't specify how packed structures
4678 are passed. These are defined to be passed in int regs
4679 if possible, otherwise memory. */
4680 if (packed_p || !named)
4681 fpregs_p = 0, intregs_p = 1;
4683 /* If all arg slots are filled, then must pass on stack. */
4684 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4685 return -1;
4687 /* If there are only int args and all int arg slots are filled,
4688 then must pass on stack. */
4689 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4690 return -1;
4692 /* Note that even if all int arg slots are filled, fp members may
4693 still be passed in regs if such regs are available.
4694 *PREGNO isn't set because there may be more than one; it's up
4695 to the caller to compute them. */
4696 return slotno;
4698 break;
4700 default :
4701 gcc_unreachable ();
4704 *pregno = regno;
4705 return slotno;
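/* Illustrative examples for TARGET_ARCH64: a named double arriving
   with CUM->words == 3 gets slot 3 and *PREGNO set to the register
   number of %f6 (the value lands in %d6), while a 16-byte-aligned
   argument arriving at an odd slot number is first bumped to the next
   even slot with *PPADDING set to 1.  */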
4708 /* Handle recursive register counting for structure field layout. */
4710 struct function_arg_record_value_parms
4712 rtx ret; /* return expression being built. */
4713 int slotno; /* slot number of the argument. */
4714 int named; /* whether the argument is named. */
4715 int regbase; /* regno of the base register. */
4716 int stack; /* 1 if part of the argument is on the stack. */
4717 int intoffset; /* offset of the first pending integer field. */
4718 unsigned int nregs; /* number of words passed in registers. */
4721 static void function_arg_record_value_3
4722 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4723 static void function_arg_record_value_2
4724 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4725 static void function_arg_record_value_1
4726 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4727 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4728 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4730 /* A subroutine of function_arg_record_value. Traverse the structure
4731 recursively and determine how many registers will be required. */
4733 static void
4734 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4735 struct function_arg_record_value_parms *parms,
4736 bool packed_p)
4738 tree field;
4740 /* We need to compute how many registers are needed so we can
4741 allocate the PARALLEL but before we can do that we need to know
4742 whether there are any packed fields. The ABI obviously doesn't
4743 specify how structures are passed in this case, so they are
4744 defined to be passed in int regs if possible, otherwise memory,
4745 regardless of whether there are fp values present. */
4747 if (! packed_p)
4748 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4750 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4752 packed_p = true;
4753 break;
4757 /* Compute how many registers we need. */
4758 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4760 if (TREE_CODE (field) == FIELD_DECL)
4762 HOST_WIDE_INT bitpos = startbitpos;
4764 if (DECL_SIZE (field) != 0)
4766 if (integer_zerop (DECL_SIZE (field)))
4767 continue;
4769 if (host_integerp (bit_position (field), 1))
4770 bitpos += int_bit_position (field);
4773 /* ??? FIXME: else assume zero offset. */
4775 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4776 function_arg_record_value_1 (TREE_TYPE (field),
4777 bitpos,
4778 parms,
4779 packed_p);
4780 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4781 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4782 && TARGET_FPU
4783 && parms->named
4784 && ! packed_p)
4786 if (parms->intoffset != -1)
4788 unsigned int startbit, endbit;
4789 int intslots, this_slotno;
4791 startbit = parms->intoffset & -BITS_PER_WORD;
4792 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4794 intslots = (endbit - startbit) / BITS_PER_WORD;
4795 this_slotno = parms->slotno + parms->intoffset
4796 / BITS_PER_WORD;
4798 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4800 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4801 /* We need to pass this field on the stack. */
4802 parms->stack = 1;
4805 parms->nregs += intslots;
4806 parms->intoffset = -1;
4809 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4810 If it wasn't true we wouldn't be here. */
4811 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4812 && DECL_MODE (field) == BLKmode)
4813 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4814 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4815 parms->nregs += 2;
4816 else
4817 parms->nregs += 1;
4819 else
4821 if (parms->intoffset == -1)
4822 parms->intoffset = bitpos;
4828 /* A subroutine of function_arg_record_value. Assign the bits of the
4829 structure between parms->intoffset and bitpos to integer registers. */
4831 static void
4832 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4833 struct function_arg_record_value_parms *parms)
4835 enum machine_mode mode;
4836 unsigned int regno;
4837 unsigned int startbit, endbit;
4838 int this_slotno, intslots, intoffset;
4839 rtx reg;
4841 if (parms->intoffset == -1)
4842 return;
4844 intoffset = parms->intoffset;
4845 parms->intoffset = -1;
4847 startbit = intoffset & -BITS_PER_WORD;
4848 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4849 intslots = (endbit - startbit) / BITS_PER_WORD;
4850 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4852 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4853 if (intslots <= 0)
4854 return;
4856 /* If this is the trailing part of a word, only load that much into
4857 the register. Otherwise load the whole register. Note that in
4858 the latter case we may pick up unwanted bits. It's not a problem
4859 at the moment, but we may wish to revisit this. */
4861 if (intoffset % BITS_PER_WORD != 0)
4862 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4863 MODE_INT);
4864 else
4865 mode = word_mode;
4867 intoffset /= BITS_PER_UNIT;
4868 do
4870 regno = parms->regbase + this_slotno;
4871 reg = gen_rtx_REG (mode, regno);
4872 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4873 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4875 this_slotno += 1;
4876 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4877 mode = word_mode;
4878 parms->nregs += 1;
4879 intslots -= 1;
4881 while (intslots > 0);
4884 /* A subroutine of function_arg_record_value. Traverse the structure
4885 recursively and assign bits to floating point registers. Track which
4886 bits in between need integer registers; invoke function_arg_record_value_3
4887 to make that happen. */
4889 static void
4890 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
4891 struct function_arg_record_value_parms *parms,
4892 bool packed_p)
4894 tree field;
4896 if (! packed_p)
4897 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4899 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4901 packed_p = true;
4902 break;
4906 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4908 if (TREE_CODE (field) == FIELD_DECL)
4910 HOST_WIDE_INT bitpos = startbitpos;
4912 if (DECL_SIZE (field) != 0)
4914 if (integer_zerop (DECL_SIZE (field)))
4915 continue;
4917 if (host_integerp (bit_position (field), 1))
4918 bitpos += int_bit_position (field);
4921 /* ??? FIXME: else assume zero offset. */
4923 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4924 function_arg_record_value_2 (TREE_TYPE (field),
4925 bitpos,
4926 parms,
4927 packed_p);
4928 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4929 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4930 && TARGET_FPU
4931 && parms->named
4932 && ! packed_p)
4934 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4935 int regno, nregs, pos;
4936 enum machine_mode mode = DECL_MODE (field);
4937 rtx reg;
4939 function_arg_record_value_3 (bitpos, parms);
4941 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4942 && mode == BLKmode)
4944 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4945 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4947 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4949 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4950 nregs = 2;
4952 else
4953 nregs = 1;
4955 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4956 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4957 regno++;
4958 reg = gen_rtx_REG (mode, regno);
4959 pos = bitpos / BITS_PER_UNIT;
4960 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4961 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4962 parms->nregs += 1;
4963 while (--nregs > 0)
4965 regno += GET_MODE_SIZE (mode) / 4;
4966 reg = gen_rtx_REG (mode, regno);
4967 pos += GET_MODE_SIZE (mode);
4968 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4969 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4970 parms->nregs += 1;
4973 else
4975 if (parms->intoffset == -1)
4976 parms->intoffset = bitpos;
4982 /* Used by function_arg and function_value to implement the complex
4983 conventions of the 64-bit ABI for passing and returning structures.
4984 Return an expression valid as a return value for the two macros
4985 FUNCTION_ARG and FUNCTION_VALUE.
4987 TYPE is the data type of the argument (as a tree).
4988 This is null for libcalls where that information may
4989 not be available.
4990 MODE is the argument's machine mode.
4991 SLOTNO is the index number of the argument's slot in the parameter array.
4992 NAMED is nonzero if this argument is a named parameter
4993 (otherwise it is an extra parameter matching an ellipsis).
4994 REGBASE is the regno of the base register for the parameter array. */
4996 static rtx
4997 function_arg_record_value (const_tree type, enum machine_mode mode,
4998 int slotno, int named, int regbase)
5000 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5001 struct function_arg_record_value_parms parms;
5002 unsigned int nregs;
5004 parms.ret = NULL_RTX;
5005 parms.slotno = slotno;
5006 parms.named = named;
5007 parms.regbase = regbase;
5008 parms.stack = 0;
5010 /* Compute how many registers we need. */
5011 parms.nregs = 0;
5012 parms.intoffset = 0;
5013 function_arg_record_value_1 (type, 0, &parms, false);
5015 /* Take into account pending integer fields. */
5016 if (parms.intoffset != -1)
5018 unsigned int startbit, endbit;
5019 int intslots, this_slotno;
5021 startbit = parms.intoffset & -BITS_PER_WORD;
5022 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5023 intslots = (endbit - startbit) / BITS_PER_WORD;
5024 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5026 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5028 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5029 /* We need to pass this field on the stack. */
5030 parms.stack = 1;
5033 parms.nregs += intslots;
5035 nregs = parms.nregs;
5037 /* Allocate the vector and handle some annoying special cases. */
5038 if (nregs == 0)
5040 /* ??? Empty structure has no value? Duh? */
5041 if (typesize <= 0)
5043 /* Though there's nothing really to store, return a word register
5044 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5045 leads to breakage due to the fact that there are zero bytes to
5046 load. */
5047 return gen_rtx_REG (mode, regbase);
5049 else
5051 /* ??? C++ has structures with no fields, and yet a size. Give up
5052 for now and pass everything back in integer registers. */
5053 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5055 if (nregs + slotno > SPARC_INT_ARG_MAX)
5056 nregs = SPARC_INT_ARG_MAX - slotno;
5058 gcc_assert (nregs != 0);
5060 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5062 /* If at least one field must be passed on the stack, generate
5063 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5064 also be passed on the stack. We can't do much better because the
5065 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5066 of structures for which the fields passed exclusively in registers
5067 are not at the beginning of the structure. */
5068 if (parms.stack)
5069 XVECEXP (parms.ret, 0, 0)
5070 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5072 /* Fill in the entries. */
5073 parms.nregs = 0;
5074 parms.intoffset = 0;
5075 function_arg_record_value_2 (type, 0, &parms, false);
5076 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5078 gcc_assert (parms.nregs == nregs);
5080 return parms.ret;
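/* For instance (illustrative), a 16-byte struct { double d; long l; }
   passed as an outgoing argument in slot 0 yields

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the FP field is passed in %d0 and the trailing integer word in
   the second integer argument register.  */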
5083 /* Used by function_arg and function_value to implement the conventions
5084 of the 64-bit ABI for passing and returning unions.
5085 Return an expression valid as a return value for the two macros
5086 FUNCTION_ARG and FUNCTION_VALUE.
5088 SIZE is the size in bytes of the union.
5089 MODE is the argument's machine mode.
5090 REGNO is the hard register the union will be passed in. */
5092 static rtx
5093 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5094 int regno)
5096 int nwords = ROUND_ADVANCE (size), i;
5097 rtx regs;
5099 /* See comment in previous function for empty structures. */
5100 if (nwords == 0)
5101 return gen_rtx_REG (mode, regno);
5103 if (slotno == SPARC_INT_ARG_MAX - 1)
5104 nwords = 1;
5106 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5108 for (i = 0; i < nwords; i++)
5110 /* Unions are passed left-justified. */
5111 XVECEXP (regs, 0, i)
5112 = gen_rtx_EXPR_LIST (VOIDmode,
5113 gen_rtx_REG (word_mode, regno),
5114 GEN_INT (UNITS_PER_WORD * i));
5115 regno++;
5118 return regs;
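/* E.g. (illustrative) a 12-byte union passed in 64-bit mode starting
   in slot 2 becomes

     (parallel [(expr_list (reg:DI %o2) (const_int 0))
                (expr_list (reg:DI %o3) (const_int 8))])

   i.e. left-justified in two consecutive integer registers.  */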
5121 /* Used by function_arg and function_value to implement the conventions
5122 for passing and returning large (BLKmode) vectors.
5123 Return an expression valid as a return value for the two macros
5124 FUNCTION_ARG and FUNCTION_VALUE.
5126 SIZE is the size in bytes of the vector.
5127 BASE_MODE is the argument's base machine mode.
5128 REGNO is the FP hard register the vector will be passed in. */
5130 static rtx
5131 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5133 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5134 int nregs = size / base_mode_size, i;
5135 rtx regs;
5137 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5139 for (i = 0; i < nregs; i++)
5141 XVECEXP (regs, 0, i)
5142 = gen_rtx_EXPR_LIST (VOIDmode,
5143 gen_rtx_REG (base_mode, regno),
5144 GEN_INT (base_mode_size * i));
5145 regno += base_mode_size / 4;
5148 return regs;
5151 /* Handle the FUNCTION_ARG macro.
5152 Determine where to put an argument to a function.
5153 Value is zero to push the argument on the stack,
5154 or a hard register in which to store the argument.
5156 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5157 the preceding args and about the function being called.
5158 MODE is the argument's machine mode.
5159 TYPE is the data type of the argument (as a tree).
5160 This is null for libcalls where that information may
5161 not be available.
5162 NAMED is nonzero if this argument is a named parameter
5163 (otherwise it is an extra parameter matching an ellipsis).
5164 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5166 rtx
5167 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5168 tree type, int named, int incoming_p)
5170 int regbase = (incoming_p
5171 ? SPARC_INCOMING_INT_ARG_FIRST
5172 : SPARC_OUTGOING_INT_ARG_FIRST);
5173 int slotno, regno, padding;
5174 enum mode_class mclass = GET_MODE_CLASS (mode);
5176 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5177 &regno, &padding);
5178 if (slotno == -1)
5179 return 0;
5181 /* Vector types deserve special treatment because they are polymorphic wrt
5182 their mode, depending upon whether VIS instructions are enabled. */
5183 if (type && TREE_CODE (type) == VECTOR_TYPE)
5185 HOST_WIDE_INT size = int_size_in_bytes (type);
5186 gcc_assert ((TARGET_ARCH32 && size <= 8)
5187 || (TARGET_ARCH64 && size <= 16));
5189 if (mode == BLKmode)
5190 return function_arg_vector_value (size,
5191 TYPE_MODE (TREE_TYPE (type)),
5192 SPARC_FP_ARG_FIRST + 2*slotno);
5193 else
5194 mclass = MODE_FLOAT;
5197 if (TARGET_ARCH32)
5198 return gen_rtx_REG (mode, regno);
5200 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5201 and are promoted to registers if possible. */
5202 if (type && TREE_CODE (type) == RECORD_TYPE)
5204 HOST_WIDE_INT size = int_size_in_bytes (type);
5205 gcc_assert (size <= 16);
5207 return function_arg_record_value (type, mode, slotno, named, regbase);
5210 /* Unions up to 16 bytes in size are passed in integer registers. */
5211 else if (type && TREE_CODE (type) == UNION_TYPE)
5213 HOST_WIDE_INT size = int_size_in_bytes (type);
5214 gcc_assert (size <= 16);
5216 return function_arg_union_value (size, mode, slotno, regno);
5219 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5220 but also have the slot allocated for them.
5221 If no prototype is in scope fp values in register slots get passed
5222 in two places, either fp regs and int regs or fp regs and memory. */
5223 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5224 && SPARC_FP_REG_P (regno))
5226 rtx reg = gen_rtx_REG (mode, regno);
5227 if (cum->prototype_p || cum->libcall_p)
5229 /* "* 2" because fp reg numbers are recorded in 4 byte
5230 quantities. */
5231 #if 0
5232 /* ??? This will cause the value to be passed in the fp reg and
5233 in the stack. When a prototype exists we want to pass the
5234 value in the reg but reserve space on the stack. That's an
5235 optimization, and is deferred [for a bit]. */
5236 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5237 return gen_rtx_PARALLEL (mode,
5238 gen_rtvec (2,
5239 gen_rtx_EXPR_LIST (VOIDmode,
5240 NULL_RTX, const0_rtx),
5241 gen_rtx_EXPR_LIST (VOIDmode,
5242 reg, const0_rtx)));
5243 else
5244 #else
5245 /* ??? It seems that passing back a register even when past
5246 the area declared by REG_PARM_STACK_SPACE will allocate
5247 space appropriately, and will not copy the data onto the
5248 stack, exactly as we desire.
5250 This is due to locate_and_pad_parm being called in
5251 expand_call whenever reg_parm_stack_space > 0, which
5252 while beneficial to our example here, would seem to be
5253 in error from what had been intended. Ho hum... -- r~ */
5254 #endif
5255 return reg;
5257 else
5259 rtx v0, v1;
5261 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5263 int intreg;
5265 /* On incoming, we don't need to know that the value
5266 is passed in %f0 and %i0, and it confuses other parts
5267 causing needless spillage even on the simplest cases. */
5268 if (incoming_p)
5269 return reg;
5271 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5272 + (regno - SPARC_FP_ARG_FIRST) / 2);
5274 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5275 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5276 const0_rtx);
5277 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5279 else
5281 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5282 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5283 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5288 /* All other aggregate types are passed in an integer register in a mode
5289 corresponding to the size of the type. */
5290 else if (type && AGGREGATE_TYPE_P (type))
5292 HOST_WIDE_INT size = int_size_in_bytes (type);
5293 gcc_assert (size <= 16);
5295 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5298 return gen_rtx_REG (mode, regno);
5301 /* For an arg passed partly in registers and partly in memory,
5302 this is the number of bytes of registers used.
5303 For args passed entirely in registers or entirely in memory, zero.
5305 Any arg that starts in the first 6 regs but won't entirely fit in them
5306 needs partial registers on v8. On v9, structures with integer
5307 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5308 values that begin in the last fp reg [where "last fp reg" varies with the
5309 mode] will be split between that reg and memory. */
5311 static int
5312 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5313 tree type, bool named)
5315 int slotno, regno, padding;
5317 /* We pass 0 for incoming_p here; it doesn't matter. */
5318 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5320 if (slotno == -1)
5321 return 0;
5323 if (TARGET_ARCH32)
5325 if ((slotno + (mode == BLKmode
5326 ? ROUND_ADVANCE (int_size_in_bytes (type))
5327 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5328 > SPARC_INT_ARG_MAX)
5329 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5331 else
5333 /* We are guaranteed by pass_by_reference that the size of the
5334 argument is not greater than 16 bytes, so we only need to return
5335 one word if the argument is partially passed in registers. */
5337 if (type && AGGREGATE_TYPE_P (type))
5339 int size = int_size_in_bytes (type);
5341 if (size > UNITS_PER_WORD
5342 && slotno == SPARC_INT_ARG_MAX - 1)
5343 return UNITS_PER_WORD;
5345 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5346 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5347 && ! (TARGET_FPU && named)))
5349 /* The complex types are passed as packed types. */
5350 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5351 && slotno == SPARC_INT_ARG_MAX - 1)
5352 return UNITS_PER_WORD;
5354 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5356 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5357 > SPARC_FP_ARG_MAX)
5358 return UNITS_PER_WORD;
5362 return 0;
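/* Worked example (illustrative): in 32-bit mode a DImode argument
   starting in slot 5 occupies %o5 plus one stack word, so the code
   above returns (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD = 4 bytes
   passed in registers.  */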
5365 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5366 Specify whether to pass the argument by reference. */
5368 static bool
5369 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5370 enum machine_mode mode, const_tree type,
5371 bool named ATTRIBUTE_UNUSED)
5373 if (TARGET_ARCH32)
5374 /* Original SPARC 32-bit ABI says that structures and unions,
5375 and quad-precision floats are passed by reference. For Pascal,
5376 also pass arrays by reference. All other base types are passed
5377 in registers.
5379 Extended ABI (as implemented by the Sun compiler) says that all
5380 complex floats are passed by reference. Pass complex integers
5381 in registers up to 8 bytes. More generally, enforce the 2-word
5382 cap for passing arguments in registers.
5384 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5385 integers are passed like floats of the same size, that is in
5386 registers up to 8 bytes. Pass all vector floats by reference
5387 like structure and unions. */
5388 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5389 || mode == SCmode
5390 /* Catch CDImode, TFmode, DCmode and TCmode. */
5391 || GET_MODE_SIZE (mode) > 8
5392 || (type
5393 && TREE_CODE (type) == VECTOR_TYPE
5394 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5395 else
5396 /* Original SPARC 64-bit ABI says that structures and unions
5397 smaller than 16 bytes are passed in registers, as well as
5398 all other base types.
5400 Extended ABI (as implemented by the Sun compiler) says that
5401 complex floats are passed in registers up to 16 bytes. Pass
5402 all complex integers in registers up to 16 bytes. More generally,
5403 enforce the 2-word cap for passing arguments in registers.
5405 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5406 integers are passed like floats of the same size, that is in
5407 registers (up to 16 bytes). Pass all vector floats like structure
5408 and unions. */
5409 return ((type
5410 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5411 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5412 /* Catch CTImode and TCmode. */
5413 || GET_MODE_SIZE (mode) > 16);
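/* Examples (illustrative): in 32-bit mode an 8-byte _Complex int is
   passed in registers, while SCmode _Complex float, TFmode long
   double and 16-byte _Complex double are all passed by reference; in
   64-bit mode only objects larger than 16 bytes, e.g. TCmode, go by
   reference.  */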
5416 /* Handle the FUNCTION_ARG_ADVANCE macro.
5417 Update the data in CUM to advance over an argument
5418 of mode MODE and data type TYPE.
5419 TYPE is null for libcalls where that information may not be available. */
5421 void
5422 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5423 tree type, int named)
5425 int slotno, regno, padding;
5427 /* We pass 0 for incoming_p here; it doesn't matter. */
5428 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5430 /* If the argument's slot required leading padding, advance over it. */
5431 if (slotno != -1)
5432 cum->words += padding;
5434 if (TARGET_ARCH32)
5436 cum->words += (mode != BLKmode
5437 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5438 : ROUND_ADVANCE (int_size_in_bytes (type)));
5440 else
5442 if (type && AGGREGATE_TYPE_P (type))
5444 int size = int_size_in_bytes (type);
5446 if (size <= 8)
5447 ++cum->words;
5448 else if (size <= 16)
5449 cum->words += 2;
5450 else /* passed by reference */
5451 ++cum->words;
5453 else
5455 cum->words += (mode != BLKmode
5456 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5457 : ROUND_ADVANCE (int_size_in_bytes (type)));
5462 /* Handle the FUNCTION_ARG_PADDING macro.
5463 For the 64 bit ABI structs are always stored left shifted in their
5464 argument slot. */
5466 enum direction
5467 function_arg_padding (enum machine_mode mode, const_tree type)
5469 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5470 return upward;
5472 /* Fall back to the default. */
5473 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5476 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5477 Specify whether to return the return value in memory. */
5479 static bool
5480 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5482 if (TARGET_ARCH32)
5483 /* Original SPARC 32-bit ABI says that structures and unions,
5484 and quad-precision floats are returned in memory. All other
5485 base types are returned in registers.
5487 Extended ABI (as implemented by the Sun compiler) says that
5488 all complex floats are returned in registers (8 FP registers
5489 at most for '_Complex long double'). Return all complex integers
5490 in registers (4 at most for '_Complex long long').
5492 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5493 integers are returned like floats of the same size, that is in
5494 registers up to 8 bytes and in memory otherwise. Return all
5495 vector floats in memory like structure and unions; note that
5496 they always have BLKmode like the latter. */
5497 return (TYPE_MODE (type) == BLKmode
5498 || TYPE_MODE (type) == TFmode
5499 || (TREE_CODE (type) == VECTOR_TYPE
5500 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5501 else
5502 /* Original SPARC 64-bit ABI says that structures and unions
5503 smaller than 32 bytes are returned in registers, as well as
5504 all other base types.
5506 Extended ABI (as implemented by the Sun compiler) says that all
5507 complex floats are returned in registers (8 FP registers at most
5508 for '_Complex long double'). Return all complex integers in
5509 registers (4 at most for '_Complex TItype').
5511 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5512 integers are returned like floats of the same size, that is in
5513 registers. Return all vector floats like structure and unions;
5514 note that they always have BLKmode like the latter. */
5515 return ((TYPE_MODE (type) == BLKmode
5516 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5519 /* Handle the TARGET_STRUCT_VALUE target hook.
5520 Return where to find the structure return value address. */
5522 static rtx
5523 sparc_struct_value_rtx (tree fndecl, int incoming)
5525 if (TARGET_ARCH64)
5526 return 0;
5527 else
5529 rtx mem;
5531 if (incoming)
5532 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5533 STRUCT_VALUE_OFFSET));
5534 else
5535 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5536 STRUCT_VALUE_OFFSET));
5538 /* Only follow the SPARC ABI for fixed-size structure returns.
5539 Variable size structure returns are handled per the normal
5540 procedures in GCC. This is enabled by -mstd-struct-return. */
5541 if (incoming == 2
5542 && sparc_std_struct_return
5543 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5544 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5546 /* We must check and adjust the return address, since the caller
5547 may or may not have actually provided the return object. */
5549 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5550 rtx scratch = gen_reg_rtx (SImode);
5551 rtx endlab = gen_label_rtx ();
5553 /* Calculate the return object size */
5554 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5555 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5556 /* Construct a temporary return value */
5557 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5559 /* Implement the SPARC 32-bit psABI callee struct return checking
5560 requirements:
5562 Fetch the instruction where we will return to and see if
5563 it's an unimp instruction (the most significant 10 bits
5564 will be zero). */
5565 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5566 plus_constant (ret_rtx, 8)));
5567 /* Assume the size is valid and pre-adjust */
5568 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5569 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5570 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5571 /* Assign stack temp:
5572 Write the address of the memory pointed to by temp_val into
5573 the memory pointed to by mem */
5574 emit_move_insn (mem, XEXP (temp_val, 0));
5575 emit_label (endlab);
5578 set_mem_alias_set (mem, struct_value_alias_set);
5579 return mem;
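/* The checking sequence above relies on the 32-bit ABI convention
   that a caller which really passes a struct return area follows the
   call with an "unimp" word holding the size, e.g. (illustrative):

	call	f
	 nop
	unimp	12	! sizeof (struct) == 12

   When the size matches, the callee returns to CALL+12 and uses the
   caller's area; otherwise it returns to CALL+8 and substitutes the
   local temporary allocated above.  */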
5583 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5584 For v9, function return values are subject to the same rules as arguments,
5585 except that up to 32 bytes may be returned in registers. */
5587 rtx
5588 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5590 /* Beware that the two values are swapped here wrt function_arg. */
5591 int regbase = (incoming_p
5592 ? SPARC_OUTGOING_INT_ARG_FIRST
5593 : SPARC_INCOMING_INT_ARG_FIRST);
5594 enum mode_class mclass = GET_MODE_CLASS (mode);
5595 int regno;
5597 /* Vector types deserve special treatment because they are polymorphic wrt
5598 their mode, depending upon whether VIS instructions are enabled. */
5599 if (type && TREE_CODE (type) == VECTOR_TYPE)
5601 HOST_WIDE_INT size = int_size_in_bytes (type);
5602 gcc_assert ((TARGET_ARCH32 && size <= 8)
5603 || (TARGET_ARCH64 && size <= 32));
5605 if (mode == BLKmode)
5606 return function_arg_vector_value (size,
5607 TYPE_MODE (TREE_TYPE (type)),
5608 SPARC_FP_ARG_FIRST);
5609 else
5610 mclass = MODE_FLOAT;
5613 if (TARGET_ARCH64 && type)
5615 /* Structures up to 32 bytes in size are returned in registers. */
5616 if (TREE_CODE (type) == RECORD_TYPE)
5618 HOST_WIDE_INT size = int_size_in_bytes (type);
5619 gcc_assert (size <= 32);
5621 return function_arg_record_value (type, mode, 0, 1, regbase);
5624 /* Unions up to 32 bytes in size are returned in integer registers. */
5625 else if (TREE_CODE (type) == UNION_TYPE)
5627 HOST_WIDE_INT size = int_size_in_bytes (type);
5628 gcc_assert (size <= 32);
5630 return function_arg_union_value (size, mode, 0, regbase);
5633 /* Objects that require it are returned in FP registers. */
5634 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5637 /* All other aggregate types are returned in an integer register in a
5638 mode corresponding to the size of the type. */
5639 else if (AGGREGATE_TYPE_P (type))
5643 HOST_WIDE_INT size = int_size_in_bytes (type);
5644 gcc_assert (size <= 32);
5646 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5648 /* ??? We probably should have made the same ABI change in
5649 3.4.0 as the one we made for unions. The latter was
5650 required by the SCD though, while the former is not
5651 specified, so we favored compatibility and efficiency.
5653 Now we're stuck for aggregates larger than 16 bytes,
5654 because OImode vanished in the meantime. Let's not
5655 try to be unduly clever, and simply follow the ABI
5656 for unions in that case. */
5657 if (mode == BLKmode)
5658 return function_arg_union_value (size, mode, 0, regbase);
5659 else
5660 mclass = MODE_INT;
5663 /* This must match PROMOTE_FUNCTION_MODE. */
5664 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5665 mode = word_mode;
5668 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5669 regno = SPARC_FP_ARG_FIRST;
5670 else
5671 regno = regbase;
5673 return gen_rtx_REG (mode, regno);
5676 /* Do what is necessary for `va_start'. We look at the current function
5677 to determine if stdarg or varargs is used and return the address of
5678 the first unnamed parameter. */
5680 static rtx
5681 sparc_builtin_saveregs (void)
5683 int first_reg = crtl->args.info.words;
5684 rtx address;
5685 int regno;
5687 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5688 emit_move_insn (gen_rtx_MEM (word_mode,
5689 gen_rtx_PLUS (Pmode,
5690 frame_pointer_rtx,
5691 GEN_INT (FIRST_PARM_OFFSET (0)
5692 + (UNITS_PER_WORD
5693 * regno)))),
5694 gen_rtx_REG (word_mode,
5695 SPARC_INCOMING_INT_ARG_FIRST + regno));
5697 address = gen_rtx_PLUS (Pmode,
5698 frame_pointer_rtx,
5699 GEN_INT (FIRST_PARM_OFFSET (0)
5700 + UNITS_PER_WORD * first_reg));
5702 return address;
5705 /* Implement `va_start' for stdarg. */
5707 static void
5708 sparc_va_start (tree valist, rtx nextarg)
5710 nextarg = expand_builtin_saveregs ();
5711 std_expand_builtin_va_start (valist, nextarg);
5714 /* Implement `va_arg' for stdarg. */
5716 static tree
5717 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5718 gimple_seq *post_p)
5720 HOST_WIDE_INT size, rsize, align;
5721 tree addr, incr;
5722 bool indirect;
5723 tree ptrtype = build_pointer_type (type);
5725 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5727 indirect = true;
5728 size = rsize = UNITS_PER_WORD;
5729 align = 0;
5731 else
5733 indirect = false;
5734 size = int_size_in_bytes (type);
5735 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5736 align = 0;
5738 if (TARGET_ARCH64)
5740 /* For SPARC64, objects requiring 16-byte alignment get it. */
5741 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5742 align = 2 * UNITS_PER_WORD;
5744 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5745 are left-justified in their slots. */
5746 if (AGGREGATE_TYPE_P (type))
5748 if (size == 0)
5749 size = rsize = UNITS_PER_WORD;
5750 else
5751 size = rsize;
5756 incr = valist;
5757 if (align)
5759 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5760 size_int (align - 1));
5761 incr = fold_convert (sizetype, incr);
5762 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5763 size_int (-align));
5764 incr = fold_convert (ptr_type_node, incr);
5767 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5768 addr = incr;
5770 if (BYTES_BIG_ENDIAN && size < rsize)
5771 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5772 size_int (rsize - size));
5774 if (indirect)
5776 addr = fold_convert (build_pointer_type (ptrtype), addr);
5777 addr = build_va_arg_indirect_ref (addr);
5779 /* If the address isn't aligned properly for the type,
5780 we may need to copy to a temporary.
5781 FIXME: This is inefficient. Usually we can do this
5782 in registers. */
5783 else if (align == 0
5784 && TYPE_ALIGN (type) > BITS_PER_WORD)
5786 tree tmp = create_tmp_var (type, "va_arg_tmp");
5787 tree dest_addr = build_fold_addr_expr (tmp);
5789 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY], 3,
5790 dest_addr,
5791 addr,
5792 size_int (rsize));
5794 gimplify_and_add (copy, pre_p);
5795 addr = dest_addr;
5797 else
5798 addr = fold_convert (ptrtype, addr);
5800 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5801 gimplify_assign (valist, incr, post_p);
5803 return build_va_arg_indirect_ref (addr);
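/* An illustrative sketch, not part of the compiler; the names are
   assumptions for exposition only.  For the by-value case, the
   gimplified sequence built above behaves like

     char *addr = ap;
     if (align)
       addr = (char *) (((unsigned long) addr + align - 1) & -align);
     char *obj = addr;
     if (BYTES_BIG_ENDIAN && size < rsize)
       obj += rsize - size;
     ap = addr + rsize;
     return obj;

   i.e. round up to the slot alignment, right-justify small objects in
   their slot on this big-endian target, and advance the pointer by the
   rounded size.  */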
5806 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5807 Specify whether the vector mode is supported by the hardware. */
5809 static bool
5810 sparc_vector_mode_supported_p (enum machine_mode mode)
5812 return TARGET_VIS && VECTOR_MODE_P (mode);
5815 /* Return the string to output an unconditional branch to LABEL, which is
5816 the operand number of the label.
5818 DEST is the destination insn (i.e. the label), INSN is the source. */
5820 const char *
5821 output_ubranch (rtx dest, int label, rtx insn)
5823 static char string[64];
5824 bool v9_form = false;
5825 char *p;
5827 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5829 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5830 - INSN_ADDRESSES (INSN_UID (insn)));
5831 /* Leave some instructions for "slop". */
5832 if (delta >= -260000 && delta < 260000)
5833 v9_form = true;
5836 if (v9_form)
5837 strcpy (string, "ba%*,pt\t%%xcc, ");
5838 else
5839 strcpy (string, "b%*\t");
5841 p = strchr (string, '\0');
5842 *p++ = '%';
5843 *p++ = 'l';
5844 *p++ = '0' + label;
5845 *p++ = '%';
5846 *p++ = '(';
5847 *p = '\0';
5849 return string;
5852 /* Return the string to output a conditional branch to LABEL, which is
5853 the operand number of the label. OP is the conditional expression.
5854 XEXP (OP, 0) is assumed to be a condition code register (integer or
5855 floating point) and its mode specifies what kind of comparison we made.
5857 DEST is the destination insn (i.e. the label), INSN is the source.
5859 REVERSED is nonzero if we should reverse the sense of the comparison.
5861 ANNUL is nonzero if we should generate an annulling branch. */
5863 const char *
5864 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5865 rtx insn)
5867 static char string[64];
5868 enum rtx_code code = GET_CODE (op);
5869 rtx cc_reg = XEXP (op, 0);
5870 enum machine_mode mode = GET_MODE (cc_reg);
5871 const char *labelno, *branch;
5872 int spaces = 8, far;
5873 char *p;
5875 /* v9 branches are limited to +-1MB.  If it is too far away,
5876    change
5878    bne,pt %xcc, .LC30
5880    to
5882    be,pn %xcc, .+12
5883     nop
5884    ba .LC30
5886    and
5888    fbne,a,pn %fcc2, .LC29
5890    to
5892    fbe,pt %fcc2, .+16
5893     nop
5894    ba .LC29  */
5896 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5897 if (reversed ^ far)
5899 /* Reversing an FP compare takes some care: an ordered compare
5900    becomes an unordered compare and vice versa.  */
5901 if (mode == CCFPmode || mode == CCFPEmode)
5902 code = reverse_condition_maybe_unordered (code);
5903 else
5904 code = reverse_condition (code);
5907 /* Start by writing the branch condition. */
5908 if (mode == CCFPmode || mode == CCFPEmode)
5910 switch (code)
5912 case NE:
5913 branch = "fbne";
5914 break;
5915 case EQ:
5916 branch = "fbe";
5917 break;
5918 case GE:
5919 branch = "fbge";
5920 break;
5921 case GT:
5922 branch = "fbg";
5923 break;
5924 case LE:
5925 branch = "fble";
5926 break;
5927 case LT:
5928 branch = "fbl";
5929 break;
5930 case UNORDERED:
5931 branch = "fbu";
5932 break;
5933 case ORDERED:
5934 branch = "fbo";
5935 break;
5936 case UNGT:
5937 branch = "fbug";
5938 break;
5939 case UNLT:
5940 branch = "fbul";
5941 break;
5942 case UNEQ:
5943 branch = "fbue";
5944 break;
5945 case UNGE:
5946 branch = "fbuge";
5947 break;
5948 case UNLE:
5949 branch = "fbule";
5950 break;
5951 case LTGT:
5952 branch = "fblg";
5953 break;
5955 default:
5956 gcc_unreachable ();
5959 /* ??? !v9: FP branches cannot be preceded by another floating point
5960 insn. Because there is currently no concept of pre-delay slots,
5961 we can fix this only by always emitting a nop before a floating
5962 point branch. */
5964 string[0] = '\0';
5965 if (! TARGET_V9)
5966 strcpy (string, "nop\n\t");
5967 strcat (string, branch);
5969 else
5971 switch (code)
5973 case NE:
5974 branch = "bne";
5975 break;
5976 case EQ:
5977 branch = "be";
5978 break;
5979 case GE:
5980 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5981 branch = "bpos";
5982 else
5983 branch = "bge";
5984 break;
5985 case GT:
5986 branch = "bg";
5987 break;
5988 case LE:
5989 branch = "ble";
5990 break;
5991 case LT:
5992 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5993 branch = "bneg";
5994 else
5995 branch = "bl";
5996 break;
5997 case GEU:
5998 branch = "bgeu";
5999 break;
6000 case GTU:
6001 branch = "bgu";
6002 break;
6003 case LEU:
6004 branch = "bleu";
6005 break;
6006 case LTU:
6007 branch = "blu";
6008 break;
6010 default:
6011 gcc_unreachable ();
6013 strcpy (string, branch);
6015 spaces -= strlen (branch);
6016 p = strchr (string, '\0');
6018 /* Now add the annulling, the label, and a possible noop. */
6019 if (annul && ! far)
6021 strcpy (p, ",a");
6022 p += 2;
6023 spaces -= 2;
6026 if (TARGET_V9)
6028 rtx note;
6029 int v8 = 0;
6031 if (! far && insn && INSN_ADDRESSES_SET_P ())
6033 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6034 - INSN_ADDRESSES (INSN_UID (insn)));
6035 /* Leave some instructions for "slop". */
6036 if (delta < -260000 || delta >= 260000)
6037 v8 = 1;
6040 if (mode == CCFPmode || mode == CCFPEmode)
6042 static char v9_fcc_labelno[] = "%%fccX, ";
6043 /* Set the char indicating the number of the fcc reg to use. */
6044 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6045 labelno = v9_fcc_labelno;
6046 if (v8)
6048 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6049 labelno = "";
6052 else if (mode == CCXmode || mode == CCX_NOOVmode)
6054 labelno = "%%xcc, ";
6055 gcc_assert (! v8);
6057 else
6059 labelno = "%%icc, ";
6060 if (v8)
6061 labelno = "";
6064 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6066 strcpy (p,
6067 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6068 ? ",pt" : ",pn");
6069 p += 3;
6070 spaces -= 3;
6073 else
6074 labelno = "";
6076 if (spaces > 0)
6077 *p++ = '\t';
6078 else
6079 *p++ = ' ';
6080 strcpy (p, labelno);
6081 p = strchr (p, '\0');
6082 if (far)
6084 strcpy (p, ".+12\n\t nop\n\tb\t");
6085 /* Skip the next insn if requested or
6086 if we know that it will be a nop. */
6087 if (annul || ! final_sequence)
6088 p[3] = '6';
6089 p += 14;
6091 *p++ = '%';
6092 *p++ = 'l';
6093 *p++ = label + '0';
6094 *p++ = '%';
6095 *p++ = '#';
6096 *p = '\0';
6098 return string;
6101 /* Emit a library call comparison between floating point X and Y.
6102 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6103 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6104 values as arguments instead of the TFmode registers themselves;
6105 that is why we cannot call emit_float_lib_cmp.  */
6106 void
6107 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6109 const char *qpfunc;
6110 rtx slot0, slot1, result, tem, tem2;
6111 enum machine_mode mode;
6113 switch (comparison)
6115 case EQ:
6116 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6117 break;
6119 case NE:
6120 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6121 break;
6123 case GT:
6124 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6125 break;
6127 case GE:
6128 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6129 break;
6131 case LT:
6132 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6133 break;
6135 case LE:
6136 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6137 break;
6139 case ORDERED:
6140 case UNORDERED:
6141 case UNGT:
6142 case UNLT:
6143 case UNEQ:
6144 case UNGE:
6145 case UNLE:
6146 case LTGT:
6147 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6148 break;
6150 default:
6151 gcc_unreachable ();
6154 if (TARGET_ARCH64)
6156 if (GET_CODE (x) != MEM)
6158 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6159 emit_move_insn (slot0, x);
6161 else
6162 slot0 = x;
6164 if (GET_CODE (y) != MEM)
6166 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6167 emit_move_insn (slot1, y);
6169 else
6170 slot1 = y;
6172 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6173 DImode, 2,
6174 XEXP (slot0, 0), Pmode,
6175 XEXP (slot1, 0), Pmode);
6177 mode = DImode;
6179 else
6181 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6182 SImode, 2,
6183 x, TFmode, y, TFmode);
6185 mode = SImode;
6189 /* Immediately move the result of the libcall into a pseudo
6190 register so reload doesn't clobber the value if it needs
6191 the return register for a spill reg. */
6192 result = gen_reg_rtx (mode);
6193 emit_move_insn (result, hard_libcall_value (mode));
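/* The comparison routines are expected to follow the SPARC ABI
   convention (an assumption the bit tests below rely on): they return
   0 for "equal", 1 for "less", 2 for "greater" and 3 for "unordered".
   Hence, e.g., UNLT tests the low bit (1 or 3) and UNGT tests for a
   result greater than 1 (2 or 3).  */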
6195 switch (comparison)
6197 default:
6198 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6199 break;
6200 case ORDERED:
6201 case UNORDERED:
6202 emit_cmp_insn (result, GEN_INT (3), comparison == UNORDERED ? EQ : NE,
6203 NULL_RTX, mode, 0);
6204 break;
6205 case UNGT:
6206 case UNGE:
6207 emit_cmp_insn (result, const1_rtx,
6208 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6209 break;
6210 case UNLE:
6211 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6212 break;
6213 case UNLT:
6214 tem = gen_reg_rtx (mode);
6215 if (TARGET_ARCH32)
6216 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6217 else
6218 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6219 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6220 break;
6221 case UNEQ:
6222 case LTGT:
6223 tem = gen_reg_rtx (mode);
6224 if (TARGET_ARCH32)
6225 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6226 else
6227 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6228 tem2 = gen_reg_rtx (mode);
6229 if (TARGET_ARCH32)
6230 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6231 else
6232 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6233 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6234 NULL_RTX, mode, 0);
6235 break;
6239 /* Generate an unsigned DImode to FP conversion. This is the same code
6240 optabs would emit if we didn't have TFmode patterns. */
6242 void
6243 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6245 rtx neglab, donelab, i0, i1, f0, in, out;
6247 out = operands[0];
6248 in = force_reg (DImode, operands[1]);
6249 neglab = gen_label_rtx ();
6250 donelab = gen_label_rtx ();
6251 i0 = gen_reg_rtx (DImode);
6252 i1 = gen_reg_rtx (DImode);
6253 f0 = gen_reg_rtx (mode);
6255 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6257 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6258 emit_jump_insn (gen_jump (donelab));
6259 emit_barrier ();
6261 emit_label (neglab);
6263 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6264 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6265 emit_insn (gen_iordi3 (i0, i0, i1));
6266 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6267 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6269 emit_label (donelab);
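/* An illustrative sketch, not compiler code (shown for "double"; the
   actual mode is the MODE argument): the RTL emitted above computes

     if ((long long) x >= 0)
       return (double) (long long) x;
     unsigned long long half = (x >> 1) | (x & 1);
     double f = (double) (long long) half;
     return f + f;

   Folding the low bit into the halved value keeps the rounding of the
   final result correct.  */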
6272 /* Generate an FP to unsigned DImode conversion. This is the same code
6273 optabs would emit if we didn't have TFmode patterns. */
6275 void
6276 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6278 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6280 out = operands[0];
6281 in = force_reg (mode, operands[1]);
6282 neglab = gen_label_rtx ();
6283 donelab = gen_label_rtx ();
6284 i0 = gen_reg_rtx (DImode);
6285 i1 = gen_reg_rtx (DImode);
6286 limit = gen_reg_rtx (mode);
6287 f0 = gen_reg_rtx (mode);
6289 emit_move_insn (limit,
6290 CONST_DOUBLE_FROM_REAL_VALUE (
6291 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6292 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6294 emit_insn (gen_rtx_SET (VOIDmode,
6295 out,
6296 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6297 emit_jump_insn (gen_jump (donelab));
6298 emit_barrier ();
6300 emit_label (neglab);
6302 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6303 emit_insn (gen_rtx_SET (VOIDmode,
6304 i0,
6305 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6306 emit_insn (gen_movdi (i1, const1_rtx));
6307 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6308 emit_insn (gen_xordi3 (out, i0, i1));
6310 emit_label (donelab);
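/* An illustrative sketch, not compiler code: the sequence above is
   equivalent to

     if (x < 9223372036854775808.0)
       return (unsigned long long) (long long) x;
     return (unsigned long long) (long long) (x - 9223372036854775808.0)
            ^ (1ULL << 63);

   where 9223372036854775808.0 is 2^63: values too big for a signed
   conversion are rebased below 2^63 and the top bit is restored with
   the xor.  */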
6313 /* Return the string to output a conditional branch to LABEL, testing
6314 register REG. LABEL is the operand number of the label; REG is the
6315 operand number of the reg. OP is the conditional expression. The mode
6316 of REG says what kind of comparison we made.
6318 DEST is the destination insn (i.e. the label), INSN is the source.
6320 REVERSED is nonzero if we should reverse the sense of the comparison.
6322 ANNUL is nonzero if we should generate an annulling branch. */
6324 const char *
6325 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6326 int annul, rtx insn)
6328 static char string[64];
6329 enum rtx_code code = GET_CODE (op);
6330 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6331 rtx note;
6332 int far;
6333 char *p;
6335 /* Branches on registers are limited to +-128KB.  If it is too far away,
6336    change
6338    brnz,pt %g1, .LC30
6340    to
6342    brz,pn %g1, .+12
6343     nop
6344    ba,pt %xcc, .LC30
6346    and
6348    brgez,a,pn %o1, .LC29
6350    to
6352    brlz,pt %o1, .+16
6353     nop
6354    ba,pt %xcc, .LC29  */
6356 far = get_attr_length (insn) >= 3;
6358 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6359 if (reversed ^ far)
6360 code = reverse_condition (code);
6362 /* Only 64 bit versions of these instructions exist. */
6363 gcc_assert (mode == DImode);
6365 /* Start by writing the branch condition. */
6367 switch (code)
6369 case NE:
6370 strcpy (string, "brnz");
6371 break;
6373 case EQ:
6374 strcpy (string, "brz");
6375 break;
6377 case GE:
6378 strcpy (string, "brgez");
6379 break;
6381 case LT:
6382 strcpy (string, "brlz");
6383 break;
6385 case LE:
6386 strcpy (string, "brlez");
6387 break;
6389 case GT:
6390 strcpy (string, "brgz");
6391 break;
6393 default:
6394 gcc_unreachable ();
6397 p = strchr (string, '\0');
6399 /* Now add the annulling, reg, label, and nop. */
6400 if (annul && ! far)
6402 strcpy (p, ",a");
6403 p += 2;
6406 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6408 strcpy (p,
6409 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6410 ? ",pt" : ",pn");
6411 p += 3;
6414 *p = p < string + 8 ? '\t' : ' ';
6415 p++;
6416 *p++ = '%';
6417 *p++ = '0' + reg;
6418 *p++ = ',';
6419 *p++ = ' ';
6420 if (far)
6422 int veryfar = 1, delta;
6424 if (INSN_ADDRESSES_SET_P ())
6426 delta = (INSN_ADDRESSES (INSN_UID (dest))
6427 - INSN_ADDRESSES (INSN_UID (insn)));
6428 /* Leave some instructions for "slop". */
6429 if (delta >= -260000 && delta < 260000)
6430 veryfar = 0;
6433 strcpy (p, ".+12\n\t nop\n\t");
6434 /* Skip the next insn if requested or
6435 if we know that it will be a nop. */
6436 if (annul || ! final_sequence)
6437 p[3] = '6';
6438 p += 12;
6439 if (veryfar)
6441 strcpy (p, "b\t");
6442 p += 2;
6444 else
6446 strcpy (p, "ba,pt\t%%xcc, ");
6447 p += 13;
6450 *p++ = '%';
6451 *p++ = 'l';
6452 *p++ = '0' + label;
6453 *p++ = '%';
6454 *p++ = '#';
6455 *p = '\0';
6457 return string;
6460 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6461    Such instructions cannot be used in the delay slot of a return insn on v9.
6462    If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.  */
6465 static int
6466 epilogue_renumber (register rtx *where, int test)
6468 register const char *fmt;
6469 register int i;
6470 register enum rtx_code code;
6472 if (*where == 0)
6473 return 0;
6475 code = GET_CODE (*where);
6477 switch (code)
6479 case REG:
6480 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6481 return 1;
6482 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6483 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO (*where)));
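/* Fall through.  */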
6484 case SCRATCH:
6485 case CC0:
6486 case PC:
6487 case CONST_INT:
6488 case CONST_DOUBLE:
6489 return 0;
6491 /* Do not replace the frame pointer with the stack pointer because
6492 it can cause the delayed instruction to load below the stack.
6493 This occurs when instructions like:
6495 (set (reg/i:SI 24 %i0)
6496 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6497 (const_int -20 [0xffffffec])) 0))
6499 are in the return delay slot.  */
6500 case PLUS:
6501 if (GET_CODE (XEXP (*where, 0)) == REG
6502 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6503 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6504 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6505 return 1;
6506 break;
6508 case MEM:
6509 if (SPARC_STACK_BIAS
6510 && GET_CODE (XEXP (*where, 0)) == REG
6511 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6512 return 1;
6513 break;
6515 default:
6516 break;
6519 fmt = GET_RTX_FORMAT (code);
6521 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6523 if (fmt[i] == 'E')
6525 register int j;
6526 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6527 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6528 return 1;
6530 else if (fmt[i] == 'e'
6531 && epilogue_renumber (&(XEXP (*where, i)), test))
6532 return 1;
6534 return 0;
6537 /* Leaf functions and non-leaf functions have different needs. */
6539 static const int
6540 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6542 static const int
6543 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6545 static const int *const reg_alloc_orders[] = {
6546 reg_leaf_alloc_order,
6547 reg_nonleaf_alloc_order};
6549 void
6550 order_regs_for_local_alloc (void)
6552 static int last_order_nonleaf = 1;
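/* Hard register 15 is %o7, which holds the return address of a call,
   so it is ever-live exactly when the current function makes calls,
   i.e. is not a leaf function.  */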
6554 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6556 last_order_nonleaf = !last_order_nonleaf;
6557 memcpy ((char *) reg_alloc_order,
6558 (const char *) reg_alloc_orders[last_order_nonleaf],
6559 FIRST_PSEUDO_REGISTER * sizeof (int));
6563 /* Return 1 if REG and MEM are legitimate enough to allow the various
6564 mem<-->reg splits to be run. */
6566 int
6567 sparc_splitdi_legitimate (rtx reg, rtx mem)
6569 /* Punt if we are here by mistake. */
6570 gcc_assert (reload_completed);
6572 /* We must have an offsettable memory reference. */
6573 if (! offsettable_memref_p (mem))
6574 return 0;
6576 /* If we have legitimate args for ldd/std, we do not want
6577 the split to happen. */
6578 if ((REGNO (reg) % 2) == 0
6579 && mem_min_alignment (mem, 8))
6580 return 0;
6582 /* Success. */
6583 return 1;
6586 /* Return 1 if x and y are some kind of REG and they refer to
6587 different hard registers. This test is guaranteed to be
6588 run after reload. */
6590 int
6591 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6593 if (GET_CODE (x) != REG)
6594 return 0;
6595 if (GET_CODE (y) != REG)
6596 return 0;
6597 if (REGNO (x) == REGNO (y))
6598 return 0;
6599 return 1;
6602 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6603 This makes them candidates for using ldd and std insns.
6605 Note reg1 and reg2 *must* be hard registers. */
6607 int
6608 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6610 /* We might have been passed a SUBREG. */
6611 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6612 return 0;
6614 if (REGNO (reg1) % 2 != 0)
6615 return 0;
6617 /* Integer ldd is deprecated in SPARC V9.  */
6618 if (TARGET_V9 && REGNO (reg1) < 32)
6619 return 0;
6621 return (REGNO (reg1) == REGNO (reg2) - 1);
6624 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6625 an ldd or std insn.
6627 This can only happen when addr1 and addr2, the addresses in mem1
6628 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6629 addr1 must also be aligned on a 64-bit boundary.
6631 Also iff dependent_reg_rtx is not null it should not be used to
6632 compute the address for mem1, i.e. we cannot optimize a sequence
6633 like:
6634    ld [%o0], %o0
6635    ld [%o0 + 4], %o1
6636    to
6637    ldd [%o0], %o0
6638    nor:
6639    ld [%g3 + 4], %g3
6640    ld [%g3], %g2
6641    to
6642    ldd [%g3], %g2
6644    But, note that the transformation from:
6645    ld [%g2 + 4], %g3
6646    ld [%g2], %g2
6647    to
6648    ldd [%g2], %g2
6649 is perfectly fine. Thus, the peephole2 patterns always pass us
6650 the destination register of the first load, never the second one.
6652 For stores we don't have a similar problem, so dependent_reg_rtx is
6653 NULL_RTX. */
6655 int
6656 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6658 rtx addr1, addr2;
6659 unsigned int reg1;
6660 HOST_WIDE_INT offset1;
6662 /* The mems cannot be volatile. */
6663 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6664 return 0;
6666 /* MEM1 should be aligned on a 64-bit boundary. */
6667 if (MEM_ALIGN (mem1) < 64)
6668 return 0;
6670 addr1 = XEXP (mem1, 0);
6671 addr2 = XEXP (mem2, 0);
6673 /* Extract a register number and offset (if used) from the first addr. */
6674 if (GET_CODE (addr1) == PLUS)
6676 /* If not a REG, return zero. */
6677 if (GET_CODE (XEXP (addr1, 0)) != REG)
6678 return 0;
6679 else
6681 reg1 = REGNO (XEXP (addr1, 0));
6682 /* The offset must be constant! */
6683 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6684 return 0;
6685 offset1 = INTVAL (XEXP (addr1, 1));
6688 else if (GET_CODE (addr1) != REG)
6689 return 0;
6690 else
6692 reg1 = REGNO (addr1);
6693 /* This was a simple (mem (reg)) expression. Offset is 0. */
6694 offset1 = 0;
6697 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
6698 if (GET_CODE (addr2) != PLUS)
6699 return 0;
6701 if (GET_CODE (XEXP (addr2, 0)) != REG
6702 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6703 return 0;
6705 if (reg1 != REGNO (XEXP (addr2, 0)))
6706 return 0;
6708 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6709 return 0;
6711 /* The first offset must be evenly divisible by 8 to ensure the
6712    address is 64-bit aligned.  */
6713 if (offset1 % 8 != 0)
6714 return 0;
6716 /* The offset for the second addr must be 4 more than the first addr. */
6717 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6718 return 0;
6720 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6721 instructions. */
6722 return 1;
6725 /* Return 1 if reg is a pseudo, or is the first register in
6726 a hard register pair. This makes it a candidate for use in
6727 ldd and std insns. */
6729 int
6730 register_ok_for_ldd (rtx reg)
6732 /* We might have been passed a SUBREG. */
6733 if (GET_CODE (reg) != REG)
6734 return 0;
6736 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6737 return (REGNO (reg) % 2 == 0);
6738 else
6739 return 1;
6742 /* Print operand X (an rtx) in assembler syntax to file FILE.
6743 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6744 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6746 void
6747 print_operand (FILE *file, rtx x, int code)
6749 switch (code)
6751 case '#':
6752 /* Output an insn in a delay slot. */
6753 if (final_sequence)
6754 sparc_indent_opcode = 1;
6755 else
6756 fputs ("\n\t nop", file);
6757 return;
6758 case '*':
6759 /* Output an annul flag if there's nothing for the delay slot and we
6760 are optimizing. This is always used with '(' below.
6761 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6762 this is a dbx bug. So, we only do this when optimizing.
6763 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6764 Always emit a nop in case the next instruction is a branch. */
6765 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6766 fputs (",a", file);
6767 return;
6768 case '(':
6769 /* Output a 'nop' if there's nothing for the delay slot and we are
6770 not optimizing. This is always used with '*' above. */
6771 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6772 fputs ("\n\t nop", file);
6773 else if (final_sequence)
6774 sparc_indent_opcode = 1;
6775 return;
6776 case ')':
6777 /* Output the right displacement from the saved PC on function return.
6778 The caller may have placed an "unimp" insn immediately after the call
6779 so we have to account for it. This insn is used in the 32-bit ABI
6780 when calling a function that returns a non zero-sized structure. The
6781 64-bit ABI doesn't have it. Be careful to have this test be the same
6782 as that used on the call. The exception here is that when
6783 sparc_std_struct_return is enabled, the psABI is followed exactly
6784 and the adjustment is made by the code in sparc_struct_value_rtx.
6785 The call emitted is the same when sparc_std_struct_return is
6786 present. */
6787 if (! TARGET_ARCH64
6788 && cfun->returns_struct
6789 && ! sparc_std_struct_return
6790 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6791 == INTEGER_CST)
6792 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6793 fputs ("12", file);
6794 else
6795 fputc ('8', file);
6796 return;
6797 case '_':
6798 /* Output the Embedded Medium/Anywhere code model base register. */
6799 fputs (EMBMEDANY_BASE_REG, file);
6800 return;
6801 case '&':
6802 /* Print some local dynamic TLS name. */
6803 assemble_name (file, get_some_local_dynamic_name ());
6804 return;
6806 case 'Y':
6807 /* Adjust the operand to take into account a RESTORE operation. */
6808 if (GET_CODE (x) == CONST_INT)
6809 break;
6810 else if (GET_CODE (x) != REG)
6811 output_operand_lossage ("invalid %%Y operand");
6812 else if (REGNO (x) < 8)
6813 fputs (reg_names[REGNO (x)], file);
6814 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6815 fputs (reg_names[REGNO (x)-16], file);
6816 else
6817 output_operand_lossage ("invalid %%Y operand");
6818 return;
6819 case 'L':
6820 /* Print out the low order register name of a register pair. */
6821 if (WORDS_BIG_ENDIAN)
6822 fputs (reg_names[REGNO (x)+1], file);
6823 else
6824 fputs (reg_names[REGNO (x)], file);
6825 return;
6826 case 'H':
6827 /* Print out the high order register name of a register pair. */
6828 if (WORDS_BIG_ENDIAN)
6829 fputs (reg_names[REGNO (x)], file);
6830 else
6831 fputs (reg_names[REGNO (x)+1], file);
6832 return;
6833 case 'R':
6834 /* Print out the second register name of a register pair or quad.
6835 I.e., R (%o0) => %o1. */
6836 fputs (reg_names[REGNO (x)+1], file);
6837 return;
6838 case 'S':
6839 /* Print out the third register name of a register quad.
6840 I.e., S (%o0) => %o2. */
6841 fputs (reg_names[REGNO (x)+2], file);
6842 return;
6843 case 'T':
6844 /* Print out the fourth register name of a register quad.
6845 I.e., T (%o0) => %o3. */
6846 fputs (reg_names[REGNO (x)+3], file);
6847 return;
6848 case 'x':
6849 /* Print a condition code register. */
6850 if (REGNO (x) == SPARC_ICC_REG)
6852 /* We don't handle CC[X]_NOOVmode because they're not supposed
6853 to occur here. */
6854 if (GET_MODE (x) == CCmode)
6855 fputs ("%icc", file);
6856 else if (GET_MODE (x) == CCXmode)
6857 fputs ("%xcc", file);
6858 else
6859 gcc_unreachable ();
6861 else
6862 /* %fccN register */
6863 fputs (reg_names[REGNO (x)], file);
6864 return;
6865 case 'm':
6866 /* Print the operand's address only. */
6867 output_address (XEXP (x, 0));
6868 return;
6869 case 'r':
6870 /* In this case we need a register. Use %g0 if the
6871 operand is const0_rtx. */
6872 if (x == const0_rtx
6873 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6875 fputs ("%g0", file);
6876 return;
6878 else
6879 break;
6881 case 'A':
6882 switch (GET_CODE (x))
6884 case IOR: fputs ("or", file); break;
6885 case AND: fputs ("and", file); break;
6886 case XOR: fputs ("xor", file); break;
6887 default: output_operand_lossage ("invalid %%A operand");
6889 return;
6891 case 'B':
6892 switch (GET_CODE (x))
6894 case IOR: fputs ("orn", file); break;
6895 case AND: fputs ("andn", file); break;
6896 case XOR: fputs ("xnor", file); break;
6897 default: output_operand_lossage ("invalid %%B operand");
6899 return;
6901 /* These are used by the conditional move instructions. */
6902 case 'c' :
6903 case 'C':
6905 enum rtx_code rc = GET_CODE (x);
6907 if (code == 'c')
6909 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6910 if (mode == CCFPmode || mode == CCFPEmode)
6911 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6912 else
6913 rc = reverse_condition (GET_CODE (x));
6915 switch (rc)
6917 case NE: fputs ("ne", file); break;
6918 case EQ: fputs ("e", file); break;
6919 case GE: fputs ("ge", file); break;
6920 case GT: fputs ("g", file); break;
6921 case LE: fputs ("le", file); break;
6922 case LT: fputs ("l", file); break;
6923 case GEU: fputs ("geu", file); break;
6924 case GTU: fputs ("gu", file); break;
6925 case LEU: fputs ("leu", file); break;
6926 case LTU: fputs ("lu", file); break;
6927 case LTGT: fputs ("lg", file); break;
6928 case UNORDERED: fputs ("u", file); break;
6929 case ORDERED: fputs ("o", file); break;
6930 case UNLT: fputs ("ul", file); break;
6931 case UNLE: fputs ("ule", file); break;
6932 case UNGT: fputs ("ug", file); break;
6933 case UNGE: fputs ("uge", file); break;
6934 case UNEQ: fputs ("ue", file); break;
6935 default: output_operand_lossage (code == 'c'
6936 ? "invalid %%c operand"
6937 : "invalid %%C operand");
6939 return;
6942 /* These are used by the movr instruction pattern. */
6943 case 'd':
6944 case 'D':
6946 enum rtx_code rc = (code == 'd'
6947 ? reverse_condition (GET_CODE (x))
6948 : GET_CODE (x));
6949 switch (rc)
6951 case NE: fputs ("ne", file); break;
6952 case EQ: fputs ("e", file); break;
6953 case GE: fputs ("gez", file); break;
6954 case LT: fputs ("lz", file); break;
6955 case LE: fputs ("lez", file); break;
6956 case GT: fputs ("gz", file); break;
6957 default: output_operand_lossage (code == 'd'
6958 ? "invalid %%d operand"
6959 : "invalid %%D operand");
6961 return;
6964 case 'b':
6966 /* Print a sign-extended character. */
6967 int i = trunc_int_for_mode (INTVAL (x), QImode);
6968 fprintf (file, "%d", i);
6969 return;
6972 case 'f':
6973 /* Operand must be a MEM; write its address. */
6974 if (GET_CODE (x) != MEM)
6975 output_operand_lossage ("invalid %%f operand");
6976 output_address (XEXP (x, 0));
6977 return;
6979 case 's':
6981 /* Print a sign-extended 32-bit value. */
6982 HOST_WIDE_INT i;
6983 if (GET_CODE (x) == CONST_INT)
6984 i = INTVAL (x);
6985 else if (GET_CODE (x) == CONST_DOUBLE)
6986 i = CONST_DOUBLE_LOW (x);
6987 else
6989 output_operand_lossage ("invalid %%s operand");
6990 return;
6992 i = trunc_int_for_mode (i, SImode);
6993 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6994 return;
6997 case 0:
6998 /* Do nothing special. */
6999 break;
7001 default:
7002 /* Undocumented flag. */
7003 output_operand_lossage ("invalid operand output code");
7006 if (GET_CODE (x) == REG)
7007 fputs (reg_names[REGNO (x)], file);
7008 else if (GET_CODE (x) == MEM)
7010 fputc ('[', file);
7011 /* Poor Sun assembler doesn't understand absolute addressing. */
7012 if (CONSTANT_P (XEXP (x, 0)))
7013 fputs ("%g0+", file);
7014 output_address (XEXP (x, 0));
7015 fputc (']', file);
7017 else if (GET_CODE (x) == HIGH)
7019 fputs ("%hi(", file);
7020 output_addr_const (file, XEXP (x, 0));
7021 fputc (')', file);
7023 else if (GET_CODE (x) == LO_SUM)
7025 print_operand (file, XEXP (x, 0), 0);
7026 if (TARGET_CM_MEDMID)
7027 fputs ("+%l44(", file);
7028 else
7029 fputs ("+%lo(", file);
7030 output_addr_const (file, XEXP (x, 1));
7031 fputc (')', file);
7033 else if (GET_CODE (x) == CONST_DOUBLE
7034 && (GET_MODE (x) == VOIDmode
7035 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7037 if (CONST_DOUBLE_HIGH (x) == 0)
7038 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7039 else if (CONST_DOUBLE_HIGH (x) == -1
7040 && CONST_DOUBLE_LOW (x) < 0)
7041 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7042 else
7043 output_operand_lossage ("long long constant not a valid immediate operand");
7045 else if (GET_CODE (x) == CONST_DOUBLE)
7046 output_operand_lossage ("floating point constant not a valid immediate operand");
7047 else { output_addr_const (file, x); }
7050 /* Target hook for assembling integer objects. The sparc version has
7051 special handling for aligned DI-mode objects. */
7053 static bool
7054 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7056 /* ??? We only output .xword's for symbols and only then in environments
7057 where the assembler can handle them. */
7058 if (aligned_p && size == 8
7059 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7061 if (TARGET_V9)
7063 assemble_integer_with_op ("\t.xword\t", x);
7064 return true;
7066 else
7068 assemble_aligned_integer (4, const0_rtx);
7069 assemble_aligned_integer (4, x);
7070 return true;
7073 return default_assemble_integer (x, size, aligned_p);
7076 /* Return the value of a code used in the .proc pseudo-op that says
7077 what kind of result this function returns. For non-C types, we pick
7078 the closest C type. */
7080 #ifndef SHORT_TYPE_SIZE
7081 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7082 #endif
7084 #ifndef INT_TYPE_SIZE
7085 #define INT_TYPE_SIZE BITS_PER_WORD
7086 #endif
7088 #ifndef LONG_TYPE_SIZE
7089 #define LONG_TYPE_SIZE BITS_PER_WORD
7090 #endif
7092 #ifndef LONG_LONG_TYPE_SIZE
7093 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7094 #endif
7096 #ifndef FLOAT_TYPE_SIZE
7097 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7098 #endif
7100 #ifndef DOUBLE_TYPE_SIZE
7101 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7102 #endif
7104 #ifndef LONG_DOUBLE_TYPE_SIZE
7105 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7106 #endif
7108 unsigned long
7109 sparc_type_code (register tree type)
7111 register unsigned long qualifiers = 0;
7112 register unsigned shift;
7114 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7115 setting more, since some assemblers will give an error for this. Also,
7116 we must be careful to avoid shifts of 32 bits or more to avoid getting
7117 unpredictable results. */
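/* For example (illustrative only): for the C declaration "int *a[]"
   the loop below sees ARRAY_TYPE, then POINTER_TYPE, then
   INTEGER_TYPE, and returns (3 << 6) | (1 << 8) | 4.  */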
7119 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7121 switch (TREE_CODE (type))
7123 case ERROR_MARK:
7124 return qualifiers;
7126 case ARRAY_TYPE:
7127 qualifiers |= (3 << shift);
7128 break;
7130 case FUNCTION_TYPE:
7131 case METHOD_TYPE:
7132 qualifiers |= (2 << shift);
7133 break;
7135 case POINTER_TYPE:
7136 case REFERENCE_TYPE:
7137 case OFFSET_TYPE:
7138 qualifiers |= (1 << shift);
7139 break;
7141 case RECORD_TYPE:
7142 return (qualifiers | 8);
7144 case UNION_TYPE:
7145 case QUAL_UNION_TYPE:
7146 return (qualifiers | 9);
7148 case ENUMERAL_TYPE:
7149 return (qualifiers | 10);
7151 case VOID_TYPE:
7152 return (qualifiers | 16);
7154 case INTEGER_TYPE:
7155 /* If this is a range type, consider it to be the underlying
7156 type. */
7157 if (TREE_TYPE (type) != 0)
7158 break;
7160 /* Carefully distinguish all the standard types of C,
7161 without messing up if the language is not C. We do this by
7162 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7163 look at both the names and the above fields, but that's redundant.
7164 Any type whose size is between two C types will be considered
7165 to be the wider of the two types. Also, we do not have a
7166 special code to use for "long long", so anything wider than
7167 long is treated the same. Note that we can't distinguish
7168 between "int" and "long" in this code if they are the same
7169 size, but that's fine, since neither can the assembler. */
7171 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7172 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7174 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7175 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7177 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7178 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7180 else
7181 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7183 case REAL_TYPE:
7184 /* If this is a range type, consider it to be the underlying
7185 type. */
7186 if (TREE_TYPE (type) != 0)
7187 break;
7189 /* Carefully distinguish all the standard types of C,
7190 without messing up if the language is not C. */
7192 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7193 return (qualifiers | 6);
7195 else
7196 return (qualifiers | 7);
7198 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7199 /* ??? We need to distinguish between double and float complex types,
7200 but I don't know how yet because I can't reach this code from
7201 existing front-ends. */
7202 return (qualifiers | 7); /* Who knows? */
7204 case VECTOR_TYPE:
7205 case BOOLEAN_TYPE: /* Boolean truth value type. */
7206 case LANG_TYPE: /* ? */
7207 return qualifiers;
7209 default:
7210 gcc_unreachable (); /* Not a type! */
7214 return qualifiers;
7217 /* Nested function support. */
7219 /* Emit RTL insns to initialize the variable parts of a trampoline.
7220 FNADDR is an RTX for the address of the function's pure code.
7221 CXT is an RTX for the static chain value for the function.
7223 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7224 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7225 (to store insns). This is a bit excessive. Perhaps a different
7226 mechanism would be better here.
7228 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7230 void
7231 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7233 /* SPARC 32-bit trampoline:
7235 sethi %hi(fn), %g1
7236 sethi %hi(static), %g2
7237 jmp %g1+%lo(fn)
7238 or %g2, %lo(static), %g2
7240 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7241 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
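/* Hence the magic constants below, decoded here for clarity:
   0x03000000 is "sethi 0, %g1" and 0x05000000 is "sethi 0, %g2", so
   OR-ing in fnaddr >> 10 and cxt >> 10 fills in the imm22 fields;
   0x81c06000 is "jmpl %g1+0, %g0" and 0x8410a000 is "or %g2, 0, %g2",
   so the low 10 bits of the addresses fill in the simm13 fields.  */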
7244 emit_move_insn
7245 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7246 expand_binop (SImode, ior_optab,
7247 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7248 size_int (10), 0, 1),
7249 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7250 NULL_RTX, 1, OPTAB_DIRECT));
7252 emit_move_insn
7253 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7254 expand_binop (SImode, ior_optab,
7255 expand_shift (RSHIFT_EXPR, SImode, cxt,
7256 size_int (10), 0, 1),
7257 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7258 NULL_RTX, 1, OPTAB_DIRECT));
7260 emit_move_insn
7261 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7262 expand_binop (SImode, ior_optab,
7263 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7264 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7265 NULL_RTX, 1, OPTAB_DIRECT));
7267 emit_move_insn
7268 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7269 expand_binop (SImode, ior_optab,
7270 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7271 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7272 NULL_RTX, 1, OPTAB_DIRECT));
7274 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7275 aligned on a 16 byte boundary so one flush clears it all. */
7276 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7277 if (sparc_cpu != PROCESSOR_ULTRASPARC
7278 && sparc_cpu != PROCESSOR_ULTRASPARC3
7279 && sparc_cpu != PROCESSOR_NIAGARA
7280 && sparc_cpu != PROCESSOR_NIAGARA2)
7281 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7282 plus_constant (tramp, 8)))));
7284 /* Call __enable_execute_stack after writing onto the stack to make sure
7285 the stack address is accessible. */
7286 #ifdef ENABLE_EXECUTE_STACK
7287 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7288 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7289 #endif
7293 /* The 64-bit version is simpler because it makes more sense to load the
7294 values as "immediate" data out of the trampoline. It's also easier since
7295 we can read the PC without clobbering a register. */
7297 void
7298 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7300 /* SPARC 64-bit trampoline:
7302 rd %pc, %g1
7303 ldx [%g1+24], %g5
7304 jmp %g5
7305 ldx [%g1+16], %g5
7306 +16 bytes data
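/* The four SImode constants stored below are simply the machine
   encodings of the four instructions listed above; the static chain
   and the function address then follow as data at offsets 16 and 24,
   where the two ldx instructions expect them.  */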
7309 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7310 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7311 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7312 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7313 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7314 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7315 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7316 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7317 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7318 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7319 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7321 if (sparc_cpu != PROCESSOR_ULTRASPARC
7322 && sparc_cpu != PROCESSOR_ULTRASPARC3
7323 && sparc_cpu != PROCESSOR_NIAGARA
7324 && sparc_cpu != PROCESSOR_NIAGARA2)
7325 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7327 /* Call __enable_execute_stack after writing onto the stack to make sure
7328 the stack address is accessible. */
7329 #ifdef ENABLE_EXECUTE_STACK
7330 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7331 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7332 #endif
7335 /* Adjust the cost of a scheduling dependency.  Return the new cost of
7336    a dependency LINK of INSN on DEP_INSN.  COST is the current cost.  */
7338 static int
7339 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7341 enum attr_type insn_type;
7343 if (! recog_memoized (insn))
7344 return 0;
7346 insn_type = get_attr_type (insn);
7348 if (REG_NOTE_KIND (link) == 0)
7350 /* Data dependency; DEP_INSN writes a register that INSN reads some
7351 cycles later. */
7353 /* If a load, then the dependence must be on the memory address;
7354    add an extra "cycle".  Note that the cost could be two cycles
7355    if the reg was written late in an instruction group; we cannot
7356    tell here.  */
7357 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7358 return cost + 3;
7360 /* Get the delay only if the address of the store is the dependence. */
7361 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7363 rtx pat = PATTERN (insn);
7364 rtx dep_pat = PATTERN (dep_insn);
7366 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7367 return cost; /* This should not happen! */
7369 /* The dependency between the two instructions was on the data that
7370 is being stored. Assume that this implies that the address of the
7371 store is not dependent. */
7372 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7373 return cost;
7375 return cost + 3; /* An approximation. */
7378 /* A shift instruction cannot receive its data from an instruction
7379 in the same cycle; add a one cycle penalty. */
7380 if (insn_type == TYPE_SHIFT)
7381 return cost + 3; /* Split before cascade into shift. */
7383 else
7385 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7386 INSN writes some cycles later. */
7388 /* These are only significant for the fpu unit; writing a fp reg before
7389 the fpu has finished with it stalls the processor. */
7391 /* Reusing an integer register causes no problems. */
7392 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7393 return 0;
7396 return cost;
7399 static int
7400 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7402 enum attr_type insn_type, dep_type;
7403 rtx pat = PATTERN (insn);
7404 rtx dep_pat = PATTERN (dep_insn);
7406 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7407 return cost;
7409 insn_type = get_attr_type (insn);
7410 dep_type = get_attr_type (dep_insn);
7412 switch (REG_NOTE_KIND (link))
7414 case 0:
7415 /* Data dependency; DEP_INSN writes a register that INSN reads some
7416 cycles later. */
7418 switch (insn_type)
7420 case TYPE_STORE:
7421 case TYPE_FPSTORE:
7422 /* Get the delay iff the address of the store is the dependence. */
7423 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7424 return cost;
7426 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7427 return cost;
7428 return cost + 3;
7430 case TYPE_LOAD:
7431 case TYPE_SLOAD:
7432 case TYPE_FPLOAD:
7433 /* If a load, then the dependence must be on the memory address.  If
7434    the addresses aren't equal, then it might be a false dependency.  */
7435 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7437 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7438 || GET_CODE (SET_DEST (dep_pat)) != MEM
7439 || GET_CODE (SET_SRC (pat)) != MEM
7440 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7441 XEXP (SET_SRC (pat), 0)))
7442 return cost + 2;
7444 return cost + 8;
7446 break;
7448 case TYPE_BRANCH:
7449 /* Compare to branch latency is 0. There is no benefit from
7450 separating compare and branch. */
7451 if (dep_type == TYPE_COMPARE)
7452 return 0;
7453 /* Floating point compare to branch latency is less than
7454 compare to conditional move. */
7455 if (dep_type == TYPE_FPCMP)
7456 return cost - 1;
7457 break;
7458 default:
7459 break;
7461 break;
7463 case REG_DEP_ANTI:
7464 /* Anti-dependencies only penalize the fpu unit. */
7465 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7466 return 0;
7467 break;
7469 default:
7470 break;
7473 return cost;
7476 static int
7477 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7479 switch (sparc_cpu)
7481 case PROCESSOR_SUPERSPARC:
7482 cost = supersparc_adjust_cost (insn, link, dep, cost);
7483 break;
7484 case PROCESSOR_HYPERSPARC:
7485 case PROCESSOR_SPARCLITE86X:
7486 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7487 break;
7488 default:
7489 break;
7491 return cost;
7494 static void
7495 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7496 int sched_verbose ATTRIBUTE_UNUSED,
7497 int max_ready ATTRIBUTE_UNUSED)
7501 static int
7502 sparc_use_sched_lookahead (void)
7504 if (sparc_cpu == PROCESSOR_NIAGARA
7505 || sparc_cpu == PROCESSOR_NIAGARA2)
7506 return 0;
7507 if (sparc_cpu == PROCESSOR_ULTRASPARC
7508 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7509 return 4;
7510 if ((1 << sparc_cpu) &
7511 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7512 (1 << PROCESSOR_SPARCLITE86X)))
7513 return 3;
7514 return 0;
7517 static int
7518 sparc_issue_rate (void)
7520 switch (sparc_cpu)
7522 case PROCESSOR_NIAGARA:
7523 case PROCESSOR_NIAGARA2:
7524 default:
7525 return 1;
7526 case PROCESSOR_V9:
7527 /* Assume V9 processors are capable of at least dual-issue. */
7528 return 2;
7529 case PROCESSOR_SUPERSPARC:
7530 return 3;
7531 case PROCESSOR_HYPERSPARC:
7532 case PROCESSOR_SPARCLITE86X:
7533 return 2;
7534 case PROCESSOR_ULTRASPARC:
7535 case PROCESSOR_ULTRASPARC3:
7536 return 4;
7540 static int
7541 set_extends (rtx insn)
7543 register rtx pat = PATTERN (insn);
7545 switch (GET_CODE (SET_SRC (pat)))
7547 /* Load and some shift instructions zero extend. */
7548 case MEM:
7549 case ZERO_EXTEND:
7550 /* sethi clears the high bits */
7551 case HIGH:
7552 /* LO_SUM is used with sethi. sethi cleared the high
7553 bits and the values used with lo_sum are positive */
7554 case LO_SUM:
7555 /* Store flag stores 0 or 1 */
7556 case LT: case LTU:
7557 case GT: case GTU:
7558 case LE: case LEU:
7559 case GE: case GEU:
7560 case EQ:
7561 case NE:
7562 return 1;
7563 case AND:
7565 rtx op0 = XEXP (SET_SRC (pat), 0);
7566 rtx op1 = XEXP (SET_SRC (pat), 1);
7567 if (GET_CODE (op1) == CONST_INT)
7568 return INTVAL (op1) >= 0;
7569 if (GET_CODE (op0) != REG)
7570 return 0;
7571 if (sparc_check_64 (op0, insn) == 1)
7572 return 1;
7573 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7575 case IOR:
7576 case XOR:
7578 rtx op0 = XEXP (SET_SRC (pat), 0);
7579 rtx op1 = XEXP (SET_SRC (pat), 1);
7580 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7581 return 0;
7582 if (GET_CODE (op1) == CONST_INT)
7583 return INTVAL (op1) >= 0;
7584 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7586 case LSHIFTRT:
7587 return GET_MODE (SET_SRC (pat)) == SImode;
7588 /* Positive integers leave the high bits zero. */
7589 case CONST_DOUBLE:
7590 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7591 case CONST_INT:
7592 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
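/* An SImode arithmetic right shift or sign extension leaves the high
   32 bits equal to the sign bit, so return -1 ("sign extended") per
   sparc_check_64's convention.  */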
7593 case ASHIFTRT:
7594 case SIGN_EXTEND:
7595 return - (GET_MODE (SET_SRC (pat)) == SImode);
7596 case REG:
7597 return sparc_check_64 (SET_SRC (pat), insn);
7598 default:
7599 return 0;
7603 /* We _ought_ to have only one kind per function, but... */
7604 static GTY(()) rtx sparc_addr_diff_list;
7605 static GTY(()) rtx sparc_addr_list;
7607 void
7608 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7610 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7611 if (diff)
7612 sparc_addr_diff_list
7613 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7614 else
7615 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7618 static void
7619 sparc_output_addr_vec (rtx vec)
7621 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7622 int idx, vlen = XVECLEN (body, 0);
7624 #ifdef ASM_OUTPUT_ADDR_VEC_START
7625 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7626 #endif
7628 #ifdef ASM_OUTPUT_CASE_LABEL
7629 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7630 NEXT_INSN (lab));
7631 #else
7632 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7633 #endif
7635 for (idx = 0; idx < vlen; idx++)
7637 ASM_OUTPUT_ADDR_VEC_ELT
7638 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7641 #ifdef ASM_OUTPUT_ADDR_VEC_END
7642 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7643 #endif
7646 static void
7647 sparc_output_addr_diff_vec (rtx vec)
7649 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7650 rtx base = XEXP (XEXP (body, 0), 0);
7651 int idx, vlen = XVECLEN (body, 1);
7653 #ifdef ASM_OUTPUT_ADDR_VEC_START
7654 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7655 #endif
7657 #ifdef ASM_OUTPUT_CASE_LABEL
7658 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7659 NEXT_INSN (lab));
7660 #else
7661 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7662 #endif
7664 for (idx = 0; idx < vlen; idx++)
7666 ASM_OUTPUT_ADDR_DIFF_ELT
7667 (asm_out_file,
7668 body,
7669 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7670 CODE_LABEL_NUMBER (base));
7673 #ifdef ASM_OUTPUT_ADDR_VEC_END
7674 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7675 #endif
7678 static void
7679 sparc_output_deferred_case_vectors (void)
7681 rtx t;
7682 int align;
7684 if (sparc_addr_list == NULL_RTX
7685 && sparc_addr_diff_list == NULL_RTX)
7686 return;
7688 /* Align to cache line in the function's code section. */
7689 switch_to_section (current_function_section ());
7691 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7692 if (align > 0)
7693 ASM_OUTPUT_ALIGN (asm_out_file, align);
7695 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7696 sparc_output_addr_vec (XEXP (t, 0));
7697 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7698 sparc_output_addr_diff_vec (XEXP (t, 0));
7700 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7703 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7704 unknown. Return 1 if the high bits are zero, -1 if the register is
7705 sign extended. */
7706 int
7707 sparc_check_64 (rtx x, rtx insn)
7709 /* If a register is set only once it is safe to ignore insns this
7710 code does not know how to handle. The loop will either recognize
7711 the single set and return the correct value or fail to recognize
7712 it and return 0. */
7713 int set_once = 0;
7714 rtx y = x;
7716 gcc_assert (GET_CODE (x) == REG);
7718 if (GET_MODE (x) == DImode)
7719 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7721 if (flag_expensive_optimizations
7722 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7723 set_once = 1;
7725 if (insn == 0)
7727 if (set_once)
7728 insn = get_last_insn_anywhere ();
7729 else
7730 return 0;
7733 while ((insn = PREV_INSN (insn)))
7735 switch (GET_CODE (insn))
7737 case JUMP_INSN:
7738 case NOTE:
7739 break;
7740 case CODE_LABEL:
7741 case CALL_INSN:
7742 default:
7743 if (! set_once)
7744 return 0;
7745 break;
7746 case INSN:
7748 rtx pat = PATTERN (insn);
7749 if (GET_CODE (pat) != SET)
7750 return 0;
7751 if (rtx_equal_p (x, SET_DEST (pat)))
7752 return set_extends (insn);
7753 if (y && rtx_equal_p (y, SET_DEST (pat)))
7754 return set_extends (insn);
7755 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7756 return 0;
7760 return 0;
7763 /* Returns assembly code to perform a DImode shift using
7764 a 64-bit global or out register on SPARC-V8+. */
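/* The strategy, in brief: the 64-bit input, which lives in a V8+
   register pair, is first assembled into a single 64-bit register
   (the destination or the scratch operand 3), the shift is performed
   there by one 64-bit instruction, and the result is split back into
   the %H0/%L0 pair.  */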
7765 const char *
7766 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7768 static char asm_code[60];
7770 /* The scratch register is only required when the destination
7771 register is not a 64-bit global or out register. */
7772 if (which_alternative != 2)
7773 operands[3] = operands[0];
7775 /* We can only shift by constants <= 63. */
7776 if (GET_CODE (operands[2]) == CONST_INT)
7777 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7779 if (GET_CODE (operands[1]) == CONST_INT)
7781 output_asm_insn ("mov\t%1, %3", operands);
7783 else
7785 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7786 if (sparc_check_64 (operands[1], insn) <= 0)
7787 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7788 output_asm_insn ("or\t%L1, %3, %3", operands);
7791 strcpy (asm_code, opcode);
7793 if (which_alternative != 2)
7794 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7795 else
7796 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7799 /* Output rtl to increment the profiler label LABELNO
7800 for profiling a function entry. */
7802 void
7803 sparc_profile_hook (int labelno)
7805 char buf[32];
7806 rtx lab, fun;
7808 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7809 if (NO_PROFILE_COUNTERS)
7811 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
7813 else
7815 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7816 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7817 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7821 #ifdef OBJECT_FORMAT_ELF
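/* For example (illustrative): a writable, allocated section named
   ".data" comes out as

	.section	".data",#alloc,#write

   using the Sun-style #attribute syntax emitted below.  */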
7822 static void
7823 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7824 tree decl)
7826 if (flags & SECTION_MERGE)
7828 /* entsize cannot be expressed in this section attribute
7829    encoding style.  */
7830 default_elf_asm_named_section (name, flags, decl);
7831 return;
7834 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7836 if (!(flags & SECTION_DEBUG))
7837 fputs (",#alloc", asm_out_file);
7838 if (flags & SECTION_WRITE)
7839 fputs (",#write", asm_out_file);
7840 if (flags & SECTION_TLS)
7841 fputs (",#tls", asm_out_file);
7842 if (flags & SECTION_CODE)
7843 fputs (",#execinstr", asm_out_file);
7845 /* ??? Handle SECTION_BSS. */
7847 fputc ('\n', asm_out_file);
7849 #endif /* OBJECT_FORMAT_ELF */
7851 /* We do not allow indirect calls to be optimized into sibling calls.
7853 We cannot use sibling calls when delayed branches are disabled
7854 because they will likely require the call delay slot to be filled.
7856 Also, on SPARC 32-bit we cannot emit a sibling call when the
7857 current function returns a structure. This is because the "unimp
7858 after call" convention would cause the callee to return to the
7859 wrong place. The generic code already disallows cases where the
7860 function being called returns a structure.
7862 It may seem strange how this last case could occur. Usually there
7863 is code after the call which jumps to epilogue code which dumps the
7864 return value into the struct return area. That ought to invalidate
7865 the sibling call right? Well, in the C++ case we can end up passing
7866 the pointer to the struct return area to a constructor (which returns
7867 void) and then nothing else happens. Such a sibling call would look
7868 valid without the added check here.
7870 VxWorks PIC PLT entries require the global pointer to be initialized
7871 on entry. We therefore can't emit sibling calls to them. */
7872 static bool
7873 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7875 return (decl
7876 && flag_delayed_branch
7877 && (TARGET_ARCH64 || ! cfun->returns_struct)
7878 && !(TARGET_VXWORKS_RTP
7879 && flag_pic
7880 && !targetm.binds_local_p (decl)));
7883 /* libfunc renaming. */
7884 #include "config/gofast.h"
7886 static void
7887 sparc_init_libfuncs (void)
7889 if (TARGET_ARCH32)
7891 /* Use the subroutines that Sun's library provides for integer
7892 multiply and divide. The `*' prevents an underscore from
7893 being prepended by the compiler. .umul is a little faster
7894 than .mul. */
7895 set_optab_libfunc (smul_optab, SImode, "*.umul");
7896 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7897 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7898 set_optab_libfunc (smod_optab, SImode, "*.rem");
7899 set_optab_libfunc (umod_optab, SImode, "*.urem");
7901 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
7902 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7903 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7904 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7905 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7906 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7908 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7909 is because with soft-float, the SFmode and DFmode sqrt
7910 instructions will be absent, and the compiler will notice and
7911 try to use the TFmode sqrt instruction for calls to the
7912 builtin function sqrt, but this fails. */
7913 if (TARGET_FPU)
7914 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7916 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7917 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7918 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7919 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7920 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7921 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7923 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7924 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7925 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7926 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7928 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7929 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7930 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7931 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7933 if (DITF_CONVERSION_LIBFUNCS)
7935 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7936 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7937 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7938 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7941 if (SUN_CONVERSION_LIBFUNCS)
7943 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7944 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7945 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7946 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7949 if (TARGET_ARCH64)
7951 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7952 do not exist in the library. Make sure the compiler does not
7953 emit calls to them by accident. (It should always use the
7954 hardware instructions.) */
7955 set_optab_libfunc (smul_optab, SImode, 0);
7956 set_optab_libfunc (sdiv_optab, SImode, 0);
7957 set_optab_libfunc (udiv_optab, SImode, 0);
7958 set_optab_libfunc (smod_optab, SImode, 0);
7959 set_optab_libfunc (umod_optab, SImode, 0);
7961 if (SUN_INTEGER_MULTIPLY_64)
7963 set_optab_libfunc (smul_optab, DImode, "__mul64");
7964 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7965 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7966 set_optab_libfunc (smod_optab, DImode, "__rem64");
7967 set_optab_libfunc (umod_optab, DImode, "__urem64");
7970 if (SUN_CONVERSION_LIBFUNCS)
7972 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7973 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7974 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7975 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7979 gofast_maybe_init_libfuncs ();
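/* Illustrative sketch (not part of the original sources): with the 32-bit
   registrations above, TFmode arithmetic on 'long double' is lowered to
   calls to the _Q_* ABI routines.  */
static long double
quad_mul_sketch (long double a, long double b)
{
  return a * b;  /* emitted as a call to _Q_mul under -m32 */
}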
7982 #define def_builtin(NAME, CODE, TYPE) \
7983 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7984 NULL_TREE)
7986 /* Implement the TARGET_INIT_BUILTINS target hook.
7987 Create builtin functions for special SPARC instructions. */
7989 static void
7990 sparc_init_builtins (void)
7992 if (TARGET_VIS)
7993 sparc_vis_init_builtins ();
7996 /* Create builtin functions for VIS 1.0 instructions. */
7998 static void
7999 sparc_vis_init_builtins (void)
8001 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8002 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8003 tree v4hi = build_vector_type (intHI_type_node, 4);
8004 tree v2hi = build_vector_type (intHI_type_node, 2);
8005 tree v2si = build_vector_type (intSI_type_node, 2);
8007 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8008 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8009 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8010 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8011 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8012 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8013 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8014 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8015 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8016 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8017 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8018 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8019 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8020 v8qi, v8qi,
8021 intDI_type_node, 0);
8022 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8023 intDI_type_node,
8024 intDI_type_node, 0);
8025 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8026 ptr_type_node,
8027 intSI_type_node, 0);
8028 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8029 ptr_type_node,
8030 intDI_type_node, 0);
8032 /* Packing and expanding vectors. */
8033 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8034 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8035 v8qi_ftype_v2si_v8qi);
8036 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8037 v2hi_ftype_v2si);
8038 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8039 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8040 v8qi_ftype_v4qi_v4qi);
8042 /* Multiplications. */
8043 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8044 v4hi_ftype_v4qi_v4hi);
8045 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8046 v4hi_ftype_v4qi_v2hi);
8047 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8048 v4hi_ftype_v4qi_v2hi);
8049 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8050 v4hi_ftype_v8qi_v4hi);
8051 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8052 v4hi_ftype_v8qi_v4hi);
8053 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8054 v2si_ftype_v4qi_v2hi);
8055 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8056 v2si_ftype_v4qi_v2hi);
8058 /* Data alignment. */
8059 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8060 v4hi_ftype_v4hi_v4hi);
8061 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8062 v8qi_ftype_v8qi_v8qi);
8063 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8064 v2si_ftype_v2si_v2si);
8065 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8066 di_ftype_di_di);
8067 if (TARGET_ARCH64)
8068 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8069 ptr_ftype_ptr_di);
8070 else
8071 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8072 ptr_ftype_ptr_si);
8074 /* Pixel distance. */
8075 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8076 di_ftype_v8qi_v8qi_di);
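/* Illustrative sketch (not part of the original sources): using the pdist
   builtin defined above; requires -mvis.  The vector typedef is an
   assumption chosen to match the V8QImode operand mode.  */
typedef unsigned char vis_v8qi __attribute__ ((vector_size (8)));

static long long
sum_abs_diffs_sketch (vis_v8qi a, vis_v8qi b, long long acc)
{
  /* Accumulate the sum of absolute byte differences into ACC.  */
  return __builtin_vis_pdist (a, b, acc);
}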
8079 /* Implement the TARGET_EXPAND_BUILTIN target hook.
8080 Expand builtin functions for SPARC intrinsics. */
8082 static rtx
8083 sparc_expand_builtin (tree exp, rtx target,
8084 rtx subtarget ATTRIBUTE_UNUSED,
8085 enum machine_mode tmode ATTRIBUTE_UNUSED,
8086 int ignore ATTRIBUTE_UNUSED)
8088 tree arg;
8089 call_expr_arg_iterator iter;
8090 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8091 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8092 rtx pat, op[4];
8093 enum machine_mode mode[4];
8094 int arg_count = 0;
8096 mode[0] = insn_data[icode].operand[0].mode;
8097 if (!target
8098 || GET_MODE (target) != mode[0]
8099 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8100 op[0] = gen_reg_rtx (mode[0]);
8101 else
8102 op[0] = target;
8104 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8106 arg_count++;
8107 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8108 op[arg_count] = expand_normal (arg);
8110 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8111 mode[arg_count]))
8112 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8115 switch (arg_count)
8117 case 1:
8118 pat = GEN_FCN (icode) (op[0], op[1]);
8119 break;
8120 case 2:
8121 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8122 break;
8123 case 3:
8124 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8125 break;
8126 default:
8127 gcc_unreachable ();
8130 if (!pat)
8131 return NULL_RTX;
8133 emit_insn (pat);
8135 return op[0];
8138 static int
8139 sparc_vis_mul8x16 (int e8, int e16)
8141 return (e8 * e16 + 128) / 256;
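/* For example, sparc_vis_mul8x16 (250, 4) = (250 * 4 + 128) / 256 = 4:
   the 24-bit product is scaled down by 256 with rounding to nearest,
   matching the hardware's partitioned 8x16 multiply.  */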
8144 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8145 by FNCODE. All of the elements in the ELTS0 and ELTS1 lists must be integer
8146 constants. A tree list with the results of the multiplications is returned,
8147 and each element in the list is of INNER_TYPE. */
8149 static tree
8150 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8152 tree n_elts = NULL_TREE;
8153 int scale;
8155 switch (fncode)
8157 case CODE_FOR_fmul8x16_vis:
8158 for (; elts0 && elts1;
8159 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8161 int val
8162 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8163 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8164 n_elts = tree_cons (NULL_TREE,
8165 build_int_cst (inner_type, val),
8166 n_elts);
8168 break;
8170 case CODE_FOR_fmul8x16au_vis:
8171 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8173 for (; elts0; elts0 = TREE_CHAIN (elts0))
8175 int val
8176 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8177 scale);
8178 n_elts = tree_cons (NULL_TREE,
8179 build_int_cst (inner_type, val),
8180 n_elts);
8182 break;
8184 case CODE_FOR_fmul8x16al_vis:
8185 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8187 for (; elts0; elts0 = TREE_CHAIN (elts0))
8189 int val
8190 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8191 scale);
8192 n_elts = tree_cons (NULL_TREE,
8193 build_int_cst (inner_type, val),
8194 n_elts);
8196 break;
8198 default:
8199 gcc_unreachable ();
8202 return nreverse (n_elts);
8205 /* Implement the TARGET_FOLD_BUILTIN target hook.
8206 Fold builtin functions for SPARC intrinsics. If IGNORE is true, the
8207 result of the function call is ignored.
8208 function could not be folded. */
8210 static tree
8211 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8213 tree arg0, arg1, arg2;
8214 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8216 if (ignore
8217 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8218 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8219 return fold_convert (rtype, integer_zero_node);
8221 switch (DECL_FUNCTION_CODE (fndecl))
8223 case CODE_FOR_fexpand_vis:
8224 arg0 = TREE_VALUE (arglist);
8225 STRIP_NOPS (arg0);
8227 if (TREE_CODE (arg0) == VECTOR_CST)
8229 tree inner_type = TREE_TYPE (rtype);
8230 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8231 tree n_elts = NULL_TREE;
8233 for (; elts; elts = TREE_CHAIN (elts))
8235 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8236 n_elts = tree_cons (NULL_TREE,
8237 build_int_cst (inner_type, val),
8238 n_elts);
8240 return build_vector (rtype, nreverse (n_elts));
8242 break;
8244 case CODE_FOR_fmul8x16_vis:
8245 case CODE_FOR_fmul8x16au_vis:
8246 case CODE_FOR_fmul8x16al_vis:
8247 arg0 = TREE_VALUE (arglist);
8248 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8249 STRIP_NOPS (arg0);
8250 STRIP_NOPS (arg1);
8252 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8254 tree inner_type = TREE_TYPE (rtype);
8255 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8256 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8257 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8258 inner_type, elts0, elts1);
8260 return build_vector (rtype, n_elts);
8262 break;
8264 case CODE_FOR_fpmerge_vis:
8265 arg0 = TREE_VALUE (arglist);
8266 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8267 STRIP_NOPS (arg0);
8268 STRIP_NOPS (arg1);
8270 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8272 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8273 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8274 tree n_elts = NULL_TREE;
8276 for (; elts0 && elts1;
8277 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8279 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8280 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8283 return build_vector (rtype, nreverse (n_elts));
8285 break;
8287 case CODE_FOR_pdist_vis:
8288 arg0 = TREE_VALUE (arglist);
8289 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8290 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8291 STRIP_NOPS (arg0);
8292 STRIP_NOPS (arg1);
8293 STRIP_NOPS (arg2);
8295 if (TREE_CODE (arg0) == VECTOR_CST
8296 && TREE_CODE (arg1) == VECTOR_CST
8297 && TREE_CODE (arg2) == INTEGER_CST)
8299 int overflow = 0;
8300 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8301 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8302 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8303 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8305 for (; elts0 && elts1;
8306 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8308 unsigned HOST_WIDE_INT
8309 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8310 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8311 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8312 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8314 unsigned HOST_WIDE_INT l;
8315 HOST_WIDE_INT h;
8317 overflow |= neg_double (low1, high1, &l, &h);
8318 overflow |= add_double (low0, high0, l, h, &l, &h);
8319 if (h < 0)
8320 overflow |= neg_double (l, h, &l, &h);
8322 overflow |= add_double (low, high, l, h, &low, &high);
8325 gcc_assert (overflow == 0);
8327 return build_int_cst_wide (rtype, low, high);
8330 default:
8331 break;
8334 return NULL_TREE;
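/* For example, a constant argument lets the fexpand case fold at compile
   time: each 8-bit element is widened to 16 bits and shifted left by 4,
   so the vector { 1, 2, 3, 4 } folds to { 16, 32, 48, 64 }.  */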
8337 int
8338 sparc_extra_constraint_check (rtx op, int c, int strict)
8340 int reload_ok_mem;
8342 if (TARGET_ARCH64
8343 && (c == 'T' || c == 'U'))
8344 return 0;
8346 switch (c)
8348 case 'Q':
8349 return fp_sethi_p (op);
8351 case 'R':
8352 return fp_mov_p (op);
8354 case 'S':
8355 return fp_high_losum_p (op);
8357 case 'U':
8358 if (! strict
8359 || (GET_CODE (op) == REG
8360 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8361 || reg_renumber[REGNO (op)] >= 0)))
8362 return register_ok_for_ldd (op);
8364 return 0;
8366 case 'W':
8367 case 'T':
8368 break;
8370 case 'Y':
8371 return const_zero_operand (op, GET_MODE (op));
8373 default:
8374 return 0;
8377 /* Our memory extra constraints have to emulate the
8378 behavior of 'm' and 'o' in order for reload to work
8379 correctly. */
8380 if (GET_CODE (op) == MEM)
8382 reload_ok_mem = 0;
8383 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8384 && (! strict
8385 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8386 reload_ok_mem = 1;
8388 else
8390 reload_ok_mem = (reload_in_progress
8391 && GET_CODE (op) == REG
8392 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8393 && reg_renumber [REGNO (op)] < 0);
8396 return reload_ok_mem;
8399 /* ??? This duplicates information provided to the compiler by the
8400 ??? scheduler description. Some day, teach genautomata to output
8401 ??? the latencies and then CSE will just use that. */
8403 static bool
8404 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8406 enum machine_mode mode = GET_MODE (x);
8407 bool float_mode_p = FLOAT_MODE_P (mode);
8409 switch (code)
8411 case CONST_INT:
8412 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8414 *total = 0;
8415 return true;
8417 /* FALLTHRU */
8419 case HIGH:
8420 *total = 2;
8421 return true;
8423 case CONST:
8424 case LABEL_REF:
8425 case SYMBOL_REF:
8426 *total = 4;
8427 return true;
8429 case CONST_DOUBLE:
8430 if (GET_MODE (x) == VOIDmode
8431 && ((CONST_DOUBLE_HIGH (x) == 0
8432 && CONST_DOUBLE_LOW (x) < 0x1000)
8433 || (CONST_DOUBLE_HIGH (x) == -1
8434 && CONST_DOUBLE_LOW (x) < 0
8435 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8436 *total = 0;
8437 else
8438 *total = 8;
8439 return true;
8441 case MEM:
8442 /* If outer-code was a sign or zero extension, a cost
8443 of COSTS_N_INSNS (1) was already added in. This is
8444 why we are subtracting it back out. */
8445 if (outer_code == ZERO_EXTEND)
8447 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8449 else if (outer_code == SIGN_EXTEND)
8451 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8453 else if (float_mode_p)
8455 *total = sparc_costs->float_load;
8457 else
8459 *total = sparc_costs->int_load;
8462 return true;
8464 case PLUS:
8465 case MINUS:
8466 if (float_mode_p)
8467 *total = sparc_costs->float_plusminus;
8468 else
8469 *total = COSTS_N_INSNS (1);
8470 return false;
8472 case MULT:
8473 if (float_mode_p)
8474 *total = sparc_costs->float_mul;
8475 else if (! TARGET_HARD_MUL)
8476 *total = COSTS_N_INSNS (25);
8477 else
8479 int bit_cost;
8481 bit_cost = 0;
8482 if (sparc_costs->int_mul_bit_factor)
8484 int nbits;
8486 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8488 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8489 for (nbits = 0; value != 0; value &= value - 1)
8490 nbits++;
8492 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8493 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8495 rtx x1 = XEXP (x, 1);
8496 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8497 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8499 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8500 nbits++;
8501 for (; value2 != 0; value2 &= value2 - 1)
8502 nbits++;
8504 else
8505 nbits = 7;
8507 if (nbits < 3)
8508 nbits = 3;
8509 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8510 bit_cost = COSTS_N_INSNS (bit_cost);
8513 if (mode == DImode)
8514 *total = sparc_costs->int_mulX + bit_cost;
8515 else
8516 *total = sparc_costs->int_mul + bit_cost;
8518 return false;
8520 case ASHIFT:
8521 case ASHIFTRT:
8522 case LSHIFTRT:
8523 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8524 return false;
8526 case DIV:
8527 case UDIV:
8528 case MOD:
8529 case UMOD:
8530 if (float_mode_p)
8532 if (mode == DFmode)
8533 *total = sparc_costs->float_div_df;
8534 else
8535 *total = sparc_costs->float_div_sf;
8537 else
8539 if (mode == DImode)
8540 *total = sparc_costs->int_divX;
8541 else
8542 *total = sparc_costs->int_div;
8544 return false;
8546 case NEG:
8547 if (! float_mode_p)
8549 *total = COSTS_N_INSNS (1);
8550 return false;
8552 /* FALLTHRU */
8554 case ABS:
8555 case FLOAT:
8556 case UNSIGNED_FLOAT:
8557 case FIX:
8558 case UNSIGNED_FIX:
8559 case FLOAT_EXTEND:
8560 case FLOAT_TRUNCATE:
8561 *total = sparc_costs->float_move;
8562 return false;
8564 case SQRT:
8565 if (mode == DFmode)
8566 *total = sparc_costs->float_sqrt_df;
8567 else
8568 *total = sparc_costs->float_sqrt_sf;
8569 return false;
8571 case COMPARE:
8572 if (float_mode_p)
8573 *total = sparc_costs->float_cmp;
8574 else
8575 *total = COSTS_N_INSNS (1);
8576 return false;
8578 case IF_THEN_ELSE:
8579 if (float_mode_p)
8580 *total = sparc_costs->float_cmove;
8581 else
8582 *total = sparc_costs->int_cmove;
8583 return false;
8585 case IOR:
8586 /* Handle the NAND vector patterns. */
8587 if (sparc_vector_mode_supported_p (GET_MODE (x))
8588 && GET_CODE (XEXP (x, 0)) == NOT
8589 && GET_CODE (XEXP (x, 1)) == NOT)
8591 *total = COSTS_N_INSNS (1);
8592 return true;
8594 else
8595 return false;
8597 default:
8598 return false;
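/* Illustrative sketch (not part of the original sources): the MULT cost
   above scales with the number of set bits in a constant multiplier,
   counted with the 'v &= v - 1' idiom used in the loops above, which
   clears one set bit per iteration.  */
static int
popcount_sketch (unsigned long long v)
{
  int nbits = 0;
  for (; v != 0; v &= v - 1)
    nbits++;
  return nbits;
}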
8602 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8603 This is achieved by means of a manual dynamic stack space allocation in
8604 the current frame. We make the assumption that SEQ doesn't contain any
8605 function calls, with the possible exception of calls to the PIC helper. */
8607 static void
8608 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8610 /* We must preserve the lowest 16 words for the register save area. */
8611 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8612 /* We really need only 2 words of fresh stack space. */
8613 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8615 rtx slot
8616 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8617 SPARC_STACK_BIAS + offset));
8619 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8620 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8621 if (reg2)
8622 emit_insn (gen_rtx_SET (VOIDmode,
8623 adjust_address (slot, word_mode, UNITS_PER_WORD),
8624 reg2));
8625 emit_insn (seq);
8626 if (reg2)
8627 emit_insn (gen_rtx_SET (VOIDmode,
8628 reg2,
8629 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8630 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8631 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
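/* The resulting layout while SEQ executes (illustrative): the frame is
   extended by SIZE bytes, and the spill slots sit just above the 16-word
   register save area:

       %sp + SPARC_STACK_BIAS + offset                    saved REG
       %sp + SPARC_STACK_BIAS + offset + UNITS_PER_WORD   saved REG2  */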
8634 /* Output the assembler code for a thunk function. THUNK_DECL is the
8635 declaration for the thunk function itself, FUNCTION is the decl for
8636 the target function. DELTA is an immediate constant offset to be
8637 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8638 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8640 static void
8641 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8642 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8643 tree function)
8645 rtx this_rtx, insn, funexp;
8646 unsigned int int_arg_first;
8648 reload_completed = 1;
8649 epilogue_completed = 1;
8651 emit_note (NOTE_INSN_PROLOGUE_END);
8653 if (flag_delayed_branch)
8655 /* We will emit a regular sibcall below, so we need to instruct
8656 output_sibcall that we are in a leaf function. */
8657 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8659 /* This will cause final.c to invoke leaf_renumber_regs so we
8660 must behave as if we were in a not-yet-leafified function. */
8661 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8663 else
8665 /* We will emit the sibcall manually below, so we will need to
8666 manually spill non-leaf registers. */
8667 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8669 /* We really are in a leaf function. */
8670 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8673 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8674 returns a structure, the structure return pointer is there instead. */
8675 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8676 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
8677 else
8678 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
8680 /* Add DELTA. When possible use a plain add, otherwise load it into
8681 a register first. */
8682 if (delta)
8684 rtx delta_rtx = GEN_INT (delta);
8686 if (! SPARC_SIMM13_P (delta))
8688 rtx scratch = gen_rtx_REG (Pmode, 1);
8689 emit_move_insn (scratch, delta_rtx);
8690 delta_rtx = scratch;
8693 /* THIS_RTX += DELTA. */
8694 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
8697 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
8698 if (vcall_offset)
8700 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8701 rtx scratch = gen_rtx_REG (Pmode, 1);
8703 gcc_assert (vcall_offset < 0);
8705 /* SCRATCH = *THIS_RTX. */
8706 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
8708 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8709 may not have any available scratch register at this point. */
8710 if (SPARC_SIMM13_P (vcall_offset))
8712 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8713 else if (! fixed_regs[5]
8714 /* The below sequence is made up of at least 2 insns,
8715 while the default method may need only one. */
8716 && vcall_offset < -8192)
8718 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8719 emit_move_insn (scratch2, vcall_offset_rtx);
8720 vcall_offset_rtx = scratch2;
8722 else
8724 rtx increment = GEN_INT (-4096);
8726 /* VCALL_OFFSET is a negative number whose typical range can be
8727 estimated as -32768..0 in 32-bit mode. In almost all cases
8728 it is therefore cheaper to emit multiple add insns than
8729 spilling and loading the constant into a register (at least
8730 6 insns). */
8731 while (! SPARC_SIMM13_P (vcall_offset))
8733 emit_insn (gen_add2_insn (scratch, increment));
8734 vcall_offset += 4096;
8736 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8739 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
8740 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8741 gen_rtx_PLUS (Pmode,
8742 scratch,
8743 vcall_offset_rtx)));
8745 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
8746 emit_insn (gen_add2_insn (this_rtx, scratch));
8749 /* Generate a tail call to the target function. */
8750 if (! TREE_USED (function))
8752 assemble_external (function);
8753 TREE_USED (function) = 1;
8755 funexp = XEXP (DECL_RTL (function), 0);
8757 if (flag_delayed_branch)
8759 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8760 insn = emit_call_insn (gen_sibcall (funexp));
8761 SIBLING_CALL_P (insn) = 1;
8763 else
8765 /* The hoops we have to jump through in order to generate a sibcall
8766 without using delay slots... */
8767 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8769 if (flag_pic)
8771 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8772 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8773 start_sequence ();
8774 /* Delay emitting the PIC helper function because it needs to
8775 change the section and we are emitting assembly code. */
8776 load_pic_register (true); /* clobbers %o7 */
8777 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8778 seq = get_insns ();
8779 end_sequence ();
8780 emit_and_preserve (seq, spill_reg, spill_reg2);
8782 else if (TARGET_ARCH32)
8784 emit_insn (gen_rtx_SET (VOIDmode,
8785 scratch,
8786 gen_rtx_HIGH (SImode, funexp)));
8787 emit_insn (gen_rtx_SET (VOIDmode,
8788 scratch,
8789 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8791 else /* TARGET_ARCH64 */
8793 switch (sparc_cmodel)
8795 case CM_MEDLOW:
8796 case CM_MEDMID:
8797 /* The destination can serve as a temporary. */
8798 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8799 break;
8801 case CM_MEDANY:
8802 case CM_EMBMEDANY:
8803 /* The destination cannot serve as a temporary. */
8804 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8805 start_sequence ();
8806 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8807 seq = get_insns ();
8808 end_sequence ();
8809 emit_and_preserve (seq, spill_reg, 0);
8810 break;
8812 default:
8813 gcc_unreachable ();
8817 emit_jump_insn (gen_indirect_jump (scratch));
8820 emit_barrier ();
8822 /* Run just enough of rest_of_compilation to get the insns emitted.
8823 There's not really enough bulk here to make other passes such as
8824 instruction scheduling worthwhile. Note that use_thunk calls
8825 assemble_start_function and assemble_end_function. */
8826 insn = get_insns ();
8827 insn_locators_alloc ();
8828 shorten_branches (insn);
8829 final_start_function (insn, file, 1);
8830 final (insn, file, 1);
8831 final_end_function ();
8832 free_after_compilation (cfun);
8834 reload_completed = 0;
8835 epilogue_completed = 0;
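/* Illustrative sketch (not part of the original sources): the thunk
   emitted above behaves like the following pseudocode, applying the
   'this' adjustment and then tail-calling FUNCTION:

       this = (char *) this + delta;
       if (vcall_offset)
         this = (char *) this + *(long *) (*(char **) this + vcall_offset);
       return function (this, ...);  */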
8838 /* Return true if sparc_output_mi_thunk would be able to output the
8839 assembler code for the thunk function specified by the arguments
8840 it is passed, and false otherwise. */
8841 static bool
8842 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8843 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8844 HOST_WIDE_INT vcall_offset,
8845 const_tree function ATTRIBUTE_UNUSED)
8847 /* Bound the loop used in the default method above. */
8848 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8851 /* How to allocate a 'struct machine_function'. */
8853 static struct machine_function *
8854 sparc_init_machine_status (void)
8856 return GGC_CNEW (struct machine_function);
8859 /* Locate some local-dynamic symbol still in use by this function
8860 so that we can print its name in local-dynamic base patterns. */
8862 static const char *
8863 get_some_local_dynamic_name (void)
8865 rtx insn;
8867 if (cfun->machine->some_ld_name)
8868 return cfun->machine->some_ld_name;
8870 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8871 if (INSN_P (insn)
8872 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8873 return cfun->machine->some_ld_name;
8875 gcc_unreachable ();
8878 static int
8879 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8881 rtx x = *px;
8883 if (x
8884 && GET_CODE (x) == SYMBOL_REF
8885 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8887 cfun->machine->some_ld_name = XSTR (x, 0);
8888 return 1;
8891 return 0;
8894 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8895 This is called from dwarf2out.c to emit call frame instructions
8896 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8897 static void
8898 sparc_dwarf_handle_frame_unspec (const char *label,
8899 rtx pattern ATTRIBUTE_UNUSED,
8900 int index ATTRIBUTE_UNUSED)
8902 gcc_assert (index == UNSPECV_SAVEW);
8903 dwarf2out_window_save (label);
8906 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8907 We need to emit DTP-relative relocations. */
8909 static void
8910 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8912 switch (size)
8914 case 4:
8915 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8916 break;
8917 case 8:
8918 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8919 break;
8920 default:
8921 gcc_unreachable ();
8923 output_addr_const (file, x);
8924 fputs (")", file);
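/* For a 4-byte entry referring to a TLS symbol foo, the function above
   emits

       .word   %r_tls_dtpoff32(foo)

   which the assembler turns into a DTP-relative relocation; the 8-byte
   case uses .xword and %r_tls_dtpoff64 instead.  */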
8927 /* Do whatever processing is required at the end of a file. */
8929 static void
8930 sparc_file_end (void)
8932 /* If we haven't emitted the special PIC helper function, do so now. */
8933 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8934 emit_pic_helper ();
8936 if (NEED_INDICATE_EXEC_STACK)
8937 file_end_indicate_exec_stack ();
8940 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8941 /* Implement TARGET_MANGLE_TYPE. */
8943 static const char *
8944 sparc_mangle_type (const_tree type)
8946 if (!TARGET_64BIT
8947 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8948 && TARGET_LONG_DOUBLE_128)
8949 return "g";
8951 /* For all other types, use normal C++ mangling. */
8952 return NULL;
8954 #endif
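/* For example, with 128-bit long double on 32-bit SPARC, a C++ function
   f (long double) mangles as _Z1fg ("g") rather than the default _Z1fe,
   so the 128-bit type is distinguished in symbol names.  */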
8956 /* Expand code to perform an 8-bit or 16-bit compare-and-swap by doing a
8957 32-bit compare-and-swap on the word containing the byte or halfword. */
8959 void
8960 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8962 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8963 rtx addr = gen_reg_rtx (Pmode);
8964 rtx off = gen_reg_rtx (SImode);
8965 rtx oldv = gen_reg_rtx (SImode);
8966 rtx newv = gen_reg_rtx (SImode);
8967 rtx oldvalue = gen_reg_rtx (SImode);
8968 rtx newvalue = gen_reg_rtx (SImode);
8969 rtx res = gen_reg_rtx (SImode);
8970 rtx resv = gen_reg_rtx (SImode);
8971 rtx memsi, val, mask, end_label, loop_label, cc;
8973 emit_insn (gen_rtx_SET (VOIDmode, addr,
8974 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8976 if (Pmode != SImode)
8977 addr1 = gen_lowpart (SImode, addr1);
8978 emit_insn (gen_rtx_SET (VOIDmode, off,
8979 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8981 memsi = gen_rtx_MEM (SImode, addr);
8982 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8983 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8985 val = force_reg (SImode, memsi);
8987 emit_insn (gen_rtx_SET (VOIDmode, off,
8988 gen_rtx_XOR (SImode, off,
8989 GEN_INT (GET_MODE (mem) == QImode
8990 ? 3 : 2))));
8992 emit_insn (gen_rtx_SET (VOIDmode, off,
8993 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8995 if (GET_MODE (mem) == QImode)
8996 mask = force_reg (SImode, GEN_INT (0xff));
8997 else
8998 mask = force_reg (SImode, GEN_INT (0xffff));
9000 emit_insn (gen_rtx_SET (VOIDmode, mask,
9001 gen_rtx_ASHIFT (SImode, mask, off)));
9003 emit_insn (gen_rtx_SET (VOIDmode, val,
9004 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9005 val)));
9007 oldval = gen_lowpart (SImode, oldval);
9008 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9009 gen_rtx_ASHIFT (SImode, oldval, off)));
9011 newval = gen_lowpart_common (SImode, newval);
9012 emit_insn (gen_rtx_SET (VOIDmode, newv,
9013 gen_rtx_ASHIFT (SImode, newval, off)));
9015 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9016 gen_rtx_AND (SImode, oldv, mask)));
9018 emit_insn (gen_rtx_SET (VOIDmode, newv,
9019 gen_rtx_AND (SImode, newv, mask)));
9021 end_label = gen_label_rtx ();
9022 loop_label = gen_label_rtx ();
9023 emit_label (loop_label);
9025 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9026 gen_rtx_IOR (SImode, oldv, val)));
9028 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9029 gen_rtx_IOR (SImode, newv, val)));
9031 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9033 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9035 emit_insn (gen_rtx_SET (VOIDmode, resv,
9036 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9037 res)));
9039 sparc_compare_op0 = resv;
9040 sparc_compare_op1 = val;
9041 cc = gen_compare_reg (NE);
9043 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9045 sparc_compare_emitted = cc;
9046 emit_jump_insn (gen_bne (loop_label));
9048 emit_label (end_label);
9050 emit_insn (gen_rtx_SET (VOIDmode, res,
9051 gen_rtx_AND (SImode, res, mask)));
9053 emit_insn (gen_rtx_SET (VOIDmode, res,
9054 gen_rtx_LSHIFTRT (SImode, res, off)));
9056 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
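/* Illustrative sketch (not part of the original sources): the RTL expanded
   above, written as C for the 16-bit big-endian case, with GCC's
   __sync_val_compare_and_swap standing in for the 32-bit CAS instruction.
   The lane computation mirrors the off ^= 2; off <<= 3 arithmetic above.  */
static unsigned short
cas16_sketch (unsigned short *p, unsigned short oldv, unsigned short newv)
{
  unsigned int *wp = (unsigned int *) ((unsigned long) p & ~3UL);
  unsigned int shift = (unsigned int) ((~(unsigned long) p & 2) << 3);
  unsigned int mask = (unsigned int) 0xffff << shift;
  unsigned int val = *wp & ~mask;  /* the bytes around the halfword */

  for (;;)
    {
      unsigned int expected = val | ((unsigned int) oldv << shift);
      unsigned int desired = val | ((unsigned int) newv << shift);
      unsigned int res = __sync_val_compare_and_swap (wp, expected, desired);

      if (res == expected)
        return oldv;  /* the swap succeeded */
      if ((res & ~mask) == val)
        return (unsigned short) ((res & mask) >> shift);  /* halfword differed */
      val = res & ~mask;  /* unrelated bytes changed: reload and retry */
    }
}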
9059 #include "gt-sparc.h"