/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
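
/* For reference while reading the tables above: each initializer fills
   in one field of struct processor_costs per line, in declaration
   order, and every latency is pre-scaled by COSTS_N_INSNS.  A sketch of
   the declaration (the real one lives in sparc.h; the field names below
   are assumed from that header and repeated here only as a reading
   aid):

     struct processor_costs {
       const int int_load;             -- int load
       const int int_sload;            -- int signed load
       const int int_zload;            -- int zeroed load
       const int float_load;           -- float load
       const int float_move;           -- fmov, fneg, fabs
       const int float_plus;           -- fadd, fsub
       const int float_cmp;            -- fcmp
       const int float_cmove;          -- fmov, fmovr
       const int float_mul;            -- fmul
       const int float_div_sf;         -- fdivs
       const int float_div_df;         -- fdivd
       const int float_sqrt_sf;        -- fsqrts
       const int float_sqrt_df;        -- fsqrtd
       const int int_mul;              -- imul
       const int int_mulX;             -- imulX
       const int int_mul_bit_factor;   -- imul bit factor
       const int int_div;              -- idiv
       const int int_divX;             -- idivX
       const int int_cmove;            -- movcc/movr
       const int shift_penalty;        -- shift penalty
     };  */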

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way how to find out if
   somebody does not branch between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) int struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
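
/* Reading the table: input registers %i0-%i5 (hard regs 24-29) are
   remapped to output registers %o0-%o5 (hard regs 8-13), %i7 (31) to
   %o7 (15), and %sp (14) stays put; entries of -1 mark registers that
   must not appear in a leaf function.  So a leaf function such as

       int add1 (int x) { return x + 1; }

   can be emitted as "retl; add %o0, 1, %o0" without allocating a
   register window at all.  */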

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_fundamental_type (tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif

/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7",             PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress",        PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8",             PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc",     PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite",      PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930",           PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934",           PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc",     PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x",   PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet",       PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701",         PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9",             PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc",     PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
                        |MASK_DEPRECATED_V8_INSNS },
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3",    PROCESSOR_ULTRASPARC3, MASK_ISA,
                        MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T1 */
    { "niagara",        PROCESSOR_NIAGARA, MASK_ISA,
                        MASK_V9|MASK_DEPRECATED_V8_INSNS },
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
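
/* For example, "-mvis" by itself ends up setting MASK_V9 and clearing
   MASK_V8/MASK_SPARCLET/MASK_SPARCLITE in target_flags above, since VIS
   implies an UltraSPARC or later.  Per the sparc_select table, -mcpu=
   picks both the ISA masks and the tuning, while -mtune= only changes
   sparc_cpu, and hence which cost table the switch above installs.  */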

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL,        0, 0, false, false, false, NULL }
};
#endif

/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
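
/* Concrete cases, assuming the usual IEEE single encodings: 0.5f is
   0x3f000000, whose low 10 bits are clear, so fp_sethi_p accepts it
   (one sethi); pi as a float is 0x40490fdb, which has low bits set and
   does not fit in a signed 13-bit immediate, so only fp_high_losum_p
   accepts it (sethi + or).  A tiny bit pattern such as 0x00000003
   satisfies SPARC_SIMM13_P and therefore fp_mov_p (a single mov).  */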

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      if (GET_CODE (operands[1]) == LABEL_REF && mode == SImode)
	{
	  emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	  return true;
	}

      if (GET_CODE (operands[1]) == LABEL_REF && mode == DImode)
	{
	  gcc_assert (TARGET_ARCH64);
	  emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	  return true;
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] :
						 NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
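
/* A sketch of how this is reached, assuming the usual expander layout
   in sparc.md (hypothetical condensed form, not the literal pattern):

     (define_expand "movsi"
       [(set (match_operand:SI 0 "nonimmediate_operand" "")
	     (match_operand:SI 1 "general_operand" ""))]
       ""
     {
       if (sparc_expand_move (SImode, operands))
	 DONE;
     })

   i.e. the expander lets this function legitimize TLS/PIC references
   and difficult constants, and falls through to the plain move pattern
   whenever it returns false.  */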

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
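
/* Worked example: loading 0x12345678 this way produces two sets,

     temp = 0x12345400		(matches the sethi pattern)
     op0  = temp | 0x278	(matches the or-immediate pattern)

   which later come out as roughly

     sethi	%hi(0x12345678), %tmp
     or		%tmp, 0x278, %reg

   The 0x3ff mask splits the value exactly where the hardware does:
   sethi cannot set the low 10 bits, and an or-immediate trivially
   can.  */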

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits so that the
   result matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}

static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
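
/* Example: for the constant 0x1234567800000000 the caller passes
   high_bits = 0x12345678, low_immediate = 0, shift_count = 32, and the
   sequence emitted is

     sethi	%hi(0x12345678), %temp
     or		%temp, 0x278, %reg
     sllx	%reg, 32, %reg

   with a final or-immediate only when low_immediate is nonzero.  */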

static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					     unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
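
/* Example: for 0x0000ffff00000000 (high_bits = 0x0000ffff, low_bits = 0)
   this computes lowest_bit_set = 32, highest_bit_set = 47 and
   all_bits_between_are_set = 1.  The span is 15 bits, so
   const64_is_2insns below accepts it and the value can be built with a
   sethi of the focused bits followed by a single shift.  */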

static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}

static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							 unsigned HOST_WIDE_INT,
							 int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
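
/* Continuing the example above: create_simple_focus_bits (0x0000ffff,
   0, 32, 10) right-justifies the 16 set bits and then re-shifts them
   left by 10, yielding 0x03fffc00 -- a value whose low 10 bits are
   clear, so a single sethi can produce it; the caller then shifts it
   back up by lowest_bit_set - 10 = 22.  */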

/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *    xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *    sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *    or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P(low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
	  high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
1892 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1893 return the mode to be used for the comparison. For floating-point,
1894 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1895 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1896 processing is needed. */
1898 enum machine_mode
1899 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1901 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1903 switch (op)
1905 case EQ:
1906 case NE:
1907 case UNORDERED:
1908 case ORDERED:
1909 case UNLT:
1910 case UNLE:
1911 case UNGT:
1912 case UNGE:
1913 case UNEQ:
1914 case LTGT:
1915 return CCFPmode;
1917 case LT:
1918 case LE:
1919 case GT:
1920 case GE:
1921 return CCFPEmode;
1923 default:
1924 gcc_unreachable ();
1927 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1928 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1930 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1931 return CCX_NOOVmode;
1932 else
1933 return CC_NOOVmode;
1935 else
1937 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1938 return CCXmode;
1939 else
1940 return CCmode;
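/* Usage sketch (editor's illustration; A and B are hypothetical rtxes,
   not part of this file).  */
#if 0
  rtx sum = gen_rtx_PLUS (SImode, a, b);
  enum machine_mode m = select_cc_mode (EQ, sum, const0_rtx);
  /* M is CC_NOOVmode here: the PLUS may clobber the overflow bit, so
     the plain CCmode patterns must not be used.  With TARGET_ARCH64
     and a DImode PLUS, CCX_NOOVmode would be returned instead.  */
#endif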
1944 /* X and Y are two things to compare using CODE. Emit the compare insn and
1945 return the rtx for the cc reg in the proper mode. */
1948 gen_compare_reg (enum rtx_code code)
1950 rtx x = sparc_compare_op0;
1951 rtx y = sparc_compare_op1;
1952 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1953 rtx cc_reg;
1955 if (sparc_compare_emitted != NULL_RTX)
1957 cc_reg = sparc_compare_emitted;
1958 sparc_compare_emitted = NULL_RTX;
1959 return cc_reg;
1962 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1963 fcc regs (cse can't tell they're really call clobbered regs and will
1964 remove a duplicate comparison even if there is an intervening function
1965 call - it will then try to reload the cc reg via an int reg which is why
1966 we need the movcc patterns). It is possible to provide the movcc
1967 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
1968 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
1969 to tell cse that CCFPE mode registers (even pseudos) are call
1970 clobbered. */
1972 /* ??? This is an experiment. Rather than making changes to cse which may
1973 or may not be easy/clean, we do our own cse. This is possible because
1974 we will generate hard registers. Cse knows they're call clobbered (it
1975 doesn't know the same thing about pseudos). If we guess wrong, no big
1976 deal, but if we win, great! */
1978 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1979 #if 1 /* experiment */
1981 int reg;
1982 /* We cycle through the registers to ensure they're all exercised. */
1983 static int next_fcc_reg = 0;
1984 /* Previous x,y for each fcc reg. */
1985 static rtx prev_args[4][2];
1987 /* Scan prev_args for x,y. */
1988 for (reg = 0; reg < 4; reg++)
1989 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
1990 break;
1991 if (reg == 4)
1993 reg = next_fcc_reg;
1994 prev_args[reg][0] = x;
1995 prev_args[reg][1] = y;
1996 next_fcc_reg = (next_fcc_reg + 1) & 3;
1998 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2000 #else
2001 cc_reg = gen_reg_rtx (mode);
2002 #endif /* ! experiment */
2003 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2004 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2005 else
2006 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2008 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2009 gen_rtx_COMPARE (mode, x, y)));
2011 return cc_reg;
2014 /* This function is used for v9 only.
2015 CODE is the code for an Scc's comparison.
2016 OPERANDS[0] is the target of the Scc insn.
2017 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2018 been generated yet).
2020 This function is needed to turn
2022 (set (reg:SI 110)
2023 (gt (reg:CCX 100 %icc)
2024 (const_int 0)))
2025 into
2026 (set (reg:SI 110)
2027 (gt:DI (reg:CCX 100 %icc)
2028 (const_int 0)))
2030    I.e., the instruction recognizer needs to see the mode of the comparison to
2031 find the right instruction. We could use "gt:DI" right in the
2032 define_expand, but leaving it out allows us to handle DI, SI, etc.
2034 We refer to the global sparc compare operands sparc_compare_op0 and
2035 sparc_compare_op1. */
2038 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2040 if (! TARGET_ARCH64
2041 && (GET_MODE (sparc_compare_op0) == DImode
2042 || GET_MODE (operands[0]) == DImode))
2043 return 0;
2045 /* Try to use the movrCC insns. */
2046 if (TARGET_ARCH64
2047 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2048 && sparc_compare_op1 == const0_rtx
2049 && v9_regcmp_p (compare_code))
2051 rtx op0 = sparc_compare_op0;
2052 rtx temp;
2054 /* Special case for op0 != 0. This can be done with one instruction if
2055 operands[0] == sparc_compare_op0. */
2057 if (compare_code == NE
2058 && GET_MODE (operands[0]) == DImode
2059 && rtx_equal_p (op0, operands[0]))
2061 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2062 gen_rtx_IF_THEN_ELSE (DImode,
2063 gen_rtx_fmt_ee (compare_code, DImode,
2064 op0, const0_rtx),
2065 const1_rtx,
2066 operands[0])));
2067 return 1;
2070 if (reg_overlap_mentioned_p (operands[0], op0))
2072 /* Handle the case where operands[0] == sparc_compare_op0.
2073 We "early clobber" the result. */
2074 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2075 emit_move_insn (op0, sparc_compare_op0);
2078 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2079 if (GET_MODE (op0) != DImode)
2081 temp = gen_reg_rtx (DImode);
2082 convert_move (temp, op0, 0);
2084 else
2085 temp = op0;
2086 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2087 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2088 gen_rtx_fmt_ee (compare_code, DImode,
2089 temp, const0_rtx),
2090 const1_rtx,
2091 operands[0])));
2092 return 1;
2094 else
2096 operands[1] = gen_compare_reg (compare_code);
2098 switch (GET_MODE (operands[1]))
2100 case CCmode :
2101 case CCXmode :
2102 case CCFPEmode :
2103 case CCFPmode :
2104 break;
2105 default :
2106 gcc_unreachable ();
2108 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2109 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2110 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2111 gen_rtx_fmt_ee (compare_code,
2112 GET_MODE (operands[1]),
2113 operands[1], const0_rtx),
2114 const1_rtx, operands[0])));
2115 return 1;
2119 /* Emit a conditional jump insn for the v9 architecture using comparison code
2120 CODE and jump target LABEL.
2121 This function exists to take advantage of the v9 brxx insns. */
2123 void
2124 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2126 gcc_assert (sparc_compare_emitted == NULL_RTX);
2127 emit_jump_insn (gen_rtx_SET (VOIDmode,
2128 pc_rtx,
2129 gen_rtx_IF_THEN_ELSE (VOIDmode,
2130 gen_rtx_fmt_ee (code, GET_MODE (op0),
2131 op0, const0_rtx),
2132 gen_rtx_LABEL_REF (VOIDmode, label),
2133 pc_rtx)));
2136 /* Generate a DFmode part of a hard TFmode register.
2137 REG is the TFmode hard register, LOW is 1 for the
2138    low 64 bits of the register and 0 otherwise.
2141 gen_df_reg (rtx reg, int low)
2143 int regno = REGNO (reg);
2145 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2146 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2147 return gen_rtx_REG (DFmode, regno);
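/* Example (editor's illustration): SPARC is big-endian, so with REG
   being TFmode %f0 (regno 32), gen_df_reg (reg, 0) yields DFmode %f0
   and gen_df_reg (reg, 1) yields DFmode %f2; for an integer TFmode
   value in %o0 under TARGET_ARCH64 the low half is one regno up
   instead of two.  */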
2150 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2151 Unlike normal calls, TFmode operands are passed by reference. It is
2152 assumed that no more than 3 operands are required. */
2154 static void
2155 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2157 rtx ret_slot = NULL, arg[3], func_sym;
2158 int i;
2160 /* We only expect to be called for conversions, unary, and binary ops. */
2161 gcc_assert (nargs == 2 || nargs == 3);
2163 for (i = 0; i < nargs; ++i)
2165 rtx this_arg = operands[i];
2166 rtx this_slot;
2168 /* TFmode arguments and return values are passed by reference. */
2169 if (GET_MODE (this_arg) == TFmode)
2171 int force_stack_temp;
2173 force_stack_temp = 0;
2174 if (TARGET_BUGGY_QP_LIB && i == 0)
2175 force_stack_temp = 1;
2177 if (GET_CODE (this_arg) == MEM
2178 && ! force_stack_temp)
2179 this_arg = XEXP (this_arg, 0);
2180 else if (CONSTANT_P (this_arg)
2181 && ! force_stack_temp)
2183 this_slot = force_const_mem (TFmode, this_arg);
2184 this_arg = XEXP (this_slot, 0);
2186 else
2188 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2190 /* Operand 0 is the return value. We'll copy it out later. */
2191 if (i > 0)
2192 emit_move_insn (this_slot, this_arg);
2193 else
2194 ret_slot = this_slot;
2196 this_arg = XEXP (this_slot, 0);
2200 arg[i] = this_arg;
2203 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2205 if (GET_MODE (operands[0]) == TFmode)
2207 if (nargs == 2)
2208 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2209 arg[0], GET_MODE (arg[0]),
2210 arg[1], GET_MODE (arg[1]));
2211 else
2212 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2213 arg[0], GET_MODE (arg[0]),
2214 arg[1], GET_MODE (arg[1]),
2215 arg[2], GET_MODE (arg[2]));
2217 if (ret_slot)
2218 emit_move_insn (operands[0], ret_slot);
2220 else
2222 rtx ret;
2224 gcc_assert (nargs == 2);
2226 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2227 GET_MODE (operands[0]), 1,
2228 arg[1], GET_MODE (arg[1]));
2230 if (ret != operands[0])
2231 emit_move_insn (operands[0], ret);
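/* Example (editor's illustration): a TFmode addition "c = a + b"
   compiled without hardware quad support boils down to

	call	_Qp_add		! roughly _Qp_add (&c, &a, &b)
	 nop

   i.e. all three TFmode values travel by reference, which is why the
   loop above replaces every TFmode operand with its address.  */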
2235 /* Expand soft-float TFmode operations into calls to the SPARC ABI routines. */
2237 static void
2238 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2240 const char *func;
2242 switch (code)
2244 case PLUS:
2245 func = "_Qp_add";
2246 break;
2247 case MINUS:
2248 func = "_Qp_sub";
2249 break;
2250 case MULT:
2251 func = "_Qp_mul";
2252 break;
2253 case DIV:
2254 func = "_Qp_div";
2255 break;
2256 default:
2257 gcc_unreachable ();
2260 emit_soft_tfmode_libcall (func, 3, operands);
2263 static void
2264 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2266 const char *func;
2268 gcc_assert (code == SQRT);
2269 func = "_Qp_sqrt";
2271 emit_soft_tfmode_libcall (func, 2, operands);
2274 static void
2275 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2277 const char *func;
2279 switch (code)
2281 case FLOAT_EXTEND:
2282 switch (GET_MODE (operands[1]))
2284 case SFmode:
2285 func = "_Qp_stoq";
2286 break;
2287 case DFmode:
2288 func = "_Qp_dtoq";
2289 break;
2290 default:
2291 gcc_unreachable ();
2293 break;
2295 case FLOAT_TRUNCATE:
2296 switch (GET_MODE (operands[0]))
2298 case SFmode:
2299 func = "_Qp_qtos";
2300 break;
2301 case DFmode:
2302 func = "_Qp_qtod";
2303 break;
2304 default:
2305 gcc_unreachable ();
2307 break;
2309 case FLOAT:
2310 switch (GET_MODE (operands[1]))
2312 case SImode:
2313 func = "_Qp_itoq";
2314 break;
2315 case DImode:
2316 func = "_Qp_xtoq";
2317 break;
2318 default:
2319 gcc_unreachable ();
2321 break;
2323 case UNSIGNED_FLOAT:
2324 switch (GET_MODE (operands[1]))
2326 case SImode:
2327 func = "_Qp_uitoq";
2328 break;
2329 case DImode:
2330 func = "_Qp_uxtoq";
2331 break;
2332 default:
2333 gcc_unreachable ();
2335 break;
2337 case FIX:
2338 switch (GET_MODE (operands[0]))
2340 case SImode:
2341 func = "_Qp_qtoi";
2342 break;
2343 case DImode:
2344 func = "_Qp_qtox";
2345 break;
2346 default:
2347 gcc_unreachable ();
2349 break;
2351 case UNSIGNED_FIX:
2352 switch (GET_MODE (operands[0]))
2354 case SImode:
2355 func = "_Qp_qtoui";
2356 break;
2357 case DImode:
2358 func = "_Qp_qtoux";
2359 break;
2360 default:
2361 gcc_unreachable ();
2363 break;
2365 default:
2366 gcc_unreachable ();
2369 emit_soft_tfmode_libcall (func, 2, operands);
2372 /* Expand a hard-float TFmode operation. All arguments must be in
2373 registers. */
2375 static void
2376 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2378 rtx op, dest;
2380 if (GET_RTX_CLASS (code) == RTX_UNARY)
2382 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2383 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2385 else
2387 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2388 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2389 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2390 operands[1], operands[2]);
2393 if (register_operand (operands[0], VOIDmode))
2394 dest = operands[0];
2395 else
2396 dest = gen_reg_rtx (GET_MODE (operands[0]));
2398 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2400 if (dest != operands[0])
2401 emit_move_insn (operands[0], dest);
2404 void
2405 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2407 if (TARGET_HARD_QUAD)
2408 emit_hard_tfmode_operation (code, operands);
2409 else
2410 emit_soft_tfmode_binop (code, operands);
2413 void
2414 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2416 if (TARGET_HARD_QUAD)
2417 emit_hard_tfmode_operation (code, operands);
2418 else
2419 emit_soft_tfmode_unop (code, operands);
2422 void
2423 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2425 if (TARGET_HARD_QUAD)
2426 emit_hard_tfmode_operation (code, operands);
2427 else
2428 emit_soft_tfmode_cvt (code, operands);
2431 /* Return nonzero if a branch/jump/call instruction will be emitting
2432    a nop into its delay slot. */
2435 empty_delay_slot (rtx insn)
2437 rtx seq;
2439 /* If no previous instruction (should not happen), return true. */
2440 if (PREV_INSN (insn) == NULL)
2441 return 1;
2443 seq = NEXT_INSN (PREV_INSN (insn));
2444 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2445 return 0;
2447 return 1;
2450 /* Return nonzero if TRIAL can go into the call delay slot. */
2453 tls_call_delay (rtx trial)
2455 rtx pat;
2457 /* Binutils allows
2458 call __tls_get_addr, %tgd_call (foo)
2459 add %l7, %o0, %o0, %tgd_add (foo)
2460 while Sun as/ld does not. */
2461 if (TARGET_GNU_TLS || !TARGET_TLS)
2462 return 1;
2464 pat = PATTERN (trial);
2466 /* We must reject tgd_add{32|64}, i.e.
2467 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2468 and tldm_add{32|64}, i.e.
2469 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2470 for Sun as/ld. */
2471 if (GET_CODE (pat) == SET
2472 && GET_CODE (SET_SRC (pat)) == PLUS)
2474 rtx unspec = XEXP (SET_SRC (pat), 1);
2476 if (GET_CODE (unspec) == UNSPEC
2477 && (XINT (unspec, 1) == UNSPEC_TLSGD
2478 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2479 return 0;
2482 return 1;
2485 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2486 instruction. RETURN_P is true if the v9 variant 'return' is to be
2487 considered in the test too.
2489 TRIAL must be a SET whose destination is a REG appropriate for the
2490 'restore' instruction or, if RETURN_P is true, for the 'return'
2491 instruction. */
2493 static int
2494 eligible_for_restore_insn (rtx trial, bool return_p)
2496 rtx pat = PATTERN (trial);
2497 rtx src = SET_SRC (pat);
2499 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2500 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2501 && arith_operand (src, GET_MODE (src)))
2503 if (TARGET_ARCH64)
2504 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2505 else
2506 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2509 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2510 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2511 && arith_double_operand (src, GET_MODE (src)))
2512 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2514 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2515 else if (! TARGET_FPU && register_operand (src, SFmode))
2516 return 1;
2518 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2519 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2520 return 1;
2522 /* If we have the 'return' instruction, anything that does not use
2523 local or output registers and can go into a delay slot wins. */
2524 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2525 && (get_attr_in_uncond_branch_delay (trial)
2526 == IN_UNCOND_BRANCH_DELAY_TRUE))
2527 return 1;
2529 /* The 'restore src1,src2,dest' pattern for SImode. */
2530 else if (GET_CODE (src) == PLUS
2531 && register_operand (XEXP (src, 0), SImode)
2532 && arith_operand (XEXP (src, 1), SImode))
2533 return 1;
2535 /* The 'restore src1,src2,dest' pattern for DImode. */
2536 else if (GET_CODE (src) == PLUS
2537 && register_operand (XEXP (src, 0), DImode)
2538 && arith_double_operand (XEXP (src, 1), DImode))
2539 return 1;
2541 /* The 'restore src1,%lo(src2),dest' pattern. */
2542 else if (GET_CODE (src) == LO_SUM
2543 && ! TARGET_CM_MEDMID
2544 && ((register_operand (XEXP (src, 0), SImode)
2545 && immediate_operand (XEXP (src, 1), SImode))
2546 || (TARGET_ARCH64
2547 && register_operand (XEXP (src, 0), DImode)
2548 && immediate_operand (XEXP (src, 1), DImode))))
2549 return 1;
2551 /* The 'restore src,src,dest' pattern. */
2552 else if (GET_CODE (src) == ASHIFT
2553 && (register_operand (XEXP (src, 0), SImode)
2554 || register_operand (XEXP (src, 0), DImode))
2555 && XEXP (src, 1) == const1_rtx)
2556 return 1;
2558 return 0;
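/* Example (editor's illustration): an insn eligible under the rules
   above lets the delay-slot filler fold the final computation into the
   epilogue, e.g. "return 1;" can become

	ret
	 restore %g0, 1, %o0	! the add executes across the window
				! switch, depositing the result in the
				! caller's %o0

   instead of a separate "mov 1, %i0" before the "ret; restore".  */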
2561 /* Return nonzero if TRIAL can go into the function return's
2562 delay slot. */
2565 eligible_for_return_delay (rtx trial)
2567 rtx pat;
2569 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2570 return 0;
2572 if (get_attr_length (trial) != 1)
2573 return 0;
2575   /* If there are any call-saved registers, we would have to scan TRIAL
2576      to verify that it does not reference them. For now, just be conservative. */
2577 if (num_gfregs)
2578 return 0;
2580 /* If the function uses __builtin_eh_return, the eh_return machinery
2581 occupies the delay slot. */
2582 if (current_function_calls_eh_return)
2583 return 0;
2585 /* In the case of a true leaf function, anything can go into the slot. */
2586 if (sparc_leaf_function_p)
2587 return get_attr_in_uncond_branch_delay (trial)
2588 == IN_UNCOND_BRANCH_DELAY_TRUE;
2590 pat = PATTERN (trial);
2592 /* Otherwise, only operations which can be done in tandem with
2593 a `restore' or `return' insn can go into the delay slot. */
2594 if (GET_CODE (SET_DEST (pat)) != REG
2595 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2596 return 0;
2598   /* If this instruction sets up a floating-point register and we have a
2599      return instruction, it can probably go in. But restore will not work
2600 with FP_REGS. */
2601 if (REGNO (SET_DEST (pat)) >= 32)
2602 return (TARGET_V9
2603 && ! epilogue_renumber (&pat, 1)
2604 && (get_attr_in_uncond_branch_delay (trial)
2605 == IN_UNCOND_BRANCH_DELAY_TRUE));
2607 return eligible_for_restore_insn (trial, true);
2610 /* Return nonzero if TRIAL can go into the sibling call's
2611 delay slot. */
2614 eligible_for_sibcall_delay (rtx trial)
2616 rtx pat;
2618 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2619 return 0;
2621 if (get_attr_length (trial) != 1)
2622 return 0;
2624 pat = PATTERN (trial);
2626 if (sparc_leaf_function_p)
2628 /* If the tail call is done using the call instruction,
2629 we have to restore %o7 in the delay slot. */
2630 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2631 return 0;
2633       /* %g1 is used to build the function address. */
2634 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2635 return 0;
2637 return 1;
2640 /* Otherwise, only operations which can be done in tandem with
2641 a `restore' insn can go into the delay slot. */
2642 if (GET_CODE (SET_DEST (pat)) != REG
2643 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2644 || REGNO (SET_DEST (pat)) >= 32)
2645 return 0;
2647 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2648 in most cases. */
2649 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2650 return 0;
2652 return eligible_for_restore_insn (trial, false);
2656 short_branch (int uid1, int uid2)
2658 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2660 /* Leave a few words of "slop". */
2661 if (delta >= -1023 && delta <= 1022)
2662 return 1;
2664 return 0;
2667 /* Return nonzero if REG is not used after INSN.
2668 We assume REG is a reload reg, and therefore does
2669 not live past labels or calls or jumps. */
2671 reg_unused_after (rtx reg, rtx insn)
2673 enum rtx_code code, prev_code = UNKNOWN;
2675 while ((insn = NEXT_INSN (insn)))
2677 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2678 return 1;
2680 code = GET_CODE (insn);
2681 if (GET_CODE (insn) == CODE_LABEL)
2682 return 1;
2684 if (INSN_P (insn))
2686 rtx set = single_set (insn);
2687 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2688 if (set && in_src)
2689 return 0;
2690 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2691 return 1;
2692 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2693 return 0;
2695 prev_code = code;
2697 return 1;
2700 /* Determine if it's legal to put X into the constant pool. This
2701 is not possible if X contains the address of a symbol that is
2702 not constant (TLS) or not known at final link time (PIC). */
2704 static bool
2705 sparc_cannot_force_const_mem (rtx x)
2707 switch (GET_CODE (x))
2709 case CONST_INT:
2710 case CONST_DOUBLE:
2711 case CONST_VECTOR:
2712 /* Accept all non-symbolic constants. */
2713 return false;
2715 case LABEL_REF:
2716 /* Labels are OK iff we are non-PIC. */
2717 return flag_pic != 0;
2719 case SYMBOL_REF:
2720 /* 'Naked' TLS symbol references are never OK,
2721 non-TLS symbols are OK iff we are non-PIC. */
2722 if (SYMBOL_REF_TLS_MODEL (x))
2723 return true;
2724 else
2725 return flag_pic != 0;
2727 case CONST:
2728 return sparc_cannot_force_const_mem (XEXP (x, 0));
2729 case PLUS:
2730 case MINUS:
2731 return sparc_cannot_force_const_mem (XEXP (x, 0))
2732 || sparc_cannot_force_const_mem (XEXP (x, 1));
2733 case UNSPEC:
2734 return true;
2735 default:
2736 gcc_unreachable ();
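/* Examples (editor's illustration): a CONST_DOUBLE can always be
   spilled to the pool; with -fPIC a SYMBOL_REF cannot, since its
   address is unknown until final link; a TLS SYMBOL_REF never can,
   PIC or not.  */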
2740 /* PIC support. */
2741 static GTY(()) char pic_helper_symbol_name[256];
2742 static GTY(()) rtx pic_helper_symbol;
2743 static GTY(()) bool pic_helper_emitted_p = false;
2744 static GTY(()) rtx global_offset_table;
2746 /* Ensure that we are not using patterns that are not OK with PIC. */
2749 check_pic (int i)
2751 switch (flag_pic)
2753 case 1:
2754 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2755 && (GET_CODE (recog_data.operand[i]) != CONST
2756 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2757 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2758 == global_offset_table)
2759 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2760 == CONST))));
2761 case 2:
2762 default:
2763 return 1;
2767 /* Return true if X is an address which needs a temporary register when
2768 reloaded while generating PIC code. */
2771 pic_address_needs_scratch (rtx x)
2773   /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
2774 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2775 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2776 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2777 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2778 return 1;
2780 return 0;
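/* Example (editor's illustration): (const (plus (symbol_ref "foo")
   (const_int 8192))) needs a scratch register, since 8192 is outside
   the signed 13-bit range (-4096..4095) checked by SMALL_INT, whereas
   an offset of 64 would not need one.  */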
2783 /* Determine if a given RTX is a valid constant. We already know this
2784 satisfies CONSTANT_P. */
2786 bool
2787 legitimate_constant_p (rtx x)
2789 rtx inner;
2791 switch (GET_CODE (x))
2793 case SYMBOL_REF:
2794 /* TLS symbols are not constant. */
2795 if (SYMBOL_REF_TLS_MODEL (x))
2796 return false;
2797 break;
2799 case CONST:
2800 inner = XEXP (x, 0);
2802 /* Offsets of TLS symbols are never valid.
2803 Discourage CSE from creating them. */
2804 if (GET_CODE (inner) == PLUS
2805 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2806 return false;
2807 break;
2809 case CONST_DOUBLE:
2810 if (GET_MODE (x) == VOIDmode)
2811 return true;
2813 /* Floating point constants are generally not ok.
2814 The only exception is 0.0 in VIS. */
2815 if (TARGET_VIS
2816 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2817 && const_zero_operand (x, GET_MODE (x)))
2818 return true;
2820 return false;
2822 case CONST_VECTOR:
2823 /* Vector constants are generally not ok.
2824 The only exception is 0 in VIS. */
2825 if (TARGET_VIS
2826 && const_zero_operand (x, GET_MODE (x)))
2827 return true;
2829 return false;
2831 default:
2832 break;
2835 return true;
2838 /* Determine if a given RTX is a valid constant address. */
2840 bool
2841 constant_address_p (rtx x)
2843 switch (GET_CODE (x))
2845 case LABEL_REF:
2846 case CONST_INT:
2847 case HIGH:
2848 return true;
2850 case CONST:
2851 if (flag_pic && pic_address_needs_scratch (x))
2852 return false;
2853 return legitimate_constant_p (x);
2855 case SYMBOL_REF:
2856 return !flag_pic && legitimate_constant_p (x);
2858 default:
2859 return false;
2863 /* Nonzero if the constant value X is a legitimate general operand
2864 when generating PIC code. It is given that flag_pic is on and
2865 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2867 bool
2868 legitimate_pic_operand_p (rtx x)
2870 if (pic_address_needs_scratch (x))
2871 return false;
2872 if (SPARC_SYMBOL_REF_TLS_P (x)
2873 || (GET_CODE (x) == CONST
2874 && GET_CODE (XEXP (x, 0)) == PLUS
2875 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2876 return false;
2877 return true;
2880 /* Return nonzero if ADDR is a valid memory address.
2881 STRICT specifies whether strict register checking applies. */
2884 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2886 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2888 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2889 rs1 = addr;
2890 else if (GET_CODE (addr) == PLUS)
2892 rs1 = XEXP (addr, 0);
2893 rs2 = XEXP (addr, 1);
2895       /* Canonicalize. REG comes first; if there are no regs,
2896 LO_SUM comes first. */
2897 if (!REG_P (rs1)
2898 && GET_CODE (rs1) != SUBREG
2899 && (REG_P (rs2)
2900 || GET_CODE (rs2) == SUBREG
2901 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2903 rs1 = XEXP (addr, 1);
2904 rs2 = XEXP (addr, 0);
2907 if ((flag_pic == 1
2908 && rs1 == pic_offset_table_rtx
2909 && !REG_P (rs2)
2910 && GET_CODE (rs2) != SUBREG
2911 && GET_CODE (rs2) != LO_SUM
2912 && GET_CODE (rs2) != MEM
2913 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2914 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2915 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2916 || ((REG_P (rs1)
2917 || GET_CODE (rs1) == SUBREG)
2918 && RTX_OK_FOR_OFFSET_P (rs2)))
2920 imm1 = rs2;
2921 rs2 = NULL;
2923 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2924 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2926 /* We prohibit REG + REG for TFmode when there are no quad move insns
2927 and we consequently need to split. We do this because REG+REG
2928 is not an offsettable address. If we get the situation in reload
2929 where source and destination of a movtf pattern are both MEMs with
2930 REG+REG address, then only one of them gets converted to an
2931 offsettable address. */
2932 if (mode == TFmode
2933 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2934 return 0;
2936       /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
2937          optimizing, because then mem_min_alignment is likely to be zero
2938 after reload and the forced split would lack a matching splitter
2939 pattern. */
2940 if (TARGET_ARCH32 && !optimize
2941 && (mode == DFmode || mode == DImode))
2942 return 0;
2944 else if (USE_AS_OFFSETABLE_LO10
2945 && GET_CODE (rs1) == LO_SUM
2946 && TARGET_ARCH64
2947 && ! TARGET_CM_MEDMID
2948 && RTX_OK_FOR_OLO10_P (rs2))
2950 rs2 = NULL;
2951 imm1 = XEXP (rs1, 1);
2952 rs1 = XEXP (rs1, 0);
2953 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2954 return 0;
2957 else if (GET_CODE (addr) == LO_SUM)
2959 rs1 = XEXP (addr, 0);
2960 imm1 = XEXP (addr, 1);
2962 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2963 return 0;
2965 /* We can't allow TFmode in 32-bit mode, because an offset greater
2966 than the alignment (8) may cause the LO_SUM to overflow. */
2967 if (mode == TFmode && TARGET_ARCH32)
2968 return 0;
2970 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2971 return 1;
2972 else
2973 return 0;
2975 if (GET_CODE (rs1) == SUBREG)
2976 rs1 = SUBREG_REG (rs1);
2977 if (!REG_P (rs1))
2978 return 0;
2980 if (rs2)
2982 if (GET_CODE (rs2) == SUBREG)
2983 rs2 = SUBREG_REG (rs2);
2984 if (!REG_P (rs2))
2985 return 0;
2988 if (strict)
2990 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
2991 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
2992 return 0;
2994 else
2996 if ((REGNO (rs1) >= 32
2997 && REGNO (rs1) != FRAME_POINTER_REGNUM
2998 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
2999 || (rs2
3000 && (REGNO (rs2) >= 32
3001 && REGNO (rs2) != FRAME_POINTER_REGNUM
3002 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3003 return 0;
3005 return 1;
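/* Summary (editor's illustration) of the address forms accepted above:

	[%i0 + 64]		REG plus simm13 offset
	[%i0 + %i1]		REG plus REG (restricted for TFmode and
				for unoptimized ARCH32 DFmode/DImode,
				see above)
	[%i0 + %lo(sym)]	LO_SUM, the second half of a sethi pair
	[4095]			bare SMALL_INT constant address  */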
3008 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3010 static GTY(()) rtx sparc_tls_symbol;
3012 static rtx
3013 sparc_tls_get_addr (void)
3015 if (!sparc_tls_symbol)
3016 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3018 return sparc_tls_symbol;
3021 static rtx
3022 sparc_tls_got (void)
3024 rtx temp;
3025 if (flag_pic)
3027 current_function_uses_pic_offset_table = 1;
3028 return pic_offset_table_rtx;
3031 if (!global_offset_table)
3032 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3033 temp = gen_reg_rtx (Pmode);
3034 emit_move_insn (temp, global_offset_table);
3035 return temp;
3038 /* Return 1 if *X is a thread-local symbol. */
3040 static int
3041 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3043 return SPARC_SYMBOL_REF_TLS_P (*x);
3046 /* Return 1 if X contains a thread-local symbol. */
3048 bool
3049 sparc_tls_referenced_p (rtx x)
3051 if (!TARGET_HAVE_TLS)
3052 return false;
3054 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3057 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3058 this (thread-local) address. */
3061 legitimize_tls_address (rtx addr)
3063 rtx temp1, temp2, temp3, ret, o0, got, insn;
3065 gcc_assert (! no_new_pseudos);
3067 if (GET_CODE (addr) == SYMBOL_REF)
3068 switch (SYMBOL_REF_TLS_MODEL (addr))
3070 case TLS_MODEL_GLOBAL_DYNAMIC:
3071 start_sequence ();
3072 temp1 = gen_reg_rtx (SImode);
3073 temp2 = gen_reg_rtx (SImode);
3074 ret = gen_reg_rtx (Pmode);
3075 o0 = gen_rtx_REG (Pmode, 8);
3076 got = sparc_tls_got ();
3077 emit_insn (gen_tgd_hi22 (temp1, addr));
3078 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3079 if (TARGET_ARCH32)
3081 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3082 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3083 addr, const1_rtx));
3085 else
3087 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3088 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3089 addr, const1_rtx));
3091 CALL_INSN_FUNCTION_USAGE (insn)
3092 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3093 CALL_INSN_FUNCTION_USAGE (insn));
3094 insn = get_insns ();
3095 end_sequence ();
3096 emit_libcall_block (insn, ret, o0, addr);
3097 break;
3099 case TLS_MODEL_LOCAL_DYNAMIC:
3100 start_sequence ();
3101 temp1 = gen_reg_rtx (SImode);
3102 temp2 = gen_reg_rtx (SImode);
3103 temp3 = gen_reg_rtx (Pmode);
3104 ret = gen_reg_rtx (Pmode);
3105 o0 = gen_rtx_REG (Pmode, 8);
3106 got = sparc_tls_got ();
3107 emit_insn (gen_tldm_hi22 (temp1));
3108 emit_insn (gen_tldm_lo10 (temp2, temp1));
3109 if (TARGET_ARCH32)
3111 emit_insn (gen_tldm_add32 (o0, got, temp2));
3112 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3113 const1_rtx));
3115 else
3117 emit_insn (gen_tldm_add64 (o0, got, temp2));
3118 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3119 const1_rtx));
3121 CALL_INSN_FUNCTION_USAGE (insn)
3122 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3123 CALL_INSN_FUNCTION_USAGE (insn));
3124 insn = get_insns ();
3125 end_sequence ();
3126 emit_libcall_block (insn, temp3, o0,
3127 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3128 UNSPEC_TLSLD_BASE));
3129 temp1 = gen_reg_rtx (SImode);
3130 temp2 = gen_reg_rtx (SImode);
3131 emit_insn (gen_tldo_hix22 (temp1, addr));
3132 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3133 if (TARGET_ARCH32)
3134 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3135 else
3136 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3137 break;
3139 case TLS_MODEL_INITIAL_EXEC:
3140 temp1 = gen_reg_rtx (SImode);
3141 temp2 = gen_reg_rtx (SImode);
3142 temp3 = gen_reg_rtx (Pmode);
3143 got = sparc_tls_got ();
3144 emit_insn (gen_tie_hi22 (temp1, addr));
3145 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3146 if (TARGET_ARCH32)
3147 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3148 else
3149 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3150 if (TARGET_SUN_TLS)
3152 ret = gen_reg_rtx (Pmode);
3153 if (TARGET_ARCH32)
3154 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3155 temp3, addr));
3156 else
3157 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3158 temp3, addr));
3160 else
3161 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3162 break;
3164 case TLS_MODEL_LOCAL_EXEC:
3165 temp1 = gen_reg_rtx (Pmode);
3166 temp2 = gen_reg_rtx (Pmode);
3167 if (TARGET_ARCH32)
3169 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3170 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3172 else
3174 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3175 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3177 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3178 break;
3180 default:
3181 gcc_unreachable ();
3184 else
3185 gcc_unreachable (); /* for now ... */
3187 return ret;
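/* Example (editor's illustration): for the global-dynamic model on
   32-bit, the RTL built above corresponds to assembly of the form

	sethi	%tgd_hi22(sym), %l1
	add	%l1, %tgd_lo10(sym), %l2
	add	%l7, %l2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   with the address of the thread-local object returned in %o0.  */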
3191 /* Legitimize PIC addresses. If the address is already position-independent,
3192 we return ORIG. Newly generated position-independent addresses go into a
3193 reg. This is REG if nonzero, otherwise we allocate register(s) as
3194 necessary. */
3197 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3198 rtx reg)
3200 if (GET_CODE (orig) == SYMBOL_REF)
3202 rtx pic_ref, address;
3203 rtx insn;
3205 if (reg == 0)
3207 gcc_assert (! reload_in_progress && ! reload_completed);
3208 reg = gen_reg_rtx (Pmode);
3211 if (flag_pic == 2)
3213 /* If not during reload, allocate another temp reg here for loading
3214 in the address, so that these instructions can be optimized
3215 properly. */
3216 rtx temp_reg = ((reload_in_progress || reload_completed)
3217 ? reg : gen_reg_rtx (Pmode));
3219 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3220 won't get confused into thinking that these two instructions
3221 are loading in the true address of the symbol. If in the
3222 future a PIC rtx exists, that should be used instead. */
3223 if (TARGET_ARCH64)
3225 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3226 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3228 else
3230 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3231 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3233 address = temp_reg;
3235 else
3236 address = orig;
3238 pic_ref = gen_const_mem (Pmode,
3239 gen_rtx_PLUS (Pmode,
3240 pic_offset_table_rtx, address));
3241 current_function_uses_pic_offset_table = 1;
3242 insn = emit_move_insn (reg, pic_ref);
3243 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3244       by the loop optimizer. */
3245 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3246 REG_NOTES (insn));
3247 return reg;
3249 else if (GET_CODE (orig) == CONST)
3251 rtx base, offset;
3253 if (GET_CODE (XEXP (orig, 0)) == PLUS
3254 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3255 return orig;
3257 if (reg == 0)
3259 gcc_assert (! reload_in_progress && ! reload_completed);
3260 reg = gen_reg_rtx (Pmode);
3263 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3264 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3265 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3266 base == reg ? 0 : reg);
3268 if (GET_CODE (offset) == CONST_INT)
3270 if (SMALL_INT (offset))
3271 return plus_constant (base, INTVAL (offset));
3272 else if (! reload_in_progress && ! reload_completed)
3273 offset = force_reg (Pmode, offset);
3274 else
3275 /* If we reach here, then something is seriously wrong. */
3276 gcc_unreachable ();
3278 return gen_rtx_PLUS (Pmode, base, offset);
3280 else if (GET_CODE (orig) == LABEL_REF)
3281 /* ??? Why do we do this? */
3282 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3283 the register is live instead, in case it is eliminated. */
3284 current_function_uses_pic_offset_table = 1;
3286 return orig;
3289 /* Try machine-dependent ways of modifying an illegitimate address X
3290 to be legitimate. If we find one, return the new, valid address.
3292 OLDX is the address as it was before break_out_memory_refs was called.
3293 In some cases it is useful to look at this to decide what needs to be done.
3295 MODE is the mode of the operand pointed to by X. */
3298 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3300 rtx orig_x = x;
3302 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3303 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3304 force_operand (XEXP (x, 0), NULL_RTX));
3305 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3306 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3307 force_operand (XEXP (x, 1), NULL_RTX));
3308 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3309 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3310 XEXP (x, 1));
3311 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3312 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3313 force_operand (XEXP (x, 1), NULL_RTX));
3315 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3316 return x;
3318 if (SPARC_SYMBOL_REF_TLS_P (x))
3319 x = legitimize_tls_address (x);
3320 else if (flag_pic)
3321 x = legitimize_pic_address (x, mode, 0);
3322 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3323 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3324 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3325 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3326 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3327 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3328 else if (GET_CODE (x) == SYMBOL_REF
3329 || GET_CODE (x) == CONST
3330 || GET_CODE (x) == LABEL_REF)
3331 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3332 return x;
3335 /* Emit the special PIC helper function. */
3337 static void
3338 emit_pic_helper (void)
3340 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3341 int align;
3343 switch_to_section (text_section);
3345 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3346 if (align > 0)
3347 ASM_OUTPUT_ALIGN (asm_out_file, align);
3348 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3349 if (flag_delayed_branch)
3350 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3351 pic_name, pic_name);
3352 else
3353 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3354 pic_name, pic_name);
3356 pic_helper_emitted_p = true;
3359 /* Emit code to load the PIC register. */
3361 static void
3362 load_pic_register (bool delay_pic_helper)
3364 int orig_flag_pic = flag_pic;
3366 /* If we haven't initialized the special PIC symbols, do so now. */
3367 if (!pic_helper_symbol_name[0])
3369 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3370 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3371 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3374 /* If we haven't emitted the special PIC helper function, do so now unless
3375 we are requested to delay it. */
3376 if (!delay_pic_helper && !pic_helper_emitted_p)
3377 emit_pic_helper ();
3379 flag_pic = 0;
3380 if (TARGET_ARCH64)
3381 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3382 pic_helper_symbol));
3383 else
3384 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3385 pic_helper_symbol));
3386 flag_pic = orig_flag_pic;
3388 /* Need to emit this whether or not we obey regdecls,
3389 since setjmp/longjmp can cause life info to screw up.
3390 ??? In the case where we don't obey regdecls, this is not sufficient
3391 since we may not fall out the bottom. */
3392 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
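/* Example (editor's illustration; label name hypothetical): combined
   with the helper emitted above, the load_pcrel_sym pattern yields

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	.LADDPC0
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7
	...
.LADDPC0:
	jmp	%o7+8
	 add	%o7, %l7, %l7

   so %l7 ends up with the absolute address of the GOT: the -4/+4
   addends compensate for the sethi and the delay-slot add sitting one
   insn before and after the call whose address lands in %o7.  */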
3395 /* Return 1 if RTX is a MEM which is known to be aligned to at
3396 least a DESIRED byte boundary. */
3399 mem_min_alignment (rtx mem, int desired)
3401 rtx addr, base, offset;
3403 /* If it's not a MEM we can't accept it. */
3404 if (GET_CODE (mem) != MEM)
3405 return 0;
3407 /* Obviously... */
3408 if (!TARGET_UNALIGNED_DOUBLES
3409 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3410 return 1;
3412 /* ??? The rest of the function predates MEM_ALIGN so
3413 there is probably a bit of redundancy. */
3414 addr = XEXP (mem, 0);
3415 base = offset = NULL_RTX;
3416 if (GET_CODE (addr) == PLUS)
3418 if (GET_CODE (XEXP (addr, 0)) == REG)
3420 base = XEXP (addr, 0);
3422           /* What we are saying here is that if the base
3423              REG is properly aligned, the compiler will make
3424              sure any REG-based index off of it will be so
3425              as well. */
3426 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3427 offset = XEXP (addr, 1);
3428 else
3429 offset = const0_rtx;
3432 else if (GET_CODE (addr) == REG)
3434 base = addr;
3435 offset = const0_rtx;
3438 if (base != NULL_RTX)
3440 int regno = REGNO (base);
3442 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3444 /* Check if the compiler has recorded some information
3445 about the alignment of the base REG. If reload has
3446 completed, we already matched with proper alignments.
3447 If not running global_alloc, reload might give us
3448              an unaligned pointer to the local stack, though. */
3449 if (((cfun != 0
3450 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3451 || (optimize && reload_completed))
3452 && (INTVAL (offset) & (desired - 1)) == 0)
3453 return 1;
3455 else
3457 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3458 return 1;
3461 else if (! TARGET_UNALIGNED_DOUBLES
3462 || CONSTANT_P (addr)
3463 || GET_CODE (addr) == LO_SUM)
3465 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3466 is true, in which case we can only assume that an access is aligned if
3467 it is to a constant address, or the address involves a LO_SUM. */
3468 return 1;
3471 /* An obviously unaligned address. */
3472 return 0;
3476 /* Vectors to keep interesting information about registers where it can easily
3477    be obtained. We used to use the actual mode value as the bit number, but there
3478 are more than 32 modes now. Instead we use two tables: one indexed by
3479 hard register number, and one indexed by mode. */
3481 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3482 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
3483 mapped into one sparc_mode_class mode. */
3485 enum sparc_mode_class {
3486 S_MODE, D_MODE, T_MODE, O_MODE,
3487 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3488 CC_MODE, CCFP_MODE
3491 /* Modes for single-word and smaller quantities. */
3492 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3494 /* Modes for double-word and smaller quantities. */
3495 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3497 /* Modes for quad-word and smaller quantities. */
3498 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3500 /* Modes for 8-word and smaller quantities. */
3501 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3503 /* Modes for single-float quantities. We must allow any single word or
3504 smaller quantity. This is because the fix/float conversion instructions
3505 take integer inputs/outputs from the float registers. */
3506 #define SF_MODES (S_MODES)
3508 /* Modes for double-float and smaller quantities. */
3509 #define DF_MODES (S_MODES | D_MODES)
3511 /* Modes for double-float only quantities. */
3512 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3514 /* Modes for quad-float only quantities. */
3515 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3517 /* Modes for quad-float and smaller quantities. */
3518 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3520 /* Modes for quad-float and double-float quantities. */
3521 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3523 /* Modes for quad-float pair only quantities. */
3524 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3526 /* Modes for quad-float pairs and smaller quantities. */
3527 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3529 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3531 /* Modes for condition codes. */
3532 #define CC_MODES (1 << (int) CC_MODE)
3533 #define CCFP_MODES (1 << (int) CCFP_MODE)
3535 /* Value is 1 if register/mode pair is acceptable on sparc.
3536 The funny mixture of D and T modes is because integer operations
3537 do not specially operate on tetra quantities, so non-quad-aligned
3538 registers can hold quadword quantities (except %o4 and %i4 because
3539 they cross fixed registers). */
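/* Sketch (editor's illustration) of how these tables are consumed: the
   register/mode check reduces to a bit test, roughly as follows.  */
#if 0
  if ((hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0)
    /* REGNO can hold a value of mode MODE.  */;
#endif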
3541 /* This points to either the 32 bit or the 64 bit version. */
3542 const int *hard_regno_mode_classes;
3544 static const int hard_32bit_mode_classes[] = {
3545 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3546 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3547 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3548 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3550 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3551 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3552 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3553 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3555 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3556 and none can hold SFmode/SImode values. */
3557 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3558 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3559 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3560 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3562 /* %fcc[0123] */
3563 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3565 /* %icc */
3566 CC_MODES
3569 static const int hard_64bit_mode_classes[] = {
3570 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3571 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3572 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3573 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3575 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3576 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3577 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3578 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3580 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3581 and none can hold SFmode/SImode values. */
3582 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3583 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3584 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3585 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3587 /* %fcc[0123] */
3588 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3590 /* %icc */
3591 CC_MODES
3594 int sparc_mode_class [NUM_MACHINE_MODES];
3596 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3598 static void
3599 sparc_init_modes (void)
3601 int i;
3603 for (i = 0; i < NUM_MACHINE_MODES; i++)
3605 switch (GET_MODE_CLASS (i))
3607 case MODE_INT:
3608 case MODE_PARTIAL_INT:
3609 case MODE_COMPLEX_INT:
3610 if (GET_MODE_SIZE (i) <= 4)
3611 sparc_mode_class[i] = 1 << (int) S_MODE;
3612 else if (GET_MODE_SIZE (i) == 8)
3613 sparc_mode_class[i] = 1 << (int) D_MODE;
3614 else if (GET_MODE_SIZE (i) == 16)
3615 sparc_mode_class[i] = 1 << (int) T_MODE;
3616 else if (GET_MODE_SIZE (i) == 32)
3617 sparc_mode_class[i] = 1 << (int) O_MODE;
3618 else
3619 sparc_mode_class[i] = 0;
3620 break;
3621 case MODE_VECTOR_INT:
3622 if (GET_MODE_SIZE (i) <= 4)
3623 sparc_mode_class[i] = 1 << (int)SF_MODE;
3624 else if (GET_MODE_SIZE (i) == 8)
3625 sparc_mode_class[i] = 1 << (int)DF_MODE;
3626 break;
3627 case MODE_FLOAT:
3628 case MODE_COMPLEX_FLOAT:
3629 if (GET_MODE_SIZE (i) <= 4)
3630 sparc_mode_class[i] = 1 << (int) SF_MODE;
3631 else if (GET_MODE_SIZE (i) == 8)
3632 sparc_mode_class[i] = 1 << (int) DF_MODE;
3633 else if (GET_MODE_SIZE (i) == 16)
3634 sparc_mode_class[i] = 1 << (int) TF_MODE;
3635 else if (GET_MODE_SIZE (i) == 32)
3636 sparc_mode_class[i] = 1 << (int) OF_MODE;
3637 else
3638 sparc_mode_class[i] = 0;
3639 break;
3640 case MODE_CC:
3641 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3642 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3643 else
3644 sparc_mode_class[i] = 1 << (int) CC_MODE;
3645 break;
3646 default:
3647 sparc_mode_class[i] = 0;
3648 break;
3652 if (TARGET_ARCH64)
3653 hard_regno_mode_classes = hard_64bit_mode_classes;
3654 else
3655 hard_regno_mode_classes = hard_32bit_mode_classes;
3657 /* Initialize the array used by REGNO_REG_CLASS. */
3658 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3660 if (i < 16 && TARGET_V8PLUS)
3661 sparc_regno_reg_class[i] = I64_REGS;
3662 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3663 sparc_regno_reg_class[i] = GENERAL_REGS;
3664 else if (i < 64)
3665 sparc_regno_reg_class[i] = FP_REGS;
3666 else if (i < 96)
3667 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3668 else if (i < 100)
3669 sparc_regno_reg_class[i] = FPCC_REGS;
3670 else
3671 sparc_regno_reg_class[i] = NO_REGS;
3675 /* Compute the frame size required by the function. This function is called
3676 during the reload pass and also by sparc_expand_prologue. */
3678 HOST_WIDE_INT
3679 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3681 int outgoing_args_size = (current_function_outgoing_args_size
3682 + REG_PARM_STACK_SPACE (current_function_decl));
3683 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3684 int i;
3686 if (TARGET_ARCH64)
3688 for (i = 0; i < 8; i++)
3689 if (regs_ever_live[i] && ! call_used_regs[i])
3690 n_regs += 2;
3692 else
3694 for (i = 0; i < 8; i += 2)
3695 if ((regs_ever_live[i] && ! call_used_regs[i])
3696 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3697 n_regs += 2;
3700 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3701 if ((regs_ever_live[i] && ! call_used_regs[i])
3702 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3703 n_regs += 2;
3705 /* Set up values for use in prologue and epilogue. */
3706 num_gfregs = n_regs;
3708 if (leaf_function_p
3709 && n_regs == 0
3710 && size == 0
3711 && current_function_outgoing_args_size == 0)
3712 actual_fsize = apparent_fsize = 0;
3713 else
3715       /* We subtract STARTING_FRAME_OFFSET; remember, it's negative. */
3716 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3717 apparent_fsize += n_regs * 4;
3718 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3721 /* Make sure nothing can clobber our register windows.
3722 If a SAVE must be done, or there is a stack-local variable,
3723 the register window area must be allocated. */
3724 if (! leaf_function_p || size > 0)
3725 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3727 return SPARC_STACK_ALIGN (actual_fsize);
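/* Example (editor's illustration): a 32-bit leaf function with no
   locals, no saved registers and no outgoing arguments gets a frame
   size of 0 and needs no stack adjustment at all; as soon as a
   register window or a stack local is needed, the ABI-mandated
   16-word window save area is reserved via FIRST_PARM_OFFSET before
   the final SPARC_STACK_ALIGN rounding.  */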
3730 /* Output any necessary .register pseudo-ops. */
3732 void
3733 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3735 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3736 int i;
3738 if (TARGET_ARCH32)
3739 return;
3741 /* Check if %g[2367] were used without
3742 .register being printed for them already. */
3743 for (i = 2; i < 8; i++)
3745 if (regs_ever_live [i]
3746 && ! sparc_hard_reg_printed [i])
3748 sparc_hard_reg_printed [i] = 1;
3749           /* %g7 is used as the TLS base register; use #ignore
3750 for it instead of #scratch. */
3751 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3752 i == 7 ? "ignore" : "scratch");
3754 if (i == 3) i = 5;
3756 #endif
3759 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3760 as needed. LOW should be double-word aligned for 32-bit registers.
3761 Return the new OFFSET. */
3763 #define SORR_SAVE 0
3764 #define SORR_RESTORE 1
3766 static int
3767 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3769 rtx mem, insn;
3770 int i;
3772 if (TARGET_ARCH64 && high <= 32)
3774 for (i = low; i < high; i++)
3776 if (regs_ever_live[i] && ! call_used_regs[i])
3778 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3779 set_mem_alias_set (mem, sparc_sr_alias_set);
3780 if (action == SORR_SAVE)
3782 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3783 RTX_FRAME_RELATED_P (insn) = 1;
3785 else /* action == SORR_RESTORE */
3786 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3787 offset += 8;
3791 else
3793 for (i = low; i < high; i += 2)
3795 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
3796 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
3797 enum machine_mode mode;
3798 int regno;
3800 if (reg0 && reg1)
3802 mode = i < 32 ? DImode : DFmode;
3803 regno = i;
3805 else if (reg0)
3807 mode = i < 32 ? SImode : SFmode;
3808 regno = i;
3810 else if (reg1)
3812 mode = i < 32 ? SImode : SFmode;
3813 regno = i + 1;
3814 offset += 4;
3816 else
3817 continue;
3819 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3820 set_mem_alias_set (mem, sparc_sr_alias_set);
3821 if (action == SORR_SAVE)
3823 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3824 RTX_FRAME_RELATED_P (insn) = 1;
3826 else /* action == SORR_RESTORE */
3827 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3829 /* Always preserve double-word alignment. */
3830 offset = (offset + 7) & -8;
3834 return offset;
3837 /* Emit code to save or restore call-saved registers, per ACTION. */
3839 static void
3840 emit_save_or_restore_regs (int action)
3842 HOST_WIDE_INT offset;
3843 rtx base;
3845 offset = frame_base_offset - apparent_fsize;
3847 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3849 /* ??? This might be optimized a little as %g1 might already have a
3850 value close enough that a single add insn will do. */
3851 /* ??? Although, all of this is probably only a temporary fix
3852 because if %g1 can hold a function result, then
3853 sparc_expand_epilogue will lose (the result will be
3854 clobbered). */
3855 base = gen_rtx_REG (Pmode, 1);
3856 emit_move_insn (base, GEN_INT (offset));
3857 emit_insn (gen_rtx_SET (VOIDmode,
3858 base,
3859 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3860 offset = 0;
3862 else
3863 base = frame_base_reg;
3865 offset = save_or_restore_regs (0, 8, base, offset, action);
3866 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3869 /* Generate a save_register_window insn. */
3871 static rtx
3872 gen_save_register_window (rtx increment)
3874 if (TARGET_ARCH64)
3875 return gen_save_register_windowdi (increment);
3876 else
3877 return gen_save_register_windowsi (increment);
3880 /* Generate an increment for the stack pointer. */
3882 static rtx
3883 gen_stack_pointer_inc (rtx increment)
3885 return gen_rtx_SET (VOIDmode,
3886 stack_pointer_rtx,
3887 gen_rtx_PLUS (Pmode,
3888 stack_pointer_rtx,
3889 increment));
3892 /* Generate a decrement for the stack pointer. */
3894 static rtx
3895 gen_stack_pointer_dec (rtx decrement)
3897 return gen_rtx_SET (VOIDmode,
3898 stack_pointer_rtx,
3899 gen_rtx_MINUS (Pmode,
3900 stack_pointer_rtx,
3901 decrement));
3904 /* Expand the function prologue. The prologue is responsible for reserving
3905 storage for the frame, saving the call-saved registers and loading the
3906 PIC register if needed. */
3908 void
3909 sparc_expand_prologue (void)
3911 rtx insn;
3912 int i;
3914 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3915 on the final value of the flag means deferring the prologue/epilogue
3916 expansion until just before the second scheduling pass, which is too
3917 late to emit multiple epilogues or return insns.
3919 Of course we are making the assumption that the value of the flag
3920 will not change between now and its final value. Of the three parts
3921 of the formula, only the last one can reasonably vary. Let's take a
3922      closer look, after assuming that the first two are set to true
3923 (otherwise the last value is effectively silenced).
3925 If only_leaf_regs_used returns false, the global predicate will also
3926 be false so the actual frame size calculated below will be positive.
3927 As a consequence, the save_register_window insn will be emitted in
3928 the instruction stream; now this insn explicitly references %fp
3929 which is not a leaf register so only_leaf_regs_used will always
3930 return false subsequently.
3932 If only_leaf_regs_used returns true, we hope that the subsequent
3933 optimization passes won't cause non-leaf registers to pop up. For
3934 example, the regrename pass has special provisions to not rename to
3935 non-leaf registers in a leaf function. */
3936 sparc_leaf_function_p
3937 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3939 /* Need to use actual_fsize, since we are also allocating
3940 space for our callee (and our own register save area). */
3941 actual_fsize
3942 = sparc_compute_frame_size (get_frame_size (), sparc_leaf_function_p);
3944 /* Advertise that the data calculated just above are now valid. */
3945 sparc_prologue_data_valid_p = true;
3947 if (sparc_leaf_function_p)
3949 frame_base_reg = stack_pointer_rtx;
3950 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
3952 else
3954 frame_base_reg = hard_frame_pointer_rtx;
3955 frame_base_offset = SPARC_STACK_BIAS;
3958 if (actual_fsize == 0)
3959 /* do nothing. */ ;
3960 else if (sparc_leaf_function_p)
3962 if (actual_fsize <= 4096)
3963 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
3964 else if (actual_fsize <= 8192)
3966 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
3967 /* %sp is still the CFA register. */
3968 RTX_FRAME_RELATED_P (insn) = 1;
3969 insn
3970 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3972 else
3974 rtx reg = gen_rtx_REG (Pmode, 1);
3975 emit_move_insn (reg, GEN_INT (-actual_fsize));
3976 insn = emit_insn (gen_stack_pointer_inc (reg));
3977 REG_NOTES (insn) =
3978 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3979 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
3980 REG_NOTES (insn));
3983 RTX_FRAME_RELATED_P (insn) = 1;
3985 else
3987 if (actual_fsize <= 4096)
3988 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
3989 else if (actual_fsize <= 8192)
3991 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
3992 /* %sp is not the CFA register anymore. */
3993 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3995 else
3997 rtx reg = gen_rtx_REG (Pmode, 1);
3998 emit_move_insn (reg, GEN_INT (-actual_fsize));
3999 insn = emit_insn (gen_save_register_window (reg));
4002 RTX_FRAME_RELATED_P (insn) = 1;
4003 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
4004 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4007 if (num_gfregs)
4008 emit_save_or_restore_regs (SORR_SAVE);
4010 /* Load the PIC register if needed. */
4011 if (flag_pic && current_function_uses_pic_offset_table)
4012 load_pic_register (false);
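/* Illustration only, not part of the GCC sources: the frame-size strategy
   used just above, sketched as plain C.  The 4096/8192 thresholds stem
   from the 13-bit signed immediate field of the SPARC add instruction.  */
#if 0
static int
stack_adjust_strategy (long fsize)
{
  if (fsize == 0)
    return 0;   /* no frame to allocate */
  else if (fsize <= 4096)
    return 1;   /* one insn: the constant fits in 13 signed bits */
  else if (fsize <= 8192)
    return 2;   /* two insns, each constant fits in 13 signed bits */
  else
    return 3;   /* materialize -fsize in %g1, then add it to %sp */
}
#endif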
4015 /* This function generates the assembly code for function entry, which boils
4016 down to emitting the necessary .register directives. */
4018 static void
4019 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4021 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4022 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4024 sparc_output_scratch_registers (file);
4027 /* Expand the function epilogue, either normal or part of a sibcall.
4028 We emit all the instructions except the return or the call. */
4030 void
4031 sparc_expand_epilogue (void)
4033 if (num_gfregs)
4034 emit_save_or_restore_regs (SORR_RESTORE);
4036 if (actual_fsize == 0)
4037 /* do nothing. */ ;
4038 else if (sparc_leaf_function_p)
4040 if (actual_fsize <= 4096)
4041 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4042 else if (actual_fsize <= 8192)
4044 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4045 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4047 else
4049 rtx reg = gen_rtx_REG (Pmode, 1);
4050 emit_move_insn (reg, GEN_INT (-actual_fsize));
4051 emit_insn (gen_stack_pointer_dec (reg));
4056 /* Return true if it is appropriate to emit `return' instructions in the
4057 body of a function. */
4059 bool
4060 sparc_can_use_return_insn_p (void)
4062 return sparc_prologue_data_valid_p
4063 && (actual_fsize == 0 || !sparc_leaf_function_p);
4066 /* This function generates the assembly code for function exit. */
4068 static void
4069 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4071 /* If code does not drop into the epilogue, we still have to output
4072 a dummy nop for the sake of sane backtraces. Otherwise, if the
4073 last two instructions of a function were "call foo; dslot;" this
4074 can make the return PC of foo (i.e. address of call instruction
4075 plus 8) point to the first instruction in the next function. */
4077 rtx insn, last_real_insn;
4079 insn = get_last_insn ();
4081 last_real_insn = prev_real_insn (insn);
4082 if (last_real_insn
4083 && GET_CODE (last_real_insn) == INSN
4084 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4085 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4087 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4088 fputs("\tnop\n", file);
4090 sparc_output_deferred_case_vectors ();
4093 /* Output a 'restore' instruction. */
4095 static void
4096 output_restore (rtx pat)
4098 rtx operands[3];
4100 if (! pat)
4102 fputs ("\t restore\n", asm_out_file);
4103 return;
4106 gcc_assert (GET_CODE (pat) == SET);
4108 operands[0] = SET_DEST (pat);
4109 pat = SET_SRC (pat);
4111 switch (GET_CODE (pat))
4113 case PLUS:
4114 operands[1] = XEXP (pat, 0);
4115 operands[2] = XEXP (pat, 1);
4116 output_asm_insn (" restore %r1, %2, %Y0", operands);
4117 break;
4118 case LO_SUM:
4119 operands[1] = XEXP (pat, 0);
4120 operands[2] = XEXP (pat, 1);
4121 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4122 break;
4123 case ASHIFT:
4124 operands[1] = XEXP (pat, 0);
4125 gcc_assert (XEXP (pat, 1) == const1_rtx);
4126 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4127 break;
4128 default:
4129 operands[1] = pat;
4130 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4131 break;
4135 /* Output a return. */
4137 const char *
4138 output_return (rtx insn)
4140 if (sparc_leaf_function_p)
4142 /* This is a leaf function so we don't have to bother restoring the
4143 register window, which frees us from dealing with the convoluted
4144 semantics of restore/return. We simply output the jump to the
4145 return address and the insn in the delay slot (if any). */
4147 gcc_assert (! current_function_calls_eh_return);
4149 return "jmp\t%%o7+%)%#";
4151 else
4153 /* This is a regular function so we have to restore the register window.
4154 We may have a pending insn for the delay slot, which will be either
4155 combined with the 'restore' instruction or put in the delay slot of
4156 the 'return' instruction. */
4158 if (current_function_calls_eh_return)
4160 /* If the function uses __builtin_eh_return, the eh_return
4161 machinery occupies the delay slot. */
4162 gcc_assert (! final_sequence);
4164 if (! flag_delayed_branch)
4165 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4167 if (TARGET_V9)
4168 fputs ("\treturn\t%i7+8\n", asm_out_file);
4169 else
4170 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4172 if (flag_delayed_branch)
4173 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4174 else
4175 fputs ("\t nop\n", asm_out_file);
4177 else if (final_sequence)
4179 rtx delay, pat;
4181 delay = NEXT_INSN (insn);
4182 gcc_assert (delay);
4184 pat = PATTERN (delay);
4186 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4188 epilogue_renumber (&pat, 0);
4189 return "return\t%%i7+%)%#";
4191 else
4193 output_asm_insn ("jmp\t%%i7+%)", NULL);
4194 output_restore (pat);
4195 PATTERN (delay) = gen_blockage ();
4196 INSN_CODE (delay) = -1;
4199 else
4201 /* The delay slot is empty. */
4202 if (TARGET_V9)
4203 return "return\t%%i7+%)\n\t nop";
4204 else if (flag_delayed_branch)
4205 return "jmp\t%%i7+%)\n\t restore";
4206 else
4207 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4211 return "";
4214 /* Output a sibling call. */
4216 const char *
4217 output_sibcall (rtx insn, rtx call_operand)
4219 rtx operands[1];
4221 gcc_assert (flag_delayed_branch);
4223 operands[0] = call_operand;
4225 if (sparc_leaf_function_p)
4227 /* This is a leaf function so we don't have to bother restoring the
4228 register window. We simply output the jump to the function and
4229 the insn in the delay slot (if any). */
4231 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4233 if (final_sequence)
4234 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4235 operands);
4236 else
4237 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4238 it into a branch if possible. */
4239 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4240 operands);
4242 else
4244 /* This is a regular function so we have to restore the register window.
4245 We may have a pending insn for the delay slot, which will be combined
4246 with the 'restore' instruction. */
4248 output_asm_insn ("call\t%a0, 0", operands);
4250 if (final_sequence)
4252 rtx delay = NEXT_INSN (insn);
4253 gcc_assert (delay);
4255 output_restore (PATTERN (delay));
4257 PATTERN (delay) = gen_blockage ();
4258 INSN_CODE (delay) = -1;
4260 else
4261 output_restore (NULL_RTX);
4264 return "";
4267 /* Functions for handling argument passing.
4269 For 32-bit, the first 6 args are normally in registers and the rest are
4270 pushed. Any arg that starts within the first 6 words is at least
4271 partially passed in a register unless its data type forbids.
4273 For 64-bit, the argument registers are laid out as an array of 16 elements
4274 and arguments are added sequentially. The first 6 int args and up to the
4275 first 16 fp args (depending on size) are passed in regs.
4277 Slot Stack Integral Float Float in structure Double Long Double
4278 ---- ----- -------- ----- ------------------ ------ -----------
4279 15 [SP+248] %f31 %f30,%f31 %d30
4280 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4281 13 [SP+232] %f27 %f26,%f27 %d26
4282 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4283 11 [SP+216] %f23 %f22,%f23 %d22
4284 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4285 9 [SP+200] %f19 %f18,%f19 %d18
4286 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4287 7 [SP+184] %f15 %f14,%f15 %d14
4288 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4289 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4290 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4291 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4292 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4293 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4294 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4296 Here SP = %sp if -mno-stack-bias, or %sp+stack_bias otherwise.
4298 Integral arguments are always passed as 64-bit quantities appropriately
4299 extended.
4301 Passing of floating point values is handled as follows.
4302 If a prototype is in scope:
4303 If the value is in a named argument (i.e. not a stdarg function or a
4304 value not part of the `...') then the value is passed in the appropriate
4305 fp reg.
4306 If the value is part of the `...' and is passed in one of the first 6
4307 slots then the value is passed in the appropriate int reg.
4308 If the value is part of the `...' and is not passed in one of the first 6
4309 slots then the value is passed in memory.
4310 If a prototype is not in scope:
4311 If the value is one of the first 6 arguments the value is passed in the
4312 appropriate integer reg and the appropriate fp reg.
4313 If the value is not one of the first 6 arguments the value is passed in
4314 the appropriate fp reg and in memory.
4317 Summary of the calling conventions implemented by GCC on SPARC:
4319 32-bit ABI:
4320 size argument return value
4322 small integer <4 int. reg. int. reg.
4323 word 4 int. reg. int. reg.
4324 double word 8 int. reg. int. reg.
4326 _Complex small integer <8 int. reg. int. reg.
4327 _Complex word 8 int. reg. int. reg.
4328 _Complex double word 16 memory int. reg.
4330 vector integer <=8 int. reg. FP reg.
4331 vector integer >8 memory memory
4333 float 4 int. reg. FP reg.
4334 double 8 int. reg. FP reg.
4335 long double 16 memory memory
4337 _Complex float 8 memory FP reg.
4338 _Complex double 16 memory FP reg.
4339 _Complex long double 32 memory FP reg.
4341 vector float any memory memory
4343 aggregate any memory memory
4347 64-bit ABI:
4348 size argument return value
4350 small integer <8 int. reg. int. reg.
4351 word 8 int. reg. int. reg.
4352 double word 16 int. reg. int. reg.
4354 _Complex small integer <16 int. reg. int. reg.
4355 _Complex word 16 int. reg. int. reg.
4356 _Complex double word 32 memory int. reg.
4358 vector integer <=16 FP reg. FP reg.
4359 vector integer 16<s<=32 memory FP reg.
4360 vector integer >32 memory memory
4362 float 4 FP reg. FP reg.
4363 double 8 FP reg. FP reg.
4364 long double 16 FP reg. FP reg.
4366 _Complex float 8 FP reg. FP reg.
4367 _Complex double 16 FP reg. FP reg.
4368 _Complex long double 32 memory FP reg.
4370 vector float <=16 FP reg. FP reg.
4371 vector float 16<s<=32 memory FP reg.
4372 vector float >32 memory memory
4374 aggregate <=16 reg. reg.
4375 aggregate 16<s<=32 memory reg.
4376 aggregate >32 memory memory
4380 Note #1: complex floating-point types follow the extended SPARC ABIs as
4381 implemented by the Sun compiler.
4383 Note #2: integral vector types follow the scalar floating-point types
4384 conventions to match what is implemented by the Sun VIS SDK.
4386 Note #3: floating-point vector types follow the aggregate types
4387 conventions. */
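/* Illustration only, not part of the GCC sources: sample declarations
   showing how the 64-bit rules tabulated above assign locations.  The
   struct and function names are hypothetical.  */
#if 0
struct s16 { double d; long l; };   /* 16 bytes: eligible for registers */
struct s32 { double d[4]; };        /* 32 bytes: passed in memory */

extern void sample (int a,          /* slot 0: %o0 */
                    double b,       /* slot 1: %d2 */
                    struct s16 c,   /* slots 2-3: %d4 (d) and %o3 (l) */
                    struct s32 d);  /* 16 < size <= 32: memory */
#endif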
4390 /* Maximum number of int regs for args. */
4391 #define SPARC_INT_ARG_MAX 6
4392 /* Maximum number of fp regs for args. */
4393 #define SPARC_FP_ARG_MAX 16
4395 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4397 /* Handle the INIT_CUMULATIVE_ARGS macro.
4398 Initialize a variable CUM of type CUMULATIVE_ARGS
4399 for a call to a function whose data type is FNTYPE.
4400 For a library call, FNTYPE is 0. */
4402 void
4403 init_cumulative_args (struct sparc_args *cum, tree fntype,
4404 rtx libname ATTRIBUTE_UNUSED,
4405 tree fndecl ATTRIBUTE_UNUSED)
4407 cum->words = 0;
4408 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4409 cum->libcall_p = fntype == 0;
4412 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4413 When a prototype says `char' or `short', really pass an `int'. */
4415 static bool
4416 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4418 return TARGET_ARCH32 ? true : false;
4421 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4423 static bool
4424 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4426 return TARGET_ARCH64 ? true : false;
4429 /* Scan the record type TYPE and return the following predicates:
4430 - INTREGS_P: the record contains at least one field or sub-field
4431 that is eligible for promotion in integer registers.
4432 - FPREGS_P: the record contains at least one field or sub-field
4433 that is eligible for promotion in floating-point registers.
4434 - PACKED_P: the record contains at least one field that is packed.
4436 Sub-fields are not taken into account for the PACKED_P predicate. */
4438 static void
4439 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4441 tree field;
4443 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4445 if (TREE_CODE (field) == FIELD_DECL)
4447 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4448 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4449 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4450 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4451 && TARGET_FPU)
4452 *fpregs_p = 1;
4453 else
4454 *intregs_p = 1;
4456 if (packed_p && DECL_PACKED (field))
4457 *packed_p = 1;
4462 /* Compute the slot number to pass an argument in.
4463 Return the slot number or -1 if passing on the stack.
4465 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4466 the preceding args and about the function being called.
4467 MODE is the argument's machine mode.
4468 TYPE is the data type of the argument (as a tree).
4469 This is null for libcalls where that information may
4470 not be available.
4471 NAMED is nonzero if this argument is a named parameter
4472 (otherwise it is an extra parameter matching an ellipsis).
4473 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4474 *PREGNO records the register number to use if scalar type.
4475 *PPADDING records the amount of padding needed in words. */
4477 static int
4478 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4479 tree type, int named, int incoming_p,
4480 int *pregno, int *ppadding)
4482 int regbase = (incoming_p
4483 ? SPARC_INCOMING_INT_ARG_FIRST
4484 : SPARC_OUTGOING_INT_ARG_FIRST);
4485 int slotno = cum->words;
4486 enum mode_class mclass;
4487 int regno;
4489 *ppadding = 0;
4491 if (type && TREE_ADDRESSABLE (type))
4492 return -1;
4494 if (TARGET_ARCH32
4495 && mode == BLKmode
4496 && type
4497 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4498 return -1;
4500 /* For SPARC64, objects requiring 16-byte alignment get it. */
4501 if (TARGET_ARCH64
4502 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4503 && (slotno & 1) != 0)
4504 slotno++, *ppadding = 1;
4506 mclass = GET_MODE_CLASS (mode);
4507 if (type && TREE_CODE (type) == VECTOR_TYPE)
4509 /* Vector types deserve special treatment because they are
4510 polymorphic wrt their mode, depending upon whether VIS
4511 instructions are enabled. */
4512 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4514 /* The SPARC port defines no floating-point vector modes. */
4515 gcc_assert (mode == BLKmode);
4517 else
4519 /* Integral vector types should either have a vector
4520 mode or an integral mode, because we are guaranteed
4521 by pass_by_reference that their size is not greater
4522 than 16 bytes and TImode is 16-byte wide. */
4523 gcc_assert (mode != BLKmode);
4525 /* Vector integers are handled like floats according to
4526 the Sun VIS SDK. */
4527 mclass = MODE_FLOAT;
4531 switch (mclass)
4533 case MODE_FLOAT:
4534 case MODE_COMPLEX_FLOAT:
4535 if (TARGET_ARCH64 && TARGET_FPU && named)
4537 if (slotno >= SPARC_FP_ARG_MAX)
4538 return -1;
4539 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4540 /* Arguments filling only one single FP register are
4541 right-justified in the outer double FP register. */
4542 if (GET_MODE_SIZE (mode) <= 4)
4543 regno++;
4544 break;
4546 /* fallthrough */
4548 case MODE_INT:
4549 case MODE_COMPLEX_INT:
4550 if (slotno >= SPARC_INT_ARG_MAX)
4551 return -1;
4552 regno = regbase + slotno;
4553 break;
4555 case MODE_RANDOM:
4556 if (mode == VOIDmode)
4557 /* MODE is VOIDmode when generating the actual call. */
4558 return -1;
4560 gcc_assert (mode == BLKmode);
4562 if (TARGET_ARCH32 || !type || (TREE_CODE (type) == UNION_TYPE))
4564 if (slotno >= SPARC_INT_ARG_MAX)
4565 return -1;
4566 regno = regbase + slotno;
4568 else /* TARGET_ARCH64 && type */
4570 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4572 /* First see what kinds of registers we would need. */
4573 if (TREE_CODE (type) == VECTOR_TYPE)
4574 fpregs_p = 1;
4575 else
4576 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4578 /* The ABI obviously doesn't specify how packed structures
4579 are passed. These are defined to be passed in int regs
4580 if possible, otherwise memory. */
4581 if (packed_p || !named)
4582 fpregs_p = 0, intregs_p = 1;
4584 /* If all arg slots are filled, then must pass on stack. */
4585 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4586 return -1;
4588 /* If there are only int args and all int arg slots are filled,
4589 then must pass on stack. */
4590 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4591 return -1;
4593 /* Note that even if all int arg slots are filled, fp members may
4594 still be passed in regs if such regs are available.
4595 *PREGNO isn't set because there may be more than one; it's up
4596 to the caller to compute them. */
4597 return slotno;
4599 break;
4601 default:
4602 gcc_unreachable ();
4605 *pregno = regno;
4606 return slotno;
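/* Illustration only, not part of the GCC sources: the 16-byte alignment
   rule applied above, in isolation.  An over-aligned argument may not
   start in an odd slot, so one slot of padding is inserted first.  */
#if 0
static int
align_arg_slot (int slotno, unsigned int align_in_bits, int *padding)
{
  *padding = 0;
  if (align_in_bits >= 128 && (slotno & 1) != 0)
    {
      slotno++;        /* skip the odd slot...         */
      *padding = 1;    /* ...and account for it in CUM */
    }
  return slotno;
}
#endif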
4609 /* Handle recursive register counting for structure field layout. */
4611 struct function_arg_record_value_parms
4613 rtx ret; /* return expression being built. */
4614 int slotno; /* slot number of the argument. */
4615 int named; /* whether the argument is named. */
4616 int regbase; /* regno of the base register. */
4617 int stack; /* 1 if part of the argument is on the stack. */
4618 int intoffset; /* offset of the first pending integer field. */
4619 unsigned int nregs; /* number of words passed in registers. */
4622 static void function_arg_record_value_3
4623 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4624 static void function_arg_record_value_2
4625 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4626 static void function_arg_record_value_1
4627 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4628 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4629 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4631 /* A subroutine of function_arg_record_value. Traverse the structure
4632 recursively and determine how many registers will be required. */
4634 static void
4635 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4636 struct function_arg_record_value_parms *parms,
4637 bool packed_p)
4639 tree field;
4641 /* We need to compute how many registers are needed so we can
4642 allocate the PARALLEL but before we can do that we need to know
4643 whether there are any packed fields. The ABI obviously doesn't
4644 specify how structures are passed in this case, so they are
4645 defined to be passed in int regs if possible, otherwise memory,
4646 regardless of whether there are fp values present. */
4648 if (! packed_p)
4649 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4651 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4653 packed_p = true;
4654 break;
4658 /* Compute how many registers we need. */
4659 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4661 if (TREE_CODE (field) == FIELD_DECL)
4663 HOST_WIDE_INT bitpos = startbitpos;
4665 if (DECL_SIZE (field) != 0)
4667 if (integer_zerop (DECL_SIZE (field)))
4668 continue;
4670 if (host_integerp (bit_position (field), 1))
4671 bitpos += int_bit_position (field);
4674 /* ??? FIXME: else assume zero offset. */
4676 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4677 function_arg_record_value_1 (TREE_TYPE (field),
4678 bitpos,
4679 parms,
4680 packed_p);
4681 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4682 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4683 && TARGET_FPU
4684 && parms->named
4685 && ! packed_p)
4687 if (parms->intoffset != -1)
4689 unsigned int startbit, endbit;
4690 int intslots, this_slotno;
4692 startbit = parms->intoffset & -BITS_PER_WORD;
4693 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4695 intslots = (endbit - startbit) / BITS_PER_WORD;
4696 this_slotno = parms->slotno + parms->intoffset
4697 / BITS_PER_WORD;
4699 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4701 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4702 /* We need to pass this field on the stack. */
4703 parms->stack = 1;
4706 parms->nregs += intslots;
4707 parms->intoffset = -1;
4710 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4711 If it wasn't true we wouldn't be here. */
4712 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4713 && DECL_MODE (field) == BLKmode)
4714 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4715 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4716 parms->nregs += 2;
4717 else
4718 parms->nregs += 1;
4720 else
4722 if (parms->intoffset == -1)
4723 parms->intoffset = bitpos;
4729 /* A subroutine of function_arg_record_value. Assign the bits of the
4730 structure between parms->intoffset and bitpos to integer registers. */
4732 static void
4733 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4734 struct function_arg_record_value_parms *parms)
4736 enum machine_mode mode;
4737 unsigned int regno;
4738 unsigned int startbit, endbit;
4739 int this_slotno, intslots, intoffset;
4740 rtx reg;
4742 if (parms->intoffset == -1)
4743 return;
4745 intoffset = parms->intoffset;
4746 parms->intoffset = -1;
4748 startbit = intoffset & -BITS_PER_WORD;
4749 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4750 intslots = (endbit - startbit) / BITS_PER_WORD;
4751 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4753 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4754 if (intslots <= 0)
4755 return;
4757 /* If this is the trailing part of a word, only load that much into
4758 the register. Otherwise load the whole register. Note that in
4759 the latter case we may pick up unwanted bits. It's not a problem
4760 at the moment, but we may wish to revisit it. */
4762 if (intoffset % BITS_PER_WORD != 0)
4763 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4764 MODE_INT);
4765 else
4766 mode = word_mode;
4768 intoffset /= BITS_PER_UNIT;
4769 do
4771 regno = parms->regbase + this_slotno;
4772 reg = gen_rtx_REG (mode, regno);
4773 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4774 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4776 this_slotno += 1;
4777 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4778 mode = word_mode;
4779 parms->nregs += 1;
4780 intslots -= 1;
4782 while (intslots > 0);
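/* Illustration only, not part of the GCC sources: the slot arithmetic
   used above.  A run of integer fields from bit INTOFFSET up to (but
   not including) bit BITPOS covers this many word-sized slots, after
   rounding the run outward to word boundaries.  */
#if 0
static int
int_slots_covered (int intoffset, int bitpos, int bits_per_word)
{
  int startbit = intoffset & -bits_per_word;
  int endbit = (bitpos + bits_per_word - 1) & -bits_per_word;
  return (endbit - startbit) / bits_per_word;
}
#endif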
4785 /* A subroutine of function_arg_record_value. Traverse the structure
4786 recursively and assign bits to floating point registers. Track which
4787 bits in between need integer registers; invoke function_arg_record_value_3
4788 to make that happen. */
4790 static void
4791 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4792 struct function_arg_record_value_parms *parms,
4793 bool packed_p)
4795 tree field;
4797 if (! packed_p)
4798 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4800 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4802 packed_p = true;
4803 break;
4807 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4809 if (TREE_CODE (field) == FIELD_DECL)
4811 HOST_WIDE_INT bitpos = startbitpos;
4813 if (DECL_SIZE (field) != 0)
4815 if (integer_zerop (DECL_SIZE (field)))
4816 continue;
4818 if (host_integerp (bit_position (field), 1))
4819 bitpos += int_bit_position (field);
4822 /* ??? FIXME: else assume zero offset. */
4824 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4825 function_arg_record_value_2 (TREE_TYPE (field),
4826 bitpos,
4827 parms,
4828 packed_p);
4829 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4830 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4831 && TARGET_FPU
4832 && parms->named
4833 && ! packed_p)
4835 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4836 int regno, nregs, pos;
4837 enum machine_mode mode = DECL_MODE (field);
4838 rtx reg;
4840 function_arg_record_value_3 (bitpos, parms);
4842 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4843 && mode == BLKmode)
4845 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4846 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4848 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4850 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4851 nregs = 2;
4853 else
4854 nregs = 1;
4856 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4857 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4858 regno++;
4859 reg = gen_rtx_REG (mode, regno);
4860 pos = bitpos / BITS_PER_UNIT;
4861 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4862 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4863 parms->nregs += 1;
4864 while (--nregs > 0)
4866 regno += GET_MODE_SIZE (mode) / 4;
4867 reg = gen_rtx_REG (mode, regno);
4868 pos += GET_MODE_SIZE (mode);
4869 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4870 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4871 parms->nregs += 1;
4874 else
4876 if (parms->intoffset == -1)
4877 parms->intoffset = bitpos;
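/* Illustration only, not part of the GCC sources: the FP register
   numbering used above.  Slot N corresponds to the register pair
   starting at %f(2N); a 4-byte value sitting in the high half of its
   8-byte slot is right-justified into the odd register.  */
#if 0
static int
fp_regno_for_field (int fp_arg_first, int slotno, int mode_size, int bitpos)
{
  int regno = fp_arg_first + slotno * 2;
  if (mode_size <= 4 && (bitpos & 32) != 0)
    regno++;
  return regno;
}
#endif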
4883 /* Used by function_arg and function_value to implement the complex
4884 conventions of the 64-bit ABI for passing and returning structures.
4885 Return an expression valid as a return value for the two macros
4886 FUNCTION_ARG and FUNCTION_VALUE.
4888 TYPE is the data type of the argument (as a tree).
4889 This is null for libcalls where that information may
4890 not be available.
4891 MODE is the argument's machine mode.
4892 SLOTNO is the index number of the argument's slot in the parameter array.
4893 NAMED is nonzero if this argument is a named parameter
4894 (otherwise it is an extra parameter matching an ellipsis).
4895 REGBASE is the regno of the base register for the parameter array. */
4897 static rtx
4898 function_arg_record_value (tree type, enum machine_mode mode,
4899 int slotno, int named, int regbase)
4901 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4902 struct function_arg_record_value_parms parms;
4903 unsigned int nregs;
4905 parms.ret = NULL_RTX;
4906 parms.slotno = slotno;
4907 parms.named = named;
4908 parms.regbase = regbase;
4909 parms.stack = 0;
4911 /* Compute how many registers we need. */
4912 parms.nregs = 0;
4913 parms.intoffset = 0;
4914 function_arg_record_value_1 (type, 0, &parms, false);
4916 /* Take into account pending integer fields. */
4917 if (parms.intoffset != -1)
4919 unsigned int startbit, endbit;
4920 int intslots, this_slotno;
4922 startbit = parms.intoffset & -BITS_PER_WORD;
4923 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4924 intslots = (endbit - startbit) / BITS_PER_WORD;
4925 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4927 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4929 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4930 /* We need to pass this field on the stack. */
4931 parms.stack = 1;
4934 parms.nregs += intslots;
4936 nregs = parms.nregs;
4938 /* Allocate the vector and handle some annoying special cases. */
4939 if (nregs == 0)
4941 /* ??? Empty structure has no value? Duh? */
4942 if (typesize <= 0)
4944 /* Though there's nothing really to store, return a word register
4945 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
4946 leads to breakage due to the fact that there are zero bytes to
4947 load. */
4948 return gen_rtx_REG (mode, regbase);
4950 else
4952 /* ??? C++ has structures with no fields, and yet a size. Give up
4953 for now and pass everything back in integer registers. */
4954 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4956 if (nregs + slotno > SPARC_INT_ARG_MAX)
4957 nregs = SPARC_INT_ARG_MAX - slotno;
4959 gcc_assert (nregs != 0);
4961 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
4963 /* If at least one field must be passed on the stack, generate
4964 (parallel [(expr_list (nil) ...) ...]) so that all fields will
4965 also be passed on the stack. We can't do much better because the
4966 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
4967 of structures for which the fields passed exclusively in registers
4968 are not at the beginning of the structure. */
4969 if (parms.stack)
4970 XVECEXP (parms.ret, 0, 0)
4971 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4973 /* Fill in the entries. */
4974 parms.nregs = 0;
4975 parms.intoffset = 0;
4976 function_arg_record_value_2 (type, 0, &parms, false);
4977 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4979 gcc_assert (parms.nregs == nregs);
4981 return parms.ret;
4984 /* Used by function_arg and function_value to implement the conventions
4985 of the 64-bit ABI for passing and returning unions.
4986 Return an expression valid as a return value for the two macros
4987 FUNCTION_ARG and FUNCTION_VALUE.
4989 SIZE is the size in bytes of the union.
4990 MODE is the argument's machine mode.
4991 REGNO is the hard register the union will be passed in. */
4993 static rtx
4994 function_arg_union_value (int size, enum machine_mode mode, int slotno,
4995 int regno)
4997 int nwords = ROUND_ADVANCE (size), i;
4998 rtx regs;
5000 /* See comment in previous function for empty structures. */
5001 if (nwords == 0)
5002 return gen_rtx_REG (mode, regno);
5004 if (slotno == SPARC_INT_ARG_MAX - 1)
5005 nwords = 1;
5007 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5009 for (i = 0; i < nwords; i++)
5011 /* Unions are passed left-justified. */
5012 XVECEXP (regs, 0, i)
5013 = gen_rtx_EXPR_LIST (VOIDmode,
5014 gen_rtx_REG (word_mode, regno),
5015 GEN_INT (UNITS_PER_WORD * i));
5016 regno++;
5019 return regs;
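/* Illustration only, not part of the GCC sources: a 12-byte union passed
   at slot 2 occupies ROUND_ADVANCE (12) = 2 word registers, laid out
   left-justified as (%o2, byte offset 0) and (%o3, byte offset 8).  */
#if 0
union u12 { char bytes[12]; double d; };

static int
union_words (int size, int units_per_word)
{
  return (size + units_per_word - 1) / units_per_word;  /* ROUND_ADVANCE */
}
#endif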
5022 /* Used by function_arg and function_value to implement the conventions
5023 for passing and returning large (BLKmode) vectors.
5024 Return an expression valid as a return value for the two macros
5025 FUNCTION_ARG and FUNCTION_VALUE.
5027 SIZE is the size in bytes of the vector.
5028 BASE_MODE is the argument's base machine mode.
5029 REGNO is the FP hard register the vector will be passed in. */
5031 static rtx
5032 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5034 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5035 int nregs = size / base_mode_size, i;
5036 rtx regs;
5038 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5040 for (i = 0; i < nregs; i++)
5042 XVECEXP (regs, 0, i)
5043 = gen_rtx_EXPR_LIST (VOIDmode,
5044 gen_rtx_REG (base_mode, regno),
5045 GEN_INT (base_mode_size * i));
5046 regno += base_mode_size / 4;
5049 return regs;
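/* Illustration only, not part of the GCC sources: the register stride
   used above.  FP registers are numbered in 4-byte quantities, so each
   vector element advances the regno by GET_MODE_SIZE (base_mode) / 4.  */
#if 0
static int
vector_elt_regno (int first_regno, int base_mode_size, int elt)
{
  return first_regno + elt * (base_mode_size / 4);
}
#endif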
5052 /* Handle the FUNCTION_ARG macro.
5053 Determine where to put an argument to a function.
5054 Value is zero to push the argument on the stack,
5055 or a hard register in which to store the argument.
5057 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5058 the preceding args and about the function being called.
5059 MODE is the argument's machine mode.
5060 TYPE is the data type of the argument (as a tree).
5061 This is null for libcalls where that information may
5062 not be available.
5063 NAMED is nonzero if this argument is a named parameter
5064 (otherwise it is an extra parameter matching an ellipsis).
5065 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5067 rtx
5068 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5069 tree type, int named, int incoming_p)
5071 int regbase = (incoming_p
5072 ? SPARC_INCOMING_INT_ARG_FIRST
5073 : SPARC_OUTGOING_INT_ARG_FIRST);
5074 int slotno, regno, padding;
5075 enum mode_class mclass = GET_MODE_CLASS (mode);
5076 rtx reg;
5078 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5079 &regno, &padding);
5081 if (slotno == -1)
5082 return 0;
5084 if (TARGET_ARCH32)
5086 reg = gen_rtx_REG (mode, regno);
5087 return reg;
5090 if (type && TREE_CODE (type) == RECORD_TYPE)
5092 /* Structures up to 16 bytes in size are passed in arg slots on the
5093 stack and are promoted to registers where possible. */
5095 gcc_assert (int_size_in_bytes (type) <= 16);
5097 return function_arg_record_value (type, mode, slotno, named, regbase);
5099 else if (type && TREE_CODE (type) == UNION_TYPE)
5101 HOST_WIDE_INT size = int_size_in_bytes (type);
5103 gcc_assert (size <= 16);
5105 return function_arg_union_value (size, mode, slotno, regno);
5107 else if (type && TREE_CODE (type) == VECTOR_TYPE)
5109 /* Vector types deserve special treatment because they are
5110 polymorphic wrt their mode, depending upon whether VIS
5111 instructions are enabled. */
5112 HOST_WIDE_INT size = int_size_in_bytes (type);
5114 gcc_assert (size <= 16);
5116 if (mode == BLKmode)
5117 return function_arg_vector_value (size,
5118 TYPE_MODE (TREE_TYPE (type)),
5119 SPARC_FP_ARG_FIRST + 2*slotno);
5120 else
5121 mclass = MODE_FLOAT;
5124 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5125 but also have the slot allocated for them.
5126 If no prototype is in scope fp values in register slots get passed
5127 in two places, either fp regs and int regs or fp regs and memory. */
5128 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5129 && SPARC_FP_REG_P (regno))
5131 reg = gen_rtx_REG (mode, regno);
5132 if (cum->prototype_p || cum->libcall_p)
5134 /* "* 2" because fp reg numbers are recorded in 4 byte
5135 quantities. */
5136 #if 0
5137 /* ??? This will cause the value to be passed in the fp reg and
5138 in the stack. When a prototype exists we want to pass the
5139 value in the reg but reserve space on the stack. That's an
5140 optimization, and is deferred [for a bit]. */
5141 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5142 return gen_rtx_PARALLEL (mode,
5143 gen_rtvec (2,
5144 gen_rtx_EXPR_LIST (VOIDmode,
5145 NULL_RTX, const0_rtx),
5146 gen_rtx_EXPR_LIST (VOIDmode,
5147 reg, const0_rtx)));
5148 else
5149 #else
5150 /* ??? It seems that passing back a register even when past
5151 the area declared by REG_PARM_STACK_SPACE will allocate
5152 space appropriately, and will not copy the data onto the
5153 stack, exactly as we desire.
5155 This is due to locate_and_pad_parm being called in
5156 expand_call whenever reg_parm_stack_space > 0, which
5157 while beneficial to our example here, would seem to be
5158 in error from what had been intended. Ho hum... -- r~ */
5159 #endif
5160 return reg;
5162 else
5164 rtx v0, v1;
5166 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5168 int intreg;
5170 /* On incoming, we don't need to know that the value
5171 is passed in %f0 and %i0, and it confuses other parts
5172 causing needless spillage even on the simplest cases. */
5173 if (incoming_p)
5174 return reg;
5176 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5177 + (regno - SPARC_FP_ARG_FIRST) / 2);
5179 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5180 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5181 const0_rtx);
5182 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5184 else
5186 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5187 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5188 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5192 else
5194 /* Scalar or complex int. */
5195 reg = gen_rtx_REG (mode, regno);
5198 return reg;
5201 /* For an arg passed partly in registers and partly in memory,
5202 this is the number of bytes of registers used.
5203 For args passed entirely in registers or entirely in memory, zero.
5205 Any arg that starts in the first 6 regs but won't entirely fit in them
5206 needs partial registers on v8. On v9, structures with integer
5207 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5208 values that begin in the last fp reg [where "last fp reg" varies with the
5209 mode] will be split between that reg and memory. */
5211 static int
5212 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5213 tree type, bool named)
5215 int slotno, regno, padding;
5217 /* We pass 0 for incoming_p here; it doesn't matter. */
5218 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5220 if (slotno == -1)
5221 return 0;
5223 if (TARGET_ARCH32)
5225 if ((slotno + (mode == BLKmode
5226 ? ROUND_ADVANCE (int_size_in_bytes (type))
5227 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5228 > SPARC_INT_ARG_MAX)
5229 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5231 else
5233 /* We are guaranteed by pass_by_reference that the size of the
5234 argument is not greater than 16 bytes, so we only need to return
5235 one word if the argument is partially passed in registers. */
5237 if (type && AGGREGATE_TYPE_P (type))
5239 int size = int_size_in_bytes (type);
5241 if (size > UNITS_PER_WORD
5242 && slotno == SPARC_INT_ARG_MAX - 1)
5243 return UNITS_PER_WORD;
5245 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5246 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5247 && ! (TARGET_FPU && named)))
5249 /* The complex types are passed as packed types. */
5250 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5251 && slotno == SPARC_INT_ARG_MAX - 1)
5252 return UNITS_PER_WORD;
5254 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5256 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5257 > SPARC_FP_ARG_MAX)
5258 return UNITS_PER_WORD;
5262 return 0;
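/* Illustration only, not part of the GCC sources: the 64-bit aggregate
   case above.  A 16-byte aggregate whose first word lands in the last
   integer slot is split between %o5 and the stack, so exactly one
   word's worth of bytes is reported as passed in registers.  */
#if 0
static int
aggregate_partial_bytes (int slotno, int size, int units_per_word,
                         int int_arg_max)
{
  if (size > units_per_word && slotno == int_arg_max - 1)
    return units_per_word;
  return 0;
}
#endif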
5265 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5266 Specify whether to pass the argument by reference. */
5268 static bool
5269 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5270 enum machine_mode mode, tree type,
5271 bool named ATTRIBUTE_UNUSED)
5273 if (TARGET_ARCH32)
5275 /* Original SPARC 32-bit ABI says that structures and unions,
5276 and quad-precision floats are passed by reference. For Pascal,
5277 also pass arrays by reference. All other base types are passed
5278 in registers.
5280 Extended ABI (as implemented by the Sun compiler) says that all
5281 complex floats are passed by reference. Pass complex integers
5282 in registers up to 8 bytes. More generally, enforce the 2-word
5283 cap for passing arguments in registers.
5285 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5286 integers are passed like floats of the same size, that is in
5287 registers up to 8 bytes. Pass all vector floats by reference
5288 like structure and unions. */
5289 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5290 || mode == SCmode
5291 /* Catch CDImode, TFmode, DCmode and TCmode. */
5292 || GET_MODE_SIZE (mode) > 8
5293 || (type
5294 && TREE_CODE (type) == VECTOR_TYPE
5295 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5297 else
5299 /* Original SPARC 64-bit ABI says that structures and unions
5300 smaller than 16 bytes are passed in registers, as well as
5301 all other base types. For Pascal, pass arrays by reference.
5303 Extended ABI (as implemented by the Sun compiler) says that
5304 complex floats are passed in registers up to 16 bytes. Pass
5305 all complex integers in registers up to 16 bytes. More generally,
5306 enforce the 2-word cap for passing arguments in registers.
5308 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5309 integers are passed like floats of the same size, that is in
5310 registers (up to 16 bytes). Pass all vector floats like structure
5311 and unions. */
5312 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5313 || (type
5314 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5315 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5316 /* Catch CTImode and TCmode. */
5317 || GET_MODE_SIZE (mode) > 16);
5321 /* Handle the FUNCTION_ARG_ADVANCE macro.
5322 Update the data in CUM to advance over an argument
5323 of mode MODE and data type TYPE.
5324 TYPE is null for libcalls where that information may not be available. */
5326 void
5327 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5328 tree type, int named)
5330 int slotno, regno, padding;
5332 /* We pass 0 for incoming_p here; it doesn't matter. */
5333 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5335 /* If the register required leading padding, add it. */
5336 if (slotno != -1)
5337 cum->words += padding;
5339 if (TARGET_ARCH32)
5341 cum->words += (mode != BLKmode
5342 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5343 : ROUND_ADVANCE (int_size_in_bytes (type)));
5345 else
5347 if (type && AGGREGATE_TYPE_P (type))
5349 int size = int_size_in_bytes (type);
5351 if (size <= 8)
5352 ++cum->words;
5353 else if (size <= 16)
5354 cum->words += 2;
5355 else /* passed by reference */
5356 ++cum->words;
5358 else
5360 cum->words += (mode != BLKmode
5361 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5362 : ROUND_ADVANCE (int_size_in_bytes (type)));
5367 /* Handle the FUNCTION_ARG_PADDING macro.
5368 For the 64 bit ABI structs are always stored left shifted in their
5369 argument slot. */
5371 enum direction
5372 function_arg_padding (enum machine_mode mode, tree type)
5374 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5375 return upward;
5377 /* Fall back to the default. */
5378 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5381 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5382 Specify whether to return the return value in memory. */
5384 static bool
5385 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5387 if (TARGET_ARCH32)
5388 /* Original SPARC 32-bit ABI says that structures and unions,
5389 and quad-precision floats are returned in memory. All other
5390 base types are returned in registers.
5392 Extended ABI (as implemented by the Sun compiler) says that
5393 all complex floats are returned in registers (8 FP registers
5394 at most for '_Complex long double'). Return all complex integers
5395 in registers (4 at most for '_Complex long long').
5397 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5398 integers are returned like floats of the same size, that is in
5399 registers up to 8 bytes and in memory otherwise. Return all
5400 vector floats in memory like structure and unions; note that
5401 they always have BLKmode like the latter. */
5402 return (TYPE_MODE (type) == BLKmode
5403 || TYPE_MODE (type) == TFmode
5404 || (TREE_CODE (type) == VECTOR_TYPE
5405 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5406 else
5407 /* Original SPARC 64-bit ABI says that structures and unions
5408 smaller than 32 bytes are returned in registers, as well as
5409 all other base types.
5411 Extended ABI (as implemented by the Sun compiler) says that all
5412 complex floats are returned in registers (8 FP registers at most
5413 for '_Complex long double'). Return all complex integers in
5414 registers (4 at most for '_Complex TItype').
5416 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5417 integers are returned like floats of the same size, that is in
5418 registers. Return all vector floats like structure and unions;
5419 note that they always have BLKmode like the latter. */
5420 return ((TYPE_MODE (type) == BLKmode
5421 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
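/* Illustration only, not part of the GCC sources: sample 32-bit cases
   and the answer the rules above should give for them (a sketch, not
   authoritative).  */
#if 0
typedef float vf4 __attribute__ ((vector_size (16)));
/* long double      TFmode               -> memory                   */
/* _Complex double  DCmode, not BLK/TF   -> registers (extended ABI) */
/* vf4              BLKmode, size > 8    -> memory (vector ABI)      */
#endif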
5424 /* Handle the TARGET_STRUCT_VALUE target hook.
5425 Return where to find the structure return value address. */
5427 static rtx
5428 sparc_struct_value_rtx (tree fndecl, int incoming)
5430 if (TARGET_ARCH64)
5431 return 0;
5432 else
5434 rtx mem;
5436 if (incoming)
5437 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5438 STRUCT_VALUE_OFFSET));
5439 else
5440 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5441 STRUCT_VALUE_OFFSET));
5443 /* Only follow the SPARC ABI for fixed-size structure returns.
5444 Variable size structure returns are handled per the normal
5445 procedures in GCC. This is enabled by -mstd-struct-return. */
5446 if (incoming == 2
5447 && sparc_std_struct_return
5448 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5449 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5451 /* We must check and adjust the return address, as it is
5452 optional whether the return object is actually
5453 provided. */
5454 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5455 rtx scratch = gen_reg_rtx (SImode);
5456 rtx endlab = gen_label_rtx ();
5458 /* Calculate the return object size */
5459 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5460 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5461 /* Construct a temporary return value */
5462 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5464 /* Implement the SPARC 32-bit psABI callee return struct checking
5465 requirements:
5467 Fetch the instruction where we will return to and see if
5468 it's an unimp instruction (the most significant 10 bits
5469 will be zero). */
5470 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5471 plus_constant (ret_rtx, 8)));
5472 /* Assume the size is valid and pre-adjust */
5473 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5474 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5475 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5476 /* Assign stack temp:
5477 Write the address of the memory pointed to by temp_val into
5478 the memory pointed to by mem */
5479 emit_move_insn (mem, XEXP (temp_val, 0));
5480 emit_label (endlab);
5483 set_mem_alias_set (mem, struct_value_alias_set);
5484 return mem;
5488 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5489 For v9, function return values are subject to the same rules as arguments,
5490 except that up to 32 bytes may be returned in registers. */
5492 rtx
5493 function_value (tree type, enum machine_mode mode, int incoming_p)
5495 /* Beware that the two values are swapped here wrt function_arg. */
5496 int regbase = (incoming_p
5497 ? SPARC_OUTGOING_INT_ARG_FIRST
5498 : SPARC_INCOMING_INT_ARG_FIRST);
5499 enum mode_class mclass = GET_MODE_CLASS (mode);
5500 int regno;
5502 if (type && TREE_CODE (type) == VECTOR_TYPE)
5504 /* Vector types deserve special treatment because they are
5505 polymorphic wrt their mode, depending upon whether VIS
5506 instructions are enabled. */
5507 HOST_WIDE_INT size = int_size_in_bytes (type);
5509 gcc_assert ((TARGET_ARCH32 && size <= 8)
5510 || (TARGET_ARCH64 && size <= 32));
5512 if (mode == BLKmode)
5513 return function_arg_vector_value (size,
5514 TYPE_MODE (TREE_TYPE (type)),
5515 SPARC_FP_ARG_FIRST);
5516 else
5517 mclass = MODE_FLOAT;
5519 else if (type && TARGET_ARCH64)
5521 if (TREE_CODE (type) == RECORD_TYPE)
5523 /* Structures up to 32 bytes in size are passed in registers,
5524 promoted to fp registers where possible. */
5526 gcc_assert (int_size_in_bytes (type) <= 32);
5528 return function_arg_record_value (type, mode, 0, 1, regbase);
5530 else if (TREE_CODE (type) == UNION_TYPE)
5532 HOST_WIDE_INT size = int_size_in_bytes (type);
5534 gcc_assert (size <= 32);
5536 return function_arg_union_value (size, mode, 0, regbase);
5538 else if (AGGREGATE_TYPE_P (type))
5540 /* All other aggregate types are passed in an integer register
5541 in a mode corresponding to the size of the type. */
5542 HOST_WIDE_INT bytes = int_size_in_bytes (type);
5544 gcc_assert (bytes <= 32);
5546 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
5548 /* ??? We probably should have made the same ABI change in
5549 3.4.0 as the one we made for unions. The latter was
5550 required by the SCD though, while the former is not
5551 specified, so we favored compatibility and efficiency.
5553 Now we're stuck for aggregates larger than 16 bytes,
5554 because OImode vanished in the meantime. Let's not
5555 try to be unduly clever, and simply follow the ABI
5556 for unions in that case. */
5557 if (mode == BLKmode)
5558 return function_arg_union_value (bytes, mode, 0, regbase);
5559 else
5560 mclass = MODE_INT;
5562 else if (mclass == MODE_INT
5563 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5564 mode = word_mode;
5567 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5568 && TARGET_FPU)
5569 regno = SPARC_FP_ARG_FIRST;
5570 else
5571 regno = regbase;
5573 return gen_rtx_REG (mode, regno);
5576 /* Do what is necessary for `va_start'. We look at the current function
5577 to determine if stdarg or varargs is used and return the address of
5578 the first unnamed parameter. */
5580 static rtx
5581 sparc_builtin_saveregs (void)
5583 int first_reg = current_function_args_info.words;
5584 rtx address;
5585 int regno;
5587 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5588 emit_move_insn (gen_rtx_MEM (word_mode,
5589 gen_rtx_PLUS (Pmode,
5590 frame_pointer_rtx,
5591 GEN_INT (FIRST_PARM_OFFSET (0)
5592 + (UNITS_PER_WORD
5593 * regno)))),
5594 gen_rtx_REG (word_mode,
5595 SPARC_INCOMING_INT_ARG_FIRST + regno));
5597 address = gen_rtx_PLUS (Pmode,
5598 frame_pointer_rtx,
5599 GEN_INT (FIRST_PARM_OFFSET (0)
5600 + UNITS_PER_WORD * first_reg));
5602 return address;
5605 /* Implement `va_start' for stdarg. */
5607 void
5608 sparc_va_start (tree valist, rtx nextarg)
5610 nextarg = expand_builtin_saveregs ();
5611 std_expand_builtin_va_start (valist, nextarg);
5614 /* Implement `va_arg' for stdarg. */
5616 static tree
5617 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5619 HOST_WIDE_INT size, rsize, align;
5620 tree addr, incr;
5621 bool indirect;
5622 tree ptrtype = build_pointer_type (type);
5624 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5626 indirect = true;
5627 size = rsize = UNITS_PER_WORD;
5628 align = 0;
5630 else
5632 indirect = false;
5633 size = int_size_in_bytes (type);
5634 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5635 align = 0;
5637 if (TARGET_ARCH64)
5639 /* For SPARC64, objects requiring 16-byte alignment get it. */
5640 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5641 align = 2 * UNITS_PER_WORD;
5643 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5644 are left-justified in their slots. */
5645 if (AGGREGATE_TYPE_P (type))
5647 if (size == 0)
5648 size = rsize = UNITS_PER_WORD;
5649 else
5650 size = rsize;
5655 incr = valist;
5656 if (align)
5658 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5659 ssize_int (align - 1)));
5660 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5661 ssize_int (-align)));
5664 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5665 addr = incr;
5667 if (BYTES_BIG_ENDIAN && size < rsize)
5668 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5669 ssize_int (rsize - size)));
5671 if (indirect)
5673 addr = fold_convert (build_pointer_type (ptrtype), addr);
5674 addr = build_va_arg_indirect_ref (addr);
5676 /* If the address isn't aligned properly for the type,
5677 we may need to copy to a temporary.
5678 FIXME: This is inefficient. Usually we can do this
5679 in registers. */
5680 else if (align == 0
5681 && TYPE_ALIGN (type) > BITS_PER_WORD)
5683 tree tmp = create_tmp_var (type, "va_arg_tmp");
5684 tree dest_addr = build_fold_addr_expr (tmp);
5686 tree copy = build_function_call_expr
5687 (implicit_built_in_decls[BUILT_IN_MEMCPY],
5688 tree_cons (NULL_TREE, dest_addr,
5689 tree_cons (NULL_TREE, addr,
5690 tree_cons (NULL_TREE, size_int (rsize),
5691 NULL_TREE))));
5693 gimplify_and_add (copy, pre_p);
5694 addr = dest_addr;
5696 else
5697 addr = fold_convert (ptrtype, addr);
5699 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5700 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5701 gimplify_and_add (incr, post_p);
5703 return build_va_arg_indirect_ref (addr);
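/* Illustration only, not part of the GCC sources: the pointer rounding
   the ALIGN handling above performs for 16-byte aligned types, written
   out as plain C (assumes pointers fit in unsigned long).  */
#if 0
static char *
round_up_to_align (char *p, unsigned long align)
{
  return (char *) (((unsigned long) p + align - 1) & ~(align - 1));
}
#endif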
5706 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5707 Specify whether the vector mode is supported by the hardware. */
5709 static bool
5710 sparc_vector_mode_supported_p (enum machine_mode mode)
5712 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5715 /* Return the string to output an unconditional branch to LABEL, which is
5716 the operand number of the label.
5718 DEST is the destination insn (i.e. the label), INSN is the source. */
5720 const char *
5721 output_ubranch (rtx dest, int label, rtx insn)
5723 static char string[64];
5724 bool v9_form = false;
5725 char *p;
5727 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5729 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5730 - INSN_ADDRESSES (INSN_UID (insn)));
5731 /* Leave some instructions for "slop". */
5732 if (delta >= -260000 && delta < 260000)
5733 v9_form = true;
5736 if (v9_form)
5737 strcpy (string, "ba%*,pt\t%%xcc, ");
5738 else
5739 strcpy (string, "b%*\t");
5741 p = strchr (string, '\0');
5742 *p++ = '%';
5743 *p++ = 'l';
5744 *p++ = '0' + label;
5745 *p++ = '%';
5746 *p++ = '(';
5747 *p = '\0';
5749 return string;
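/* Illustration only, not part of the GCC sources: the distance test used
   above.  The V9 "ba,pt %xcc" form reaches +-1MB, and the +-260000-byte
   window leaves generous slop below that limit.  */
#if 0
static int
v9_branch_in_range (long delta)
{
  return delta >= -260000 && delta < 260000;
}
#endif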
5752 /* Return the string to output a conditional branch to LABEL, which is
5753 the operand number of the label. OP is the conditional expression.
5754 XEXP (OP, 0) is assumed to be a condition code register (integer or
5755 floating point) and its mode specifies what kind of comparison we made.
5757 DEST is the destination insn (i.e. the label), INSN is the source.
5759 REVERSED is nonzero if we should reverse the sense of the comparison.
5761 ANNUL is nonzero if we should generate an annulling branch. */
5763 const char *
5764 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5765 rtx insn)
5767 static char string[64];
5768 enum rtx_code code = GET_CODE (op);
5769 rtx cc_reg = XEXP (op, 0);
5770 enum machine_mode mode = GET_MODE (cc_reg);
5771 const char *labelno, *branch;
5772 int spaces = 8, far;
5773 char *p;
5775 /* v9 branches are limited to +-1MB. If it is too far away,
5776 change
5778 bne,pt %xcc, .LC30
5780 to
5782 be,pn %xcc, .+12
5783 nop
5784 ba .LC30
5786 and
5788 fbne,a,pn %fcc2, .LC29
5790 to
5792 fbe,pt %fcc2, .+16
5793 nop
5794 ba .LC29 */
5796 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5797 if (reversed ^ far)
5799 /* Reversal of FP compares takes care -- an ordered compare
5800 becomes an unordered compare and vice versa. */
5801 if (mode == CCFPmode || mode == CCFPEmode)
5802 code = reverse_condition_maybe_unordered (code);
5803 else
5804 code = reverse_condition (code);
5807 /* Start by writing the branch condition. */
5808 if (mode == CCFPmode || mode == CCFPEmode)
5810 switch (code)
5812 case NE:
5813 branch = "fbne";
5814 break;
5815 case EQ:
5816 branch = "fbe";
5817 break;
5818 case GE:
5819 branch = "fbge";
5820 break;
5821 case GT:
5822 branch = "fbg";
5823 break;
5824 case LE:
5825 branch = "fble";
5826 break;
5827 case LT:
5828 branch = "fbl";
5829 break;
5830 case UNORDERED:
5831 branch = "fbu";
5832 break;
5833 case ORDERED:
5834 branch = "fbo";
5835 break;
5836 case UNGT:
5837 branch = "fbug";
5838 break;
5839 case UNLT:
5840 branch = "fbul";
5841 break;
5842 case UNEQ:
5843 branch = "fbue";
5844 break;
5845 case UNGE:
5846 branch = "fbuge";
5847 break;
5848 case UNLE:
5849 branch = "fbule";
5850 break;
5851 case LTGT:
5852 branch = "fblg";
5853 break;
5855 default:
5856 gcc_unreachable ();
5859 /* ??? !v9: FP branches cannot be preceded by another floating point
5860 insn. Because there is currently no concept of pre-delay slots,
5861 we can fix this only by always emitting a nop before a floating
5862 point branch. */
5864 string[0] = '\0';
5865 if (! TARGET_V9)
5866 strcpy (string, "nop\n\t");
5867 strcat (string, branch);
5869 else
5871 switch (code)
5873 case NE:
5874 branch = "bne";
5875 break;
5876 case EQ:
5877 branch = "be";
5878 break;
5879 case GE:
5880 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5881 branch = "bpos";
5882 else
5883 branch = "bge";
5884 break;
5885 case GT:
5886 branch = "bg";
5887 break;
5888 case LE:
5889 branch = "ble";
5890 break;
5891 case LT:
5892 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5893 branch = "bneg";
5894 else
5895 branch = "bl";
5896 break;
5897 case GEU:
5898 branch = "bgeu";
5899 break;
5900 case GTU:
5901 branch = "bgu";
5902 break;
5903 case LEU:
5904 branch = "bleu";
5905 break;
5906 case LTU:
5907 branch = "blu";
5908 break;
5910 default:
5911 gcc_unreachable ();
5913 strcpy (string, branch);
5915 spaces -= strlen (branch);
5916 p = strchr (string, '\0');
5918 /* Now add the annulling, the label, and a possible nop.  */
5919 if (annul && ! far)
5921 strcpy (p, ",a");
5922 p += 2;
5923 spaces -= 2;
5926 if (TARGET_V9)
5928 rtx note;
5929 int v8 = 0;
5931 if (! far && insn && INSN_ADDRESSES_SET_P ())
5933 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5934 - INSN_ADDRESSES (INSN_UID (insn)));
5935 /* Leave some instructions for "slop". */
5936 if (delta < -260000 || delta >= 260000)
5937 v8 = 1;
5940 if (mode == CCFPmode || mode == CCFPEmode)
5942 static char v9_fcc_labelno[] = "%%fccX, ";
5943 /* Set the char indicating the number of the fcc reg to use. */
5944 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5945 labelno = v9_fcc_labelno;
5946 if (v8)
5948 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
5949 labelno = "";
5952 else if (mode == CCXmode || mode == CCX_NOOVmode)
5954 labelno = "%%xcc, ";
5955 gcc_assert (! v8);
5957 else
5959 labelno = "%%icc, ";
5960 if (v8)
5961 labelno = "";
5964 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
5966 strcpy (p,
5967 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
5968 ? ",pt" : ",pn");
5969 p += 3;
5970 spaces -= 3;
5973 else
5974 labelno = "";
5976 if (spaces > 0)
5977 *p++ = '\t';
5978 else
5979 *p++ = ' ';
5980 strcpy (p, labelno);
5981 p = strchr (p, '\0');
5982 if (far)
5984 strcpy (p, ".+12\n\t nop\n\tb\t");
5985 /* Skip the next insn if requested or if we know that it will be a nop
5986 (patch the "2" in ".+12" so the inverse branch targets .+16, hopping over the delay slot as well).  */
5987 if (annul || ! final_sequence)
5988 p[3] = '6';
5989 p += 14;
5991 *p++ = '%';
5992 *p++ = 'l';
5993 *p++ = label + '0';
5994 *p++ = '%';
5995 *p++ = '#';
5996 *p = '\0';
5998 return string;
6001 /* Emit a library call comparison between floating point X and Y.
6002 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6003 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6004 values as arguments instead of the TFmode registers themselves,
6005 that's why we cannot call emit_float_lib_cmp. */
6006 void
6007 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6009 const char *qpfunc;
6010 rtx slot0, slot1, result, tem, tem2;
6011 enum machine_mode mode;
6013 switch (comparison)
6015 case EQ:
6016 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6017 break;
6019 case NE:
6020 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6021 break;
6023 case GT:
6024 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6025 break;
6027 case GE:
6028 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6029 break;
6031 case LT:
6032 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6033 break;
6035 case LE:
6036 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6037 break;
6039 case ORDERED:
6040 case UNORDERED:
6041 case UNGT:
6042 case UNLT:
6043 case UNEQ:
6044 case UNGE:
6045 case UNLE:
6046 case LTGT:
6047 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6048 break;
6050 default:
6051 gcc_unreachable ();
6054 if (TARGET_ARCH64)
6056 if (GET_CODE (x) != MEM)
6058 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6059 emit_move_insn (slot0, x);
6061 else
6062 slot0 = x;
6064 if (GET_CODE (y) != MEM)
6066 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6067 emit_move_insn (slot1, y);
6069 else
6070 slot1 = y;
6072 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6073 DImode, 2,
6074 XEXP (slot0, 0), Pmode,
6075 XEXP (slot1, 0), Pmode);
6077 mode = DImode;
6079 else
6081 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6082 SImode, 2,
6083 x, TFmode, y, TFmode);
6085 mode = SImode;
6089 /* Immediately move the result of the libcall into a pseudo
6090 register so reload doesn't clobber the value if it needs
6091 the return register for a spill reg. */
6092 result = gen_reg_rtx (mode);
6093 emit_move_insn (result, hard_libcall_value (mode));
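/* The comparison routines return 0 for equal, 1 for less, 2 for greater
   and 3 for unordered (see the ORDERED/UNORDERED case below); the
   compares that follow decode that encoding.  E.g. for UNEQ,
   (result + 1) & 2 is zero exactly when result is 0 (equal) or
   3 (unordered).  */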
6095 switch (comparison)
6097 default:
6098 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6099 break;
6100 case ORDERED:
6101 case UNORDERED:
6102 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
6103 NULL_RTX, mode, 0);
6104 break;
6105 case UNGT:
6106 case UNGE:
6107 emit_cmp_insn (result, const1_rtx,
6108 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6109 break;
6110 case UNLE:
6111 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6112 break;
6113 case UNLT:
6114 tem = gen_reg_rtx (mode);
6115 if (TARGET_ARCH32)
6116 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6117 else
6118 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6119 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6120 break;
6121 case UNEQ:
6122 case LTGT:
6123 tem = gen_reg_rtx (mode);
6124 if (TARGET_ARCH32)
6125 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6126 else
6127 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6128 tem2 = gen_reg_rtx (mode);
6129 if (TARGET_ARCH32)
6130 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6131 else
6132 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6133 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6134 NULL_RTX, mode, 0);
6135 break;
6139 /* Generate an unsigned DImode to FP conversion. This is the same code
6140 optabs would emit if we didn't have TFmode patterns. */
6142 void
6143 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6145 rtx neglab, donelab, i0, i1, f0, in, out;
6147 out = operands[0];
6148 in = force_reg (DImode, operands[1]);
6149 neglab = gen_label_rtx ();
6150 donelab = gen_label_rtx ();
6151 i0 = gen_reg_rtx (DImode);
6152 i1 = gen_reg_rtx (DImode);
6153 f0 = gen_reg_rtx (mode);
6155 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6157 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6158 emit_jump_insn (gen_jump (donelab));
6159 emit_barrier ();
6161 emit_label (neglab);
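/* IN has its top bit set: halve it, folding the lost low bit back in as
   a rounding "sticky" bit (i0 = (in >> 1) | (in & 1)), convert the
   now-positive value, and double the result.  */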
6163 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6164 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6165 emit_insn (gen_iordi3 (i0, i0, i1));
6166 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6167 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6169 emit_label (donelab);
6172 /* Generate an FP to unsigned DImode conversion. This is the same code
6173 optabs would emit if we didn't have TFmode patterns. */
6175 void
6176 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6178 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6180 out = operands[0];
6181 in = force_reg (mode, operands[1]);
6182 neglab = gen_label_rtx ();
6183 donelab = gen_label_rtx ();
6184 i0 = gen_reg_rtx (DImode);
6185 i1 = gen_reg_rtx (DImode);
6186 limit = gen_reg_rtx (mode);
6187 f0 = gen_reg_rtx (mode);
6189 emit_move_insn (limit,
6190 CONST_DOUBLE_FROM_REAL_VALUE (
6191 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6192 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6194 emit_insn (gen_rtx_SET (VOIDmode,
6195 out,
6196 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6197 emit_jump_insn (gen_jump (donelab));
6198 emit_barrier ();
6200 emit_label (neglab);
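/* IN is >= 2^63: subtract 2^63 before converting, then flip the sign
   bit of the integer result to add 2^63 back.  */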
6202 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6203 emit_insn (gen_rtx_SET (VOIDmode,
6204 i0,
6205 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6206 emit_insn (gen_movdi (i1, const1_rtx));
6207 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6208 emit_insn (gen_xordi3 (out, i0, i1));
6210 emit_label (donelab);
6213 /* Return the string to output a conditional branch to LABEL, testing
6214 register REG. LABEL is the operand number of the label; REG is the
6215 operand number of the reg. OP is the conditional expression. The mode
6216 of REG says what kind of comparison we made.
6218 DEST is the destination insn (i.e. the label), INSN is the source.
6220 REVERSED is nonzero if we should reverse the sense of the comparison.
6222 ANNUL is nonzero if we should generate an annulling branch. */
6224 const char *
6225 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6226 int annul, rtx insn)
6228 static char string[64];
6229 enum rtx_code code = GET_CODE (op);
6230 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6231 rtx note;
6232 int far;
6233 char *p;
6235 /* Branch-on-register instructions are limited to +-128KB.  If it is
6236 too far away, change
6238 brnz,pt %g1, .LC30
6240 to
6242 brz,pn %g1, .+12
6243 nop
6244 ba,pt %xcc, .LC30
6246 and
6248 brgez,a,pn %o1, .LC29
6250 to
6252 brlz,pt %o1, .+16
6253 nop
6254 ba,pt %xcc, .LC29  */
6256 far = get_attr_length (insn) >= 3;
6258 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6259 if (reversed ^ far)
6260 code = reverse_condition (code);
6262 /* Only 64 bit versions of these instructions exist. */
6263 gcc_assert (mode == DImode);
6265 /* Start by writing the branch condition. */
6267 switch (code)
6269 case NE:
6270 strcpy (string, "brnz");
6271 break;
6273 case EQ:
6274 strcpy (string, "brz");
6275 break;
6277 case GE:
6278 strcpy (string, "brgez");
6279 break;
6281 case LT:
6282 strcpy (string, "brlz");
6283 break;
6285 case LE:
6286 strcpy (string, "brlez");
6287 break;
6289 case GT:
6290 strcpy (string, "brgz");
6291 break;
6293 default:
6294 gcc_unreachable ();
6297 p = strchr (string, '\0');
6299 /* Now add the annulling, reg, label, and nop. */
6300 if (annul && ! far)
6302 strcpy (p, ",a");
6303 p += 2;
6306 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6308 strcpy (p,
6309 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6310 ? ",pt" : ",pn");
6311 p += 3;
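/* Pad with a tab while the mnemonic is still within the 8-column opcode
   field, otherwise with a single space.  */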
6314 *p = p < string + 8 ? '\t' : ' ';
6315 p++;
6316 *p++ = '%';
6317 *p++ = '0' + reg;
6318 *p++ = ',';
6319 *p++ = ' ';
6320 if (far)
6322 int veryfar = 1, delta;
6324 if (INSN_ADDRESSES_SET_P ())
6326 delta = (INSN_ADDRESSES (INSN_UID (dest))
6327 - INSN_ADDRESSES (INSN_UID (insn)));
6328 /* Leave some instructions for "slop". */
6329 if (delta >= -260000 && delta < 260000)
6330 veryfar = 0;
6333 strcpy (p, ".+12\n\t nop\n\t");
6334 /* Skip the next insn if requested or if we know that it will be a nop
6335 (patch the "2" in ".+12" so the inverse branch targets .+16, hopping over the delay slot as well).  */
6336 if (annul || ! final_sequence)
6337 p[3] = '6';
6338 p += 12;
6339 if (veryfar)
6341 strcpy (p, "b\t");
6342 p += 2;
6344 else
6346 strcpy (p, "ba,pt\t%%xcc, ");
6347 p += 13;
6350 *p++ = '%';
6351 *p++ = 'l';
6352 *p++ = '0' + label;
6353 *p++ = '%';
6354 *p++ = '#';
6355 *p = '\0';
6357 return string;
6360 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6361 Such instructions cannot be used in the delay slot of the return insn on v9.
6362 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6363 */
6365 static int
6366 epilogue_renumber (register rtx *where, int test)
6368 register const char *fmt;
6369 register int i;
6370 register enum rtx_code code;
6372 if (*where == 0)
6373 return 0;
6375 code = GET_CODE (*where);
6377 switch (code)
6379 case REG:
6380 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6381 return 1;
6382 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6383 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
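/* Fall through: nothing more to do for a plain hard register.  */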
6384 case SCRATCH:
6385 case CC0:
6386 case PC:
6387 case CONST_INT:
6388 case CONST_DOUBLE:
6389 return 0;
6391 /* Do not replace the frame pointer with the stack pointer because
6392 it can cause the delayed instruction to load below the stack.
6393 This occurs when instructions like:
6395 (set (reg/i:SI 24 %i0)
6396 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6397 (const_int -20 [0xffffffec])) 0))
6399 are in the return delay slot.  */
6400 case PLUS:
6401 if (GET_CODE (XEXP (*where, 0)) == REG
6402 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6403 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6404 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6405 return 1;
6406 break;
6408 case MEM:
6409 if (SPARC_STACK_BIAS
6410 && GET_CODE (XEXP (*where, 0)) == REG
6411 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6412 return 1;
6413 break;
6415 default:
6416 break;
6419 fmt = GET_RTX_FORMAT (code);
6421 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6423 if (fmt[i] == 'E')
6425 register int j;
6426 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6427 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6428 return 1;
6430 else if (fmt[i] == 'e'
6431 && epilogue_renumber (&(XEXP (*where, i)), test))
6432 return 1;
6434 return 0;
6437 /* Leaf functions and non-leaf functions have different needs. */
6439 static const int
6440 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6442 static const int
6443 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6445 static const int *const reg_alloc_orders[] = {
6446 reg_leaf_alloc_order,
6447 reg_nonleaf_alloc_order};
6449 void
6450 order_regs_for_local_alloc (void)
6452 static int last_order_nonleaf = 1;
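/* Register 15 is %o7, which calls clobber with their return address,
   so it is live precisely when the function is not a leaf.  */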
6454 if (regs_ever_live[15] != last_order_nonleaf)
6456 last_order_nonleaf = !last_order_nonleaf;
6457 memcpy ((char *) reg_alloc_order,
6458 (const char *) reg_alloc_orders[last_order_nonleaf],
6459 FIRST_PSEUDO_REGISTER * sizeof (int));
6463 /* Return 1 if REG and MEM are legitimate enough to allow the various
6464 mem<-->reg splits to be run. */
6466 int
6467 sparc_splitdi_legitimate (rtx reg, rtx mem)
6469 /* Punt if we are here by mistake. */
6470 gcc_assert (reload_completed);
6472 /* We must have an offsettable memory reference. */
6473 if (! offsettable_memref_p (mem))
6474 return 0;
6476 /* If we have legitimate args for ldd/std, we do not want
6477 the split to happen. */
6478 if ((REGNO (reg) % 2) == 0
6479 && mem_min_alignment (mem, 8))
6480 return 0;
6482 /* Success. */
6483 return 1;
6486 /* Return 1 if x and y are some kind of REG and they refer to
6487 different hard registers. This test is guaranteed to be
6488 run after reload. */
6490 int
6491 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6493 if (GET_CODE (x) != REG)
6494 return 0;
6495 if (GET_CODE (y) != REG)
6496 return 0;
6497 if (REGNO (x) == REGNO (y))
6498 return 0;
6499 return 1;
6502 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6503 This makes them candidates for using ldd and std insns.
6505 Note reg1 and reg2 *must* be hard registers. */
6507 int
6508 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6510 /* We might have been passed a SUBREG. */
6511 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6512 return 0;
6514 if (REGNO (reg1) % 2 != 0)
6515 return 0;
6517 /* Integer ldd is deprecated in SPARC V9 */
6518 if (TARGET_V9 && REGNO (reg1) < 32)
6519 return 0;
6521 return (REGNO (reg1) == REGNO (reg2) - 1);
6524 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6525 an ldd or std insn.
6527 This can only happen when addr1 and addr2, the addresses in mem1
6528 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6529 addr1 must also be aligned on a 64-bit boundary.
6531 Also iff dependent_reg_rtx is not null it should not be used to
6532 compute the address for mem1, i.e. we cannot optimize a sequence
6533 like:
6534 ld [%o0], %o0
6535 ld [%o0 + 4], %o1
6536 to
6537 ldd [%o0], %o0
6538 nor:
6539 ld [%g3 + 4], %g3
6540 ld [%g3], %g2
6541 to
6542 ldd [%g3], %g2
6544 But, note that the transformation from:
6545 ld [%g2 + 4], %g3
6546 ld [%g2], %g2
6547 to
6548 ldd [%g2], %g2
6549 is perfectly fine. Thus, the peephole2 patterns always pass us
6550 the destination register of the first load, never the second one.
6552 For stores we don't have a similar problem, so dependent_reg_rtx is
6553 NULL_RTX. */
6555 int
6556 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6558 rtx addr1, addr2;
6559 unsigned int reg1;
6560 HOST_WIDE_INT offset1;
6562 /* The mems cannot be volatile. */
6563 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6564 return 0;
6566 /* MEM1 should be aligned on a 64-bit boundary. */
6567 if (MEM_ALIGN (mem1) < 64)
6568 return 0;
6570 addr1 = XEXP (mem1, 0);
6571 addr2 = XEXP (mem2, 0);
6573 /* Extract a register number and offset (if used) from the first addr. */
6574 if (GET_CODE (addr1) == PLUS)
6576 /* If not a REG, return zero. */
6577 if (GET_CODE (XEXP (addr1, 0)) != REG)
6578 return 0;
6579 else
6581 reg1 = REGNO (XEXP (addr1, 0));
6582 /* The offset must be constant! */
6583 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6584 return 0;
6585 offset1 = INTVAL (XEXP (addr1, 1));
6588 else if (GET_CODE (addr1) != REG)
6589 return 0;
6590 else
6592 reg1 = REGNO (addr1);
6593 /* This was a simple (mem (reg)) expression. Offset is 0. */
6594 offset1 = 0;
6597 /* Make sure the second address is of the form (plus (reg) (const_int)).  */
6598 if (GET_CODE (addr2) != PLUS)
6599 return 0;
6601 if (GET_CODE (XEXP (addr2, 0)) != REG
6602 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6603 return 0;
6605 if (reg1 != REGNO (XEXP (addr2, 0)))
6606 return 0;
6608 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6609 return 0;
6611 /* The first offset must be evenly divisible by 8 to ensure the
6612 address is 64 bit aligned. */
6613 if (offset1 % 8 != 0)
6614 return 0;
6616 /* The offset for the second addr must be 4 more than the first addr. */
6617 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6618 return 0;
6620 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6621 instructions. */
6622 return 1;
6625 /* Return 1 if reg is a pseudo, or is the first register in
6626 a hard register pair. This makes it a candidate for use in
6627 ldd and std insns. */
6629 int
6630 register_ok_for_ldd (rtx reg)
6632 /* We might have been passed a SUBREG. */
6633 if (GET_CODE (reg) != REG)
6634 return 0;
6636 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6637 return (REGNO (reg) % 2 == 0);
6638 else
6639 return 1;
6642 /* Print operand X (an rtx) in assembler syntax to file FILE.
6643 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6644 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6646 void
6647 print_operand (FILE *file, rtx x, int code)
6649 switch (code)
6651 case '#':
6652 /* Output an insn in a delay slot. */
6653 if (final_sequence)
6654 sparc_indent_opcode = 1;
6655 else
6656 fputs ("\n\t nop", file);
6657 return;
6658 case '*':
6659 /* Output an annul flag if there's nothing for the delay slot and we
6660 are optimizing. This is always used with '(' below.
6661 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6662 this is a dbx bug. So, we only do this when optimizing.
6663 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6664 Always emit a nop in case the next instruction is a branch. */
6665 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6666 fputs (",a", file);
6667 return;
6668 case '(':
6669 /* Output a 'nop' if there's nothing for the delay slot and we are
6670 not optimizing. This is always used with '*' above. */
6671 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6672 fputs ("\n\t nop", file);
6673 else if (final_sequence)
6674 sparc_indent_opcode = 1;
6675 return;
6676 case ')':
6677 /* Output the right displacement from the saved PC on function return.
6678 The caller may have placed an "unimp" insn immediately after the call
6679 so we have to account for it. This insn is used in the 32-bit ABI
6680 when calling a function that returns a non zero-sized structure. The
6681 64-bit ABI doesn't have it. Be careful to have this test be the same
6682 as that used on the call. The exception here is that when
6683 sparc_std_struct_return is enabled, the psABI is followed exactly
6684 and the adjustment is made by the code in sparc_struct_value_rtx.
6685 The call emitted is the same when sparc_std_struct_return is
6686 present. */
6687 if (! TARGET_ARCH64
6688 && current_function_returns_struct
6689 && ! sparc_std_struct_return
6690 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6691 == INTEGER_CST)
6692 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6693 fputs ("12", file);
6694 else
6695 fputc ('8', file);
6696 return;
6697 case '_':
6698 /* Output the Embedded Medium/Anywhere code model base register. */
6699 fputs (EMBMEDANY_BASE_REG, file);
6700 return;
6701 case '&':
6702 /* Print some local dynamic TLS name. */
6703 assemble_name (file, get_some_local_dynamic_name ());
6704 return;
6706 case 'Y':
6707 /* Adjust the operand to take into account a RESTORE operation. */
6708 if (GET_CODE (x) == CONST_INT)
6709 break;
6710 else if (GET_CODE (x) != REG)
6711 output_operand_lossage ("invalid %%Y operand");
6712 else if (REGNO (x) < 8)
6713 fputs (reg_names[REGNO (x)], file);
6714 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6715 fputs (reg_names[REGNO (x)-16], file);
6716 else
6717 output_operand_lossage ("invalid %%Y operand");
6718 return;
6719 case 'L':
6720 /* Print out the low order register name of a register pair. */
6721 if (WORDS_BIG_ENDIAN)
6722 fputs (reg_names[REGNO (x)+1], file);
6723 else
6724 fputs (reg_names[REGNO (x)], file);
6725 return;
6726 case 'H':
6727 /* Print out the high order register name of a register pair. */
6728 if (WORDS_BIG_ENDIAN)
6729 fputs (reg_names[REGNO (x)], file);
6730 else
6731 fputs (reg_names[REGNO (x)+1], file);
6732 return;
6733 case 'R':
6734 /* Print out the second register name of a register pair or quad.
6735 I.e., R (%o0) => %o1. */
6736 fputs (reg_names[REGNO (x)+1], file);
6737 return;
6738 case 'S':
6739 /* Print out the third register name of a register quad.
6740 I.e., S (%o0) => %o2. */
6741 fputs (reg_names[REGNO (x)+2], file);
6742 return;
6743 case 'T':
6744 /* Print out the fourth register name of a register quad.
6745 I.e., T (%o0) => %o3. */
6746 fputs (reg_names[REGNO (x)+3], file);
6747 return;
6748 case 'x':
6749 /* Print a condition code register. */
6750 if (REGNO (x) == SPARC_ICC_REG)
6752 /* We don't handle CC[X]_NOOVmode because they're not supposed
6753 to occur here. */
6754 if (GET_MODE (x) == CCmode)
6755 fputs ("%icc", file);
6756 else if (GET_MODE (x) == CCXmode)
6757 fputs ("%xcc", file);
6758 else
6759 gcc_unreachable ();
6761 else
6762 /* %fccN register */
6763 fputs (reg_names[REGNO (x)], file);
6764 return;
6765 case 'm':
6766 /* Print the operand's address only. */
6767 output_address (XEXP (x, 0));
6768 return;
6769 case 'r':
6770 /* In this case we need a register. Use %g0 if the
6771 operand is const0_rtx. */
6772 if (x == const0_rtx
6773 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6775 fputs ("%g0", file);
6776 return;
6778 else
6779 break;
6781 case 'A':
6782 switch (GET_CODE (x))
6784 case IOR: fputs ("or", file); break;
6785 case AND: fputs ("and", file); break;
6786 case XOR: fputs ("xor", file); break;
6787 default: output_operand_lossage ("invalid %%A operand");
6789 return;
6791 case 'B':
6792 switch (GET_CODE (x))
6794 case IOR: fputs ("orn", file); break;
6795 case AND: fputs ("andn", file); break;
6796 case XOR: fputs ("xnor", file); break;
6797 default: output_operand_lossage ("invalid %%B operand");
6799 return;
6801 /* These are used by the conditional move instructions. */
6802 case 'c' :
6803 case 'C':
6805 enum rtx_code rc = GET_CODE (x);
6807 if (code == 'c')
6809 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6810 if (mode == CCFPmode || mode == CCFPEmode)
6811 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6812 else
6813 rc = reverse_condition (GET_CODE (x));
6815 switch (rc)
6817 case NE: fputs ("ne", file); break;
6818 case EQ: fputs ("e", file); break;
6819 case GE: fputs ("ge", file); break;
6820 case GT: fputs ("g", file); break;
6821 case LE: fputs ("le", file); break;
6822 case LT: fputs ("l", file); break;
6823 case GEU: fputs ("geu", file); break;
6824 case GTU: fputs ("gu", file); break;
6825 case LEU: fputs ("leu", file); break;
6826 case LTU: fputs ("lu", file); break;
6827 case LTGT: fputs ("lg", file); break;
6828 case UNORDERED: fputs ("u", file); break;
6829 case ORDERED: fputs ("o", file); break;
6830 case UNLT: fputs ("ul", file); break;
6831 case UNLE: fputs ("ule", file); break;
6832 case UNGT: fputs ("ug", file); break;
6833 case UNGE: fputs ("uge", file); break;
6834 case UNEQ: fputs ("ue", file); break;
6835 default: output_operand_lossage (code == 'c'
6836 ? "invalid %%c operand"
6837 : "invalid %%C operand");
6839 return;
6842 /* These are used by the movr instruction pattern. */
6843 case 'd':
6844 case 'D':
6846 enum rtx_code rc = (code == 'd'
6847 ? reverse_condition (GET_CODE (x))
6848 : GET_CODE (x));
6849 switch (rc)
6851 case NE: fputs ("ne", file); break;
6852 case EQ: fputs ("e", file); break;
6853 case GE: fputs ("gez", file); break;
6854 case LT: fputs ("lz", file); break;
6855 case LE: fputs ("lez", file); break;
6856 case GT: fputs ("gz", file); break;
6857 default: output_operand_lossage (code == 'd'
6858 ? "invalid %%d operand"
6859 : "invalid %%D operand");
6861 return;
6864 case 'b':
6866 /* Print a sign-extended character. */
6867 int i = trunc_int_for_mode (INTVAL (x), QImode);
6868 fprintf (file, "%d", i);
6869 return;
6872 case 'f':
6873 /* Operand must be a MEM; write its address. */
6874 if (GET_CODE (x) != MEM)
6875 output_operand_lossage ("invalid %%f operand");
6876 output_address (XEXP (x, 0));
6877 return;
6879 case 's':
6881 /* Print a sign-extended 32-bit value. */
6882 HOST_WIDE_INT i;
6883 if (GET_CODE(x) == CONST_INT)
6884 i = INTVAL (x);
6885 else if (GET_CODE(x) == CONST_DOUBLE)
6886 i = CONST_DOUBLE_LOW (x);
6887 else
6889 output_operand_lossage ("invalid %%s operand");
6890 return;
6892 i = trunc_int_for_mode (i, SImode);
6893 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6894 return;
6897 case 0:
6898 /* Do nothing special. */
6899 break;
6901 default:
6902 /* Undocumented flag. */
6903 output_operand_lossage ("invalid operand output code");
6906 if (GET_CODE (x) == REG)
6907 fputs (reg_names[REGNO (x)], file);
6908 else if (GET_CODE (x) == MEM)
6910 fputc ('[', file);
6911 /* Poor Sun assembler doesn't understand absolute addressing. */
6912 if (CONSTANT_P (XEXP (x, 0)))
6913 fputs ("%g0+", file);
6914 output_address (XEXP (x, 0));
6915 fputc (']', file);
6917 else if (GET_CODE (x) == HIGH)
6919 fputs ("%hi(", file);
6920 output_addr_const (file, XEXP (x, 0));
6921 fputc (')', file);
6923 else if (GET_CODE (x) == LO_SUM)
6925 print_operand (file, XEXP (x, 0), 0);
6926 if (TARGET_CM_MEDMID)
6927 fputs ("+%l44(", file);
6928 else
6929 fputs ("+%lo(", file);
6930 output_addr_const (file, XEXP (x, 1));
6931 fputc (')', file);
6933 else if (GET_CODE (x) == CONST_DOUBLE
6934 && (GET_MODE (x) == VOIDmode
6935 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6937 if (CONST_DOUBLE_HIGH (x) == 0)
6938 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6939 else if (CONST_DOUBLE_HIGH (x) == -1
6940 && CONST_DOUBLE_LOW (x) < 0)
6941 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
6942 else
6943 output_operand_lossage ("long long constant not a valid immediate operand");
6945 else if (GET_CODE (x) == CONST_DOUBLE)
6946 output_operand_lossage ("floating point constant not a valid immediate operand");
6947 else { output_addr_const (file, x); }
6950 /* Target hook for assembling integer objects. The sparc version has
6951 special handling for aligned DI-mode objects. */
6953 static bool
6954 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
6956 /* ??? We only output .xword's for symbols and only then in environments
6957 where the assembler can handle them. */
6958 if (aligned_p && size == 8
6959 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
6961 if (TARGET_V9)
6963 assemble_integer_with_op ("\t.xword\t", x);
6964 return true;
6966 else
6968 assemble_aligned_integer (4, const0_rtx);
6969 assemble_aligned_integer (4, x);
6970 return true;
6973 return default_assemble_integer (x, size, aligned_p);
6976 /* Return the value of a code used in the .proc pseudo-op that says
6977 what kind of result this function returns. For non-C types, we pick
6978 the closest C type. */
6980 #ifndef SHORT_TYPE_SIZE
6981 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
6982 #endif
6984 #ifndef INT_TYPE_SIZE
6985 #define INT_TYPE_SIZE BITS_PER_WORD
6986 #endif
6988 #ifndef LONG_TYPE_SIZE
6989 #define LONG_TYPE_SIZE BITS_PER_WORD
6990 #endif
6992 #ifndef LONG_LONG_TYPE_SIZE
6993 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
6994 #endif
6996 #ifndef FLOAT_TYPE_SIZE
6997 #define FLOAT_TYPE_SIZE BITS_PER_WORD
6998 #endif
7000 #ifndef DOUBLE_TYPE_SIZE
7001 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7002 #endif
7004 #ifndef LONG_DOUBLE_TYPE_SIZE
7005 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7006 #endif
7008 unsigned long
7009 sparc_type_code (register tree type)
7011 register unsigned long qualifiers = 0;
7012 register unsigned shift;
7014 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7015 setting more, since some assemblers will give an error for this. Also,
7016 we must be careful to avoid shifts of 32 bits or more to avoid getting
7017 unpredictable results. */
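/* For example, "unsigned short *" records POINTER_TYPE at shift 6 and
   then hits the INTEGER_TYPE case, yielding (1 << 6) | 13 == 77.  */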
7019 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7021 switch (TREE_CODE (type))
7023 case ERROR_MARK:
7024 return qualifiers;
7026 case ARRAY_TYPE:
7027 qualifiers |= (3 << shift);
7028 break;
7030 case FUNCTION_TYPE:
7031 case METHOD_TYPE:
7032 qualifiers |= (2 << shift);
7033 break;
7035 case POINTER_TYPE:
7036 case REFERENCE_TYPE:
7037 case OFFSET_TYPE:
7038 qualifiers |= (1 << shift);
7039 break;
7041 case RECORD_TYPE:
7042 return (qualifiers | 8);
7044 case UNION_TYPE:
7045 case QUAL_UNION_TYPE:
7046 return (qualifiers | 9);
7048 case ENUMERAL_TYPE:
7049 return (qualifiers | 10);
7051 case VOID_TYPE:
7052 return (qualifiers | 16);
7054 case INTEGER_TYPE:
7055 /* If this is a range type, consider it to be the underlying
7056 type. */
7057 if (TREE_TYPE (type) != 0)
7058 break;
7060 /* Carefully distinguish all the standard types of C,
7061 without messing up if the language is not C. We do this by
7062 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7063 look at both the names and the above fields, but that's redundant.
7064 Any type whose size is between two C types will be considered
7065 to be the wider of the two types. Also, we do not have a
7066 special code to use for "long long", so anything wider than
7067 long is treated the same. Note that we can't distinguish
7068 between "int" and "long" in this code if they are the same
7069 size, but that's fine, since neither can the assembler. */
7071 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7072 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7074 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7075 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7077 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7078 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7080 else
7081 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7083 case REAL_TYPE:
7084 /* If this is a range type, consider it to be the underlying
7085 type. */
7086 if (TREE_TYPE (type) != 0)
7087 break;
7089 /* Carefully distinguish all the standard types of C,
7090 without messing up if the language is not C. */
7092 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7093 return (qualifiers | 6);
7095 else
7096 return (qualifiers | 7);
7098 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7099 /* ??? We need to distinguish between double and float complex types,
7100 but I don't know how yet because I can't reach this code from
7101 existing front-ends. */
7102 return (qualifiers | 7); /* Who knows? */
7104 case VECTOR_TYPE:
7105 case BOOLEAN_TYPE: /* Boolean truth value type. */
7106 case LANG_TYPE: /* ? */
7107 return qualifiers;
7109 default:
7110 gcc_unreachable (); /* Not a type! */
7114 return qualifiers;
7117 /* Nested function support. */
7119 /* Emit RTL insns to initialize the variable parts of a trampoline.
7120 FNADDR is an RTX for the address of the function's pure code.
7121 CXT is an RTX for the static chain value for the function.
7123 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7124 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7125 (to store insns). This is a bit excessive. Perhaps a different
7126 mechanism would be better here.
7128 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7130 void
7131 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7133 /* SPARC 32-bit trampoline:
7135 sethi %hi(fn), %g1
7136 sethi %hi(static), %g2
7137 jmp %g1+%lo(fn)
7138 or %g2, %lo(static), %g2
7140 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7141 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7142 */
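/* The four stores below assemble those insns in place: 0x03000000 and
   0x05000000 are "sethi 0, %g1" and "sethi 0, %g2", whose imm22 fields
   receive FNADDR >> 10 and CXT >> 10, while 0x81c06000 (jmp %g1+0) and
   0x8410a000 (or %g2, 0, %g2) take the low 10 bits in their imm13
   fields.  */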
7144 emit_move_insn
7145 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7146 expand_binop (SImode, ior_optab,
7147 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7148 size_int (10), 0, 1),
7149 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7150 NULL_RTX, 1, OPTAB_DIRECT));
7152 emit_move_insn
7153 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7154 expand_binop (SImode, ior_optab,
7155 expand_shift (RSHIFT_EXPR, SImode, cxt,
7156 size_int (10), 0, 1),
7157 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7158 NULL_RTX, 1, OPTAB_DIRECT));
7160 emit_move_insn
7161 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7162 expand_binop (SImode, ior_optab,
7163 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7164 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7165 NULL_RTX, 1, OPTAB_DIRECT));
7167 emit_move_insn
7168 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7169 expand_binop (SImode, ior_optab,
7170 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7171 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7172 NULL_RTX, 1, OPTAB_DIRECT));
7174 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7175 aligned on a 16 byte boundary so one flush clears it all. */
7176 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
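/* Other CPUs may flush only a doubleword at a time, so flush the
   second half of the 16-byte trampoline as well.  */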
7177 if (sparc_cpu != PROCESSOR_ULTRASPARC
7178 && sparc_cpu != PROCESSOR_ULTRASPARC3
7179 && sparc_cpu != PROCESSOR_NIAGARA)
7180 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7181 plus_constant (tramp, 8)))));
7183 /* Call __enable_execute_stack after writing onto the stack to make sure
7184 the stack address is accessible. */
7185 #ifdef ENABLE_EXECUTE_STACK
7186 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7187 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7188 #endif
7192 /* The 64-bit version is simpler because it makes more sense to load the
7193 values as "immediate" data out of the trampoline. It's also easier since
7194 we can read the PC without clobbering a register. */
7196 void
7197 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7199 /* SPARC 64-bit trampoline:
7201 rd %pc, %g1
7202 ldx [%g1+24], %g5
7203 jmp %g5
7204 ldx [%g1+16], %g5
7205 +16 bytes data
7206 */
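/* The four SImode constants stored below are the encodings of the four
   insns above, in order.  */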
7208 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7209 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7210 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7211 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7212 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7213 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7214 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7215 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7216 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7217 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7218 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7220 if (sparc_cpu != PROCESSOR_ULTRASPARC
7221 && sparc_cpu != PROCESSOR_ULTRASPARC3
7222 && sparc_cpu != PROCESSOR_NIAGARA)
7223 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7225 /* Call __enable_execute_stack after writing onto the stack to make sure
7226 the stack address is accessible. */
7227 #ifdef ENABLE_EXECUTE_STACK
7228 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7229 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7230 #endif
7233 /* Adjust the cost of a scheduling dependency. Return the new cost of
7234 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7236 static int
7237 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7239 enum attr_type insn_type;
7241 if (! recog_memoized (insn))
7242 return 0;
7244 insn_type = get_attr_type (insn);
7246 if (REG_NOTE_KIND (link) == 0)
7248 /* Data dependency; DEP_INSN writes a register that INSN reads some
7249 cycles later. */
7251 /* If a load, then the dependence must be on the memory address;
7252 add an extra "cycle".  Note that the cost could be two cycles
7253 if the reg was written late in an instruction group; we can't tell
7254 here.  */
7255 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7256 return cost + 3;
7258 /* Get the delay only if the address of the store is the dependence. */
7259 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7261 rtx pat = PATTERN(insn);
7262 rtx dep_pat = PATTERN (dep_insn);
7264 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7265 return cost; /* This should not happen! */
7267 /* The dependency between the two instructions was on the data that
7268 is being stored. Assume that this implies that the address of the
7269 store is not dependent. */
7270 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7271 return cost;
7273 return cost + 3; /* An approximation. */
7276 /* A shift instruction cannot receive its data from an instruction
7277 in the same cycle; add a one cycle penalty. */
7278 if (insn_type == TYPE_SHIFT)
7279 return cost + 3; /* Split before cascade into shift. */
7281 else
7283 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7284 INSN writes some cycles later. */
7286 /* These are only significant for the fpu unit; writing a fp reg before
7287 the fpu has finished with it stalls the processor. */
7289 /* Reusing an integer register causes no problems. */
7290 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7291 return 0;
7294 return cost;
7297 static int
7298 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7300 enum attr_type insn_type, dep_type;
7301 rtx pat = PATTERN(insn);
7302 rtx dep_pat = PATTERN (dep_insn);
7304 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7305 return cost;
7307 insn_type = get_attr_type (insn);
7308 dep_type = get_attr_type (dep_insn);
7310 switch (REG_NOTE_KIND (link))
7312 case 0:
7313 /* Data dependency; DEP_INSN writes a register that INSN reads some
7314 cycles later. */
7316 switch (insn_type)
7318 case TYPE_STORE:
7319 case TYPE_FPSTORE:
7320 /* Get the delay iff the address of the store is the dependence. */
7321 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7322 return cost;
7324 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7325 return cost;
7326 return cost + 3;
7328 case TYPE_LOAD:
7329 case TYPE_SLOAD:
7330 case TYPE_FPLOAD:
7331 /* If a load, then the dependence must be on the memory address. If
7332 the addresses aren't equal, then it might be a false dependency */
7333 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7335 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7336 || GET_CODE (SET_DEST (dep_pat)) != MEM
7337 || GET_CODE (SET_SRC (pat)) != MEM
7338 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7339 XEXP (SET_SRC (pat), 0)))
7340 return cost + 2;
7342 return cost + 8;
7344 break;
7346 case TYPE_BRANCH:
7347 /* Compare to branch latency is 0. There is no benefit from
7348 separating compare and branch. */
7349 if (dep_type == TYPE_COMPARE)
7350 return 0;
7351 /* Floating point compare to branch latency is less than
7352 compare to conditional move. */
7353 if (dep_type == TYPE_FPCMP)
7354 return cost - 1;
7355 break;
7356 default:
7357 break;
7359 break;
7361 case REG_DEP_ANTI:
7362 /* Anti-dependencies only penalize the fpu unit. */
7363 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7364 return 0;
7365 break;
7367 default:
7368 break;
7371 return cost;
7374 static int
7375 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7377 switch (sparc_cpu)
7379 case PROCESSOR_SUPERSPARC:
7380 cost = supersparc_adjust_cost (insn, link, dep, cost);
7381 break;
7382 case PROCESSOR_HYPERSPARC:
7383 case PROCESSOR_SPARCLITE86X:
7384 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7385 break;
7386 default:
7387 break;
7389 return cost;
7392 static void
7393 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7394 int sched_verbose ATTRIBUTE_UNUSED,
7395 int max_ready ATTRIBUTE_UNUSED)
7399 static int
7400 sparc_use_sched_lookahead (void)
7402 if (sparc_cpu == PROCESSOR_NIAGARA)
7403 return 0;
7404 if (sparc_cpu == PROCESSOR_ULTRASPARC
7405 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7406 return 4;
7407 if ((1 << sparc_cpu) &
7408 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7409 (1 << PROCESSOR_SPARCLITE86X)))
7410 return 3;
7411 return 0;
7414 static int
7415 sparc_issue_rate (void)
7417 switch (sparc_cpu)
7419 case PROCESSOR_NIAGARA:
7420 default:
7421 return 1;
7422 case PROCESSOR_V9:
7423 /* Assume V9 processors are capable of at least dual-issue. */
7424 return 2;
7425 case PROCESSOR_SUPERSPARC:
7426 return 3;
7427 case PROCESSOR_HYPERSPARC:
7428 case PROCESSOR_SPARCLITE86X:
7429 return 2;
7430 case PROCESSOR_ULTRASPARC:
7431 case PROCESSOR_ULTRASPARC3:
7432 return 4;
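/* Return 1 if INSN, a single SET, is known to zero out the high 32 bits
   of its result, -1 if it sign-extends from SImode, and 0 if the high
   bits are unknown; helper for sparc_check_64 below.  */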
7436 static int
7437 set_extends (rtx insn)
7439 register rtx pat = PATTERN (insn);
7441 switch (GET_CODE (SET_SRC (pat)))
7443 /* Load and some shift instructions zero extend. */
7444 case MEM:
7445 case ZERO_EXTEND:
7446 /* sethi clears the high bits */
7447 case HIGH:
7448 /* LO_SUM is used with sethi. sethi cleared the high
7449 bits and the values used with lo_sum are positive */
7450 case LO_SUM:
7451 /* Store flag stores 0 or 1 */
7452 case LT: case LTU:
7453 case GT: case GTU:
7454 case LE: case LEU:
7455 case GE: case GEU:
7456 case EQ:
7457 case NE:
7458 return 1;
7459 case AND:
7461 rtx op0 = XEXP (SET_SRC (pat), 0);
7462 rtx op1 = XEXP (SET_SRC (pat), 1);
7463 if (GET_CODE (op1) == CONST_INT)
7464 return INTVAL (op1) >= 0;
7465 if (GET_CODE (op0) != REG)
7466 return 0;
7467 if (sparc_check_64 (op0, insn) == 1)
7468 return 1;
7469 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7471 case IOR:
7472 case XOR:
7474 rtx op0 = XEXP (SET_SRC (pat), 0);
7475 rtx op1 = XEXP (SET_SRC (pat), 1);
7476 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7477 return 0;
7478 if (GET_CODE (op1) == CONST_INT)
7479 return INTVAL (op1) >= 0;
7480 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7482 case LSHIFTRT:
7483 return GET_MODE (SET_SRC (pat)) == SImode;
7484 /* Positive integers leave the high bits zero. */
7485 case CONST_DOUBLE:
7486 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7487 case CONST_INT:
7488 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7489 case ASHIFTRT:
7490 case SIGN_EXTEND:
7491 return - (GET_MODE (SET_SRC (pat)) == SImode);
7492 case REG:
7493 return sparc_check_64 (SET_SRC (pat), insn);
7494 default:
7495 return 0;
7499 /* We _ought_ to have only one kind per function, but... */
7500 static GTY(()) rtx sparc_addr_diff_list;
7501 static GTY(()) rtx sparc_addr_list;
7503 void
7504 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7506 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7507 if (diff)
7508 sparc_addr_diff_list
7509 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7510 else
7511 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7514 static void
7515 sparc_output_addr_vec (rtx vec)
7517 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7518 int idx, vlen = XVECLEN (body, 0);
7520 #ifdef ASM_OUTPUT_ADDR_VEC_START
7521 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7522 #endif
7524 #ifdef ASM_OUTPUT_CASE_LABEL
7525 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7526 NEXT_INSN (lab));
7527 #else
7528 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7529 #endif
7531 for (idx = 0; idx < vlen; idx++)
7533 ASM_OUTPUT_ADDR_VEC_ELT
7534 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7537 #ifdef ASM_OUTPUT_ADDR_VEC_END
7538 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7539 #endif
7542 static void
7543 sparc_output_addr_diff_vec (rtx vec)
7545 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7546 rtx base = XEXP (XEXP (body, 0), 0);
7547 int idx, vlen = XVECLEN (body, 1);
7549 #ifdef ASM_OUTPUT_ADDR_VEC_START
7550 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7551 #endif
7553 #ifdef ASM_OUTPUT_CASE_LABEL
7554 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7555 NEXT_INSN (lab));
7556 #else
7557 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7558 #endif
7560 for (idx = 0; idx < vlen; idx++)
7562 ASM_OUTPUT_ADDR_DIFF_ELT
7563 (asm_out_file,
7564 body,
7565 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7566 CODE_LABEL_NUMBER (base));
7569 #ifdef ASM_OUTPUT_ADDR_VEC_END
7570 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7571 #endif
7574 static void
7575 sparc_output_deferred_case_vectors (void)
7577 rtx t;
7578 int align;
7580 if (sparc_addr_list == NULL_RTX
7581 && sparc_addr_diff_list == NULL_RTX)
7582 return;
7584 /* Align to cache line in the function's code section. */
7585 switch_to_section (current_function_section ());
7587 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7588 if (align > 0)
7589 ASM_OUTPUT_ALIGN (asm_out_file, align);
7591 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7592 sparc_output_addr_vec (XEXP (t, 0));
7593 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7594 sparc_output_addr_diff_vec (XEXP (t, 0));
7596 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7599 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7600 unknown. Return 1 if the high bits are zero, -1 if the register is
7601 sign extended. */
7602 int
7603 sparc_check_64 (rtx x, rtx insn)
7605 /* If a register is set only once it is safe to ignore insns this
7606 code does not know how to handle. The loop will either recognize
7607 the single set and return the correct value or fail to recognize
7608 it and return 0. */
7609 int set_once = 0;
7610 rtx y = x;
7612 gcc_assert (GET_CODE (x) == REG);
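/* For a DImode register, inspect the subword holding the low 32 bits;
   on a big-endian target that is the second register of the pair.  */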
7614 if (GET_MODE (x) == DImode)
7615 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7617 if (flag_expensive_optimizations
7618 && REG_N_SETS (REGNO (y)) == 1)
7619 set_once = 1;
7621 if (insn == 0)
7623 if (set_once)
7624 insn = get_last_insn_anywhere ();
7625 else
7626 return 0;
7629 while ((insn = PREV_INSN (insn)))
7631 switch (GET_CODE (insn))
7633 case JUMP_INSN:
7634 case NOTE:
7635 break;
7636 case CODE_LABEL:
7637 case CALL_INSN:
7638 default:
7639 if (! set_once)
7640 return 0;
7641 break;
7642 case INSN:
7644 rtx pat = PATTERN (insn);
7645 if (GET_CODE (pat) != SET)
7646 return 0;
7647 if (rtx_equal_p (x, SET_DEST (pat)))
7648 return set_extends (insn);
7649 if (y && rtx_equal_p (y, SET_DEST (pat)))
7650 return set_extends (insn);
7651 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7652 return 0;
7656 return 0;
7659 /* Returns assembly code to perform a DImode shift using
7660 a 64-bit global or out register on SPARC-V8+. */
7661 const char *
7662 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7664 static char asm_code[60];
7666 /* The scratch register is only required when the destination
7667 register is not a 64-bit global or out register. */
7668 if (which_alternative != 2)
7669 operands[3] = operands[0];
7671 /* We can only shift by constants <= 63. */
7672 if (GET_CODE (operands[2]) == CONST_INT)
7673 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7675 if (GET_CODE (operands[1]) == CONST_INT)
7677 output_asm_insn ("mov\t%1, %3", operands);
7679 else
7681 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7682 if (sparc_check_64 (operands[1], insn) <= 0)
7683 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7684 output_asm_insn ("or\t%L1, %3, %3", operands);
7687 strcpy(asm_code, opcode);
7689 if (which_alternative != 2)
7690 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7691 else
7692 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7695 /* Output rtl to increment the profiler label LABELNO
7696 for profiling a function entry. */
7698 void
7699 sparc_profile_hook (int labelno)
7701 char buf[32];
7702 rtx lab, fun;
7704 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7705 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7706 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7708 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7711 #ifdef OBJECT_FORMAT_ELF
7712 static void
7713 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7714 tree decl)
7716 if (flags & SECTION_MERGE)
7718 /* entsize cannot be expressed in this section attributes
7719 encoding style. */
7720 default_elf_asm_named_section (name, flags, decl);
7721 return;
7724 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7726 if (!(flags & SECTION_DEBUG))
7727 fputs (",#alloc", asm_out_file);
7728 if (flags & SECTION_WRITE)
7729 fputs (",#write", asm_out_file);
7730 if (flags & SECTION_TLS)
7731 fputs (",#tls", asm_out_file);
7732 if (flags & SECTION_CODE)
7733 fputs (",#execinstr", asm_out_file);
7735 /* ??? Handle SECTION_BSS. */
7737 fputc ('\n', asm_out_file);
7739 #endif /* OBJECT_FORMAT_ELF */
7741 /* We do not allow indirect calls to be optimized into sibling calls.
7743 We cannot use sibling calls when delayed branches are disabled
7744 because they will likely require the call delay slot to be filled.
7746 Also, on SPARC 32-bit we cannot emit a sibling call when the
7747 current function returns a structure. This is because the "unimp
7748 after call" convention would cause the callee to return to the
7749 wrong place. The generic code already disallows cases where the
7750 function being called returns a structure.
7752 It may seem strange how this last case could occur. Usually there
7753 is code after the call which jumps to epilogue code which dumps the
7754 return value into the struct return area. That ought to invalidate
7755 the sibling call right? Well, in the C++ case we can end up passing
7756 the pointer to the struct return area to a constructor (which returns
7757 void) and then nothing else happens. Such a sibling call would look
7758 valid without the added check here. */
7759 static bool
7760 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7762 return (decl
7763 && flag_delayed_branch
7764 && (TARGET_ARCH64 || ! current_function_returns_struct));
7767 /* libfunc renaming. */
7768 #include "config/gofast.h"
7770 static void
7771 sparc_init_libfuncs (void)
7773 if (TARGET_ARCH32)
7775 /* Use the subroutines that Sun's library provides for integer
7776 multiply and divide. The `*' prevents an underscore from
7777 being prepended by the compiler. .umul is a little faster
7778 than .mul. */
7779 set_optab_libfunc (smul_optab, SImode, "*.umul");
7780 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7781 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7782 set_optab_libfunc (smod_optab, SImode, "*.rem");
7783 set_optab_libfunc (umod_optab, SImode, "*.urem");
7785 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
7786 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7787 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7788 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7789 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7790 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7792 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7793 is because with soft-float, the SFmode and DFmode sqrt
7794 instructions will be absent, and the compiler will notice and
7795 try to use the TFmode sqrt instruction for calls to the
7796 builtin function sqrt, but this fails. */
7797 if (TARGET_FPU)
7798 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7800 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7801 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7802 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7803 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7804 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7805 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7807 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7808 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7809 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7810 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7812 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7813 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7814 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7815 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7817 if (DITF_CONVERSION_LIBFUNCS)
7819 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7820 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7821 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7822 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7825 if (SUN_CONVERSION_LIBFUNCS)
7827 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7828 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7829 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7830 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7833 if (TARGET_ARCH64)
7835 /* In the SPARC 64bit ABI, SImode multiply and divide functions
7836 do not exist in the library. Make sure the compiler does not
7837 emit calls to them by accident. (It should always use the
7838 hardware instructions.) */
7839 set_optab_libfunc (smul_optab, SImode, 0);
7840 set_optab_libfunc (sdiv_optab, SImode, 0);
7841 set_optab_libfunc (udiv_optab, SImode, 0);
7842 set_optab_libfunc (smod_optab, SImode, 0);
7843 set_optab_libfunc (umod_optab, SImode, 0);
7845 if (SUN_INTEGER_MULTIPLY_64)
7847 set_optab_libfunc (smul_optab, DImode, "__mul64");
7848 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7849 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7850 set_optab_libfunc (smod_optab, DImode, "__rem64");
7851 set_optab_libfunc (umod_optab, DImode, "__urem64");
7854 if (SUN_CONVERSION_LIBFUNCS)
7856 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7857 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7858 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7859 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7863 gofast_maybe_init_libfuncs ();
7866 #define def_builtin(NAME, CODE, TYPE) \
7867 lang_hooks.builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7868 NULL_TREE)
7870 /* Implement the TARGET_INIT_BUILTINS target hook.
7871 Create builtin functions for special SPARC instructions. */
7873 static void
7874 sparc_init_builtins (void)
7876 if (TARGET_VIS)
7877 sparc_vis_init_builtins ();
7880 /* Create builtin functions for VIS 1.0 instructions. */
7882 static void
7883 sparc_vis_init_builtins (void)
7885 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7886 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7887 tree v4hi = build_vector_type (intHI_type_node, 4);
7888 tree v2hi = build_vector_type (intHI_type_node, 2);
7889 tree v2si = build_vector_type (intSI_type_node, 2);
7891 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7892 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7893 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7894 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7895 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7896 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7897 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7898 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7899 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7900 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7901 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7902 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7903 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7904 v8qi, v8qi,
7905 intDI_type_node, 0);
7906 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7907 intDI_type_node,
7908 intDI_type_node, 0);
7909 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7910 ptr_type_node,
7911 intSI_type_node, 0);
7912 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7913 ptr_type_node,
7914 intDI_type_node, 0);
7916 /* Packing and expanding vectors. */
7917 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7918 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7919 v8qi_ftype_v2si_v8qi);
7920 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7921 v2hi_ftype_v2si);
7922 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7923 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7924 v8qi_ftype_v4qi_v4qi);
7926 /* Multiplications. */
7927 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7928 v4hi_ftype_v4qi_v4hi);
7929 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7930 v4hi_ftype_v4qi_v2hi);
7931 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7932 v4hi_ftype_v4qi_v2hi);
7933 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
7934 v4hi_ftype_v8qi_v4hi);
7935 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
7936 v4hi_ftype_v8qi_v4hi);
7937 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
7938 v2si_ftype_v4qi_v2hi);
7939 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
7940 v2si_ftype_v4qi_v2hi);
7942 /* Data alignment. */
7943 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
7944 v4hi_ftype_v4hi_v4hi);
7945 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
7946 v8qi_ftype_v8qi_v8qi);
7947 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
7948 v2si_ftype_v2si_v2si);
7949 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
7950 di_ftype_di_di);
7951 if (TARGET_ARCH64)
7952 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
7953 ptr_ftype_ptr_di);
7954 else
7955 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
7956 ptr_ftype_ptr_si);
7958 /* Pixel distance. */
7959 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
7960 di_ftype_v8qi_v8qi_di);
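/* Illustrative sketch (not part of sparc.c): user-level target code
   compiled with -mvis can call the builtins registered above directly.
   The vector typedefs are assumptions for the example, chosen to match
   the V4HI and V4QI modes that fpack16 operates on.  */

typedef short example_v4hi __attribute__ ((vector_size (8)));
typedef unsigned char example_v4qi __attribute__ ((vector_size (4)));

example_v4qi
pack_example (example_v4hi v)
{
  /* Clips each 16-bit element and packs the results into bytes,
     using a single fpack16 instruction.  */
  return __builtin_vis_fpack16 (v);
}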
7963 /* Handle TARGET_EXPAND_BUILTIN target hook.
7964 Expand builtin functions for SPARC intrinsics. */
7966 static rtx
7967 sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7968 enum machine_mode tmode, int ignore ATTRIBUTE_UNUSED)
7970 tree arglist;
7971 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7972 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
7973 rtx pat, op[4];
7974 enum machine_mode mode[4];
7975 int arg_count = 0;
7977 mode[arg_count] = tmode;
7979 if (target == 0
7980 || GET_MODE (target) != tmode
7981 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7982 op[arg_count] = gen_reg_rtx (tmode);
7983 else
7984 op[arg_count] = target;
7986 for (arglist = TREE_OPERAND (exp, 1); arglist;
7987 arglist = TREE_CHAIN (arglist))
7989 tree arg = TREE_VALUE (arglist);
7991 arg_count++;
7992 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
7993 op[arg_count] = expand_normal (arg);
7995 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
7996 mode[arg_count]))
7997 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8000 switch (arg_count)
8002 case 1:
8003 pat = GEN_FCN (icode) (op[0], op[1]);
8004 break;
8005 case 2:
8006 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8007 break;
8008 case 3:
8009 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8010 break;
8011 default:
8012 gcc_unreachable ();
8015 if (!pat)
8016 return NULL_RTX;
8018 emit_insn (pat);
8020 return op[0];
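/* Illustrative note (not from this file): for a call such as
   __builtin_vis_fpmerge (a, b), the loop above expands the two
   arguments into operands 1 and 2 of CODE_FOR_fpmerge_vis, coercing
   each into a register if the operand predicate rejects it, and then
   GEN_FCN emits the single insn whose result is returned in op[0].  */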
8023 static int
8024 sparc_vis_mul8x16 (int e8, int e16)
8026 return (e8 * e16 + 128) / 256;
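/* Worked example (illustrative): fmul8x16 treats the 8-bit operand as
   an unsigned fixed-point fraction, so e8 = 128 and e16 = 1000 give
   (128 * 1000 + 128) / 256 = 500, i.e. multiplication by 0.5 with
   rounding to nearest.  */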
8029 /* Multiply the vector elements in ELTS0 by the elements in ELTS1, as
8030 specified by FNCODE. All of the elements in the ELTS0 and ELTS1 lists
8031 must be integer constants. A tree list with the results of the
8032 multiplications is returned, and each element in the list is of INNER_TYPE. */
8034 static tree
8035 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8037 tree n_elts = NULL_TREE;
8038 int scale;
8040 switch (fncode)
8042 case CODE_FOR_fmul8x16_vis:
8043 for (; elts0 && elts1;
8044 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8046 int val
8047 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8048 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8049 n_elts = tree_cons (NULL_TREE,
8050 build_int_cst (inner_type, val),
8051 n_elts);
8053 break;
8055 case CODE_FOR_fmul8x16au_vis:
8056 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8058 for (; elts0; elts0 = TREE_CHAIN (elts0))
8060 int val
8061 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8062 scale);
8063 n_elts = tree_cons (NULL_TREE,
8064 build_int_cst (inner_type, val),
8065 n_elts);
8067 break;
8069 case CODE_FOR_fmul8x16al_vis:
8070 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8072 for (; elts0; elts0 = TREE_CHAIN (elts0))
8074 int val
8075 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8076 scale);
8077 n_elts = tree_cons (NULL_TREE,
8078 build_int_cst (inner_type, val),
8079 n_elts);
8081 break;
8083 default:
8084 gcc_unreachable ();
8087 return nreverse (n_elts);
8090 /* Handle TARGET_FOLD_BUILTIN target hook.
8091 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8092 result of the function call is ignored. NULL_TREE is returned if the
8093 function could not be folded. */
8095 static tree
8096 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8098 tree arg0, arg1, arg2;
8099 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8102 if (ignore && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8103 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8104 return build_int_cst (rtype, 0);
8106 switch (DECL_FUNCTION_CODE (fndecl))
8108 case CODE_FOR_fexpand_vis:
8109 arg0 = TREE_VALUE (arglist);
8110 STRIP_NOPS (arg0);
8112 if (TREE_CODE (arg0) == VECTOR_CST)
8114 tree inner_type = TREE_TYPE (rtype);
8115 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8116 tree n_elts = NULL_TREE;
8118 for (; elts; elts = TREE_CHAIN (elts))
8120 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8121 n_elts = tree_cons (NULL_TREE,
8122 build_int_cst (inner_type, val),
8123 n_elts);
8125 return build_vector (rtype, nreverse (n_elts));
8127 break;
8129 case CODE_FOR_fmul8x16_vis:
8130 case CODE_FOR_fmul8x16au_vis:
8131 case CODE_FOR_fmul8x16al_vis:
8132 arg0 = TREE_VALUE (arglist);
8133 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8134 STRIP_NOPS (arg0);
8135 STRIP_NOPS (arg1);
8137 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8139 tree inner_type = TREE_TYPE (rtype);
8140 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8141 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8142 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8143 inner_type, elts0, elts1);
8145 return build_vector (rtype, n_elts);
8147 break;
8149 case CODE_FOR_fpmerge_vis:
8150 arg0 = TREE_VALUE (arglist);
8151 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8152 STRIP_NOPS (arg0);
8153 STRIP_NOPS (arg1);
8155 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8157 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8158 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8159 tree n_elts = NULL_TREE;
8161 for (; elts0 && elts1;
8162 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8164 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8165 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8168 return build_vector (rtype, nreverse (n_elts));
8170 break;
8172 case CODE_FOR_pdist_vis:
8173 arg0 = TREE_VALUE (arglist);
8174 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8175 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8176 STRIP_NOPS (arg0);
8177 STRIP_NOPS (arg1);
8178 STRIP_NOPS (arg2);
8180 if (TREE_CODE (arg0) == VECTOR_CST
8181 && TREE_CODE (arg1) == VECTOR_CST
8182 && TREE_CODE (arg2) == INTEGER_CST)
8184 int overflow = 0;
8185 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8186 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8187 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8188 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8190 for (; elts0 && elts1;
8191 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8193 unsigned HOST_WIDE_INT
8194 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8195 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8196 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8197 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8199 unsigned HOST_WIDE_INT l;
8200 HOST_WIDE_INT h;
8202 overflow |= neg_double (low1, high1, &l, &h);
8203 overflow |= add_double (low0, high0, l, h, &l, &h);
8204 if (h < 0)
8205 overflow |= neg_double (l, h, &l, &h);
8207 overflow |= add_double (low, high, l, h, &low, &high);
8210 gcc_assert (overflow == 0);
8212 return build_int_cst_wide (rtype, low, high);
8215 default:
8216 break;
8218 return NULL_TREE;
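/* Illustrative example (not part of sparc.c): with constant operands
   the folder above evaluates the VIS operation at compile time.  For
   instance, __builtin_vis_fexpand applied to the constant vector
   { 1, 2, 3, 4 } folds to the VECTOR_CST { 16, 32, 48, 64 }, each
   element shifted left by 4 as in the CODE_FOR_fexpand_vis case.  */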
8221 int
8222 sparc_extra_constraint_check (rtx op, int c, int strict)
8224 int reload_ok_mem;
8226 if (TARGET_ARCH64
8227 && (c == 'T' || c == 'U'))
8228 return 0;
8230 switch (c)
8232 case 'Q':
8233 return fp_sethi_p (op);
8235 case 'R':
8236 return fp_mov_p (op);
8238 case 'S':
8239 return fp_high_losum_p (op);
8241 case 'U':
8242 if (! strict
8243 || (GET_CODE (op) == REG
8244 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8245 || reg_renumber[REGNO (op)] >= 0)))
8246 return register_ok_for_ldd (op);
8248 return 0;
8250 case 'W':
8251 case 'T':
8252 break;
8254 case 'Y':
8255 return const_zero_operand (op, GET_MODE (op));
8257 default:
8258 return 0;
8261 /* Our memory extra constraints have to emulate the
8262 behavior of 'm' and 'o' in order for reload to work
8263 correctly. */
8264 if (GET_CODE (op) == MEM)
8266 reload_ok_mem = 0;
8267 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8268 && (! strict
8269 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8270 reload_ok_mem = 1;
8272 else
8274 reload_ok_mem = (reload_in_progress
8275 && GET_CODE (op) == REG
8276 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8277 && reg_renumber [REGNO (op)] < 0);
8280 return reload_ok_mem;
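/* Illustrative note (an assumption about sparc.md, not taken from this
   file): these extra constraints appear in operand constraint strings
   of insn patterns, e.g. "U" for a register pair usable by ldd/std in
   32-bit mode and "T" for a memory operand known to be 8-byte aligned,
   both of which end up validated by the checks above.  */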
8283 /* ??? This duplicates information provided to the compiler by the
8284 ??? scheduler description. Some day, teach genautomata to output
8285 ??? the latencies and then CSE will just use that. */
8287 static bool
8288 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8290 enum machine_mode mode = GET_MODE (x);
8291 bool float_mode_p = FLOAT_MODE_P (mode);
8293 switch (code)
8295 case CONST_INT:
8296 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8298 *total = 0;
8299 return true;
8301 /* FALLTHRU */
8303 case HIGH:
8304 *total = 2;
8305 return true;
8307 case CONST:
8308 case LABEL_REF:
8309 case SYMBOL_REF:
8310 *total = 4;
8311 return true;
8313 case CONST_DOUBLE:
8314 if (GET_MODE (x) == VOIDmode
8315 && ((CONST_DOUBLE_HIGH (x) == 0
8316 && CONST_DOUBLE_LOW (x) < 0x1000)
8317 || (CONST_DOUBLE_HIGH (x) == -1
8318 && CONST_DOUBLE_LOW (x) < 0
8319 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8320 *total = 0;
8321 else
8322 *total = 8;
8323 return true;
8325 case MEM:
8326 /* If outer-code was a sign or zero extension, a cost
8327 of COSTS_N_INSNS (1) was already added in. This is
8328 why we are subtracting it back out. */
8329 if (outer_code == ZERO_EXTEND)
8331 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8333 else if (outer_code == SIGN_EXTEND)
8335 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8337 else if (float_mode_p)
8339 *total = sparc_costs->float_load;
8341 else
8343 *total = sparc_costs->int_load;
8346 return true;
8348 case PLUS:
8349 case MINUS:
8350 if (float_mode_p)
8351 *total = sparc_costs->float_plusminus;
8352 else
8353 *total = COSTS_N_INSNS (1);
8354 return false;
8356 case MULT:
8357 if (float_mode_p)
8358 *total = sparc_costs->float_mul;
8359 else if (! TARGET_HARD_MUL)
8360 *total = COSTS_N_INSNS (25);
8361 else
8363 int bit_cost;
8365 bit_cost = 0;
8366 if (sparc_costs->int_mul_bit_factor)
8368 int nbits;
8370 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8372 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8373 for (nbits = 0; value != 0; value &= value - 1)
8374 nbits++;
8376 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8377 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8379 rtx x1 = XEXP (x, 1);
8380 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8381 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8383 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8384 nbits++;
8385 for (; value2 != 0; value2 &= value2 - 1)
8386 nbits++;
8388 else
8389 nbits = 7;
8391 if (nbits < 3)
8392 nbits = 3;
8393 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8394 bit_cost = COSTS_N_INSNS (bit_cost);
8397 if (mode == DImode)
8398 *total = sparc_costs->int_mulX + bit_cost;
8399 else
8400 *total = sparc_costs->int_mul + bit_cost;
8402 return false;
8404 case ASHIFT:
8405 case ASHIFTRT:
8406 case LSHIFTRT:
8407 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8408 return false;
8410 case DIV:
8411 case UDIV:
8412 case MOD:
8413 case UMOD:
8414 if (float_mode_p)
8416 if (mode == DFmode)
8417 *total = sparc_costs->float_div_df;
8418 else
8419 *total = sparc_costs->float_div_sf;
8421 else
8423 if (mode == DImode)
8424 *total = sparc_costs->int_divX;
8425 else
8426 *total = sparc_costs->int_div;
8428 return false;
8430 case NEG:
8431 if (! float_mode_p)
8433 *total = COSTS_N_INSNS (1);
8434 return false;
8436 /* FALLTHRU */
8438 case ABS:
8439 case FLOAT:
8440 case UNSIGNED_FLOAT:
8441 case FIX:
8442 case UNSIGNED_FIX:
8443 case FLOAT_EXTEND:
8444 case FLOAT_TRUNCATE:
8445 *total = sparc_costs->float_move;
8446 return false;
8448 case SQRT:
8449 if (mode == DFmode)
8450 *total = sparc_costs->float_sqrt_df;
8451 else
8452 *total = sparc_costs->float_sqrt_sf;
8453 return false;
8455 case COMPARE:
8456 if (float_mode_p)
8457 *total = sparc_costs->float_cmp;
8458 else
8459 *total = COSTS_N_INSNS (1);
8460 return false;
8462 case IF_THEN_ELSE:
8463 if (float_mode_p)
8464 *total = sparc_costs->float_cmove;
8465 else
8466 *total = sparc_costs->int_cmove;
8467 return false;
8469 case IOR:
8470 /* Handle the NAND vector patterns. */
8471 if (sparc_vector_mode_supported_p (GET_MODE (x))
8472 && GET_CODE (XEXP (x, 0)) == NOT
8473 && GET_CODE (XEXP (x, 1)) == NOT)
8475 *total = COSTS_N_INSNS (1);
8476 return true;
8478 else
8479 return false;
8481 default:
8482 return false;
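/* Worked example (illustrative): on a CPU whose cost table sets
   int_mul_bit_factor, multiplying by the constant 0x00ff00ff gives
   nbits = 16 (its popcount), so the MULT case above charges
   int_mul + COSTS_N_INSNS ((16 - 3) / int_mul_bit_factor), modelling
   early-out multipliers whose latency depends on the operand bits.  */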
8486 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2. */
8488 static void
8489 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8491 /* STACK_BOUNDARY guarantees that this is a 2-word slot. */
8492 rtx slot = gen_rtx_MEM (word_mode,
8493 plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));
8495 emit_insn (gen_stack_pointer_dec (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
8496 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8497 if (reg2)
8498 emit_insn (gen_rtx_SET (VOIDmode,
8499 adjust_address (slot, word_mode, UNITS_PER_WORD),
8500 reg2));
8501 emit_insn (seq);
8502 if (reg2)
8503 emit_insn (gen_rtx_SET (VOIDmode,
8504 reg2,
8505 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8506 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8507 emit_insn (gen_stack_pointer_inc (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
8510 /* Output the assembler code for a thunk function. THUNK_DECL is the
8511 declaration for the thunk function itself, FUNCTION is the decl for
8512 the target function. DELTA is an immediate constant offset to be
8513 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8514 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8516 static void
8517 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8518 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8519 tree function)
8521 rtx this, insn, funexp;
8522 unsigned int int_arg_first;
8524 reload_completed = 1;
8525 epilogue_completed = 1;
8526 no_new_pseudos = 1;
8527 reset_block_changes ();
8529 emit_note (NOTE_INSN_PROLOGUE_END);
8531 if (flag_delayed_branch)
8533 /* We will emit a regular sibcall below, so we need to instruct
8534 output_sibcall that we are in a leaf function. */
8535 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8537 /* This will cause final.c to invoke leaf_renumber_regs so we
8538 must behave as if we were in a not-yet-leafified function. */
8539 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8541 else
8543 /* We will emit the sibcall manually below, so we will need to
8544 manually spill non-leaf registers. */
8545 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8547 /* We really are in a leaf function. */
8548 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8551 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8552 returns a structure, the structure return pointer is there instead. */
8553 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8554 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8555 else
8556 this = gen_rtx_REG (Pmode, int_arg_first);
8558 /* Add DELTA. When possible use a plain add, otherwise load it into
8559 a register first. */
8560 if (delta)
8562 rtx delta_rtx = GEN_INT (delta);
8564 if (! SPARC_SIMM13_P (delta))
8566 rtx scratch = gen_rtx_REG (Pmode, 1);
8567 emit_move_insn (scratch, delta_rtx);
8568 delta_rtx = scratch;
8571 /* THIS += DELTA. */
8572 emit_insn (gen_add2_insn (this, delta_rtx));
8575 /* Add the word at address (*THIS + VCALL_OFFSET). */
8576 if (vcall_offset)
8578 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8579 rtx scratch = gen_rtx_REG (Pmode, 1);
8581 gcc_assert (vcall_offset < 0);
8583 /* SCRATCH = *THIS. */
8584 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8586 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8587 may not have any available scratch register at this point. */
8588 if (SPARC_SIMM13_P (vcall_offset))
8589 ;
8590 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8591 else if (! fixed_regs[5]
8592 /* The below sequence is made up of at least 2 insns,
8593 while the default method may need only one. */
8594 && vcall_offset < -8192)
8596 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8597 emit_move_insn (scratch2, vcall_offset_rtx);
8598 vcall_offset_rtx = scratch2;
8600 else
8602 rtx increment = GEN_INT (-4096);
8604 /* VCALL_OFFSET is a negative number whose typical range can be
8605 estimated as -32768..0 in 32-bit mode. In almost all cases
8606 it is therefore cheaper to emit multiple add insns than
8607 spilling and loading the constant into a register (at least
8608 6 insns). */
8609 while (! SPARC_SIMM13_P (vcall_offset))
8611 emit_insn (gen_add2_insn (scratch, increment));
8612 vcall_offset += 4096;
8614 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8617 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8618 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8619 gen_rtx_PLUS (Pmode,
8620 scratch,
8621 vcall_offset_rtx)));
8623 /* THIS += *(*THIS + VCALL_OFFSET). */
8624 emit_insn (gen_add2_insn (this, scratch));
8627 /* Generate a tail call to the target function. */
8628 if (! TREE_USED (function))
8630 assemble_external (function);
8631 TREE_USED (function) = 1;
8633 funexp = XEXP (DECL_RTL (function), 0);
8635 if (flag_delayed_branch)
8637 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8638 insn = emit_call_insn (gen_sibcall (funexp));
8639 SIBLING_CALL_P (insn) = 1;
8641 else
8643 /* The hoops we have to jump through in order to generate a sibcall
8644 without using delay slots... */
8645 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8647 if (flag_pic)
8649 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8650 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8651 start_sequence ();
8652 /* Delay emitting the PIC helper function because it needs to
8653 change the section and we are emitting assembly code. */
8654 load_pic_register (true); /* clobbers %o7 */
8655 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8656 seq = get_insns ();
8657 end_sequence ();
8658 emit_and_preserve (seq, spill_reg, spill_reg2);
8660 else if (TARGET_ARCH32)
8662 emit_insn (gen_rtx_SET (VOIDmode,
8663 scratch,
8664 gen_rtx_HIGH (SImode, funexp)));
8665 emit_insn (gen_rtx_SET (VOIDmode,
8666 scratch,
8667 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8669 else /* TARGET_ARCH64 */
8671 switch (sparc_cmodel)
8673 case CM_MEDLOW:
8674 case CM_MEDMID:
8675 /* The destination can serve as a temporary. */
8676 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8677 break;
8679 case CM_MEDANY:
8680 case CM_EMBMEDANY:
8681 /* The destination cannot serve as a temporary. */
8682 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8683 start_sequence ();
8684 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8685 seq = get_insns ();
8686 end_sequence ();
8687 emit_and_preserve (seq, spill_reg, 0);
8688 break;
8690 default:
8691 gcc_unreachable ();
8695 emit_jump_insn (gen_indirect_jump (scratch));
8698 emit_barrier ();
8700 /* Run just enough of rest_of_compilation to get the insns emitted.
8701 There's not really enough bulk here to make other passes such as
8702 instruction scheduling worthwhile. Note that use_thunk calls
8703 assemble_start_function and assemble_end_function. */
8704 insn = get_insns ();
8705 insn_locators_initialize ();
8706 shorten_branches (insn);
8707 final_start_function (insn, file, 1);
8708 final (insn, file, 1);
8709 final_end_function ();
8711 reload_completed = 0;
8712 epilogue_completed = 0;
8713 no_new_pseudos = 0;
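/* Conceptual model (illustrative, not part of sparc.c) of what the
   emitted thunk computes before tail-calling FUNCTION:

	this += delta;
	if (vcall_offset)
	  this += *(long *) (*(char **) this + vcall_offset);
	goto function;		// sibcall, reusing the caller's frame
*/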
8716 /* Return true if sparc_output_mi_thunk would be able to output the
8717 assembler code for the thunk function specified by the arguments
8718 it is passed, and false otherwise. */
8719 static bool
8720 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8721 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8722 HOST_WIDE_INT vcall_offset,
8723 tree function ATTRIBUTE_UNUSED)
8725 /* Bound the loop used in the default method above. */
8726 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8729 /* How to allocate a 'struct machine_function'. */
8731 static struct machine_function *
8732 sparc_init_machine_status (void)
8734 return ggc_alloc_cleared (sizeof (struct machine_function));
8737 /* Locate some local-dynamic symbol still in use by this function
8738 so that we can print its name in local-dynamic base patterns. */
8740 static const char *
8741 get_some_local_dynamic_name (void)
8743 rtx insn;
8745 if (cfun->machine->some_ld_name)
8746 return cfun->machine->some_ld_name;
8748 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8749 if (INSN_P (insn)
8750 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8751 return cfun->machine->some_ld_name;
8753 gcc_unreachable ();
8756 static int
8757 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8759 rtx x = *px;
8761 if (x
8762 && GET_CODE (x) == SYMBOL_REF
8763 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8765 cfun->machine->some_ld_name = XSTR (x, 0);
8766 return 1;
8769 return 0;
8772 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8773 This is called from dwarf2out.c to emit call frame instructions
8774 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8775 static void
8776 sparc_dwarf_handle_frame_unspec (const char *label,
8777 rtx pattern ATTRIBUTE_UNUSED,
8778 int index ATTRIBUTE_UNUSED)
8780 gcc_assert (index == UNSPECV_SAVEW);
8781 dwarf2out_window_save (label);
8784 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8785 We need to emit DTP-relative relocations. */
8787 static void
8788 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8790 switch (size)
8792 case 4:
8793 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8794 break;
8795 case 8:
8796 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8797 break;
8798 default:
8799 gcc_unreachable ();
8801 output_addr_const (file, x);
8802 fputs (")", file);
8805 /* Do whatever processing is required at the end of a file. */
8807 static void
8808 sparc_file_end (void)
8810 /* If we haven't emitted the special PIC helper function, do so now. */
8811 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8812 emit_pic_helper ();
8814 if (NEED_INDICATE_EXEC_STACK)
8815 file_end_indicate_exec_stack ();
8818 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8819 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
8821 static const char *
8822 sparc_mangle_fundamental_type (tree type)
8824 if (!TARGET_64BIT
8825 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8826 && TARGET_LONG_DOUBLE_128)
8827 return "g";
8829 /* For all other types, use normal C++ mangling. */
8830 return NULL;
8832 #endif
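/* Illustrative example (not part of sparc.c): with -m32 and 128-bit
   long double, the hook above mangles long double as "g", so the C++
   function "void f (long double)" is emitted as _Z1fg rather than the
   Itanium-ABI default _Z1fe.  */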
8834 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing a
8835 32-bit compare and swap on the word containing the byte or half-word. */
8837 void
8838 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8840 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8841 rtx addr = gen_reg_rtx (Pmode);
8842 rtx off = gen_reg_rtx (SImode);
8843 rtx oldv = gen_reg_rtx (SImode);
8844 rtx newv = gen_reg_rtx (SImode);
8845 rtx oldvalue = gen_reg_rtx (SImode);
8846 rtx newvalue = gen_reg_rtx (SImode);
8847 rtx res = gen_reg_rtx (SImode);
8848 rtx resv = gen_reg_rtx (SImode);
8849 rtx memsi, val, mask, end_label, loop_label, cc;
8851 emit_insn (gen_rtx_SET (VOIDmode, addr,
8852 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8854 if (Pmode != SImode)
8855 addr1 = gen_lowpart (SImode, addr1);
8856 emit_insn (gen_rtx_SET (VOIDmode, off,
8857 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8859 memsi = gen_rtx_MEM (SImode, addr);
8860 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8861 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8863 val = force_reg (SImode, memsi);
8865 emit_insn (gen_rtx_SET (VOIDmode, off,
8866 gen_rtx_XOR (SImode, off,
8867 GEN_INT (GET_MODE (mem) == QImode
8868 ? 3 : 2))));
8870 emit_insn (gen_rtx_SET (VOIDmode, off,
8871 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8873 if (GET_MODE (mem) == QImode)
8874 mask = force_reg (SImode, GEN_INT (0xff));
8875 else
8876 mask = force_reg (SImode, GEN_INT (0xffff));
8878 emit_insn (gen_rtx_SET (VOIDmode, mask,
8879 gen_rtx_ASHIFT (SImode, mask, off)));
8881 emit_insn (gen_rtx_SET (VOIDmode, val,
8882 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8883 val)));
8885 oldval = gen_lowpart (SImode, oldval);
8886 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8887 gen_rtx_ASHIFT (SImode, oldval, off)));
8889 newval = gen_lowpart_common (SImode, newval);
8890 emit_insn (gen_rtx_SET (VOIDmode, newv,
8891 gen_rtx_ASHIFT (SImode, newval, off)));
8893 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8894 gen_rtx_AND (SImode, oldv, mask)));
8896 emit_insn (gen_rtx_SET (VOIDmode, newv,
8897 gen_rtx_AND (SImode, newv, mask)));
8899 end_label = gen_label_rtx ();
8900 loop_label = gen_label_rtx ();
8901 emit_label (loop_label);
8903 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
8904 gen_rtx_IOR (SImode, oldv, val)));
8906 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
8907 gen_rtx_IOR (SImode, newv, val)));
8909 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
8911 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
8913 emit_insn (gen_rtx_SET (VOIDmode, resv,
8914 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8915 res)));
8917 sparc_compare_op0 = resv;
8918 sparc_compare_op1 = val;
8919 cc = gen_compare_reg (NE);
8921 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
8923 sparc_compare_emitted = cc;
8924 emit_jump_insn (gen_bne (loop_label));
8926 emit_label (end_label);
8928 emit_insn (gen_rtx_SET (VOIDmode, res,
8929 gen_rtx_AND (SImode, res, mask)));
8931 emit_insn (gen_rtx_SET (VOIDmode, res,
8932 gen_rtx_LSHIFTRT (SImode, res, off)));
8934 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
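/* Illustrative C model (not part of sparc.c) of the expansion above:
   an 8-bit compare-and-swap built from a 32-bit CAS on the enclosing
   word.  __sync_val_compare_and_swap stands in for the casa-based
   sync_compare_and_swapsi pattern, and big-endian byte numbering
   matches the XOR-with-3 offset computation above.  */

static unsigned char
cas8_model (unsigned char *p, unsigned char oldv, unsigned char newv)
{
  unsigned int *wp = (unsigned int *) ((unsigned long) p & ~3UL);
  unsigned int off = (((unsigned int) (unsigned long) p & 3U) ^ 3U) << 3;
  unsigned int mask = 0xffU << off;
  unsigned int val = *wp & ~mask;	/* bytes that must be preserved */
  unsigned int res;

  for (;;)
    {
      unsigned int oldw = ((unsigned int) oldv << off) | val;
      unsigned int neww = ((unsigned int) newv << off) | val;

      res = __sync_val_compare_and_swap (wp, oldw, neww);
      if (res == oldw)
	break;			/* the swap succeeded */
      if ((res & ~mask) == val)
	break;			/* our byte mismatched: report failure */
      val = res & ~mask;	/* other bytes changed: retry */
    }

  /* Like the expander, return the byte actually observed in memory.  */
  return (unsigned char) ((res & mask) >> off);
}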
8937 #include "gt-sparc.h"