/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
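
/* Note: sparc_override_options below re-points sparc_costs at the table
   matching -mtune=; cypress_costs is merely the static default.  The
   COSTS_N_INSNS macro (from rtl.h) expresses a latency of N typical insns
   in GCC's unit cost scale, so what matters in the tables above is the
   relative magnitude of the entries, not the raw numbers.  */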
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   anything branches to a point between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;
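
/* In other words (an illustrative relation implied by the comment above,
   not a definition used elsewhere in this file):

     actual_fsize = apparent_fsize
		    + size of register save area
		    + size of outgoing argument area

   so a leaf procedure that saves no registers and needs no outgoing
   argument area has actual_fsize == apparent_fsize.  */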
/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif
struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
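
/* For example (an illustrative walk-through, not additional code):
   "-mcpu=niagara" arrives here as code == OPT_mcpu_ with arg == "niagara";
   the string is merely recorded in sparc_select[1].  The actual effect
   happens later in sparc_override_options, which looks "niagara" up in
   cpu_table, sets sparc_cpu = PROCESSOR_NIAGARA and ORs in
   MASK_V9|MASK_DEPRECATED_V8_INSNS.  "-mtune=" goes through
   sparc_select[2] and only affects tuning, not the instruction set.  */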
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7",         PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress",    PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8",         PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite",  PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.
       The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f930",       PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934",       PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet",   PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701",     PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9",         PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara",    PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_NIAGARA)
		      ? 2
		      : (sparc_cpu == PROCESSOR_ULTRASPARC3
			 ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
		     ((sparc_cpu == PROCESSOR_ULTRASPARC
		       || sparc_cpu == PROCESSOR_ULTRASPARC3
		       || sparc_cpu == PROCESSOR_NIAGARA)
		      ? 64 : 32));
}
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
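
/* To illustrate the three predicates above (worked examples, assuming the
   usual IEEE single-precision encodings):

     0.0f -> image 0x00000000, which fits in a signed 13-bit immediate,
	     so fp_mov_p holds and a single "mov" suffices;
     1.0f -> image 0x3f800000, whose low 10 bits are clear, so fp_sethi_p
	     holds and a single "sethi %hi(0x3f800000), %reg" suffices;
     1.1f -> image 0x3f8ccccd, which satisfies neither test, so
	     fp_high_losum_p holds and a sethi/or pair is needed.  */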
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      /* VxWorks does not impose a fixed gap between segments; the run-time
	 gap can be different from the object-file gap.  We therefore can't
	 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
	 are absolutely sure that X is in the same segment as the GOT.
	 Unfortunately, the flexibility of linker scripts means that we
	 can't be sure of that in general, so assume that _G_O_T_-relative
	 accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] :
						 NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here; the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
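
/* Worked example for the CONST_INT path above: loading 0x12345678
   (which is neither a 13-bit immediate nor a sethi-only value) produces

       sethi  %hi(0x12345678), %temp    ! %temp = 0x12345400
       or     %temp, 0x278, %op0        ! 0x278 = 0x12345678 & 0x3ff

   i.e. the constant is split into its upper 22 and lower 10 bits.  */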
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not allowed to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, so that the insn
   matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
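
/* For instance, gen_safe_HIGH64 (dest, 0x12345678) yields the rtl
   (set dest (const_int 0x12345400)): the low 10 bits are masked off
   up front, so the SET carries the exact value the sethi will produce
   and CSE can reason about it like any other constant move.  */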
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
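
/* Worked example of the is_neg path: loading -0x12345678, i.e.
   0xffffffffedcba988, gives low_bits == 0xedcba988 and therefore
   high_bits == ~low_bits == 0x12345677.  The sequence emitted is

       sethi  %hi(0x12345677), %temp    ! %temp = 0x12345400
       xor    %temp, -0x278, %op0       ! -0x278 = -0x400 | 0x188

   and 0x12345400 ^ 0xfffffffffffffd88 is indeed 0xffffffffedcba988:
   the XOR with a negative 13-bit immediate both flips the upper bits
   and patches in the low 10 bits at once.  */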
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
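
/* Worked example: quick2 (op0, temp, 0x12345678, 0x45, 32) emits

       sethi  %hi(0x12345678), %temp
       or     %temp, 0x278, %op0
       sllx   %op0, 32, %op0
       or     %op0, 0x45, %op0

   building the 64-bit constant 0x1234567800000045 in four insns.  */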
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					     unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
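
/* Worked example: for the constant 0x00000000000ff000 (high_bits == 0,
   low_bits == 0x000ff000) the loops above find lowest_bit_set == 12 and
   highest_bit_set == 19, and since bits 12..19 are all set,
   all_bits_between_are_set == 1.  Those three facts are what the callers
   below use to pick between the 2-insn, 3-insn and full decompositions.  */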
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
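
/* So, for example, 0x00000000000ff000 is a 2-insn constant (its span of
   set bits is only 8 wide, well under 21), as is 0xfff0000000000000 (a
   contiguous run of ones reaching bit 63), while 0x8000000000000001 is
   not: its set bits span the whole word and are not contiguous.  */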
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							unsigned HOST_WIDE_INT,
							int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
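
/* Example: create_simple_focus_bits (0, 0x000ff000, 12, 10) shifts the
   interesting bits down to bit 0 and back up by SHIFT, returning
   0x0003fc00, i.e. the run of ones parked in the sethi field (bits 10..31).
   The caller then re-positions the value with a single shift insn.  */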
1679 /* Here we are sure to be arch64 and this is an integer constant
1680 being loaded into a register. Emit the most efficient
1681 insn sequence possible. Detection of all the 1-insn cases
1682 has been done already. */
1683 void
1684 sparc_emit_set_const64 (rtx op0, rtx op1)
1686 unsigned HOST_WIDE_INT high_bits, low_bits;
1687 int lowest_bit_set, highest_bit_set;
1688 int all_bits_between_are_set;
1689 rtx temp = 0;
1691 /* Sanity check that we know what we are working with. */
1692 gcc_assert (TARGET_ARCH64
1693 && (GET_CODE (op0) == SUBREG
1694 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1696 if (reload_in_progress || reload_completed)
1697 temp = op0;
1699 if (GET_CODE (op1) != CONST_INT)
1701 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1702 return;
1705 if (! temp)
1706 temp = gen_reg_rtx (DImode);
1708 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1709 low_bits = (INTVAL (op1) & 0xffffffff);
1711 /* low_bits bits 0 --> 31
1712 high_bits bits 32 --> 63 */
1714 analyze_64bit_constant (high_bits, low_bits,
1715 &highest_bit_set, &lowest_bit_set,
1716 &all_bits_between_are_set);
1718 /* First try for a 2-insn sequence. */
1720 /* These situations are preferred because the optimizer can
1721 * do more things with them:
1722 * 1) mov -1, %reg
1723 * sllx %reg, shift, %reg
1724 * 2) mov -1, %reg
1725 * srlx %reg, shift, %reg
1726 * 3) mov some_small_const, %reg
1727 * sllx %reg, shift, %reg
1729 if (((highest_bit_set == 63
1730 || lowest_bit_set == 0)
1731 && all_bits_between_are_set != 0)
1732 || ((highest_bit_set - lowest_bit_set) < 12))
1734 HOST_WIDE_INT the_const = -1;
1735 int shift = lowest_bit_set;
1737 if ((highest_bit_set != 63
1738 && lowest_bit_set != 0)
1739 || all_bits_between_are_set == 0)
1741 the_const =
1742 create_simple_focus_bits (high_bits, low_bits,
1743 lowest_bit_set, 0);
1745 else if (lowest_bit_set == 0)
1746 shift = -(63 - highest_bit_set);
1748 gcc_assert (SPARC_SIMM13_P (the_const));
1749 gcc_assert (shift != 0);
1751 emit_insn (gen_safe_SET64 (temp, the_const));
1752 if (shift > 0)
1753 emit_insn (gen_rtx_SET (VOIDmode,
1754 op0,
1755 gen_rtx_ASHIFT (DImode,
1756 temp,
1757 GEN_INT (shift))));
1758 else if (shift < 0)
1759 emit_insn (gen_rtx_SET (VOIDmode,
1760 op0,
1761 gen_rtx_LSHIFTRT (DImode,
1762 temp,
1763 GEN_INT (-shift))));
1764 return;
1767 /* Now a range of 22 or less bits set somewhere.
1768 * 1) sethi %hi(focus_bits), %reg
1769 * sllx %reg, shift, %reg
1770 * 2) sethi %hi(focus_bits), %reg
1771 * srlx %reg, shift, %reg
1773 if ((highest_bit_set - lowest_bit_set) < 21)
1775 unsigned HOST_WIDE_INT focus_bits =
1776 create_simple_focus_bits (high_bits, low_bits,
1777 lowest_bit_set, 10);
1779 gcc_assert (SPARC_SETHI_P (focus_bits));
1780 gcc_assert (lowest_bit_set != 10);
1782 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1784 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1785 if (lowest_bit_set < 10)
1786 emit_insn (gen_rtx_SET (VOIDmode,
1787 op0,
1788 gen_rtx_LSHIFTRT (DImode, temp,
1789 GEN_INT (10 - lowest_bit_set))));
1790 else if (lowest_bit_set > 10)
1791 emit_insn (gen_rtx_SET (VOIDmode,
1792 op0,
1793 gen_rtx_ASHIFT (DImode, temp,
1794 GEN_INT (lowest_bit_set - 10))));
1795 return;
1798 /* 1) sethi %hi(low_bits), %reg
1799 * or %reg, %lo(low_bits), %reg
1800 * 2) sethi %hi(~low_bits), %reg
1801 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1803 if (high_bits == 0
1804 || high_bits == 0xffffffff)
1806 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1807 (high_bits == 0xffffffff));
1808 return;
1811 /* Now, try 3-insn sequences. */
1813 /* 1) sethi %hi(high_bits), %reg
1814 * or %reg, %lo(high_bits), %reg
1815 * sllx %reg, 32, %reg
1817 if (low_bits == 0)
1819 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1820 return;
1823 /* We may be able to do something quick
1824 when the constant is negated, so try that. */
1825 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1826 (~low_bits) & 0xfffffc00))
1828 /* NOTE: The trailing bits get XOR'd so we need the
1829 non-negated bits, not the negated ones. */
1830 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1832 if ((((~high_bits) & 0xffffffff) == 0
1833 && ((~low_bits) & 0x80000000) == 0)
1834 || (((~high_bits) & 0xffffffff) == 0xffffffff
1835 && ((~low_bits) & 0x80000000) != 0))
1837 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1839 if ((SPARC_SETHI_P (fast_int)
1840 && (~high_bits & 0xffffffff) == 0)
1841 || SPARC_SIMM13_P (fast_int))
1842 emit_insn (gen_safe_SET64 (temp, fast_int));
1843 else
1844 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1846 else
1848 rtx negated_const;
1849 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1850 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1851 sparc_emit_set_const64 (temp, negated_const);
1854 /* If we are XOR'ing with -1, then we should emit a one's complement
1855 instead. This way the combiner will notice logical operations
1856 such as ANDN later on and substitute. */
1857 if (trailing_bits == 0x3ff)
1859 emit_insn (gen_rtx_SET (VOIDmode, op0,
1860 gen_rtx_NOT (DImode, temp)));
1862 else
1864 emit_insn (gen_rtx_SET (VOIDmode,
1865 op0,
1866 gen_safe_XOR64 (temp,
1867 (-0x400 | trailing_bits))));
1869 return;
1872 /* 1) sethi %hi(xxx), %reg
1873 * or %reg, %lo(xxx), %reg
1874 * sllx %reg, yyy, %reg
1876 * ??? This is just a generalized version of the low_bits==0
1877 * thing above, FIXME...
1879 if ((highest_bit_set - lowest_bit_set) < 32)
1881 unsigned HOST_WIDE_INT focus_bits =
1882 create_simple_focus_bits (high_bits, low_bits,
1883 lowest_bit_set, 0);
1885      /* The quick cases above handle any span confined to a single
1886	 32-bit half, so the set bits here must straddle the middle
1888	 of the 64-bit word.  */
1889      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1890 sparc_emit_set_const64_quick2 (op0, temp,
1891 focus_bits, 0,
1892 lowest_bit_set);
1893 return;
1896 /* 1) sethi %hi(high_bits), %reg
1897 * or %reg, %lo(high_bits), %reg
1898 * sllx %reg, 32, %reg
1899 * or %reg, low_bits, %reg
1901  if (SPARC_SIMM13_P (low_bits)
1902      && ((int) low_bits > 0))
1904 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1905 return;
1908  /* The easiest way, when all else fails, is full decomposition. */
1909 #if 0
1910 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1911 high_bits, low_bits, ~high_bits, ~low_bits);
1912 #endif
1913 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1915 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
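
/* Worked example for the sethi+shift path above (illustrative only):
   take the constant 0x0003ffffc0000000, whose set bits occupy
   positions 30..49.  Since 49 - 30 < 21, shifting right by
   (30 - 10) = 20 produces the focus bits 0x3ffffc00, which satisfy
   SPARC_SETHI_P, so the full 64-bit constant is built in two insns:

	sethi	%hi(0x3ffffc00), %reg
	sllx	%reg, 20, %reg

   A minimal standalone sketch of the span test, assuming it mirrors
   the check above (not part of GCC):  */
#if 0
static int
fits_sethi_shift (unsigned long long c)
{
  int hi = 63, lo = 0;
  if (c == 0)
    return 0;
  while (!((c >> hi) & 1))
    hi--;			/* highest set bit */
  while (!((c >> lo) & 1))
    lo++;			/* lowest set bit */
  return hi - lo < 21;		/* span fits a sethi+shift pair */
}
#endif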
1917 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1918 return the mode to be used for the comparison. For floating-point,
1919 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1920 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1921 processing is needed. */
1923 enum machine_mode
1924 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1926 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1928 switch (op)
1930 case EQ:
1931 case NE:
1932 case UNORDERED:
1933 case ORDERED:
1934 case UNLT:
1935 case UNLE:
1936 case UNGT:
1937 case UNGE:
1938 case UNEQ:
1939 case LTGT:
1940 return CCFPmode;
1942 case LT:
1943 case LE:
1944 case GT:
1945 case GE:
1946 return CCFPEmode;
1948 default:
1949 gcc_unreachable ();
1952 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1953 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1955 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1956 return CCX_NOOVmode;
1957 else
1958 return CC_NOOVmode;
1960 else
1962 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1963 return CCXmode;
1964 else
1965 return CCmode;
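
/* For example (illustrative): an integer DImode comparison such as
   (compare (reg:DI %o0) (reg:DI %o1)) selects CCXmode on ARCH64 and
   CCmode otherwise, while a test whose first operand is an arithmetic
   result, e.g. (compare (plus:SI a b) (const_int 0)), selects
   CC_NOOVmode so that the flags set by addcc can be reused.  A
   hypothetical self-check (not part of GCC):  */
#if 0
  gcc_assert (select_cc_mode (EQ, gen_rtx_REG (DImode, 8), const0_rtx)
	      == (TARGET_ARCH64 ? CCXmode : CCmode));
#endif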
1969 /* X and Y are two things to compare using CODE. Emit the compare insn and
1970 return the rtx for the cc reg in the proper mode. */
1973 gen_compare_reg (enum rtx_code code)
1975 rtx x = sparc_compare_op0;
1976 rtx y = sparc_compare_op1;
1977 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1978 rtx cc_reg;
1980 if (sparc_compare_emitted != NULL_RTX)
1982 cc_reg = sparc_compare_emitted;
1983 sparc_compare_emitted = NULL_RTX;
1984 return cc_reg;
1987 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1988 fcc regs (cse can't tell they're really call clobbered regs and will
1989 remove a duplicate comparison even if there is an intervening function
1990 call - it will then try to reload the cc reg via an int reg which is why
1991 we need the movcc patterns). It is possible to provide the movcc
1992 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
1993 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
1994 to tell cse that CCFPE mode registers (even pseudos) are call
1995 clobbered. */
1997 /* ??? This is an experiment. Rather than making changes to cse which may
1998 or may not be easy/clean, we do our own cse. This is possible because
1999 we will generate hard registers. Cse knows they're call clobbered (it
2000 doesn't know the same thing about pseudos). If we guess wrong, no big
2001 deal, but if we win, great! */
2003 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2004 #if 1 /* experiment */
2006 int reg;
2007 /* We cycle through the registers to ensure they're all exercised. */
2008 static int next_fcc_reg = 0;
2009 /* Previous x,y for each fcc reg. */
2010 static rtx prev_args[4][2];
2012 /* Scan prev_args for x,y. */
2013 for (reg = 0; reg < 4; reg++)
2014 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2015 break;
2016 if (reg == 4)
2018 reg = next_fcc_reg;
2019 prev_args[reg][0] = x;
2020 prev_args[reg][1] = y;
2021 next_fcc_reg = (next_fcc_reg + 1) & 3;
2023 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2025 #else
2026 cc_reg = gen_reg_rtx (mode);
2027 #endif /* ! experiment */
2028 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2029 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2030 else
2031 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2033 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2034 gen_rtx_COMPARE (mode, x, y)));
2036 return cc_reg;
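
/* Typical use from a compare-and-branch expander (sketch only; the
   real callers are the conditional branch patterns in sparc.md, and
   CC below is a hypothetical local):  */
#if 0
  sparc_compare_op0 = operands[0];
  sparc_compare_op1 = operands[1];
  cc = gen_compare_reg (GT);	/* emits (set cc (compare op0 op1)) */
#endif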
2039 /* This function is used for v9 only.
2040 CODE is the code for an Scc's comparison.
2041 OPERANDS[0] is the target of the Scc insn.
2042 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2043 been generated yet).
2045 This function is needed to turn
2047 (set (reg:SI 110)
2048 (gt (reg:CCX 100 %icc)
2049 (const_int 0)))
2050 into
2051 (set (reg:SI 110)
2052 (gt:DI (reg:CCX 100 %icc)
2053 (const_int 0)))
2055  I.e.: the instruction recognizer needs to see the mode of the comparison to
2056 find the right instruction. We could use "gt:DI" right in the
2057 define_expand, but leaving it out allows us to handle DI, SI, etc.
2059 We refer to the global sparc compare operands sparc_compare_op0 and
2060 sparc_compare_op1. */
2063 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2065 if (! TARGET_ARCH64
2066 && (GET_MODE (sparc_compare_op0) == DImode
2067 || GET_MODE (operands[0]) == DImode))
2068 return 0;
2070 /* Try to use the movrCC insns. */
2071 if (TARGET_ARCH64
2072 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2073 && sparc_compare_op1 == const0_rtx
2074 && v9_regcmp_p (compare_code))
2076 rtx op0 = sparc_compare_op0;
2077 rtx temp;
2079 /* Special case for op0 != 0. This can be done with one instruction if
2080 operands[0] == sparc_compare_op0. */
2082 if (compare_code == NE
2083 && GET_MODE (operands[0]) == DImode
2084 && rtx_equal_p (op0, operands[0]))
2086 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2087 gen_rtx_IF_THEN_ELSE (DImode,
2088 gen_rtx_fmt_ee (compare_code, DImode,
2089 op0, const0_rtx),
2090 const1_rtx,
2091 operands[0])));
2092 return 1;
2095 if (reg_overlap_mentioned_p (operands[0], op0))
2097 /* Handle the case where operands[0] == sparc_compare_op0.
2098 We "early clobber" the result. */
2099 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2100 emit_move_insn (op0, sparc_compare_op0);
2103 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2104 if (GET_MODE (op0) != DImode)
2106 temp = gen_reg_rtx (DImode);
2107 convert_move (temp, op0, 0);
2109 else
2110 temp = op0;
2111 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2112 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2113 gen_rtx_fmt_ee (compare_code, DImode,
2114 temp, const0_rtx),
2115 const1_rtx,
2116 operands[0])));
2117 return 1;
2119 else
2121 operands[1] = gen_compare_reg (compare_code);
2123 switch (GET_MODE (operands[1]))
2125 case CCmode :
2126 case CCXmode :
2127 case CCFPEmode :
2128 case CCFPmode :
2129 break;
2130 default :
2131 gcc_unreachable ();
2133 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2134 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2135 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2136 gen_rtx_fmt_ee (compare_code,
2137 GET_MODE (operands[1]),
2138 operands[1], const0_rtx),
2139 const1_rtx, operands[0])));
2140 return 1;
2144 /* Emit a conditional jump insn for the v9 architecture using comparison code
2145 CODE and jump target LABEL.
2146 This function exists to take advantage of the v9 brxx insns. */
2148 void
2149 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2151 gcc_assert (sparc_compare_emitted == NULL_RTX);
2152 emit_jump_insn (gen_rtx_SET (VOIDmode,
2153 pc_rtx,
2154 gen_rtx_IF_THEN_ELSE (VOIDmode,
2155 gen_rtx_fmt_ee (code, GET_MODE (op0),
2156 op0, const0_rtx),
2157 gen_rtx_LABEL_REF (VOIDmode, label),
2158 pc_rtx)));
2161 /* Generate a DFmode part of a hard TFmode register.
2162 REG is the TFmode hard register, LOW is 1 for the
2163    low 64 bits of the register and 0 otherwise.  */
2166 gen_df_reg (rtx reg, int low)
2168 int regno = REGNO (reg);
2170 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2171 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2172 return gen_rtx_REG (DFmode, regno);
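
/* Example (illustrative): a TFmode value in %f4 occupies hard
   registers 36..39.  SPARC is big-endian, so gen_df_reg (reg, 0)
   returns the high half (reg:DF 36, i.e. %f4) and gen_df_reg (reg, 1)
   the low half (reg:DF 38, i.e. %f6).  For a TFmode value living in
   integer registers on ARCH64 each half is a single DImode register,
   hence the increment of 1 rather than 2.  */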
2175 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2176 Unlike normal calls, TFmode operands are passed by reference. It is
2177 assumed that no more than 3 operands are required. */
2179 static void
2180 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2182 rtx ret_slot = NULL, arg[3], func_sym;
2183 int i;
2185 /* We only expect to be called for conversions, unary, and binary ops. */
2186 gcc_assert (nargs == 2 || nargs == 3);
2188 for (i = 0; i < nargs; ++i)
2190 rtx this_arg = operands[i];
2191 rtx this_slot;
2193 /* TFmode arguments and return values are passed by reference. */
2194 if (GET_MODE (this_arg) == TFmode)
2196 int force_stack_temp;
2198 force_stack_temp = 0;
2199 if (TARGET_BUGGY_QP_LIB && i == 0)
2200 force_stack_temp = 1;
2202 if (GET_CODE (this_arg) == MEM
2203 && ! force_stack_temp)
2204 this_arg = XEXP (this_arg, 0);
2205 else if (CONSTANT_P (this_arg)
2206 && ! force_stack_temp)
2208 this_slot = force_const_mem (TFmode, this_arg);
2209 this_arg = XEXP (this_slot, 0);
2211 else
2213 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2215 /* Operand 0 is the return value. We'll copy it out later. */
2216 if (i > 0)
2217 emit_move_insn (this_slot, this_arg);
2218 else
2219 ret_slot = this_slot;
2221 this_arg = XEXP (this_slot, 0);
2225 arg[i] = this_arg;
2228 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2230 if (GET_MODE (operands[0]) == TFmode)
2232 if (nargs == 2)
2233 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2234 arg[0], GET_MODE (arg[0]),
2235 arg[1], GET_MODE (arg[1]));
2236 else
2237 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2238 arg[0], GET_MODE (arg[0]),
2239 arg[1], GET_MODE (arg[1]),
2240 arg[2], GET_MODE (arg[2]));
2242 if (ret_slot)
2243 emit_move_insn (operands[0], ret_slot);
2245 else
2247 rtx ret;
2249 gcc_assert (nargs == 2);
2251 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2252 GET_MODE (operands[0]), 1,
2253 arg[1], GET_MODE (arg[1]));
2255 if (ret != operands[0])
2256 emit_move_insn (operands[0], ret);
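
/* Consequently, a TFmode addition is not an in-line operation at all;
   it becomes a call that passes addresses.  A hypothetical C rendering
   of the resulting ABI call (not GCC code):  */
#if 0
extern void _Qp_add (long double *, const long double *,
		     const long double *);

static void
example (long double a, long double b, long double *c)
{
  _Qp_add (c, &a, &b);	/* result slot first, operands by reference */
}
#endif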
2260 /* Expand soft-float TFmode calls to the SPARC ABI routines.  */
2262 static void
2263 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2265 const char *func;
2267 switch (code)
2269 case PLUS:
2270 func = "_Qp_add";
2271 break;
2272 case MINUS:
2273 func = "_Qp_sub";
2274 break;
2275 case MULT:
2276 func = "_Qp_mul";
2277 break;
2278 case DIV:
2279 func = "_Qp_div";
2280 break;
2281 default:
2282 gcc_unreachable ();
2285 emit_soft_tfmode_libcall (func, 3, operands);
2288 static void
2289 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2291 const char *func;
2293 gcc_assert (code == SQRT);
2294 func = "_Qp_sqrt";
2296 emit_soft_tfmode_libcall (func, 2, operands);
2299 static void
2300 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2302 const char *func;
2304 switch (code)
2306 case FLOAT_EXTEND:
2307 switch (GET_MODE (operands[1]))
2309 case SFmode:
2310 func = "_Qp_stoq";
2311 break;
2312 case DFmode:
2313 func = "_Qp_dtoq";
2314 break;
2315 default:
2316 gcc_unreachable ();
2318 break;
2320 case FLOAT_TRUNCATE:
2321 switch (GET_MODE (operands[0]))
2323 case SFmode:
2324 func = "_Qp_qtos";
2325 break;
2326 case DFmode:
2327 func = "_Qp_qtod";
2328 break;
2329 default:
2330 gcc_unreachable ();
2332 break;
2334 case FLOAT:
2335 switch (GET_MODE (operands[1]))
2337 case SImode:
2338 func = "_Qp_itoq";
2339 break;
2340 case DImode:
2341 func = "_Qp_xtoq";
2342 break;
2343 default:
2344 gcc_unreachable ();
2346 break;
2348 case UNSIGNED_FLOAT:
2349 switch (GET_MODE (operands[1]))
2351 case SImode:
2352 func = "_Qp_uitoq";
2353 break;
2354 case DImode:
2355 func = "_Qp_uxtoq";
2356 break;
2357 default:
2358 gcc_unreachable ();
2360 break;
2362 case FIX:
2363 switch (GET_MODE (operands[0]))
2365 case SImode:
2366 func = "_Qp_qtoi";
2367 break;
2368 case DImode:
2369 func = "_Qp_qtox";
2370 break;
2371 default:
2372 gcc_unreachable ();
2374 break;
2376 case UNSIGNED_FIX:
2377 switch (GET_MODE (operands[0]))
2379 case SImode:
2380 func = "_Qp_qtoui";
2381 break;
2382 case DImode:
2383 func = "_Qp_qtoux";
2384 break;
2385 default:
2386 gcc_unreachable ();
2388 break;
2390 default:
2391 gcc_unreachable ();
2394 emit_soft_tfmode_libcall (func, 2, operands);
2397 /* Expand a hard-float TFmode operation.  All arguments must be in
2398    registers.  */
2400 static void
2401 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2403 rtx op, dest;
2405 if (GET_RTX_CLASS (code) == RTX_UNARY)
2407 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2408 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2410 else
2412 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2413 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2414 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2415 operands[1], operands[2]);
2418 if (register_operand (operands[0], VOIDmode))
2419 dest = operands[0];
2420 else
2421 dest = gen_reg_rtx (GET_MODE (operands[0]));
2423 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2425 if (dest != operands[0])
2426 emit_move_insn (operands[0], dest);
2429 void
2430 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2432 if (TARGET_HARD_QUAD)
2433 emit_hard_tfmode_operation (code, operands);
2434 else
2435 emit_soft_tfmode_binop (code, operands);
2438 void
2439 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2441 if (TARGET_HARD_QUAD)
2442 emit_hard_tfmode_operation (code, operands);
2443 else
2444 emit_soft_tfmode_unop (code, operands);
2447 void
2448 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2450 if (TARGET_HARD_QUAD)
2451 emit_hard_tfmode_operation (code, operands);
2452 else
2453 emit_soft_tfmode_cvt (code, operands);
2456 /* Return nonzero if a branch/jump/call instruction will emit a
2457    nop into its delay slot.  */
2460 empty_delay_slot (rtx insn)
2462 rtx seq;
2464  /* If there is no previous instruction (should not happen), return true. */
2465 if (PREV_INSN (insn) == NULL)
2466 return 1;
2468 seq = NEXT_INSN (PREV_INSN (insn));
2469 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2470 return 0;
2472 return 1;
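
/* When the slot has been filled, the branch and its slotted insn are
   wrapped together in a SEQUENCE rtx, and NEXT_INSN (PREV_INSN (insn))
   then yields that wrapper rather than INSN itself, which is exactly
   what the test above detects.  */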
2475 /* Return nonzero if TRIAL can go into the call delay slot. */
2478 tls_call_delay (rtx trial)
2480 rtx pat;
2482 /* Binutils allows
2483 call __tls_get_addr, %tgd_call (foo)
2484 add %l7, %o0, %o0, %tgd_add (foo)
2485 while Sun as/ld does not. */
2486 if (TARGET_GNU_TLS || !TARGET_TLS)
2487 return 1;
2489 pat = PATTERN (trial);
2491 /* We must reject tgd_add{32|64}, i.e.
2492 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2493 and tldm_add{32|64}, i.e.
2494 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2495 for Sun as/ld. */
2496 if (GET_CODE (pat) == SET
2497 && GET_CODE (SET_SRC (pat)) == PLUS)
2499 rtx unspec = XEXP (SET_SRC (pat), 1);
2501 if (GET_CODE (unspec) == UNSPEC
2502 && (XINT (unspec, 1) == UNSPEC_TLSGD
2503 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2504 return 0;
2507 return 1;
2510 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2511 instruction. RETURN_P is true if the v9 variant 'return' is to be
2512 considered in the test too.
2514 TRIAL must be a SET whose destination is a REG appropriate for the
2515 'restore' instruction or, if RETURN_P is true, for the 'return'
2516 instruction. */
2518 static int
2519 eligible_for_restore_insn (rtx trial, bool return_p)
2521 rtx pat = PATTERN (trial);
2522 rtx src = SET_SRC (pat);
2524 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2525 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2526 && arith_operand (src, GET_MODE (src)))
2528 if (TARGET_ARCH64)
2529 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2530 else
2531 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2534 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2535 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2536 && arith_double_operand (src, GET_MODE (src)))
2537 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2539 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2540 else if (! TARGET_FPU && register_operand (src, SFmode))
2541 return 1;
2543 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2544 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2545 return 1;
2547 /* If we have the 'return' instruction, anything that does not use
2548 local or output registers and can go into a delay slot wins. */
2549 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2550 && (get_attr_in_uncond_branch_delay (trial)
2551 == IN_UNCOND_BRANCH_DELAY_TRUE))
2552 return 1;
2554 /* The 'restore src1,src2,dest' pattern for SImode. */
2555 else if (GET_CODE (src) == PLUS
2556 && register_operand (XEXP (src, 0), SImode)
2557 && arith_operand (XEXP (src, 1), SImode))
2558 return 1;
2560 /* The 'restore src1,src2,dest' pattern for DImode. */
2561 else if (GET_CODE (src) == PLUS
2562 && register_operand (XEXP (src, 0), DImode)
2563 && arith_double_operand (XEXP (src, 1), DImode))
2564 return 1;
2566 /* The 'restore src1,%lo(src2),dest' pattern. */
2567 else if (GET_CODE (src) == LO_SUM
2568 && ! TARGET_CM_MEDMID
2569 && ((register_operand (XEXP (src, 0), SImode)
2570 && immediate_operand (XEXP (src, 1), SImode))
2571 || (TARGET_ARCH64
2572 && register_operand (XEXP (src, 0), DImode)
2573 && immediate_operand (XEXP (src, 1), DImode))))
2574 return 1;
2576 /* The 'restore src,src,dest' pattern. */
2577 else if (GET_CODE (src) == ASHIFT
2578 && (register_operand (XEXP (src, 0), SImode)
2579 || register_operand (XEXP (src, 0), DImode))
2580 && XEXP (src, 1) == const1_rtx)
2581 return 1;
2583 return 0;
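
/* For instance, a final addition that feeds the return value can be
   folded into the epilogue as (illustrative):

	ret
	 restore %o1, %o2, %o0

   which adds the callee's %o1 and %o2 while the window is being
   restored and leaves the sum in the caller's %o0.  */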
2586 /* Return nonzero if TRIAL can go into the function return's
2587 delay slot. */
2590 eligible_for_return_delay (rtx trial)
2592 rtx pat;
2594 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2595 return 0;
2597 if (get_attr_length (trial) != 1)
2598 return 0;
2600  /* If there are any call-saved registers, we would need to scan TRIAL
2601     to make sure it does not reference any of them.  For now, play safe.  */
2602 if (num_gfregs)
2603 return 0;
2605 /* If the function uses __builtin_eh_return, the eh_return machinery
2606 occupies the delay slot. */
2607 if (current_function_calls_eh_return)
2608 return 0;
2610 /* In the case of a true leaf function, anything can go into the slot. */
2611 if (sparc_leaf_function_p)
2612 return get_attr_in_uncond_branch_delay (trial)
2613 == IN_UNCOND_BRANCH_DELAY_TRUE;
2615 pat = PATTERN (trial);
2617 /* Otherwise, only operations which can be done in tandem with
2618 a `restore' or `return' insn can go into the delay slot. */
2619 if (GET_CODE (SET_DEST (pat)) != REG
2620 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2621 return 0;
2623  /* If this instruction sets up a floating-point register and we have a
2624     return instruction, it can probably go in.  But restore will not
2625     work with FP_REGS.  */
2626 if (REGNO (SET_DEST (pat)) >= 32)
2627 return (TARGET_V9
2628 && ! epilogue_renumber (&pat, 1)
2629 && (get_attr_in_uncond_branch_delay (trial)
2630 == IN_UNCOND_BRANCH_DELAY_TRUE));
2632 return eligible_for_restore_insn (trial, true);
2635 /* Return nonzero if TRIAL can go into the sibling call's
2636 delay slot. */
2639 eligible_for_sibcall_delay (rtx trial)
2641 rtx pat;
2643 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2644 return 0;
2646 if (get_attr_length (trial) != 1)
2647 return 0;
2649 pat = PATTERN (trial);
2651 if (sparc_leaf_function_p)
2653 /* If the tail call is done using the call instruction,
2654 we have to restore %o7 in the delay slot. */
2655 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2656 return 0;
2658      /* %g1 is used to build the function address.  */
2659 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2660 return 0;
2662 return 1;
2665 /* Otherwise, only operations which can be done in tandem with
2666 a `restore' insn can go into the delay slot. */
2667 if (GET_CODE (SET_DEST (pat)) != REG
2668 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2669 || REGNO (SET_DEST (pat)) >= 32)
2670 return 0;
2672 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2673 in most cases. */
2674 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2675 return 0;
2677 return eligible_for_restore_insn (trial, false);
2681 short_branch (int uid1, int uid2)
2683 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2685 /* Leave a few words of "slop". */
2686 if (delta >= -1023 && delta <= 1022)
2687 return 1;
2689 return 0;
2692 /* Return nonzero if REG is not used after INSN.
2693 We assume REG is a reload reg, and therefore does
2694 not live past labels or calls or jumps. */
2696 reg_unused_after (rtx reg, rtx insn)
2698 enum rtx_code code, prev_code = UNKNOWN;
2700 while ((insn = NEXT_INSN (insn)))
2702 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2703 return 1;
2705 code = GET_CODE (insn);
2706 if (GET_CODE (insn) == CODE_LABEL)
2707 return 1;
2709 if (INSN_P (insn))
2711 rtx set = single_set (insn);
2712 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2713 if (set && in_src)
2714 return 0;
2715 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2716 return 1;
2717 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2718 return 0;
2720 prev_code = code;
2722 return 1;
2725 /* Determine if it's legal to put X into the constant pool. This
2726 is not possible if X contains the address of a symbol that is
2727 not constant (TLS) or not known at final link time (PIC). */
2729 static bool
2730 sparc_cannot_force_const_mem (rtx x)
2732 switch (GET_CODE (x))
2734 case CONST_INT:
2735 case CONST_DOUBLE:
2736 case CONST_VECTOR:
2737 /* Accept all non-symbolic constants. */
2738 return false;
2740 case LABEL_REF:
2741 /* Labels are OK iff we are non-PIC. */
2742 return flag_pic != 0;
2744 case SYMBOL_REF:
2745      /* 'Naked' TLS symbol references are never OK;
2746	 non-TLS symbols are OK iff we are non-PIC.  */
2747 if (SYMBOL_REF_TLS_MODEL (x))
2748 return true;
2749 else
2750 return flag_pic != 0;
2752 case CONST:
2753 return sparc_cannot_force_const_mem (XEXP (x, 0));
2754 case PLUS:
2755 case MINUS:
2756 return sparc_cannot_force_const_mem (XEXP (x, 0))
2757 || sparc_cannot_force_const_mem (XEXP (x, 1));
2758 case UNSPEC:
2759 return true;
2760 default:
2761 gcc_unreachable ();
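
/* Examples (illustrative): (const_int 1234) or a CONST_DOUBLE can
   always be forced into the pool; (symbol_ref "x") can be pooled only
   in non-PIC code, since PIC would need a run-time relocation; and a
   TLS symbol_ref, or an UNSPEC wrapping one, is always rejected
   because its value is not a link-time constant.  */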
2765 /* PIC support. */
2766 static GTY(()) char pic_helper_symbol_name[256];
2767 static GTY(()) rtx pic_helper_symbol;
2768 static GTY(()) bool pic_helper_emitted_p = false;
2769 static GTY(()) rtx global_offset_table;
2771 /* Ensure that we are not using patterns that are not OK with PIC. */
2774 check_pic (int i)
2776 switch (flag_pic)
2778 case 1:
2779 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2780 && (GET_CODE (recog_data.operand[i]) != CONST
2781 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2782 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2783 == global_offset_table)
2784 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2785 == CONST))));
2786 case 2:
2787 default:
2788 return 1;
2792 /* Return true if X is an address which needs a temporary register when
2793 reloaded while generating PIC code. */
2796 pic_address_needs_scratch (rtx x)
2798  /* An address which is a symbolic operand plus a non-SMALL_INT constant needs a temp reg. */
2799 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2800 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2801 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2802 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2803 return 1;
2805 return 0;
2808 /* Determine if a given RTX is a valid constant. We already know this
2809 satisfies CONSTANT_P. */
2811 bool
2812 legitimate_constant_p (rtx x)
2814 rtx inner;
2816 switch (GET_CODE (x))
2818 case SYMBOL_REF:
2819 /* TLS symbols are not constant. */
2820 if (SYMBOL_REF_TLS_MODEL (x))
2821 return false;
2822 break;
2824 case CONST:
2825 inner = XEXP (x, 0);
2827 /* Offsets of TLS symbols are never valid.
2828 Discourage CSE from creating them. */
2829 if (GET_CODE (inner) == PLUS
2830 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2831 return false;
2832 break;
2834 case CONST_DOUBLE:
2835 if (GET_MODE (x) == VOIDmode)
2836 return true;
2838 /* Floating point constants are generally not ok.
2839 The only exception is 0.0 in VIS. */
2840 if (TARGET_VIS
2841 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2842 && const_zero_operand (x, GET_MODE (x)))
2843 return true;
2845 return false;
2847 case CONST_VECTOR:
2848 /* Vector constants are generally not ok.
2849 The only exception is 0 in VIS. */
2850 if (TARGET_VIS
2851 && const_zero_operand (x, GET_MODE (x)))
2852 return true;
2854 return false;
2856 default:
2857 break;
2860 return true;
2863 /* Determine if a given RTX is a valid constant address. */
2865 bool
2866 constant_address_p (rtx x)
2868 switch (GET_CODE (x))
2870 case LABEL_REF:
2871 case CONST_INT:
2872 case HIGH:
2873 return true;
2875 case CONST:
2876 if (flag_pic && pic_address_needs_scratch (x))
2877 return false;
2878 return legitimate_constant_p (x);
2880 case SYMBOL_REF:
2881 return !flag_pic && legitimate_constant_p (x);
2883 default:
2884 return false;
2888 /* Nonzero if the constant value X is a legitimate general operand
2889 when generating PIC code. It is given that flag_pic is on and
2890 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2892 bool
2893 legitimate_pic_operand_p (rtx x)
2895 if (pic_address_needs_scratch (x))
2896 return false;
2897 if (SPARC_SYMBOL_REF_TLS_P (x)
2898 || (GET_CODE (x) == CONST
2899 && GET_CODE (XEXP (x, 0)) == PLUS
2900 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2901 return false;
2902 return true;
2905 /* Return nonzero if ADDR is a valid memory address.
2906 STRICT specifies whether strict register checking applies. */
2909 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2911 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2913 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2914 rs1 = addr;
2915 else if (GET_CODE (addr) == PLUS)
2917 rs1 = XEXP (addr, 0);
2918 rs2 = XEXP (addr, 1);
2920      /* Canonicalize.  REG comes first; if there are no regs,
2921	 LO_SUM comes first.  */
2922 if (!REG_P (rs1)
2923 && GET_CODE (rs1) != SUBREG
2924 && (REG_P (rs2)
2925 || GET_CODE (rs2) == SUBREG
2926 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2928 rs1 = XEXP (addr, 1);
2929 rs2 = XEXP (addr, 0);
2932 if ((flag_pic == 1
2933 && rs1 == pic_offset_table_rtx
2934 && !REG_P (rs2)
2935 && GET_CODE (rs2) != SUBREG
2936 && GET_CODE (rs2) != LO_SUM
2937 && GET_CODE (rs2) != MEM
2938 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2939 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2940 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2941 || ((REG_P (rs1)
2942 || GET_CODE (rs1) == SUBREG)
2943 && RTX_OK_FOR_OFFSET_P (rs2)))
2945 imm1 = rs2;
2946 rs2 = NULL;
2948 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2949 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2951 /* We prohibit REG + REG for TFmode when there are no quad move insns
2952 and we consequently need to split. We do this because REG+REG
2953 is not an offsettable address. If we get the situation in reload
2954 where source and destination of a movtf pattern are both MEMs with
2955 REG+REG address, then only one of them gets converted to an
2956 offsettable address. */
2957 if (mode == TFmode
2958 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2959 return 0;
2961      /* We prohibit REG + REG for DFmode and DImode on ARCH32 when not
2962	 optimizing, because mem_min_alignment is then likely to be zero
2963	 after reload and the forced split would lack a matching splitter
2964	 pattern.  */
2965 if (TARGET_ARCH32 && !optimize
2966 && (mode == DFmode || mode == DImode))
2967 return 0;
2969 else if (USE_AS_OFFSETABLE_LO10
2970 && GET_CODE (rs1) == LO_SUM
2971 && TARGET_ARCH64
2972 && ! TARGET_CM_MEDMID
2973 && RTX_OK_FOR_OLO10_P (rs2))
2975 rs2 = NULL;
2976 imm1 = XEXP (rs1, 1);
2977 rs1 = XEXP (rs1, 0);
2978 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2979 return 0;
2982 else if (GET_CODE (addr) == LO_SUM)
2984 rs1 = XEXP (addr, 0);
2985 imm1 = XEXP (addr, 1);
2987 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2988 return 0;
2990 /* We can't allow TFmode in 32-bit mode, because an offset greater
2991 than the alignment (8) may cause the LO_SUM to overflow. */
2992 if (mode == TFmode && TARGET_ARCH32)
2993 return 0;
2995 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2996 return 1;
2997 else
2998 return 0;
3000 if (GET_CODE (rs1) == SUBREG)
3001 rs1 = SUBREG_REG (rs1);
3002 if (!REG_P (rs1))
3003 return 0;
3005 if (rs2)
3007 if (GET_CODE (rs2) == SUBREG)
3008 rs2 = SUBREG_REG (rs2);
3009 if (!REG_P (rs2))
3010 return 0;
3013 if (strict)
3015 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3016 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3017 return 0;
3019 else
3021 if ((REGNO (rs1) >= 32
3022 && REGNO (rs1) != FRAME_POINTER_REGNUM
3023 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3024 || (rs2
3025 && (REGNO (rs2) >= 32
3026 && REGNO (rs2) != FRAME_POINTER_REGNUM
3027 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3028 return 0;
3030 return 1;
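
/* The address shapes accepted above are thus (illustrative examples):

	(reg %o0)			ld  [%o0], ...
	(plus (reg %o0) (const_int 64))	ld  [%o0+64], ...  SIMM13 offset
	(plus (reg %o0) (reg %o1))	ld  [%o0+%o1], ...
	(lo_sum (reg %o0) (symbol x))	ld  [%o0+%lo(x)], ...

   subject to the mode, PIC and TLS restrictions tested above.  */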
3033 /* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3035 static GTY(()) rtx sparc_tls_symbol;
3037 static rtx
3038 sparc_tls_get_addr (void)
3040 if (!sparc_tls_symbol)
3041 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3043 return sparc_tls_symbol;
3046 static rtx
3047 sparc_tls_got (void)
3049 rtx temp;
3050 if (flag_pic)
3052 current_function_uses_pic_offset_table = 1;
3053 return pic_offset_table_rtx;
3056 if (!global_offset_table)
3057 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3058 temp = gen_reg_rtx (Pmode);
3059 emit_move_insn (temp, global_offset_table);
3060 return temp;
3063 /* Return 1 if *X is a thread-local symbol. */
3065 static int
3066 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3068 return SPARC_SYMBOL_REF_TLS_P (*x);
3071 /* Return 1 if X contains a thread-local symbol. */
3073 bool
3074 sparc_tls_referenced_p (rtx x)
3076 if (!TARGET_HAVE_TLS)
3077 return false;
3079 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3082 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3083 this (thread-local) address. */
3086 legitimize_tls_address (rtx addr)
3088 rtx temp1, temp2, temp3, ret, o0, got, insn;
3090 gcc_assert (can_create_pseudo_p ());
3092 if (GET_CODE (addr) == SYMBOL_REF)
3093 switch (SYMBOL_REF_TLS_MODEL (addr))
3095 case TLS_MODEL_GLOBAL_DYNAMIC:
3096 start_sequence ();
3097 temp1 = gen_reg_rtx (SImode);
3098 temp2 = gen_reg_rtx (SImode);
3099 ret = gen_reg_rtx (Pmode);
3100 o0 = gen_rtx_REG (Pmode, 8);
3101 got = sparc_tls_got ();
3102 emit_insn (gen_tgd_hi22 (temp1, addr));
3103 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3104 if (TARGET_ARCH32)
3106 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3107 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3108 addr, const1_rtx));
3110 else
3112 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3113 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3114 addr, const1_rtx));
3116 CALL_INSN_FUNCTION_USAGE (insn)
3117 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3118 CALL_INSN_FUNCTION_USAGE (insn));
3119 insn = get_insns ();
3120 end_sequence ();
3121 emit_libcall_block (insn, ret, o0, addr);
3122 break;
3124 case TLS_MODEL_LOCAL_DYNAMIC:
3125 start_sequence ();
3126 temp1 = gen_reg_rtx (SImode);
3127 temp2 = gen_reg_rtx (SImode);
3128 temp3 = gen_reg_rtx (Pmode);
3129 ret = gen_reg_rtx (Pmode);
3130 o0 = gen_rtx_REG (Pmode, 8);
3131 got = sparc_tls_got ();
3132 emit_insn (gen_tldm_hi22 (temp1));
3133 emit_insn (gen_tldm_lo10 (temp2, temp1));
3134 if (TARGET_ARCH32)
3136 emit_insn (gen_tldm_add32 (o0, got, temp2));
3137 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3138 const1_rtx));
3140 else
3142 emit_insn (gen_tldm_add64 (o0, got, temp2));
3143 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3144 const1_rtx));
3146 CALL_INSN_FUNCTION_USAGE (insn)
3147 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3148 CALL_INSN_FUNCTION_USAGE (insn));
3149 insn = get_insns ();
3150 end_sequence ();
3151 emit_libcall_block (insn, temp3, o0,
3152 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3153 UNSPEC_TLSLD_BASE));
3154 temp1 = gen_reg_rtx (SImode);
3155 temp2 = gen_reg_rtx (SImode);
3156 emit_insn (gen_tldo_hix22 (temp1, addr));
3157 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3158 if (TARGET_ARCH32)
3159 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3160 else
3161 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3162 break;
3164 case TLS_MODEL_INITIAL_EXEC:
3165 temp1 = gen_reg_rtx (SImode);
3166 temp2 = gen_reg_rtx (SImode);
3167 temp3 = gen_reg_rtx (Pmode);
3168 got = sparc_tls_got ();
3169 emit_insn (gen_tie_hi22 (temp1, addr));
3170 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3171 if (TARGET_ARCH32)
3172 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3173 else
3174 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3175 if (TARGET_SUN_TLS)
3177 ret = gen_reg_rtx (Pmode);
3178 if (TARGET_ARCH32)
3179 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3180 temp3, addr));
3181 else
3182 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3183 temp3, addr));
3185 else
3186 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3187 break;
3189 case TLS_MODEL_LOCAL_EXEC:
3190 temp1 = gen_reg_rtx (Pmode);
3191 temp2 = gen_reg_rtx (Pmode);
3192 if (TARGET_ARCH32)
3194 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3195 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3197 else
3199 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3200 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3202 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3203 break;
3205 default:
3206 gcc_unreachable ();
3209 else
3210 gcc_unreachable (); /* for now ... */
3212 return ret;
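
/* For the global-dynamic model on 32-bit, the code above boils down to
   the canonical access sequence (sketch; temporary register names are
   illustrative and %l7 assumes the usual PIC register):

	sethi	%tgd_hi22(sym), %t1
	add	%t1, %tgd_lo10(sym), %t2
	add	%l7, %t2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)
	 nop

   with the result in %o0 tied back to SYM through emit_libcall_block
   so that equivalent accesses can be CSEd.  */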
3216 /* Legitimize PIC addresses. If the address is already position-independent,
3217 we return ORIG. Newly generated position-independent addresses go into a
3218 reg. This is REG if nonzero, otherwise we allocate register(s) as
3219 necessary. */
3222 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3223 rtx reg)
3225 if (GET_CODE (orig) == SYMBOL_REF
3226 /* See the comment in sparc_expand_move. */
3227 || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
3229 rtx pic_ref, address;
3230 rtx insn;
3232 if (reg == 0)
3234 gcc_assert (! reload_in_progress && ! reload_completed);
3235 reg = gen_reg_rtx (Pmode);
3238 if (flag_pic == 2)
3240 /* If not during reload, allocate another temp reg here for loading
3241 in the address, so that these instructions can be optimized
3242 properly. */
3243 rtx temp_reg = ((reload_in_progress || reload_completed)
3244 ? reg : gen_reg_rtx (Pmode));
3246 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3247 won't get confused into thinking that these two instructions
3248 are loading in the true address of the symbol. If in the
3249 future a PIC rtx exists, that should be used instead. */
3250 if (TARGET_ARCH64)
3252 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3253 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3255 else
3257 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3258 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3260 address = temp_reg;
3262 else
3263 address = orig;
3265 pic_ref = gen_const_mem (Pmode,
3266 gen_rtx_PLUS (Pmode,
3267 pic_offset_table_rtx, address));
3268 current_function_uses_pic_offset_table = 1;
3269 insn = emit_move_insn (reg, pic_ref);
3270      /* Put a REG_EQUAL note on this insn, so that it can be optimized
3271	 by the loop pass.  */
3272 set_unique_reg_note (insn, REG_EQUAL, orig);
3273 return reg;
3275 else if (GET_CODE (orig) == CONST)
3277 rtx base, offset;
3279 if (GET_CODE (XEXP (orig, 0)) == PLUS
3280 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3281 return orig;
3283 if (reg == 0)
3285 gcc_assert (! reload_in_progress && ! reload_completed);
3286 reg = gen_reg_rtx (Pmode);
3289 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3290 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3291 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3292 base == reg ? 0 : reg);
3294 if (GET_CODE (offset) == CONST_INT)
3296 if (SMALL_INT (offset))
3297 return plus_constant (base, INTVAL (offset));
3298 else if (! reload_in_progress && ! reload_completed)
3299 offset = force_reg (Pmode, offset);
3300 else
3301 /* If we reach here, then something is seriously wrong. */
3302 gcc_unreachable ();
3304 return gen_rtx_PLUS (Pmode, base, offset);
3306 else if (GET_CODE (orig) == LABEL_REF)
3307 /* ??? Why do we do this? */
3308 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3309 the register is live instead, in case it is eliminated. */
3310 current_function_uses_pic_offset_table = 1;
3312 return orig;
3315 /* Try machine-dependent ways of modifying an illegitimate address X
3316 to be legitimate. If we find one, return the new, valid address.
3318 OLDX is the address as it was before break_out_memory_refs was called.
3319 In some cases it is useful to look at this to decide what needs to be done.
3321 MODE is the mode of the operand pointed to by X. */
3324 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3326 rtx orig_x = x;
3328 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3329 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3330 force_operand (XEXP (x, 0), NULL_RTX));
3331 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3332 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3333 force_operand (XEXP (x, 1), NULL_RTX));
3334 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3335 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3336 XEXP (x, 1));
3337 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3338 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3339 force_operand (XEXP (x, 1), NULL_RTX));
3341 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3342 return x;
3344 if (SPARC_SYMBOL_REF_TLS_P (x))
3345 x = legitimize_tls_address (x);
3346 else if (flag_pic)
3347 x = legitimize_pic_address (x, mode, 0);
3348 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3349 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3350 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3351 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3352 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3353 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3354 else if (GET_CODE (x) == SYMBOL_REF
3355 || GET_CODE (x) == CONST
3356 || GET_CODE (x) == LABEL_REF)
3357 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3358 return x;
3361 /* Emit the special PIC helper function. */
3363 static void
3364 emit_pic_helper (void)
3366 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3367 int align;
3369 switch_to_section (text_section);
3371 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3372 if (align > 0)
3373 ASM_OUTPUT_ALIGN (asm_out_file, align);
3374 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3375 if (flag_delayed_branch)
3376 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3377 pic_name, pic_name);
3378 else
3379 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3380 pic_name, pic_name);
3382 pic_helper_emitted_p = true;
3385 /* Emit code to load the PIC register. */
3387 static void
3388 load_pic_register (bool delay_pic_helper)
3390 int orig_flag_pic = flag_pic;
3392 if (TARGET_VXWORKS_RTP)
3394 emit_insn (gen_vxworks_load_got ());
3395 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3396 return;
3399 /* If we haven't initialized the special PIC symbols, do so now. */
3400 if (!pic_helper_symbol_name[0])
3402 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3403 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3404 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3407 /* If we haven't emitted the special PIC helper function, do so now unless
3408 we are requested to delay it. */
3409 if (!delay_pic_helper && !pic_helper_emitted_p)
3410 emit_pic_helper ();
3412 flag_pic = 0;
3413 if (TARGET_ARCH64)
3414 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3415 pic_helper_symbol));
3416 else
3417 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3418 pic_helper_symbol));
3419 flag_pic = orig_flag_pic;
3421 /* Need to emit this whether or not we obey regdecls,
3422 since setjmp/longjmp can cause life info to screw up.
3423 ??? In the case where we don't obey regdecls, this is not sufficient
3424 since we may not fall out the bottom. */
3425 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
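
/* On 32-bit the net effect is the classic position-independent GOT
   setup (sketch; the label and the exact -4/+4 offsets are generated
   internally by the load_pcrel_sym pattern):

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	.LADDPC0
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the helper emitted above simply executes
   "jmp %o7+8; add %o7, %l7, %l7", leaving the GOT address in %l7.  */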
3428 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3429 address of the call target. */
3431 void
3432 sparc_emit_call_insn (rtx pat, rtx addr)
3434 rtx insn;
3436 insn = emit_call_insn (pat);
3438 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3439 if (TARGET_VXWORKS_RTP
3440 && flag_pic
3441 && GET_CODE (addr) == SYMBOL_REF
3442 && (SYMBOL_REF_DECL (addr)
3443 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3444 : !SYMBOL_REF_LOCAL_P (addr)))
3446 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3447 current_function_uses_pic_offset_table = 1;
3451 /* Return 1 if RTX is a MEM which is known to be aligned to at
3452 least a DESIRED byte boundary. */
3455 mem_min_alignment (rtx mem, int desired)
3457 rtx addr, base, offset;
3459 /* If it's not a MEM we can't accept it. */
3460 if (GET_CODE (mem) != MEM)
3461 return 0;
3463 /* Obviously... */
3464 if (!TARGET_UNALIGNED_DOUBLES
3465 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3466 return 1;
3468 /* ??? The rest of the function predates MEM_ALIGN so
3469 there is probably a bit of redundancy. */
3470 addr = XEXP (mem, 0);
3471 base = offset = NULL_RTX;
3472 if (GET_CODE (addr) == PLUS)
3474 if (GET_CODE (XEXP (addr, 0)) == REG)
3476 base = XEXP (addr, 0);
3478	      /* What we are saying here is that if the base
3479		 REG is properly aligned, the compiler will ensure
3480		 that any REG-based index off of it is aligned
3481		 as well.  */
3482 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3483 offset = XEXP (addr, 1);
3484 else
3485 offset = const0_rtx;
3488 else if (GET_CODE (addr) == REG)
3490 base = addr;
3491 offset = const0_rtx;
3494 if (base != NULL_RTX)
3496 int regno = REGNO (base);
3498 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3500 /* Check if the compiler has recorded some information
3501 about the alignment of the base REG. If reload has
3502 completed, we already matched with proper alignments.
3503 If not running global_alloc, reload might give us
3504	     an unaligned pointer to the local stack, though.  */
3505 if (((cfun != 0
3506 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3507 || (optimize && reload_completed))
3508 && (INTVAL (offset) & (desired - 1)) == 0)
3509 return 1;
3511 else
3513 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3514 return 1;
3517 else if (! TARGET_UNALIGNED_DOUBLES
3518 || CONSTANT_P (addr)
3519 || GET_CODE (addr) == LO_SUM)
3521 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3522 is true, in which case we can only assume that an access is aligned if
3523 it is to a constant address, or the address involves a LO_SUM. */
3524 return 1;
3527 /* An obviously unaligned address. */
3528 return 0;
3532 /* Vectors to keep interesting information about registers where it can easily
3533    be found.  We used to use the actual mode value as the bit number, but there
3534    are more than 32 modes now.  Instead we use two tables: one indexed by
3535    hard register number, and one indexed by mode.  */
3537 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3538 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3539 mapped into one sparc_mode_class mode. */
3541 enum sparc_mode_class {
3542 S_MODE, D_MODE, T_MODE, O_MODE,
3543 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3544 CC_MODE, CCFP_MODE
3547 /* Modes for single-word and smaller quantities. */
3548 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3550 /* Modes for double-word and smaller quantities. */
3551 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3553 /* Modes for quad-word and smaller quantities. */
3554 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3556 /* Modes for 8-word and smaller quantities. */
3557 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3559 /* Modes for single-float quantities. We must allow any single word or
3560 smaller quantity. This is because the fix/float conversion instructions
3561 take integer inputs/outputs from the float registers. */
3562 #define SF_MODES (S_MODES)
3564 /* Modes for double-float and smaller quantities. */
3565 #define DF_MODES (S_MODES | D_MODES)
3567 /* Modes for double-float only quantities. */
3568 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3570 /* Modes for quad-float only quantities. */
3571 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3573 /* Modes for quad-float and smaller quantities. */
3574 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3576 /* Modes for quad-float and double-float quantities. */
3577 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3579 /* Modes for quad-float pair only quantities. */
3580 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3582 /* Modes for quad-float pairs and smaller quantities. */
3583 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3585 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3587 /* Modes for condition codes. */
3588 #define CC_MODES (1 << (int) CC_MODE)
3589 #define CCFP_MODES (1 << (int) CCFP_MODE)
3591 /* Value is 1 if register/mode pair is acceptable on sparc.
3592 The funny mixture of D and T modes is because integer operations
3593 do not specially operate on tetra quantities, so non-quad-aligned
3594 registers can hold quadword quantities (except %o4 and %i4 because
3595 they cross fixed registers). */
3597 /* This points to either the 32 bit or the 64 bit version. */
3598 const int *hard_regno_mode_classes;
3600 static const int hard_32bit_mode_classes[] = {
3601 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3602 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3603 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3604 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3606 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3607 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3608 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3609 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3611 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3612 and none can hold SFmode/SImode values. */
3613 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3614 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3615 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3616 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3618 /* %fcc[0123] */
3619 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3621 /* %icc */
3622 CC_MODES
3625 static const int hard_64bit_mode_classes[] = {
3626 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3627 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3628 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3629 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3631 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3632 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3633 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3634 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3636 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3637 and none can hold SFmode/SImode values. */
3638 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3639 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3640 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3641 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3643 /* %fcc[0123] */
3644 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3646 /* %icc */
3647 CC_MODES
3650 int sparc_mode_class [NUM_MACHINE_MODES];
3652 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3654 static void
3655 sparc_init_modes (void)
3657 int i;
3659 for (i = 0; i < NUM_MACHINE_MODES; i++)
3661 switch (GET_MODE_CLASS (i))
3663 case MODE_INT:
3664 case MODE_PARTIAL_INT:
3665 case MODE_COMPLEX_INT:
3666 if (GET_MODE_SIZE (i) <= 4)
3667 sparc_mode_class[i] = 1 << (int) S_MODE;
3668 else if (GET_MODE_SIZE (i) == 8)
3669 sparc_mode_class[i] = 1 << (int) D_MODE;
3670 else if (GET_MODE_SIZE (i) == 16)
3671 sparc_mode_class[i] = 1 << (int) T_MODE;
3672 else if (GET_MODE_SIZE (i) == 32)
3673 sparc_mode_class[i] = 1 << (int) O_MODE;
3674 else
3675 sparc_mode_class[i] = 0;
3676 break;
3677 case MODE_VECTOR_INT:
3678 if (GET_MODE_SIZE (i) <= 4)
3679 sparc_mode_class[i] = 1 << (int)SF_MODE;
3680 else if (GET_MODE_SIZE (i) == 8)
3681 sparc_mode_class[i] = 1 << (int)DF_MODE;
3682 break;
3683 case MODE_FLOAT:
3684 case MODE_COMPLEX_FLOAT:
3685 if (GET_MODE_SIZE (i) <= 4)
3686 sparc_mode_class[i] = 1 << (int) SF_MODE;
3687 else if (GET_MODE_SIZE (i) == 8)
3688 sparc_mode_class[i] = 1 << (int) DF_MODE;
3689 else if (GET_MODE_SIZE (i) == 16)
3690 sparc_mode_class[i] = 1 << (int) TF_MODE;
3691 else if (GET_MODE_SIZE (i) == 32)
3692 sparc_mode_class[i] = 1 << (int) OF_MODE;
3693 else
3694 sparc_mode_class[i] = 0;
3695 break;
3696 case MODE_CC:
3697 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3698 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3699 else
3700 sparc_mode_class[i] = 1 << (int) CC_MODE;
3701 break;
3702 default:
3703 sparc_mode_class[i] = 0;
3704 break;
3708 if (TARGET_ARCH64)
3709 hard_regno_mode_classes = hard_64bit_mode_classes;
3710 else
3711 hard_regno_mode_classes = hard_32bit_mode_classes;
3713 /* Initialize the array used by REGNO_REG_CLASS. */
3714 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3716 if (i < 16 && TARGET_V8PLUS)
3717 sparc_regno_reg_class[i] = I64_REGS;
3718 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3719 sparc_regno_reg_class[i] = GENERAL_REGS;
3720 else if (i < 64)
3721 sparc_regno_reg_class[i] = FP_REGS;
3722 else if (i < 96)
3723 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3724 else if (i < 100)
3725 sparc_regno_reg_class[i] = FPCC_REGS;
3726 else
3727 sparc_regno_reg_class[i] = NO_REGS;
3731 /* Compute the frame size required by the function. This function is called
3732 during the reload pass and also by sparc_expand_prologue. */
3734 HOST_WIDE_INT
3735 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3737 int outgoing_args_size = (current_function_outgoing_args_size
3738 + REG_PARM_STACK_SPACE (current_function_decl));
3739 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3740 int i;
3742 if (TARGET_ARCH64)
3744 for (i = 0; i < 8; i++)
3745 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3746 n_regs += 2;
3748 else
3750 for (i = 0; i < 8; i += 2)
3751 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3752 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3753 n_regs += 2;
3756 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3757 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
3758 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
3759 n_regs += 2;
3761 /* Set up values for use in prologue and epilogue. */
3762 num_gfregs = n_regs;
3764 if (leaf_function_p
3765 && n_regs == 0
3766 && size == 0
3767 && current_function_outgoing_args_size == 0)
3768 actual_fsize = apparent_fsize = 0;
3769 else
3771 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3772 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3773 apparent_fsize += n_regs * 4;
3774 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3777 /* Make sure nothing can clobber our register windows.
3778 If a SAVE must be done, or there is a stack-local variable,
3779 the register window area must be allocated. */
3780 if (! leaf_function_p || size > 0)
3781 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3783 return SPARC_STACK_ALIGN (actual_fsize);
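
/* Illustrative summary: for a non-leaf function the frame is

     round8 (size - STARTING_FRAME_OFFSET) + 4 * n_regs
     + round8 (outgoing args) + FIRST_PARM_OFFSET

   where FIRST_PARM_OFFSET covers the 16-word register window save area
   (plus, on 32-bit, the structure-return slot), the whole thing then
   being rounded by SPARC_STACK_ALIGN.  Only a true leaf with no
   locals, no saved registers and no outgoing arguments gets a
   zero-size frame.  */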
3786 /* Output any necessary .register pseudo-ops. */
3788 void
3789 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3791 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3792 int i;
3794 if (TARGET_ARCH32)
3795 return;
3797  /* Check whether any of %g[2367] were used without
3798     a .register directive having been printed for them already.  */
3799 for (i = 2; i < 8; i++)
3801 if (df_regs_ever_live_p (i)
3802 && ! sparc_hard_reg_printed [i])
3804 sparc_hard_reg_printed [i] = 1;
3805	  /* %g7 is used as the TLS base register; use #ignore
3806	     for it instead of #scratch.  */
3807 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3808 i == 7 ? "ignore" : "scratch");
3810      if (i == 3) i = 5;	/* Skip %g4 and %g5.  */
3812 #endif
3815 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3816 as needed. LOW should be double-word aligned for 32-bit registers.
3817 Return the new OFFSET. */
3819 #define SORR_SAVE 0
3820 #define SORR_RESTORE 1
3822 static int
3823 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3825 rtx mem, insn;
3826 int i;
3828 if (TARGET_ARCH64 && high <= 32)
3830 for (i = low; i < high; i++)
3832 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3834 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3835 set_mem_alias_set (mem, sparc_sr_alias_set);
3836 if (action == SORR_SAVE)
3838 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3839 RTX_FRAME_RELATED_P (insn) = 1;
3841 else /* action == SORR_RESTORE */
3842 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3843 offset += 8;
3847 else
3849 for (i = low; i < high; i += 2)
3851 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
3852 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
3853 enum machine_mode mode;
3854 int regno;
3856 if (reg0 && reg1)
3858 mode = i < 32 ? DImode : DFmode;
3859 regno = i;
3861 else if (reg0)
3863 mode = i < 32 ? SImode : SFmode;
3864 regno = i;
3866 else if (reg1)
3868 mode = i < 32 ? SImode : SFmode;
3869 regno = i + 1;
3870 offset += 4;
3872 else
3873 continue;
3875 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3876 set_mem_alias_set (mem, sparc_sr_alias_set);
3877 if (action == SORR_SAVE)
3879 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3880 RTX_FRAME_RELATED_P (insn) = 1;
3882 else /* action == SORR_RESTORE */
3883 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3885 /* Always preserve double-word alignment. */
3886 offset = (offset + 7) & -8;
3890 return offset;
3893 /* Emit code to save call-saved registers. */
3895 static void
3896 emit_save_or_restore_regs (int action)
3898 HOST_WIDE_INT offset;
3899 rtx base;
3901 offset = frame_base_offset - apparent_fsize;
3903 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3905 /* ??? This might be optimized a little as %g1 might already have a
3906 value close enough that a single add insn will do. */
3907 /* ??? Although, all of this is probably only a temporary fix
3908 because if %g1 can hold a function result, then
3909 sparc_expand_epilogue will lose (the result will be
3910 clobbered). */
3911 base = gen_rtx_REG (Pmode, 1);
3912 emit_move_insn (base, GEN_INT (offset));
3913 emit_insn (gen_rtx_SET (VOIDmode,
3914 base,
3915 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3916 offset = 0;
3918 else
3919 base = frame_base_reg;
3921 offset = save_or_restore_regs (0, 8, base, offset, action);
3922 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3925 /* Generate a save_register_window insn. */
3927 static rtx
3928 gen_save_register_window (rtx increment)
3930 if (TARGET_ARCH64)
3931 return gen_save_register_windowdi (increment);
3932 else
3933 return gen_save_register_windowsi (increment);
3936 /* Generate an increment for the stack pointer. */
3938 static rtx
3939 gen_stack_pointer_inc (rtx increment)
3941 return gen_rtx_SET (VOIDmode,
3942 stack_pointer_rtx,
3943 gen_rtx_PLUS (Pmode,
3944 stack_pointer_rtx,
3945 increment));
3948 /* Generate a decrement for the stack pointer. */
3950 static rtx
3951 gen_stack_pointer_dec (rtx decrement)
3953 return gen_rtx_SET (VOIDmode,
3954 stack_pointer_rtx,
3955 gen_rtx_MINUS (Pmode,
3956 stack_pointer_rtx,
3957 decrement));
3960 /* Expand the function prologue. The prologue is responsible for reserving
3961 storage for the frame, saving the call-saved registers and loading the
3962 PIC register if needed. */
3964 void
3965 sparc_expand_prologue (void)
3967 rtx insn;
3968 int i;
3970 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3971 on the final value of the flag means deferring the prologue/epilogue
3972 expansion until just before the second scheduling pass, which is too
3973 late to emit multiple epilogues or return insns.
3975 Of course we are making the assumption that the value of the flag
3976 will not change between now and its final value. Of the three parts
3977 of the formula, only the last one can reasonably vary. Let's take a
3978 closer look, assuming that the first two are true (otherwise the
3979 last one is effectively silenced).
3981 If only_leaf_regs_used returns false, the global predicate will also
3982 be false so the actual frame size calculated below will be positive.
3983 As a consequence, the save_register_window insn will be emitted in
3984 the instruction stream; now this insn explicitly references %fp
3985 which is not a leaf register so only_leaf_regs_used will always
3986 return false subsequently.
3988 If only_leaf_regs_used returns true, we hope that the subsequent
3989 optimization passes won't cause non-leaf registers to pop up. For
3990 example, the regrename pass has special provisions to not rename to
3991 non-leaf registers in a leaf function. */
3992 sparc_leaf_function_p
3993 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3995 /* Need to use actual_fsize, since we are also allocating
3996 space for our callee (and our own register save area). */
3997 actual_fsize
3998 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4000 /* Advertise that the data calculated just above are now valid. */
4001 sparc_prologue_data_valid_p = true;
4003 if (sparc_leaf_function_p)
4005 frame_base_reg = stack_pointer_rtx;
4006 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4008 else
4010 frame_base_reg = hard_frame_pointer_rtx;
4011 frame_base_offset = SPARC_STACK_BIAS;
4014 if (actual_fsize == 0)
4015 /* do nothing. */ ;
4016 else if (sparc_leaf_function_p)
4018 if (actual_fsize <= 4096)
4019 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4020 else if (actual_fsize <= 8192)
4022 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4023 /* %sp is still the CFA register. */
4024 RTX_FRAME_RELATED_P (insn) = 1;
4025 insn
4026 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4028 else
4030 rtx reg = gen_rtx_REG (Pmode, 1);
4031 emit_move_insn (reg, GEN_INT (-actual_fsize));
4032 insn = emit_insn (gen_stack_pointer_inc (reg));
4033 REG_NOTES (insn) =
4034 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4035 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
4036 REG_NOTES (insn));
4039 RTX_FRAME_RELATED_P (insn) = 1;
4041 else
4043 if (actual_fsize <= 4096)
4044 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4045 else if (actual_fsize <= 8192)
4047 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4048 /* %sp is not the CFA register anymore. */
4049 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4051 else
4053 rtx reg = gen_rtx_REG (Pmode, 1);
4054 emit_move_insn (reg, GEN_INT (-actual_fsize));
4055 insn = emit_insn (gen_save_register_window (reg));
4058 RTX_FRAME_RELATED_P (insn) = 1;
4059 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4060 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4063 if (num_gfregs)
4064 emit_save_or_restore_regs (SORR_SAVE);
4066 /* Load the PIC register if needed. */
4067 if (flag_pic && current_function_uses_pic_offset_table)
4068 load_pic_register (false);
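/* Illustration only (editorial sketch): the three-way frame allocation
   above falls straight out of the simm13 limit.  A hypothetical helper
   spelling out the same decision:  */
#if 0
static const char *
frame_alloc_strategy (long frame_size)
{
  if (frame_size == 0)
    return "no stack adjustment needed";
  else if (frame_size <= 4096)
    return "one insn: immediate -size fits in simm13";
  else if (frame_size <= 8192)
    return "two insns: -4096, then 4096 - size (both fit in simm13)";
  else
    return "materialize -size in %g1, then add/save with a register";
}
#endif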
4071 /* This function generates the assembly code for function entry, which boils
4072 down to emitting the necessary .register directives. */
4074 static void
4075 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4077 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4078 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4080 sparc_output_scratch_registers (file);
4083 /* Expand the function epilogue, either normal or part of a sibcall.
4084 We emit all the instructions except the return or the call. */
4086 void
4087 sparc_expand_epilogue (void)
4089 if (num_gfregs)
4090 emit_save_or_restore_regs (SORR_RESTORE);
4092 if (actual_fsize == 0)
4093 /* do nothing. */ ;
4094 else if (sparc_leaf_function_p)
4096 if (actual_fsize <= 4096)
4097 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4098 else if (actual_fsize <= 8192)
4100 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4101 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4103 else
4105 rtx reg = gen_rtx_REG (Pmode, 1);
4106 emit_move_insn (reg, GEN_INT (-actual_fsize));
4107 emit_insn (gen_stack_pointer_dec (reg));
4112 /* Return true if it is appropriate to emit `return' instructions in the
4113 body of a function. */
4115 bool
4116 sparc_can_use_return_insn_p (void)
4118 return sparc_prologue_data_valid_p
4119 && (actual_fsize == 0 || !sparc_leaf_function_p);
4122 /* This function generates the assembly code for function exit. */
4124 static void
4125 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4127 /* If code does not drop into the epilogue, we still have to output
4128 a dummy nop for the sake of sane backtraces. Otherwise, if the
4129 last two instructions of a function were "call foo; dslot;" this
4130 can make the return PC of foo (i.e. address of call instruction
4131 plus 8) point to the first instruction in the next function. */
4133 rtx insn, last_real_insn;
4135 insn = get_last_insn ();
4137 last_real_insn = prev_real_insn (insn);
4138 if (last_real_insn
4139 && GET_CODE (last_real_insn) == INSN
4140 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4141 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4143 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4144 fputs("\tnop\n", file);
4146 sparc_output_deferred_case_vectors ();
4149 /* Output a 'restore' instruction. */
4151 static void
4152 output_restore (rtx pat)
4154 rtx operands[3];
4156 if (! pat)
4158 fputs ("\t restore\n", asm_out_file);
4159 return;
4162 gcc_assert (GET_CODE (pat) == SET);
4164 operands[0] = SET_DEST (pat);
4165 pat = SET_SRC (pat);
4167 switch (GET_CODE (pat))
4169 case PLUS:
4170 operands[1] = XEXP (pat, 0);
4171 operands[2] = XEXP (pat, 1);
4172 output_asm_insn (" restore %r1, %2, %Y0", operands);
4173 break;
4174 case LO_SUM:
4175 operands[1] = XEXP (pat, 0);
4176 operands[2] = XEXP (pat, 1);
4177 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4178 break;
4179 case ASHIFT:
4180 operands[1] = XEXP (pat, 0);
4181 gcc_assert (XEXP (pat, 1) == const1_rtx);
4182 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4183 break;
4184 default:
4185 operands[1] = pat;
4186 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4187 break;
4191 /* Output a return. */
4193 const char *
4194 output_return (rtx insn)
4196 if (sparc_leaf_function_p)
4198 /* This is a leaf function so we don't have to bother restoring the
4199 register window, which frees us from dealing with the convoluted
4200 semantics of restore/return. We simply output the jump to the
4201 return address and the insn in the delay slot (if any). */
4203 gcc_assert (! current_function_calls_eh_return);
4205 return "jmp\t%%o7+%)%#";
4207 else
4209 /* This is a regular function so we have to restore the register window.
4210 We may have a pending insn for the delay slot, which will be either
4211 combined with the 'restore' instruction or put in the delay slot of
4212 the 'return' instruction. */
4214 if (current_function_calls_eh_return)
4216 /* If the function uses __builtin_eh_return, the eh_return
4217 machinery occupies the delay slot. */
4218 gcc_assert (! final_sequence);
4220 if (! flag_delayed_branch)
4221 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4223 if (TARGET_V9)
4224 fputs ("\treturn\t%i7+8\n", asm_out_file);
4225 else
4226 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4228 if (flag_delayed_branch)
4229 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4230 else
4231 fputs ("\t nop\n", asm_out_file);
4233 else if (final_sequence)
4235 rtx delay, pat;
4237 delay = NEXT_INSN (insn);
4238 gcc_assert (delay);
4240 pat = PATTERN (delay);
4242 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4244 epilogue_renumber (&pat, 0);
4245 return "return\t%%i7+%)%#";
4247 else
4249 output_asm_insn ("jmp\t%%i7+%)", NULL);
4250 output_restore (pat);
4251 PATTERN (delay) = gen_blockage ();
4252 INSN_CODE (delay) = -1;
4255 else
4257 /* The delay slot is empty. */
4258 if (TARGET_V9)
4259 return "return\t%%i7+%)\n\t nop";
4260 else if (flag_delayed_branch)
4261 return "jmp\t%%i7+%)\n\t restore";
4262 else
4263 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4267 return "";
4270 /* Output a sibling call. */
4272 const char *
4273 output_sibcall (rtx insn, rtx call_operand)
4275 rtx operands[1];
4277 gcc_assert (flag_delayed_branch);
4279 operands[0] = call_operand;
4281 if (sparc_leaf_function_p)
4283 /* This is a leaf function so we don't have to bother restoring the
4284 register window. We simply output the jump to the function and
4285 the insn in the delay slot (if any). */
4287 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4289 if (final_sequence)
4290 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4291 operands);
4292 else
4293 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4294 it into a branch if possible. */
4295 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4296 operands);
4298 else
4300 /* This is a regular function so we have to restore the register window.
4301 We may have a pending insn for the delay slot, which will be combined
4302 with the 'restore' instruction. */
4304 output_asm_insn ("call\t%a0, 0", operands);
4306 if (final_sequence)
4308 rtx delay = NEXT_INSN (insn);
4309 gcc_assert (delay);
4311 output_restore (PATTERN (delay));
4313 PATTERN (delay) = gen_blockage ();
4314 INSN_CODE (delay) = -1;
4316 else
4317 output_restore (NULL_RTX);
4320 return "";
4323 /* Functions for handling argument passing.
4325 For 32-bit, the first 6 args are normally in registers and the rest are
4326 pushed. Any arg that starts within the first 6 words is at least
4327 partially passed in a register unless its data type forbids it.
4329 For 64-bit, the argument registers are laid out as an array of 16 elements
4330 and arguments are added sequentially. The first 6 int args and up to the
4331 first 16 fp args (depending on size) are passed in regs.
4333 Slot Stack Integral Float Float in structure Double Long Double
4334 ---- ----- -------- ----- ------------------ ------ -----------
4335 15 [SP+248] %f31 %f30,%f31 %d30
4336 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4337 13 [SP+232] %f27 %f26,%f27 %d26
4338 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4339 11 [SP+216] %f23 %f22,%f23 %d22
4340 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4341 9 [SP+200] %f19 %f18,%f19 %d18
4342 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4343 7 [SP+184] %f15 %f14,%f15 %d14
4344 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4345 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4346 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4347 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4348 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4349 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4350 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4352 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4354 Integral arguments are always passed as 64-bit quantities appropriately
4355 extended.
4357 Passing of floating point values is handled as follows.
4358 If a prototype is in scope:
4359 If the value is in a named argument (i.e. not a stdarg function or a
4360 value not part of the `...') then the value is passed in the appropriate
4361 fp reg.
4362 If the value is part of the `...' and is passed in one of the first 6
4363 slots then the value is passed in the appropriate int reg.
4364 If the value is part of the `...' and is not passed in one of the first 6
4365 slots then the value is passed in memory.
4366 If a prototype is not in scope:
4367 If the value is one of the first 6 arguments the value is passed in the
4368 appropriate integer reg and the appropriate fp reg.
4369 If the value is not one of the first 6 arguments the value is passed in
4370 the appropriate fp reg and in memory.
4373 Summary of the calling conventions implemented by GCC on SPARC:
4375 32-bit ABI:
4376 size argument return value
4378 small integer <4 int. reg. int. reg.
4379 word 4 int. reg. int. reg.
4380 double word 8 int. reg. int. reg.
4382 _Complex small integer <8 int. reg. int. reg.
4383 _Complex word 8 int. reg. int. reg.
4384 _Complex double word 16 memory int. reg.
4386 vector integer <=8 int. reg. FP reg.
4387 vector integer >8 memory memory
4389 float 4 int. reg. FP reg.
4390 double 8 int. reg. FP reg.
4391 long double 16 memory memory
4393 _Complex float 8 memory FP reg.
4394 _Complex double 16 memory FP reg.
4395 _Complex long double 32 memory FP reg.
4397 vector float any memory memory
4399 aggregate any memory memory
4403 64-bit ABI:
4404 size argument return value
4406 small integer <8 int. reg. int. reg.
4407 word 8 int. reg. int. reg.
4408 double word 16 int. reg. int. reg.
4410 _Complex small integer <16 int. reg. int. reg.
4411 _Complex word 16 int. reg. int. reg.
4412 _Complex double word 32 memory int. reg.
4414 vector integer <=16 FP reg. FP reg.
4415 vector integer 16<s<=32 memory FP reg.
4416 vector integer >32 memory memory
4418 float 4 FP reg. FP reg.
4419 double 8 FP reg. FP reg.
4420 long double 16 FP reg. FP reg.
4422 _Complex float 8 FP reg. FP reg.
4423 _Complex double 16 FP reg. FP reg.
4424 _Complex long double 32 memory FP reg.
4426 vector float <=16 FP reg. FP reg.
4427 vector float 16<s<=32 memory FP reg.
4428 vector float >32 memory memory
4430 aggregate <=16 reg. reg.
4431 aggregate 16<s<=32 memory reg.
4432 aggregate >32 memory memory
4436 Note #1: complex floating-point types follow the extended SPARC ABIs as
4437 implemented by the Sun compiler.
4439 Note #2: integral vector types follow the scalar floating-point types
4440 conventions to match what is implemented by the Sun VIS SDK.
4442 Note #3: floating-point vector types follow the aggregate types
4443 conventions. */
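/* Illustration only (editorial sketch), derived from the table above:
   on SPARC64 the argument array starts at SP+128 and each slot is
   8 bytes wide, so slot N has the stack home SP + 128 + 8*N (SP being
   %sp plus the stack bias unless -mno-stack-bias).  */
#if 0
#include <assert.h>

static long
arg_slot_stack_offset (int slotno)
{
  return 128 + 8L * slotno;
}

int
main (void)
{
  assert (arg_slot_stack_offset (0) == 128);   /* %o0's home slot */
  assert (arg_slot_stack_offset (5) == 168);   /* %o5's home slot */
  assert (arg_slot_stack_offset (15) == 248);  /* last FP-eligible slot */
  return 0;
}
#endif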
4446 /* Maximum number of int regs for args. */
4447 #define SPARC_INT_ARG_MAX 6
4448 /* Maximum number of fp regs for args. */
4449 #define SPARC_FP_ARG_MAX 16
4451 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4453 /* Handle the INIT_CUMULATIVE_ARGS macro.
4454 Initialize a variable CUM of type CUMULATIVE_ARGS
4455 for a call to a function whose data type is FNTYPE.
4456 For a library call, FNTYPE is 0. */
4458 void
4459 init_cumulative_args (struct sparc_args *cum, tree fntype,
4460 rtx libname ATTRIBUTE_UNUSED,
4461 tree fndecl ATTRIBUTE_UNUSED)
4463 cum->words = 0;
4464 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4465 cum->libcall_p = fntype == 0;
4468 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4469 When a prototype says `char' or `short', really pass an `int'. */
4471 static bool
4472 sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
4474 return TARGET_ARCH32 ? true : false;
4477 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4479 static bool
4480 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4482 return TARGET_ARCH64 ? true : false;
4485 /* Scan the record type TYPE and return the following predicates:
4486 - INTREGS_P: the record contains at least one field or sub-field
4487 that is eligible for promotion in integer registers.
4488 - FPREGS_P: the record contains at least one field or sub-field
4489 that is eligible for promotion in floating-point registers.
4490 - PACKED_P: the record contains at least one field that is packed.
4492 Sub-fields are not taken into account for the PACKED_P predicate. */
4494 static void
4495 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4497 tree field;
4499 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4501 if (TREE_CODE (field) == FIELD_DECL)
4503 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4504 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4505 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4506 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4507 && TARGET_FPU)
4508 *fpregs_p = 1;
4509 else
4510 *intregs_p = 1;
4512 if (packed_p && DECL_PACKED (field))
4513 *packed_p = 1;
4518 /* Compute the slot number to pass an argument in.
4519 Return the slot number or -1 if passing on the stack.
4521 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4522 the preceding args and about the function being called.
4523 MODE is the argument's machine mode.
4524 TYPE is the data type of the argument (as a tree).
4525 This is null for libcalls where that information may
4526 not be available.
4527 NAMED is nonzero if this argument is a named parameter
4528 (otherwise it is an extra parameter matching an ellipsis).
4529 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4530 *PREGNO records the register number to use if scalar type.
4531 *PPADDING records the amount of padding needed in words. */
4533 static int
4534 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4535 tree type, int named, int incoming_p,
4536 int *pregno, int *ppadding)
4538 int regbase = (incoming_p
4539 ? SPARC_INCOMING_INT_ARG_FIRST
4540 : SPARC_OUTGOING_INT_ARG_FIRST);
4541 int slotno = cum->words;
4542 enum mode_class mclass;
4543 int regno;
4545 *ppadding = 0;
4547 if (type && TREE_ADDRESSABLE (type))
4548 return -1;
4550 if (TARGET_ARCH32
4551 && mode == BLKmode
4552 && type
4553 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4554 return -1;
4556 /* For SPARC64, objects requiring 16-byte alignment get it. */
4557 if (TARGET_ARCH64
4558 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4559 && (slotno & 1) != 0)
4560 slotno++, *ppadding = 1;
4562 mclass = GET_MODE_CLASS (mode);
4563 if (type && TREE_CODE (type) == VECTOR_TYPE)
4565 /* Vector types deserve special treatment because they are
4566 polymorphic wrt their mode, depending upon whether VIS
4567 instructions are enabled. */
4568 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4570 /* The SPARC port defines no floating-point vector modes. */
4571 gcc_assert (mode == BLKmode);
4573 else
4575 /* Integral vector types should either have a vector
4576 mode or an integral mode, because we are guaranteed
4577 by pass_by_reference that their size is not greater
4578 than 16 bytes and TImode is 16-byte wide. */
4579 gcc_assert (mode != BLKmode);
4581 /* Vector integers are handled like floats according to
4582 the Sun VIS SDK. */
4583 mclass = MODE_FLOAT;
4587 switch (mclass)
4589 case MODE_FLOAT:
4590 case MODE_COMPLEX_FLOAT:
4591 if (TARGET_ARCH64 && TARGET_FPU && named)
4593 if (slotno >= SPARC_FP_ARG_MAX)
4594 return -1;
4595 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4596 /* Arguments that fill only a single FP register are
4597 right-justified in the outer double FP register. */
4598 if (GET_MODE_SIZE (mode) <= 4)
4599 regno++;
4600 break;
4602 /* fallthrough */
4604 case MODE_INT:
4605 case MODE_COMPLEX_INT:
4606 if (slotno >= SPARC_INT_ARG_MAX)
4607 return -1;
4608 regno = regbase + slotno;
4609 break;
4611 case MODE_RANDOM:
4612 if (mode == VOIDmode)
4613 /* MODE is VOIDmode when generating the actual call. */
4614 return -1;
4616 gcc_assert (mode == BLKmode);
4618 if (TARGET_ARCH32
4619 || !type
4620 || (TREE_CODE (type) != VECTOR_TYPE
4621 && TREE_CODE (type) != RECORD_TYPE))
4623 if (slotno >= SPARC_INT_ARG_MAX)
4624 return -1;
4625 regno = regbase + slotno;
4627 else /* TARGET_ARCH64 && type */
4629 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4631 /* First see what kinds of registers we would need. */
4632 if (TREE_CODE (type) == VECTOR_TYPE)
4633 fpregs_p = 1;
4634 else
4635 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4637 /* The ABI obviously doesn't specify how packed structures
4638 are passed. These are defined to be passed in int regs
4639 if possible, otherwise memory. */
4640 if (packed_p || !named)
4641 fpregs_p = 0, intregs_p = 1;
4643 /* If all arg slots are filled, then must pass on stack. */
4644 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4645 return -1;
4647 /* If there are only int args and all int arg slots are filled,
4648 then must pass on stack. */
4649 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4650 return -1;
4652 /* Note that even if all int arg slots are filled, fp members may
4653 still be passed in regs if such regs are available.
4654 *PREGNO isn't set because there may be more than one, it's up
4655 to the caller to compute them. */
4656 return slotno;
4658 break;
4660 default :
4661 gcc_unreachable ();
4664 *pregno = regno;
4665 return slotno;
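/* Illustration only (editorial worked example of the assignment above):
   for a 64-bit call `f (int a, double b, float c)' with all arguments
   named, TARGET_FPU set and a prototype in scope, and assuming
   SPARC_FP_ARG_FIRST is the internal number of %f0:

     a: slot 0, MODE_INT   -> regbase + 0                    (%o0)
     b: slot 1, MODE_FLOAT -> SPARC_FP_ARG_FIRST + 1*2       (%d2)
     c: slot 2, MODE_FLOAT -> SPARC_FP_ARG_FIRST + 2*2 + 1   (%f5)

   c gets the "+ 1" because a 4-byte value is right-justified in its
   8-byte slot, matching the %f5 entry for slot 2 in the table above.  */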
4668 /* Handle recursive register counting for structure field layout. */
4670 struct function_arg_record_value_parms
4672 rtx ret; /* return expression being built. */
4673 int slotno; /* slot number of the argument. */
4674 int named; /* whether the argument is named. */
4675 int regbase; /* regno of the base register. */
4676 int stack; /* 1 if part of the argument is on the stack. */
4677 int intoffset; /* offset of the first pending integer field. */
4678 unsigned int nregs; /* number of words passed in registers. */
4681 static void function_arg_record_value_3
4682 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4683 static void function_arg_record_value_2
4684 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4685 static void function_arg_record_value_1
4686 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4687 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4688 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4690 /* A subroutine of function_arg_record_value. Traverse the structure
4691 recursively and determine how many registers will be required. */
4693 static void
4694 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4695 struct function_arg_record_value_parms *parms,
4696 bool packed_p)
4698 tree field;
4700 /* We need to compute how many registers are needed so we can
4701 allocate the PARALLEL but before we can do that we need to know
4702 whether there are any packed fields. The ABI obviously doesn't
4703 specify how structures are passed in this case, so they are
4704 defined to be passed in int regs if possible, otherwise memory,
4705 regardless of whether there are fp values present. */
4707 if (! packed_p)
4708 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4710 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4712 packed_p = true;
4713 break;
4717 /* Compute how many registers we need. */
4718 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4720 if (TREE_CODE (field) == FIELD_DECL)
4722 HOST_WIDE_INT bitpos = startbitpos;
4724 if (DECL_SIZE (field) != 0)
4726 if (integer_zerop (DECL_SIZE (field)))
4727 continue;
4729 if (host_integerp (bit_position (field), 1))
4730 bitpos += int_bit_position (field);
4733 /* ??? FIXME: else assume zero offset. */
4735 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4736 function_arg_record_value_1 (TREE_TYPE (field),
4737 bitpos,
4738 parms,
4739 packed_p);
4740 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4741 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4742 && TARGET_FPU
4743 && parms->named
4744 && ! packed_p)
4746 if (parms->intoffset != -1)
4748 unsigned int startbit, endbit;
4749 int intslots, this_slotno;
4751 startbit = parms->intoffset & -BITS_PER_WORD;
4752 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4754 intslots = (endbit - startbit) / BITS_PER_WORD;
4755 this_slotno = parms->slotno + parms->intoffset
4756 / BITS_PER_WORD;
4758 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4760 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4761 /* We need to pass this field on the stack. */
4762 parms->stack = 1;
4765 parms->nregs += intslots;
4766 parms->intoffset = -1;
4769 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4770 If it wasn't true we wouldn't be here. */
4771 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4772 && DECL_MODE (field) == BLKmode)
4773 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4774 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4775 parms->nregs += 2;
4776 else
4777 parms->nregs += 1;
4779 else
4781 if (parms->intoffset == -1)
4782 parms->intoffset = bitpos;
4788 /* A subroutine of function_arg_record_value. Assign the bits of the
4789 structure between parms->intoffset and bitpos to integer registers. */
4791 static void
4792 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4793 struct function_arg_record_value_parms *parms)
4795 enum machine_mode mode;
4796 unsigned int regno;
4797 unsigned int startbit, endbit;
4798 int this_slotno, intslots, intoffset;
4799 rtx reg;
4801 if (parms->intoffset == -1)
4802 return;
4804 intoffset = parms->intoffset;
4805 parms->intoffset = -1;
4807 startbit = intoffset & -BITS_PER_WORD;
4808 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4809 intslots = (endbit - startbit) / BITS_PER_WORD;
4810 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4812 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4813 if (intslots <= 0)
4814 return;
4816 /* If this is the trailing part of a word, only load that much into
4817 the register. Otherwise load the whole register. Note that in
4818 the latter case we may pick up unwanted bits. It's not a problem
4819 at the moment, but we may wish to revisit this. */
4821 if (intoffset % BITS_PER_WORD != 0)
4822 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4823 MODE_INT);
4824 else
4825 mode = word_mode;
4827 intoffset /= BITS_PER_UNIT;
4828 do
4830 regno = parms->regbase + this_slotno;
4831 reg = gen_rtx_REG (mode, regno);
4832 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4833 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4835 this_slotno += 1;
4836 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4837 mode = word_mode;
4838 parms->nregs += 1;
4839 intslots -= 1;
4841 while (intslots > 0);
4844 /* A subroutine of function_arg_record_value. Traverse the structure
4845 recursively and assign bits to floating point registers. Track which
4846 bits in between need integer registers; invoke function_arg_record_value_3
4847 to make that happen. */
4849 static void
4850 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
4851 struct function_arg_record_value_parms *parms,
4852 bool packed_p)
4854 tree field;
4856 if (! packed_p)
4857 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4859 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4861 packed_p = true;
4862 break;
4866 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4868 if (TREE_CODE (field) == FIELD_DECL)
4870 HOST_WIDE_INT bitpos = startbitpos;
4872 if (DECL_SIZE (field) != 0)
4874 if (integer_zerop (DECL_SIZE (field)))
4875 continue;
4877 if (host_integerp (bit_position (field), 1))
4878 bitpos += int_bit_position (field);
4881 /* ??? FIXME: else assume zero offset. */
4883 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4884 function_arg_record_value_2 (TREE_TYPE (field),
4885 bitpos,
4886 parms,
4887 packed_p);
4888 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4889 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4890 && TARGET_FPU
4891 && parms->named
4892 && ! packed_p)
4894 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4895 int regno, nregs, pos;
4896 enum machine_mode mode = DECL_MODE (field);
4897 rtx reg;
4899 function_arg_record_value_3 (bitpos, parms);
4901 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4902 && mode == BLKmode)
4904 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4905 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4907 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4909 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4910 nregs = 2;
4912 else
4913 nregs = 1;
4915 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4916 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4917 regno++;
4918 reg = gen_rtx_REG (mode, regno);
4919 pos = bitpos / BITS_PER_UNIT;
4920 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4921 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4922 parms->nregs += 1;
4923 while (--nregs > 0)
4925 regno += GET_MODE_SIZE (mode) / 4;
4926 reg = gen_rtx_REG (mode, regno);
4927 pos += GET_MODE_SIZE (mode);
4928 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4929 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4930 parms->nregs += 1;
4933 else
4935 if (parms->intoffset == -1)
4936 parms->intoffset = bitpos;
4942 /* Used by function_arg and function_value to implement the complex
4943 conventions of the 64-bit ABI for passing and returning structures.
4944 Return an expression valid as a return value for the two macros
4945 FUNCTION_ARG and FUNCTION_VALUE.
4947 TYPE is the data type of the argument (as a tree).
4948 This is null for libcalls where that information may
4949 not be available.
4950 MODE is the argument's machine mode.
4951 SLOTNO is the index number of the argument's slot in the parameter array.
4952 NAMED is nonzero if this argument is a named parameter
4953 (otherwise it is an extra parameter matching an ellipsis).
4954 REGBASE is the regno of the base register for the parameter array. */
4956 static rtx
4957 function_arg_record_value (const_tree type, enum machine_mode mode,
4958 int slotno, int named, int regbase)
4960 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4961 struct function_arg_record_value_parms parms;
4962 unsigned int nregs;
4964 parms.ret = NULL_RTX;
4965 parms.slotno = slotno;
4966 parms.named = named;
4967 parms.regbase = regbase;
4968 parms.stack = 0;
4970 /* Compute how many registers we need. */
4971 parms.nregs = 0;
4972 parms.intoffset = 0;
4973 function_arg_record_value_1 (type, 0, &parms, false);
4975 /* Take into account pending integer fields. */
4976 if (parms.intoffset != -1)
4978 unsigned int startbit, endbit;
4979 int intslots, this_slotno;
4981 startbit = parms.intoffset & -BITS_PER_WORD;
4982 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4983 intslots = (endbit - startbit) / BITS_PER_WORD;
4984 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4986 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4988 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4989 /* We need to pass this field on the stack. */
4990 parms.stack = 1;
4993 parms.nregs += intslots;
4995 nregs = parms.nregs;
4997 /* Allocate the vector and handle some annoying special cases. */
4998 if (nregs == 0)
5000 /* ??? Empty structure has no value? Duh? */
5001 if (typesize <= 0)
5003 /* Though there's nothing really to store, return a word register
5004 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5005 leads to breakage due to the fact that there are zero bytes to
5006 load. */
5007 return gen_rtx_REG (mode, regbase);
5009 else
5011 /* ??? C++ has structures with no fields, and yet a size. Give up
5012 for now and pass everything back in integer registers. */
5013 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5015 if (nregs + slotno > SPARC_INT_ARG_MAX)
5016 nregs = SPARC_INT_ARG_MAX - slotno;
5018 gcc_assert (nregs != 0);
5020 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5022 /* If at least one field must be passed on the stack, generate
5023 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5024 also be passed on the stack. We can't do much better because the
5025 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5026 of structures for which the fields passed exclusively in registers
5027 are not at the beginning of the structure. */
5028 if (parms.stack)
5029 XVECEXP (parms.ret, 0, 0)
5030 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5032 /* Fill in the entries. */
5033 parms.nregs = 0;
5034 parms.intoffset = 0;
5035 function_arg_record_value_2 (type, 0, &parms, false);
5036 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5038 gcc_assert (parms.nregs == nregs);
5040 return parms.ret;
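/* Illustration only (editorial worked example): for a named 8-byte
   `struct { int i; float f; }' passed in slot N under the 64-bit ABI,
   the two passes above cooperate to build roughly

     (parallel [(expr_list (reg:DI <int reg for slot N>) (const_int 0))
                (expr_list (reg:SF <FP reg for slot N>)  (const_int 4))])

   i.e. the word containing `i' is assigned an integer register at byte
   offset 0 by function_arg_record_value_3, and `f' additionally gets
   the second single FP register of its slot at byte offset 4 from
   function_arg_record_value_2.  */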
5043 /* Used by function_arg and function_value to implement the conventions
5044 of the 64-bit ABI for passing and returning unions.
5045 Return an expression valid as a return value for the two macros
5046 FUNCTION_ARG and FUNCTION_VALUE.
5048 SIZE is the size in bytes of the union.
5049 MODE is the argument's machine mode.
5050 REGNO is the hard register the union will be passed in. */
5052 static rtx
5053 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5054 int regno)
5056 int nwords = ROUND_ADVANCE (size), i;
5057 rtx regs;
5059 /* See comment in previous function for empty structures. */
5060 if (nwords == 0)
5061 return gen_rtx_REG (mode, regno);
5063 if (slotno == SPARC_INT_ARG_MAX - 1)
5064 nwords = 1;
5066 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5068 for (i = 0; i < nwords; i++)
5070 /* Unions are passed left-justified. */
5071 XVECEXP (regs, 0, i)
5072 = gen_rtx_EXPR_LIST (VOIDmode,
5073 gen_rtx_REG (word_mode, regno),
5074 GEN_INT (UNITS_PER_WORD * i));
5075 regno++;
5078 return regs;
5081 /* Used by function_arg and function_value to implement the conventions
5082 for passing and returning large (BLKmode) vectors.
5083 Return an expression valid as a return value for the two macros
5084 FUNCTION_ARG and FUNCTION_VALUE.
5086 SIZE is the size in bytes of the vector.
5087 BASE_MODE is the argument's base machine mode.
5088 REGNO is the FP hard register the vector will be passed in. */
5090 static rtx
5091 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5093 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5094 int nregs = size / base_mode_size, i;
5095 rtx regs;
5097 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5099 for (i = 0; i < nregs; i++)
5101 XVECEXP (regs, 0, i)
5102 = gen_rtx_EXPR_LIST (VOIDmode,
5103 gen_rtx_REG (base_mode, regno),
5104 GEN_INT (base_mode_size * i));
5105 regno += base_mode_size / 4;
5108 return regs;
5111 /* Handle the FUNCTION_ARG macro.
5112 Determine where to put an argument to a function.
5113 Value is zero to push the argument on the stack,
5114 or a hard register in which to store the argument.
5116 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5117 the preceding args and about the function being called.
5118 MODE is the argument's machine mode.
5119 TYPE is the data type of the argument (as a tree).
5120 This is null for libcalls where that information may
5121 not be available.
5122 NAMED is nonzero if this argument is a named parameter
5123 (otherwise it is an extra parameter matching an ellipsis).
5124 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5126 rtx
5127 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5128 tree type, int named, int incoming_p)
5130 int regbase = (incoming_p
5131 ? SPARC_INCOMING_INT_ARG_FIRST
5132 : SPARC_OUTGOING_INT_ARG_FIRST);
5133 int slotno, regno, padding;
5134 enum mode_class mclass = GET_MODE_CLASS (mode);
5136 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5137 &regno, &padding);
5138 if (slotno == -1)
5139 return 0;
5141 /* Vector types deserve special treatment because they are polymorphic wrt
5142 their mode, depending upon whether VIS instructions are enabled. */
5143 if (type && TREE_CODE (type) == VECTOR_TYPE)
5145 HOST_WIDE_INT size = int_size_in_bytes (type);
5146 gcc_assert ((TARGET_ARCH32 && size <= 8)
5147 || (TARGET_ARCH64 && size <= 16));
5149 if (mode == BLKmode)
5150 return function_arg_vector_value (size,
5151 TYPE_MODE (TREE_TYPE (type)),
5152 SPARC_FP_ARG_FIRST + 2*slotno);
5153 else
5154 mclass = MODE_FLOAT;
5157 if (TARGET_ARCH32)
5158 return gen_rtx_REG (mode, regno);
5160 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5161 and are promoted to registers if possible. */
5162 if (type && TREE_CODE (type) == RECORD_TYPE)
5164 HOST_WIDE_INT size = int_size_in_bytes (type);
5165 gcc_assert (size <= 16);
5167 return function_arg_record_value (type, mode, slotno, named, regbase);
5170 /* Unions up to 16 bytes in size are passed in integer registers. */
5171 else if (type && TREE_CODE (type) == UNION_TYPE)
5173 HOST_WIDE_INT size = int_size_in_bytes (type);
5174 gcc_assert (size <= 16);
5176 return function_arg_union_value (size, mode, slotno, regno);
5179 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5180 but also have the slot allocated for them.
5181 If no prototype is in scope fp values in register slots get passed
5182 in two places, either fp regs and int regs or fp regs and memory. */
5183 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5184 && SPARC_FP_REG_P (regno))
5186 rtx reg = gen_rtx_REG (mode, regno);
5187 if (cum->prototype_p || cum->libcall_p)
5189 /* "* 2" because fp reg numbers are recorded in 4 byte
5190 quantities. */
5191 #if 0
5192 /* ??? This will cause the value to be passed in the fp reg and
5193 in the stack. When a prototype exists we want to pass the
5194 value in the reg but reserve space on the stack. That's an
5195 optimization, and is deferred [for a bit]. */
5196 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5197 return gen_rtx_PARALLEL (mode,
5198 gen_rtvec (2,
5199 gen_rtx_EXPR_LIST (VOIDmode,
5200 NULL_RTX, const0_rtx),
5201 gen_rtx_EXPR_LIST (VOIDmode,
5202 reg, const0_rtx)));
5203 else
5204 #else
5205 /* ??? It seems that passing back a register even when past
5206 the area declared by REG_PARM_STACK_SPACE will allocate
5207 space appropriately, and will not copy the data onto the
5208 stack, exactly as we desire.
5210 This is due to locate_and_pad_parm being called in
5211 expand_call whenever reg_parm_stack_space > 0, which
5212 while beneficial to our example here, would seem to be
5213 in error from what had been intended. Ho hum... -- r~ */
5214 #endif
5215 return reg;
5217 else
5219 rtx v0, v1;
5221 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5223 int intreg;
5225 /* On incoming, we don't need to know that the value
5226 is passed in %f0 and %i0, and it confuses other parts
5227 causing needless spillage even in the simplest cases. */
5228 if (incoming_p)
5229 return reg;
5231 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5232 + (regno - SPARC_FP_ARG_FIRST) / 2);
5234 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5235 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5236 const0_rtx);
5237 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5239 else
5241 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5242 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5243 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5248 /* All other aggregate types are passed in an integer register in a mode
5249 corresponding to the size of the type. */
5250 else if (type && AGGREGATE_TYPE_P (type))
5252 HOST_WIDE_INT size = int_size_in_bytes (type);
5253 gcc_assert (size <= 16);
5255 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5258 return gen_rtx_REG (mode, regno);
5261 /* For an arg passed partly in registers and partly in memory,
5262 this is the number of bytes of registers used.
5263 For args passed entirely in registers or entirely in memory, zero.
5265 Any arg that starts in the first 6 regs but won't entirely fit in them
5266 needs partial registers on v8. On v9, structures with integer
5267 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5268 values that begin in the last fp reg [where "last fp reg" varies with the
5269 mode] will be split between that reg and memory. */
5271 static int
5272 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5273 tree type, bool named)
5275 int slotno, regno, padding;
5277 /* We pass 0 for incoming_p here; it doesn't matter. */
5278 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5280 if (slotno == -1)
5281 return 0;
5283 if (TARGET_ARCH32)
5285 if ((slotno + (mode == BLKmode
5286 ? ROUND_ADVANCE (int_size_in_bytes (type))
5287 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5288 > SPARC_INT_ARG_MAX)
5289 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5291 else
5293 /* We are guaranteed by pass_by_reference that the size of the
5294 argument is not greater than 16 bytes, so we only need to return
5295 one word if the argument is partially passed in registers. */
5297 if (type && AGGREGATE_TYPE_P (type))
5299 int size = int_size_in_bytes (type);
5301 if (size > UNITS_PER_WORD
5302 && slotno == SPARC_INT_ARG_MAX - 1)
5303 return UNITS_PER_WORD;
5305 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5306 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5307 && ! (TARGET_FPU && named)))
5309 /* The complex types are passed as packed types. */
5310 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5311 && slotno == SPARC_INT_ARG_MAX - 1)
5312 return UNITS_PER_WORD;
5314 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5316 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5317 > SPARC_FP_ARG_MAX)
5318 return UNITS_PER_WORD;
5322 return 0;
5325 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5326 Specify whether to pass the argument by reference. */
5328 static bool
5329 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5330 enum machine_mode mode, const_tree type,
5331 bool named ATTRIBUTE_UNUSED)
5333 if (TARGET_ARCH32)
5334 /* Original SPARC 32-bit ABI says that structures and unions,
5335 and quad-precision floats are passed by reference. For Pascal,
5336 also pass arrays by reference. All other base types are passed
5337 in registers.
5339 Extended ABI (as implemented by the Sun compiler) says that all
5340 complex floats are passed by reference. Pass complex integers
5341 in registers up to 8 bytes. More generally, enforce the 2-word
5342 cap for passing arguments in registers.
5344 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5345 integers are passed like floats of the same size, that is in
5346 registers up to 8 bytes. Pass all vector floats by reference
5347 like structure and unions. */
5348 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5349 || mode == SCmode
5350 /* Catch CDImode, TFmode, DCmode and TCmode. */
5351 || GET_MODE_SIZE (mode) > 8
5352 || (type
5353 && TREE_CODE (type) == VECTOR_TYPE
5354 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5355 else
5356 /* Original SPARC 64-bit ABI says that structures and unions
5357 smaller than 16 bytes are passed in registers, as well as
5358 all other base types.
5360 Extended ABI (as implemented by the Sun compiler) says that
5361 complex floats are passed in registers up to 16 bytes. Pass
5362 all complex integers in registers up to 16 bytes. More generally,
5363 enforce the 2-word cap for passing arguments in registers.
5365 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5366 integers are passed like floats of the same size, that is in
5367 registers (up to 16 bytes). Pass all vector floats like structure
5368 and unions. */
5369 return ((type
5370 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5371 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5372 /* Catch CTImode and TCmode. */
5373 || GET_MODE_SIZE (mode) > 16);
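/* Illustration only (editorial sketch): the 32-bit case above, restated
   as a standalone predicate over properties a front end would know.
   Hypothetical helper names; the real decision is the code above.  */
#if 0
static int
v8_pass_by_reference_p (unsigned long size, int aggregate_p,
                        int vector_float_p, int complex_float_p)
{
  /* Aggregates, vector floats and all complex floats go by reference;
     so does anything wider than two words (e.g. quad floats).  */
  return aggregate_p || vector_float_p || complex_float_p || size > 8;
}
#endif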
5376 /* Handle the FUNCTION_ARG_ADVANCE macro.
5377 Update the data in CUM to advance over an argument
5378 of mode MODE and data type TYPE.
5379 TYPE is null for libcalls where that information may not be available. */
5381 void
5382 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5383 tree type, int named)
5385 int slotno, regno, padding;
5387 /* We pass 0 for incoming_p here; it doesn't matter. */
5388 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5391 /* If the register assignment required leading padding, add it. */
5391 if (slotno != -1)
5392 cum->words += padding;
5394 if (TARGET_ARCH32)
5396 cum->words += (mode != BLKmode
5397 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5398 : ROUND_ADVANCE (int_size_in_bytes (type)));
5400 else
5402 if (type && AGGREGATE_TYPE_P (type))
5404 int size = int_size_in_bytes (type);
5406 if (size <= 8)
5407 ++cum->words;
5408 else if (size <= 16)
5409 cum->words += 2;
5410 else /* passed by reference */
5411 ++cum->words;
5413 else
5415 cum->words += (mode != BLKmode
5416 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5417 : ROUND_ADVANCE (int_size_in_bytes (type)));
5422 /* Handle the FUNCTION_ARG_PADDING macro.
5423 For the 64-bit ABI, structs are always stored left-shifted in their
5424 argument slot. */
5426 enum direction
5427 function_arg_padding (enum machine_mode mode, const_tree type)
5429 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5430 return upward;
5432 /* Fall back to the default. */
5433 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5436 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5437 Specify whether to return the return value in memory. */
5439 static bool
5440 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5442 if (TARGET_ARCH32)
5443 /* Original SPARC 32-bit ABI says that structures and unions,
5444 and quad-precision floats are returned in memory. All other
5445 base types are returned in registers.
5447 Extended ABI (as implemented by the Sun compiler) says that
5448 all complex floats are returned in registers (8 FP registers
5449 at most for '_Complex long double'). Return all complex integers
5450 in registers (4 at most for '_Complex long long').
5452 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5453 integers are returned like floats of the same size, that is in
5454 registers up to 8 bytes and in memory otherwise. Return all
5455 vector floats in memory like structure and unions; note that
5456 they always have BLKmode like the latter. */
5457 return (TYPE_MODE (type) == BLKmode
5458 || TYPE_MODE (type) == TFmode
5459 || (TREE_CODE (type) == VECTOR_TYPE
5460 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5461 else
5462 /* Original SPARC 64-bit ABI says that structures and unions
5463 smaller than 32 bytes are returned in registers, as well as
5464 all other base types.
5466 Extended ABI (as implemented by the Sun compiler) says that all
5467 complex floats are returned in registers (8 FP registers at most
5468 for '_Complex long double'). Return all complex integers in
5469 registers (4 at most for '_Complex TItype').
5471 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5472 integers are returned like floats of the same size, that is in
5473 registers. Return all vector floats like structure and unions;
5474 note that they always have BLKmode like the latter. */
5475 return ((TYPE_MODE (type) == BLKmode
5476 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5479 /* Handle the TARGET_STRUCT_VALUE target hook.
5480 Return where to find the structure return value address. */
5482 static rtx
5483 sparc_struct_value_rtx (tree fndecl, int incoming)
5485 if (TARGET_ARCH64)
5486 return 0;
5487 else
5489 rtx mem;
5491 if (incoming)
5492 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5493 STRUCT_VALUE_OFFSET));
5494 else
5495 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5496 STRUCT_VALUE_OFFSET));
5498 /* Only follow the SPARC ABI for fixed-size structure returns.
5499 Variable size structure returns are handled per the normal
5500 procedures in GCC. This is enabled by -mstd-struct-return. */
5501 if (incoming == 2
5502 && sparc_std_struct_return
5503 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5504 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5506 /* We must check and adjust the return address, since the
5507 caller may or may not have provided the return object. */
5509 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5510 rtx scratch = gen_reg_rtx (SImode);
5511 rtx endlab = gen_label_rtx ();
5513 /* Calculate the return object size. */
5514 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5515 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5516 /* Construct a temporary return value. */
5517 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5519 /* Implement the SPARC 32-bit psABI struct-return checking
5520 requirements for the callee:
5522 Fetch the instruction where we will return to and see if
5523 it's an unimp instruction (the most significant 10 bits
5524 will be zero). */
5525 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5526 plus_constant (ret_rtx, 8)));
5527 /* Assume the size is valid and pre-adjust. */
5528 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5529 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5530 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5531 /* Assign stack temp:
5532 Write the address of the memory pointed to by temp_val into
5533 the memory pointed to by mem. */
5534 emit_move_insn (mem, XEXP (temp_val, 0));
5535 emit_label (endlab);
5538 set_mem_alias_set (mem, struct_value_alias_set);
5539 return mem;
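/* Illustration only (editorial sketch): the word fetched at the return
   address + 8 above is the caller's optional UNIMP instruction, whose
   most significant 10 bits are zero and whose low bits encode the
   expected struct size (cf. the comparison against size & 0xfff).  A
   standalone restatement of the shape test:  */
#if 0
static int
looks_like_unimp_p (unsigned int insn_word)
{
  /* All of bits 31..22 clear.  */
  return (insn_word >> 22) == 0;
}
#endif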
5543 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5544 For v9, function return values are subject to the same rules as arguments,
5545 except that up to 32 bytes may be returned in registers. */
5547 rtx
5548 function_value (const_tree type, enum machine_mode mode, int incoming_p)
5550 /* Beware that the two values are swapped here wrt function_arg. */
5551 int regbase = (incoming_p
5552 ? SPARC_OUTGOING_INT_ARG_FIRST
5553 : SPARC_INCOMING_INT_ARG_FIRST);
5554 enum mode_class mclass = GET_MODE_CLASS (mode);
5555 int regno;
5557 /* Vector types deserve special treatment because they are polymorphic wrt
5558 their mode, depending upon whether VIS instructions are enabled. */
5559 if (type && TREE_CODE (type) == VECTOR_TYPE)
5561 HOST_WIDE_INT size = int_size_in_bytes (type);
5562 gcc_assert ((TARGET_ARCH32 && size <= 8)
5563 || (TARGET_ARCH64 && size <= 32));
5565 if (mode == BLKmode)
5566 return function_arg_vector_value (size,
5567 TYPE_MODE (TREE_TYPE (type)),
5568 SPARC_FP_ARG_FIRST);
5569 else
5570 mclass = MODE_FLOAT;
5573 if (TARGET_ARCH64 && type)
5575 /* Structures up to 32 bytes in size are returned in registers. */
5576 if (TREE_CODE (type) == RECORD_TYPE)
5578 HOST_WIDE_INT size = int_size_in_bytes (type);
5579 gcc_assert (size <= 32);
5581 return function_arg_record_value (type, mode, 0, 1, regbase);
5584 /* Unions up to 32 bytes in size are returned in integer registers. */
5585 else if (TREE_CODE (type) == UNION_TYPE)
5587 HOST_WIDE_INT size = int_size_in_bytes (type);
5588 gcc_assert (size <= 32);
5590 return function_arg_union_value (size, mode, 0, regbase);
5593 /* Objects that require it are returned in FP registers. */
5594 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5597 /* All other aggregate types are returned in an integer register in a
5598 mode corresponding to the size of the type. */
5599 else if (AGGREGATE_TYPE_P (type))
5603 HOST_WIDE_INT size = int_size_in_bytes (type);
5604 gcc_assert (size <= 32);
5606 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5608 /* ??? We probably should have made the same ABI change in
5609 3.4.0 as the one we made for unions. The latter was
5610 required by the SCD though, while the former is not
5611 specified, so we favored compatibility and efficiency.
5613 Now we're stuck for aggregates larger than 16 bytes,
5614 because OImode vanished in the meantime. Let's not
5615 try to be unduly clever, and simply follow the ABI
5616 for unions in that case. */
5617 if (mode == BLKmode)
5618 return function_arg_union_value (size, mode, 0, regbase);
5619 else
5620 mclass = MODE_INT;
5623 /* This must match PROMOTE_FUNCTION_MODE. */
5624 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5625 mode = word_mode;
5628 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5629 regno = SPARC_FP_ARG_FIRST;
5630 else
5631 regno = regbase;
5633 return gen_rtx_REG (mode, regno);
5636 /* Do what is necessary for `va_start'. We look at the current function
5637 to determine if stdarg or varargs is used and return the address of
5638 the first unnamed parameter. */
5640 static rtx
5641 sparc_builtin_saveregs (void)
5643 int first_reg = current_function_args_info.words;
5644 rtx address;
5645 int regno;
5647 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5648 emit_move_insn (gen_rtx_MEM (word_mode,
5649 gen_rtx_PLUS (Pmode,
5650 frame_pointer_rtx,
5651 GEN_INT (FIRST_PARM_OFFSET (0)
5652 + (UNITS_PER_WORD
5653 * regno)))),
5654 gen_rtx_REG (word_mode,
5655 SPARC_INCOMING_INT_ARG_FIRST + regno));
5657 address = gen_rtx_PLUS (Pmode,
5658 frame_pointer_rtx,
5659 GEN_INT (FIRST_PARM_OFFSET (0)
5660 + UNITS_PER_WORD * first_reg));
5662 return address;
5665 /* Implement `va_start' for stdarg. */
5667 void
5668 sparc_va_start (tree valist, rtx nextarg)
5670 nextarg = expand_builtin_saveregs ();
5671 std_expand_builtin_va_start (valist, nextarg);
5674 /* Implement `va_arg' for stdarg. */
5676 static tree
5677 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5679 HOST_WIDE_INT size, rsize, align;
5680 tree addr, incr;
5681 bool indirect;
5682 tree ptrtype = build_pointer_type (type);
5684 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5686 indirect = true;
5687 size = rsize = UNITS_PER_WORD;
5688 align = 0;
5690 else
5692 indirect = false;
5693 size = int_size_in_bytes (type);
5694 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5695 align = 0;
5697 if (TARGET_ARCH64)
5699 /* For SPARC64, objects requiring 16-byte alignment get it. */
5700 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5701 align = 2 * UNITS_PER_WORD;
5703 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5704 are left-justified in their slots. */
5705 if (AGGREGATE_TYPE_P (type))
5707 if (size == 0)
5708 size = rsize = UNITS_PER_WORD;
5709 else
5710 size = rsize;
5715 incr = valist;
5716 if (align)
5718 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5719 size_int (align - 1));
5720 incr = fold_convert (sizetype, incr);
5721 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5722 size_int (-align));
5723 incr = fold_convert (ptr_type_node, incr);
5726 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5727 addr = incr;
5729 if (BYTES_BIG_ENDIAN && size < rsize)
5730 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5731 size_int (rsize - size));
5733 if (indirect)
5735 addr = fold_convert (build_pointer_type (ptrtype), addr);
5736 addr = build_va_arg_indirect_ref (addr);
5738 /* If the address isn't aligned properly for the type,
5739 we may need to copy to a temporary.
5740 FIXME: This is inefficient. Usually we can do this
5741 in registers. */
5742 else if (align == 0
5743 && TYPE_ALIGN (type) > BITS_PER_WORD)
5745 tree tmp = create_tmp_var (type, "va_arg_tmp");
5746 tree dest_addr = build_fold_addr_expr (tmp);
5748 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY], 3,
5749 dest_addr,
5750 addr,
5751 size_int (rsize));
5753 gimplify_and_add (copy, pre_p);
5754 addr = dest_addr;
5756 else
5757 addr = fold_convert (ptrtype, addr);
5759 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5760 incr = build2 (GIMPLE_MODIFY_STMT, ptr_type_node, valist, incr);
5761 gimplify_and_add (incr, post_p);
5763 return build_va_arg_indirect_ref (addr);
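/* A minimal sketch of the pointer arithmetic above (illustrative
   pseudocode, with BYTES_BIG_ENDIAN true as on SPARC):

     incr = (valist + align - 1) & -align;   // only if align != 0
     addr = incr;
     if (size < rsize)
       addr += rsize - size;                 // small scalars sit at the
                                             // right end of their slot
     valist = incr + rsize;                  // step past the slot

   Aggregates are exempt from the right-justification step because the
   V9 ABI left-justifies them, which is why SIZE is forced up to RSIZE
   for them above.  */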
5766 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5767 Specify whether the vector mode is supported by the hardware. */
5769 static bool
5770 sparc_vector_mode_supported_p (enum machine_mode mode)
5772 return TARGET_VIS && VECTOR_MODE_P (mode);
5775 /* Return the string to output an unconditional branch to LABEL, which is
5776 the operand number of the label.
5778 DEST is the destination insn (i.e. the label), INSN is the source. */
5780 const char *
5781 output_ubranch (rtx dest, int label, rtx insn)
5783 static char string[64];
5784 bool v9_form = false;
5785 char *p;
5787 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5789 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5790 - INSN_ADDRESSES (INSN_UID (insn)));
5791 /* Leave some instructions for "slop". */
5792 if (delta >= -260000 && delta < 260000)
5793 v9_form = true;
5796 if (v9_form)
5797 strcpy (string, "ba%*,pt\t%%xcc, ");
5798 else
5799 strcpy (string, "b%*\t");
5801 p = strchr (string, '\0');
5802 *p++ = '%';
5803 *p++ = 'l';
5804 *p++ = '0' + label;
5805 *p++ = '%';
5806 *p++ = '(';
5807 *p = '\0';
5809 return string;
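/* For illustration: with LABEL operand 0, the V9 form built above is
   the template "ba%*,pt\t%%xcc, %l0%(" and the pre-V9 form is
   "b%*\t%l0%(".  The %* and %( escapes are expanded by print_operand
   below: %* may add an ",a" annul flag and %( may emit a nop for an
   unfilled delay slot.  */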
5812 /* Return the string to output a conditional branch to LABEL, which is
5813 the operand number of the label. OP is the conditional expression.
5814 XEXP (OP, 0) is assumed to be a condition code register (integer or
5815 floating point) and its mode specifies what kind of comparison we made.
5817 DEST is the destination insn (i.e. the label), INSN is the source.
5819 REVERSED is nonzero if we should reverse the sense of the comparison.
5821 ANNUL is nonzero if we should generate an annulling branch. */
5823 const char *
5824 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5825 rtx insn)
5827 static char string[64];
5828 enum rtx_code code = GET_CODE (op);
5829 rtx cc_reg = XEXP (op, 0);
5830 enum machine_mode mode = GET_MODE (cc_reg);
5831 const char *labelno, *branch;
5832 int spaces = 8, far;
5833 char *p;
5835 /* v9 branches are limited to +-1MB. If it is too far away,
5836 change
5838 bne,pt %xcc, .LC30
5840 to
5842 be,pn %xcc, .+12
5843 nop
5844 ba .LC30
5846 and change
5848 fbne,a,pn %fcc2, .LC29
5850 to
5852 fbe,pt %fcc2, .+16
5853 nop
5854 ba .LC29 */
5856 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5857 if (reversed ^ far)
5859 /* Reversal of FP compares takes care -- an ordered compare
5860 becomes an unordered compare and vice versa. */
5861 if (mode == CCFPmode || mode == CCFPEmode)
5862 code = reverse_condition_maybe_unordered (code);
5863 else
5864 code = reverse_condition (code);
5867 /* Start by writing the branch condition. */
5868 if (mode == CCFPmode || mode == CCFPEmode)
5870 switch (code)
5872 case NE:
5873 branch = "fbne";
5874 break;
5875 case EQ:
5876 branch = "fbe";
5877 break;
5878 case GE:
5879 branch = "fbge";
5880 break;
5881 case GT:
5882 branch = "fbg";
5883 break;
5884 case LE:
5885 branch = "fble";
5886 break;
5887 case LT:
5888 branch = "fbl";
5889 break;
5890 case UNORDERED:
5891 branch = "fbu";
5892 break;
5893 case ORDERED:
5894 branch = "fbo";
5895 break;
5896 case UNGT:
5897 branch = "fbug";
5898 break;
5899 case UNLT:
5900 branch = "fbul";
5901 break;
5902 case UNEQ:
5903 branch = "fbue";
5904 break;
5905 case UNGE:
5906 branch = "fbuge";
5907 break;
5908 case UNLE:
5909 branch = "fbule";
5910 break;
5911 case LTGT:
5912 branch = "fblg";
5913 break;
5915 default:
5916 gcc_unreachable ();
5919 /* ??? !v9: FP branches cannot be preceded by another floating point
5920 insn. Because there is currently no concept of pre-delay slots,
5921 we can fix this only by always emitting a nop before a floating
5922 point branch. */
5924 string[0] = '\0';
5925 if (! TARGET_V9)
5926 strcpy (string, "nop\n\t");
5927 strcat (string, branch);
5929 else
5931 switch (code)
5933 case NE:
5934 branch = "bne";
5935 break;
5936 case EQ:
5937 branch = "be";
5938 break;
5939 case GE:
5940 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5941 branch = "bpos";
5942 else
5943 branch = "bge";
5944 break;
5945 case GT:
5946 branch = "bg";
5947 break;
5948 case LE:
5949 branch = "ble";
5950 break;
5951 case LT:
5952 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5953 branch = "bneg";
5954 else
5955 branch = "bl";
5956 break;
5957 case GEU:
5958 branch = "bgeu";
5959 break;
5960 case GTU:
5961 branch = "bgu";
5962 break;
5963 case LEU:
5964 branch = "bleu";
5965 break;
5966 case LTU:
5967 branch = "blu";
5968 break;
5970 default:
5971 gcc_unreachable ();
5973 strcpy (string, branch);
5975 spaces -= strlen (branch);
5976 p = strchr (string, '\0');
5978 /* Now add the annulling, the label, and a possible noop. */
5979 if (annul && ! far)
5981 strcpy (p, ",a");
5982 p += 2;
5983 spaces -= 2;
5986 if (TARGET_V9)
5988 rtx note;
5989 int v8 = 0;
5991 if (! far && insn && INSN_ADDRESSES_SET_P ())
5993 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5994 - INSN_ADDRESSES (INSN_UID (insn)));
5995 /* Leave some instructions for "slop". */
5996 if (delta < -260000 || delta >= 260000)
5997 v8 = 1;
6000 if (mode == CCFPmode || mode == CCFPEmode)
6002 static char v9_fcc_labelno[] = "%%fccX, ";
6003 /* Set the char indicating the number of the fcc reg to use. */
6004 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6005 labelno = v9_fcc_labelno;
6006 if (v8)
6008 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6009 labelno = "";
6012 else if (mode == CCXmode || mode == CCX_NOOVmode)
6014 labelno = "%%xcc, ";
6015 gcc_assert (! v8);
6017 else
6019 labelno = "%%icc, ";
6020 if (v8)
6021 labelno = "";
6024 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6026 strcpy (p,
6027 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6028 ? ",pt" : ",pn");
6029 p += 3;
6030 spaces -= 3;
6033 else
6034 labelno = "";
6036 if (spaces > 0)
6037 *p++ = '\t';
6038 else
6039 *p++ = ' ';
6040 strcpy (p, labelno);
6041 p = strchr (p, '\0');
6042 if (far)
6044 strcpy (p, ".+12\n\t nop\n\tb\t");
6045 /* Skip the next insn if requested or
6046 if we know that it will be a nop. */
6047 if (annul || ! final_sequence)
6048 p[3] = '6';
6049 p += 14;
6051 *p++ = '%';
6052 *p++ = 'l';
6053 *p++ = label + '0';
6054 *p++ = '%';
6055 *p++ = '#';
6056 *p = '\0';
6058 return string;
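/* For illustration, one possible "far" expansion built above: a
   too-distant "bne,pt %icc, .LC30" (with a branch-probability note)
   becomes the template

	"be,pn\t%%icc, .+12\n\t nop\n\tb\t%l0%#"

   i.e. a reversed short branch over an unconditional one, with ".+12"
   widened to ".+16" when a delay-slot insn must also be skipped.  */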
6061 /* Emit a library call comparison between floating point X and Y.
6062 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6063 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6064 values as arguments instead of the TFmode registers themselves;
6065 that is why we cannot call emit_float_lib_cmp. */
6066 void
6067 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6069 const char *qpfunc;
6070 rtx slot0, slot1, result, tem, tem2;
6071 enum machine_mode mode;
6073 switch (comparison)
6075 case EQ:
6076 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6077 break;
6079 case NE:
6080 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6081 break;
6083 case GT:
6084 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6085 break;
6087 case GE:
6088 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6089 break;
6091 case LT:
6092 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6093 break;
6095 case LE:
6096 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6097 break;
6099 case ORDERED:
6100 case UNORDERED:
6101 case UNGT:
6102 case UNLT:
6103 case UNEQ:
6104 case UNGE:
6105 case UNLE:
6106 case LTGT:
6107 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6108 break;
6110 default:
6111 gcc_unreachable ();
6114 if (TARGET_ARCH64)
6116 if (GET_CODE (x) != MEM)
6118 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6119 emit_move_insn (slot0, x);
6121 else
6122 slot0 = x;
6124 if (GET_CODE (y) != MEM)
6126 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6127 emit_move_insn (slot1, y);
6129 else
6130 slot1 = y;
6132 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6133 DImode, 2,
6134 XEXP (slot0, 0), Pmode,
6135 XEXP (slot1, 0), Pmode);
6137 mode = DImode;
6139 else
6141 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6142 SImode, 2,
6143 x, TFmode, y, TFmode);
6145 mode = SImode;
6149 /* Immediately move the result of the libcall into a pseudo
6150 register so reload doesn't clobber the value if it needs
6151 the return register for a spill reg. */
6152 result = gen_reg_rtx (mode);
6153 emit_move_insn (result, hard_libcall_value (mode));
6155 switch (comparison)
6157 default:
6158 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6159 break;
6160 case ORDERED:
6161 case UNORDERED:
6162 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
6163 NULL_RTX, mode, 0);
6164 break;
6165 case UNGT:
6166 case UNGE:
6167 emit_cmp_insn (result, const1_rtx,
6168 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6169 break;
6170 case UNLE:
6171 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6172 break;
6173 case UNLT:
6174 tem = gen_reg_rtx (mode);
6175 if (TARGET_ARCH32)
6176 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6177 else
6178 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6179 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6180 break;
6181 case UNEQ:
6182 case LTGT:
6183 tem = gen_reg_rtx (mode);
6184 if (TARGET_ARCH32)
6185 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6186 else
6187 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6188 tem2 = gen_reg_rtx (mode);
6189 if (TARGET_ARCH32)
6190 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6191 else
6192 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6193 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6194 NULL_RTX, mode, 0);
6195 break;
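/* The tests above encode the comparison-libcall result convention:
   _Q_cmp/_Qp_cmp return 0 for equal, 1 for less, 2 for greater and 3
   for unordered.  Spelled out (a reading aid, not extra logic):

     UNORDERED:  result == 3
     UNGT:       result > 1               i.e. {2,3}
     UNGE:       result != 1              i.e. {0,2,3}
     UNLE:       result != 2              i.e. {0,1,3}
     UNLT:       (result & 1) != 0        i.e. {1,3}
     UNEQ:       ((result + 1) & 2) == 0  i.e. {0,3}
     LTGT:       ((result + 1) & 2) != 0  i.e. {1,2}  */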
6199 /* Generate an unsigned DImode to FP conversion. This is the same code
6200 optabs would emit if we didn't have TFmode patterns. */
6202 void
6203 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6205 rtx neglab, donelab, i0, i1, f0, in, out;
6207 out = operands[0];
6208 in = force_reg (DImode, operands[1]);
6209 neglab = gen_label_rtx ();
6210 donelab = gen_label_rtx ();
6211 i0 = gen_reg_rtx (DImode);
6212 i1 = gen_reg_rtx (DImode);
6213 f0 = gen_reg_rtx (mode);
6215 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6217 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6218 emit_jump_insn (gen_jump (donelab));
6219 emit_barrier ();
6221 emit_label (neglab);
6223 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6224 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6225 emit_insn (gen_iordi3 (i0, i0, i1));
6226 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6227 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6229 emit_label (donelab);
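/* A minimal C model of the trick above (illustrative only, not
   compiled): when the top bit of IN is set, halve it with the low bit
   folded back in as a sticky bit, convert, and double the result; the
   sticky bit keeps the final rounding correct.

     double floatunsdi (unsigned long long x)
     {
       if ((long long) x >= 0)
         return (double) (long long) x;        // fits signed convert
       unsigned long long h = (x >> 1) | (x & 1);
       double f = (double) (long long) h;
       return f + f;
     }
*/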
6232 /* Generate an FP to unsigned DImode conversion. This is the same code
6233 optabs would emit if we didn't have TFmode patterns. */
6235 void
6236 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6238 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6240 out = operands[0];
6241 in = force_reg (mode, operands[1]);
6242 neglab = gen_label_rtx ();
6243 donelab = gen_label_rtx ();
6244 i0 = gen_reg_rtx (DImode);
6245 i1 = gen_reg_rtx (DImode);
6246 limit = gen_reg_rtx (mode);
6247 f0 = gen_reg_rtx (mode);
6249 emit_move_insn (limit,
6250 CONST_DOUBLE_FROM_REAL_VALUE (
6251 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6252 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6254 emit_insn (gen_rtx_SET (VOIDmode,
6255 out,
6256 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6257 emit_jump_insn (gen_jump (donelab));
6258 emit_barrier ();
6260 emit_label (neglab);
6262 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6263 emit_insn (gen_rtx_SET (VOIDmode,
6264 i0,
6265 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6266 emit_insn (gen_movdi (i1, const1_rtx));
6267 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6268 emit_insn (gen_xordi3 (out, i0, i1));
6270 emit_label (donelab);
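/* A minimal C model of the conversion above (illustrative only, not
   compiled): values below 2^63 convert directly; larger ones are
   shifted down by 2^63 before the signed conversion and the sign bit
   is restored with XOR.

     unsigned long long fixunsdi (double x)
     {
       const double two63 = 9223372036854775808.0;
       if (x < two63)
         return (unsigned long long) (long long) x;
       return (unsigned long long) (long long) (x - two63)
              ^ (1ULL << 63);
     }
*/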
6273 /* Return the string to output a conditional branch to LABEL, testing
6274 register REG. LABEL is the operand number of the label; REG is the
6275 operand number of the reg. OP is the conditional expression. The mode
6276 of REG says what kind of comparison we made.
6278 DEST is the destination insn (i.e. the label), INSN is the source.
6280 REVERSED is nonzero if we should reverse the sense of the comparison.
6282 ANNUL is nonzero if we should generate an annulling branch. */
6284 const char *
6285 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6286 int annul, rtx insn)
6288 static char string[64];
6289 enum rtx_code code = GET_CODE (op);
6290 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6291 rtx note;
6292 int far;
6293 char *p;
6295 /* Branches on registers are limited to +-128KB. If it is too far away,
6296 change
6298 brnz,pt %g1, .LC30
6300 to
6302 brz,pn %g1, .+12
6303 nop
6304 ba,pt %xcc, .LC30
6306 and change
6308 brgez,a,pn %o1, .LC29
6310 to
6312 brlz,pt %o1, .+16
6313 nop
6314 ba,pt %xcc, .LC29 */
6316 far = get_attr_length (insn) >= 3;
6318 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6319 if (reversed ^ far)
6320 code = reverse_condition (code);
6322 /* Only 64 bit versions of these instructions exist. */
6323 gcc_assert (mode == DImode);
6325 /* Start by writing the branch condition. */
6327 switch (code)
6329 case NE:
6330 strcpy (string, "brnz");
6331 break;
6333 case EQ:
6334 strcpy (string, "brz");
6335 break;
6337 case GE:
6338 strcpy (string, "brgez");
6339 break;
6341 case LT:
6342 strcpy (string, "brlz");
6343 break;
6345 case LE:
6346 strcpy (string, "brlez");
6347 break;
6349 case GT:
6350 strcpy (string, "brgz");
6351 break;
6353 default:
6354 gcc_unreachable ();
6357 p = strchr (string, '\0');
6359 /* Now add the annulling, reg, label, and nop. */
6360 if (annul && ! far)
6362 strcpy (p, ",a");
6363 p += 2;
6366 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6368 strcpy (p,
6369 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6370 ? ",pt" : ",pn");
6371 p += 3;
6374 *p = p < string + 8 ? '\t' : ' ';
6375 p++;
6376 *p++ = '%';
6377 *p++ = '0' + reg;
6378 *p++ = ',';
6379 *p++ = ' ';
6380 if (far)
6382 int veryfar = 1, delta;
6384 if (INSN_ADDRESSES_SET_P ())
6386 delta = (INSN_ADDRESSES (INSN_UID (dest))
6387 - INSN_ADDRESSES (INSN_UID (insn)));
6388 /* Leave some instructions for "slop". */
6389 if (delta >= -260000 && delta < 260000)
6390 veryfar = 0;
6393 strcpy (p, ".+12\n\t nop\n\t");
6394 /* Skip the next insn if requested or
6395 if we know that it will be a nop. */
6396 if (annul || ! final_sequence)
6397 p[3] = '6';
6398 p += 12;
6399 if (veryfar)
6401 strcpy (p, "b\t");
6402 p += 2;
6404 else
6406 strcpy (p, "ba,pt\t%%xcc, ");
6407 p += 13;
6410 *p++ = '%';
6411 *p++ = 'l';
6412 *p++ = '0' + label;
6413 *p++ = '%';
6414 *p++ = '#';
6415 *p = '\0';
6417 return string;
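/* For illustration: with REG operand 1 and LABEL operand 0, a near
   branch with a not-taken prediction note comes out of the code above
   as the template "brz,pn\t%1, %l0%#".  */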
6420 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6421 Such instructions cannot be used in the delay slot of a return insn on v9.
6422 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6423 */
6425 static int
6426 epilogue_renumber (register rtx *where, int test)
6428 register const char *fmt;
6429 register int i;
6430 register enum rtx_code code;
6432 if (*where == 0)
6433 return 0;
6435 code = GET_CODE (*where);
6437 switch (code)
6439 case REG:
6440 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6441 return 1;
6442 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6443 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6444 case SCRATCH:
6445 case CC0:
6446 case PC:
6447 case CONST_INT:
6448 case CONST_DOUBLE:
6449 return 0;
6451 /* Do not replace the frame pointer with the stack pointer because
6452 it can cause the delayed instruction to load below the stack.
6453 This occurs when instructions like:
6455 (set (reg/i:SI 24 %i0)
6456 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6457 (const_int -20 [0xffffffec])) 0))
6459 are in the return delayed slot. */
6460 case PLUS:
6461 if (GET_CODE (XEXP (*where, 0)) == REG
6462 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6463 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6464 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6465 return 1;
6466 break;
6468 case MEM:
6469 if (SPARC_STACK_BIAS
6470 && GET_CODE (XEXP (*where, 0)) == REG
6471 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6472 return 1;
6473 break;
6475 default:
6476 break;
6479 fmt = GET_RTX_FORMAT (code);
6481 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6483 if (fmt[i] == 'E')
6485 register int j;
6486 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6487 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6488 return 1;
6490 else if (fmt[i] == 'e'
6491 && epilogue_renumber (&(XEXP (*where, i)), test))
6492 return 1;
6494 return 0;
6497 /* Leaf functions and non-leaf functions have different needs. */
6499 static const int
6500 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6502 static const int
6503 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6505 static const int *const reg_alloc_orders[] = {
6506 reg_leaf_alloc_order,
6507 reg_nonleaf_alloc_order};
6509 void
6510 order_regs_for_local_alloc (void)
6512 static int last_order_nonleaf = 1;
6514 if (df_regs_ever_live_p (15) != last_order_nonleaf)
6516 last_order_nonleaf = !last_order_nonleaf;
6517 memcpy ((char *) reg_alloc_order,
6518 (const char *) reg_alloc_orders[last_order_nonleaf],
6519 FIRST_PSEUDO_REGISTER * sizeof (int));
6523 /* Return 1 if REG and MEM are legitimate enough to allow the various
6524 mem<-->reg splits to be run. */
6526 int
6527 sparc_splitdi_legitimate (rtx reg, rtx mem)
6529 /* Punt if we are here by mistake. */
6530 gcc_assert (reload_completed);
6532 /* We must have an offsettable memory reference. */
6533 if (! offsettable_memref_p (mem))
6534 return 0;
6536 /* If we have legitimate args for ldd/std, we do not want
6537 the split to happen. */
6538 if ((REGNO (reg) % 2) == 0
6539 && mem_min_alignment (mem, 8))
6540 return 0;
6542 /* Success. */
6543 return 1;
6546 /* Return 1 if x and y are some kind of REG and they refer to
6547 different hard registers. This test is guaranteed to be
6548 run after reload. */
6550 int
6551 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6553 if (GET_CODE (x) != REG)
6554 return 0;
6555 if (GET_CODE (y) != REG)
6556 return 0;
6557 if (REGNO (x) == REGNO (y))
6558 return 0;
6559 return 1;
6562 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6563 This makes them candidates for using ldd and std insns.
6565 Note reg1 and reg2 *must* be hard registers. */
6567 int
6568 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6570 /* We might have been passed a SUBREG. */
6571 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6572 return 0;
6574 if (REGNO (reg1) % 2 != 0)
6575 return 0;
6577 /* Integer ldd is deprecated in SPARC V9. */
6578 if (TARGET_V9 && REGNO (reg1) < 32)
6579 return 0;
6581 return (REGNO (reg1) == REGNO (reg2) - 1);
6584 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6585 an ldd or std insn.
6587 This can only happen when addr1 and addr2, the addresses in mem1
6588 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6589 addr1 must also be aligned on a 64-bit boundary.
6591 Also, if dependent_reg_rtx is not null, it should not be used to
6592 compute the address for mem1, i.e. we cannot optimize a sequence
6593 like:
6594 ld [%o0], %o0
6595 ld [%o0 + 4], %o1
6596 to
6597 ldd [%o0], %o0
6598 nor:
6599 ld [%g3 + 4], %g3
6600 ld [%g3], %g2
6601 to
6602 ldd [%g3], %g2
6604 But, note that the transformation from:
6605 ld [%g2 + 4], %g3
6606 ld [%g2], %g2
6607 to
6608 ldd [%g2], %g2
6609 is perfectly fine. Thus, the peephole2 patterns always pass us
6610 the destination register of the first load, never the second one.
6612 For stores we don't have a similar problem, so dependent_reg_rtx is
6613 NULL_RTX. */
6615 int
6616 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6618 rtx addr1, addr2;
6619 unsigned int reg1;
6620 HOST_WIDE_INT offset1;
6622 /* The mems cannot be volatile. */
6623 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6624 return 0;
6626 /* MEM1 should be aligned on a 64-bit boundary. */
6627 if (MEM_ALIGN (mem1) < 64)
6628 return 0;
6630 addr1 = XEXP (mem1, 0);
6631 addr2 = XEXP (mem2, 0);
6633 /* Extract a register number and offset (if used) from the first addr. */
6634 if (GET_CODE (addr1) == PLUS)
6636 /* If not a REG, return zero. */
6637 if (GET_CODE (XEXP (addr1, 0)) != REG)
6638 return 0;
6639 else
6641 reg1 = REGNO (XEXP (addr1, 0));
6642 /* The offset must be constant! */
6643 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6644 return 0;
6645 offset1 = INTVAL (XEXP (addr1, 1));
6648 else if (GET_CODE (addr1) != REG)
6649 return 0;
6650 else
6652 reg1 = REGNO (addr1);
6653 /* This was a simple (mem (reg)) expression. Offset is 0. */
6654 offset1 = 0;
6657 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
6658 if (GET_CODE (addr2) != PLUS)
6659 return 0;
6661 if (GET_CODE (XEXP (addr2, 0)) != REG
6662 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6663 return 0;
6665 if (reg1 != REGNO (XEXP (addr2, 0)))
6666 return 0;
6668 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6669 return 0;
6671 /* The first offset must be evenly divisible by 8 to ensure the
6672 address is 64 bit aligned. */
6673 if (offset1 % 8 != 0)
6674 return 0;
6676 /* The offset for the second addr must be 4 more than the first addr. */
6677 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6678 return 0;
6680 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6681 instructions. */
6682 return 1;
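/* Worked examples of the checks above (addresses invented for
   illustration):

     accepted:  [%o0]     with [%o0 + 4]    offset 0, base aligned
     accepted:  [%o1 + 8] with [%o1 + 12]   offset 8, consecutive
     rejected:  [%o2 + 4] with [%o2 + 8]    first offset not 0 mod 8
     rejected:  [%o3]     with [%o4 + 4]    different base registers  */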
6685 /* Return 1 if reg is a pseudo, or is the first register in
6686 a hard register pair. This makes it a candidate for use in
6687 ldd and std insns. */
6689 int
6690 register_ok_for_ldd (rtx reg)
6692 /* We might have been passed a SUBREG. */
6693 if (GET_CODE (reg) != REG)
6694 return 0;
6696 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6697 return (REGNO (reg) % 2 == 0);
6698 else
6699 return 1;
6702 /* Print operand X (an rtx) in assembler syntax to file FILE.
6703 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6704 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6706 void
6707 print_operand (FILE *file, rtx x, int code)
6709 switch (code)
6711 case '#':
6712 /* Output an insn in a delay slot. */
6713 if (final_sequence)
6714 sparc_indent_opcode = 1;
6715 else
6716 fputs ("\n\t nop", file);
6717 return;
6718 case '*':
6719 /* Output an annul flag if there's nothing for the delay slot and we
6720 are optimizing. This is always used with '(' below.
6721 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6722 this is a dbx bug. So, we only do this when optimizing.
6723 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6724 Always emit a nop in case the next instruction is a branch. */
6725 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6726 fputs (",a", file);
6727 return;
6728 case '(':
6729 /* Output a 'nop' if there's nothing for the delay slot and we are
6730 not optimizing. This is always used with '*' above. */
6731 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6732 fputs ("\n\t nop", file);
6733 else if (final_sequence)
6734 sparc_indent_opcode = 1;
6735 return;
6736 case ')':
6737 /* Output the right displacement from the saved PC on function return.
6738 The caller may have placed an "unimp" insn immediately after the call
6739 so we have to account for it. This insn is used in the 32-bit ABI
6740 when calling a function that returns a non zero-sized structure. The
6741 64-bit ABI doesn't have it. Be careful to have this test be the same
6742 as that used on the call. The exception here is that when
6743 sparc_std_struct_return is enabled, the psABI is followed exactly
6744 and the adjustment is made by the code in sparc_struct_value_rtx.
6745 The call emitted is the same when sparc_std_struct_return is
6746 present. */
6747 if (! TARGET_ARCH64
6748 && current_function_returns_struct
6749 && ! sparc_std_struct_return
6750 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6751 == INTEGER_CST)
6752 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6753 fputs ("12", file);
6754 else
6755 fputc ('8', file);
6756 return;
6757 case '_':
6758 /* Output the Embedded Medium/Anywhere code model base register. */
6759 fputs (EMBMEDANY_BASE_REG, file);
6760 return;
6761 case '&':
6762 /* Print some local dynamic TLS name. */
6763 assemble_name (file, get_some_local_dynamic_name ());
6764 return;
6766 case 'Y':
6767 /* Adjust the operand to take into account a RESTORE operation. */
6768 if (GET_CODE (x) == CONST_INT)
6769 break;
6770 else if (GET_CODE (x) != REG)
6771 output_operand_lossage ("invalid %%Y operand");
6772 else if (REGNO (x) < 8)
6773 fputs (reg_names[REGNO (x)], file);
6774 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6775 fputs (reg_names[REGNO (x)-16], file);
6776 else
6777 output_operand_lossage ("invalid %%Y operand");
6778 return;
6779 case 'L':
6780 /* Print out the low order register name of a register pair. */
6781 if (WORDS_BIG_ENDIAN)
6782 fputs (reg_names[REGNO (x)+1], file);
6783 else
6784 fputs (reg_names[REGNO (x)], file);
6785 return;
6786 case 'H':
6787 /* Print out the high order register name of a register pair. */
6788 if (WORDS_BIG_ENDIAN)
6789 fputs (reg_names[REGNO (x)], file);
6790 else
6791 fputs (reg_names[REGNO (x)+1], file);
6792 return;
6793 case 'R':
6794 /* Print out the second register name of a register pair or quad.
6795 I.e., R (%o0) => %o1. */
6796 fputs (reg_names[REGNO (x)+1], file);
6797 return;
6798 case 'S':
6799 /* Print out the third register name of a register quad.
6800 I.e., S (%o0) => %o2. */
6801 fputs (reg_names[REGNO (x)+2], file);
6802 return;
6803 case 'T':
6804 /* Print out the fourth register name of a register quad.
6805 I.e., T (%o0) => %o3. */
6806 fputs (reg_names[REGNO (x)+3], file);
6807 return;
6808 case 'x':
6809 /* Print a condition code register. */
6810 if (REGNO (x) == SPARC_ICC_REG)
6812 /* We don't handle CC[X]_NOOVmode because they're not supposed
6813 to occur here. */
6814 if (GET_MODE (x) == CCmode)
6815 fputs ("%icc", file);
6816 else if (GET_MODE (x) == CCXmode)
6817 fputs ("%xcc", file);
6818 else
6819 gcc_unreachable ();
6821 else
6822 /* %fccN register */
6823 fputs (reg_names[REGNO (x)], file);
6824 return;
6825 case 'm':
6826 /* Print the operand's address only. */
6827 output_address (XEXP (x, 0));
6828 return;
6829 case 'r':
6830 /* In this case we need a register. Use %g0 if the
6831 operand is const0_rtx. */
6832 if (x == const0_rtx
6833 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6835 fputs ("%g0", file);
6836 return;
6838 else
6839 break;
6841 case 'A':
6842 switch (GET_CODE (x))
6844 case IOR: fputs ("or", file); break;
6845 case AND: fputs ("and", file); break;
6846 case XOR: fputs ("xor", file); break;
6847 default: output_operand_lossage ("invalid %%A operand");
6849 return;
6851 case 'B':
6852 switch (GET_CODE (x))
6854 case IOR: fputs ("orn", file); break;
6855 case AND: fputs ("andn", file); break;
6856 case XOR: fputs ("xnor", file); break;
6857 default: output_operand_lossage ("invalid %%B operand");
6859 return;
6861 /* These are used by the conditional move instructions. */
6862 case 'c' :
6863 case 'C':
6865 enum rtx_code rc = GET_CODE (x);
6867 if (code == 'c')
6869 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6870 if (mode == CCFPmode || mode == CCFPEmode)
6871 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6872 else
6873 rc = reverse_condition (GET_CODE (x));
6875 switch (rc)
6877 case NE: fputs ("ne", file); break;
6878 case EQ: fputs ("e", file); break;
6879 case GE: fputs ("ge", file); break;
6880 case GT: fputs ("g", file); break;
6881 case LE: fputs ("le", file); break;
6882 case LT: fputs ("l", file); break;
6883 case GEU: fputs ("geu", file); break;
6884 case GTU: fputs ("gu", file); break;
6885 case LEU: fputs ("leu", file); break;
6886 case LTU: fputs ("lu", file); break;
6887 case LTGT: fputs ("lg", file); break;
6888 case UNORDERED: fputs ("u", file); break;
6889 case ORDERED: fputs ("o", file); break;
6890 case UNLT: fputs ("ul", file); break;
6891 case UNLE: fputs ("ule", file); break;
6892 case UNGT: fputs ("ug", file); break;
6893 case UNGE: fputs ("uge", file); break;
6894 case UNEQ: fputs ("ue", file); break;
6895 default: output_operand_lossage (code == 'c'
6896 ? "invalid %%c operand"
6897 : "invalid %%C operand");
6899 return;
6902 /* These are used by the movr instruction pattern. */
6903 case 'd':
6904 case 'D':
6906 enum rtx_code rc = (code == 'd'
6907 ? reverse_condition (GET_CODE (x))
6908 : GET_CODE (x));
6909 switch (rc)
6911 case NE: fputs ("ne", file); break;
6912 case EQ: fputs ("e", file); break;
6913 case GE: fputs ("gez", file); break;
6914 case LT: fputs ("lz", file); break;
6915 case LE: fputs ("lez", file); break;
6916 case GT: fputs ("gz", file); break;
6917 default: output_operand_lossage (code == 'd'
6918 ? "invalid %%d operand"
6919 : "invalid %%D operand");
6921 return;
6924 case 'b':
6926 /* Print a sign-extended character. */
6927 int i = trunc_int_for_mode (INTVAL (x), QImode);
6928 fprintf (file, "%d", i);
6929 return;
6932 case 'f':
6933 /* Operand must be a MEM; write its address. */
6934 if (GET_CODE (x) != MEM)
6935 output_operand_lossage ("invalid %%f operand");
6936 output_address (XEXP (x, 0));
6937 return;
6939 case 's':
6941 /* Print a sign-extended 32-bit value. */
6942 HOST_WIDE_INT i;
6943 if (GET_CODE(x) == CONST_INT)
6944 i = INTVAL (x);
6945 else if (GET_CODE(x) == CONST_DOUBLE)
6946 i = CONST_DOUBLE_LOW (x);
6947 else
6949 output_operand_lossage ("invalid %%s operand");
6950 return;
6952 i = trunc_int_for_mode (i, SImode);
6953 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6954 return;
6957 case 0:
6958 /* Do nothing special. */
6959 break;
6961 default:
6962 /* Undocumented flag. */
6963 output_operand_lossage ("invalid operand output code");
6966 if (GET_CODE (x) == REG)
6967 fputs (reg_names[REGNO (x)], file);
6968 else if (GET_CODE (x) == MEM)
6970 fputc ('[', file);
6971 /* Poor Sun assembler doesn't understand absolute addressing. */
6972 if (CONSTANT_P (XEXP (x, 0)))
6973 fputs ("%g0+", file);
6974 output_address (XEXP (x, 0));
6975 fputc (']', file);
6977 else if (GET_CODE (x) == HIGH)
6979 fputs ("%hi(", file);
6980 output_addr_const (file, XEXP (x, 0));
6981 fputc (')', file);
6983 else if (GET_CODE (x) == LO_SUM)
6985 print_operand (file, XEXP (x, 0), 0);
6986 if (TARGET_CM_MEDMID)
6987 fputs ("+%l44(", file);
6988 else
6989 fputs ("+%lo(", file);
6990 output_addr_const (file, XEXP (x, 1));
6991 fputc (')', file);
6993 else if (GET_CODE (x) == CONST_DOUBLE
6994 && (GET_MODE (x) == VOIDmode
6995 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6997 if (CONST_DOUBLE_HIGH (x) == 0)
6998 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6999 else if (CONST_DOUBLE_HIGH (x) == -1
7000 && CONST_DOUBLE_LOW (x) < 0)
7001 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7002 else
7003 output_operand_lossage ("long long constant not a valid immediate operand");
7005 else if (GET_CODE (x) == CONST_DOUBLE)
7006 output_operand_lossage ("floating point constant not a valid immediate operand");
7007 else output_addr_const (file, x);
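/* A few sample expansions of the codes above (for illustration): with
   operand 0 the register pair starting at %o0 and WORDS_BIG_ENDIAN set
   as on SPARC, "%L0" prints %o1 and "%H0" prints %o0; "%r0" prints %g0
   when the operand is a zero constant; and "%#" appends "\n\t nop"
   when no insn fills the delay slot.  */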
7010 /* Target hook for assembling integer objects. The sparc version has
7011 special handling for aligned DI-mode objects. */
7013 static bool
7014 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7016 /* ??? We only output .xword's for symbols and only then in environments
7017 where the assembler can handle them. */
7018 if (aligned_p && size == 8
7019 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7021 if (TARGET_V9)
7023 assemble_integer_with_op ("\t.xword\t", x);
7024 return true;
7026 else
7028 assemble_aligned_integer (4, const0_rtx);
7029 assemble_aligned_integer (4, x);
7030 return true;
7033 return default_assemble_integer (x, size, aligned_p);
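/* Example output (illustrative, for a symbol foo): on V9 assemblers an
   aligned 8-byte reference emits

	.xword	foo

   while elsewhere it is split into a zero high word followed by the
   32-bit address, typically

	.word	0
	.word	foo  */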
7036 /* Return the value of a code used in the .proc pseudo-op that says
7037 what kind of result this function returns. For non-C types, we pick
7038 the closest C type. */
7040 #ifndef SHORT_TYPE_SIZE
7041 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7042 #endif
7044 #ifndef INT_TYPE_SIZE
7045 #define INT_TYPE_SIZE BITS_PER_WORD
7046 #endif
7048 #ifndef LONG_TYPE_SIZE
7049 #define LONG_TYPE_SIZE BITS_PER_WORD
7050 #endif
7052 #ifndef LONG_LONG_TYPE_SIZE
7053 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7054 #endif
7056 #ifndef FLOAT_TYPE_SIZE
7057 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7058 #endif
7060 #ifndef DOUBLE_TYPE_SIZE
7061 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7062 #endif
7064 #ifndef LONG_DOUBLE_TYPE_SIZE
7065 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7066 #endif
7068 unsigned long
7069 sparc_type_code (register tree type)
7071 register unsigned long qualifiers = 0;
7072 register unsigned shift;
7074 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7075 setting more, since some assemblers will give an error for this. Also,
7076 we must be careful to avoid shifts of 32 bits or more to avoid getting
7077 unpredictable results. */
7079 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7081 switch (TREE_CODE (type))
7083 case ERROR_MARK:
7084 return qualifiers;
7086 case ARRAY_TYPE:
7087 qualifiers |= (3 << shift);
7088 break;
7090 case FUNCTION_TYPE:
7091 case METHOD_TYPE:
7092 qualifiers |= (2 << shift);
7093 break;
7095 case POINTER_TYPE:
7096 case REFERENCE_TYPE:
7097 case OFFSET_TYPE:
7098 qualifiers |= (1 << shift);
7099 break;
7101 case RECORD_TYPE:
7102 return (qualifiers | 8);
7104 case UNION_TYPE:
7105 case QUAL_UNION_TYPE:
7106 return (qualifiers | 9);
7108 case ENUMERAL_TYPE:
7109 return (qualifiers | 10);
7111 case VOID_TYPE:
7112 return (qualifiers | 16);
7114 case INTEGER_TYPE:
7115 /* If this is a range type, consider it to be the underlying
7116 type. */
7117 if (TREE_TYPE (type) != 0)
7118 break;
7120 /* Carefully distinguish all the standard types of C,
7121 without messing up if the language is not C. We do this by
7122 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7123 look at both the names and the above fields, but that's redundant.
7124 Any type whose size is between two C types will be considered
7125 to be the wider of the two types. Also, we do not have a
7126 special code to use for "long long", so anything wider than
7127 long is treated the same. Note that we can't distinguish
7128 between "int" and "long" in this code if they are the same
7129 size, but that's fine, since neither can the assembler. */
7131 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7132 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7134 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7135 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7137 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7138 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7140 else
7141 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7143 case REAL_TYPE:
7144 /* If this is a range type, consider it to be the underlying
7145 type. */
7146 if (TREE_TYPE (type) != 0)
7147 break;
7149 /* Carefully distinguish all the standard types of C,
7150 without messing up if the language is not C. */
7152 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7153 return (qualifiers | 6);
7155 else
7156 return (qualifiers | 7);
7158 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7159 /* ??? We need to distinguish between double and float complex types,
7160 but I don't know how yet because I can't reach this code from
7161 existing front-ends. */
7162 return (qualifiers | 7); /* Who knows? */
7164 case VECTOR_TYPE:
7165 case BOOLEAN_TYPE: /* Boolean truth value type. */
7166 case LANG_TYPE: /* ? */
7167 return qualifiers;
7169 default:
7170 gcc_unreachable (); /* Not a type! */
7174 return qualifiers;
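/* A worked example of the loop above: for the C type "int **" the walk
   sees two POINTER_TYPEs and then the INTEGER_TYPE, so it accumulates
   (1 << 6) | (1 << 8) and merges in the signed-int code 4:

     0x40 | 0x100 | 4 == 0x144  */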
7177 /* Nested function support. */
7179 /* Emit RTL insns to initialize the variable parts of a trampoline.
7180 FNADDR is an RTX for the address of the function's pure code.
7181 CXT is an RTX for the static chain value for the function.
7183 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7184 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7185 (to store insns). This is a bit excessive. Perhaps a different
7186 mechanism would be better here.
7188 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7190 void
7191 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7193 /* SPARC 32-bit trampoline:
7195 sethi %hi(fn), %g1
7196 sethi %hi(static), %g2
7197 jmp %g1+%lo(fn)
7198 or %g2, %lo(static), %g2
7200 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7201 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7204 emit_move_insn
7205 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7206 expand_binop (SImode, ior_optab,
7207 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7208 size_int (10), 0, 1),
7209 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7210 NULL_RTX, 1, OPTAB_DIRECT));
7212 emit_move_insn
7213 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7214 expand_binop (SImode, ior_optab,
7215 expand_shift (RSHIFT_EXPR, SImode, cxt,
7216 size_int (10), 0, 1),
7217 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7218 NULL_RTX, 1, OPTAB_DIRECT));
7220 emit_move_insn
7221 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7222 expand_binop (SImode, ior_optab,
7223 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7224 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7225 NULL_RTX, 1, OPTAB_DIRECT));
7227 emit_move_insn
7228 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7229 expand_binop (SImode, ior_optab,
7230 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7231 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7232 NULL_RTX, 1, OPTAB_DIRECT));
7234 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7235 aligned on a 16 byte boundary so one flush clears it all. */
7236 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7237 if (sparc_cpu != PROCESSOR_ULTRASPARC
7238 && sparc_cpu != PROCESSOR_ULTRASPARC3
7239 && sparc_cpu != PROCESSOR_NIAGARA)
7240 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7241 plus_constant (tramp, 8)))));
7243 /* Call __enable_execute_stack after writing onto the stack to make sure
7244 the stack address is accessible. */
7245 #ifdef ENABLE_EXECUTE_STACK
7246 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7247 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7248 #endif
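/* A worked encoding example for the stores above (FNADDR value
   invented for illustration): with FNADDR == 0x00012340 the first word
   is 0x03000000 | (0x00012340 >> 10), i.e. "sethi %hi(0x12340), %g1",
   and the third word is 0x81c06000 | (0x00012340 & 0x3ff), i.e.
   "jmp %g1+%lo(0x12340)", matching the SETHI and JMPL encodings quoted
   at the top of the function.  */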
7252 /* The 64-bit version is simpler because it makes more sense to load the
7253 values as "immediate" data out of the trampoline. It's also easier since
7254 we can read the PC without clobbering a register. */
7256 void
7257 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7259 /* SPARC 64-bit trampoline:
7261 rd %pc, %g1
7262 ldx [%g1+24], %g5
7263 jmp %g5
7264 ldx [%g1+16], %g5
7265 +16 bytes data
7268 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7269 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7270 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7271 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7272 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7273 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7274 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7275 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7276 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7277 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7278 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7280 if (sparc_cpu != PROCESSOR_ULTRASPARC
7281 && sparc_cpu != PROCESSOR_ULTRASPARC3
7282 && sparc_cpu != PROCESSOR_NIAGARA)
7283 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7285 /* Call __enable_execute_stack after writing onto the stack to make sure
7286 the stack address is accessible. */
7287 #ifdef ENABLE_EXECUTE_STACK
7288 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7289 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7290 #endif
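/* The four magic constants above are the pre-encoded instructions from
   the comment, decoded here as a reading aid:

     0x83414000	rd  %pc, %g1
     0xca586018	ldx [%g1+24], %g5
     0x81c14000	jmp %g5
     0xca586010	ldx [%g1+16], %g5  */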
7293 /* Adjust the cost of a scheduling dependency. Return the new cost of
7294 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7296 static int
7297 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7299 enum attr_type insn_type;
7301 if (! recog_memoized (insn))
7302 return 0;
7304 insn_type = get_attr_type (insn);
7306 if (REG_NOTE_KIND (link) == 0)
7308 /* Data dependency; DEP_INSN writes a register that INSN reads some
7309 cycles later. */
7311 /* if a load, then the dependence must be on the memory address;
7312 add an extra "cycle". Note that the cost could be two cycles
7313 if the reg was written late in an instruction group; we cannot tell
7314 here. */
7315 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7316 return cost + 3;
7318 /* Get the delay only if the address of the store is the dependence. */
7319 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7321 rtx pat = PATTERN(insn);
7322 rtx dep_pat = PATTERN (dep_insn);
7324 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7325 return cost; /* This should not happen! */
7327 /* The dependency between the two instructions was on the data that
7328 is being stored. Assume that this implies that the address of the
7329 store is not dependent. */
7330 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7331 return cost;
7333 return cost + 3; /* An approximation. */
7336 /* A shift instruction cannot receive its data from an instruction
7337 in the same cycle; add a one cycle penalty. */
7338 if (insn_type == TYPE_SHIFT)
7339 return cost + 3; /* Split before cascade into shift. */
7341 else
7343 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7344 INSN writes some cycles later. */
7346 /* These are only significant for the fpu unit; writing a fp reg before
7347 the fpu has finished with it stalls the processor. */
7349 /* Reusing an integer register causes no problems. */
7350 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7351 return 0;
7354 return cost;
7357 static int
7358 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7360 enum attr_type insn_type, dep_type;
7361 rtx pat = PATTERN(insn);
7362 rtx dep_pat = PATTERN (dep_insn);
7364 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7365 return cost;
7367 insn_type = get_attr_type (insn);
7368 dep_type = get_attr_type (dep_insn);
7370 switch (REG_NOTE_KIND (link))
7372 case 0:
7373 /* Data dependency; DEP_INSN writes a register that INSN reads some
7374 cycles later. */
7376 switch (insn_type)
7378 case TYPE_STORE:
7379 case TYPE_FPSTORE:
7380 /* Get the delay iff the address of the store is the dependence. */
7381 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7382 return cost;
7384 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7385 return cost;
7386 return cost + 3;
7388 case TYPE_LOAD:
7389 case TYPE_SLOAD:
7390 case TYPE_FPLOAD:
7391 /* If a load, then the dependence must be on the memory address. If
7392 the addresses aren't equal, then it might be a false dependency. */
7393 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7395 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7396 || GET_CODE (SET_DEST (dep_pat)) != MEM
7397 || GET_CODE (SET_SRC (pat)) != MEM
7398 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7399 XEXP (SET_SRC (pat), 0)))
7400 return cost + 2;
7402 return cost + 8;
7404 break;
7406 case TYPE_BRANCH:
7407 /* Compare to branch latency is 0. There is no benefit from
7408 separating compare and branch. */
7409 if (dep_type == TYPE_COMPARE)
7410 return 0;
7411 /* Floating point compare to branch latency is less than
7412 compare to conditional move. */
7413 if (dep_type == TYPE_FPCMP)
7414 return cost - 1;
7415 break;
7416 default:
7417 break;
7419 break;
7421 case REG_DEP_ANTI:
7422 /* Anti-dependencies only penalize the fpu unit. */
7423 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7424 return 0;
7425 break;
7427 default:
7428 break;
7431 return cost;
7434 static int
7435 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7437 switch (sparc_cpu)
7439 case PROCESSOR_SUPERSPARC:
7440 cost = supersparc_adjust_cost (insn, link, dep, cost);
7441 break;
7442 case PROCESSOR_HYPERSPARC:
7443 case PROCESSOR_SPARCLITE86X:
7444 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7445 break;
7446 default:
7447 break;
7449 return cost;
7452 static void
7453 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7454 int sched_verbose ATTRIBUTE_UNUSED,
7455 int max_ready ATTRIBUTE_UNUSED)
7459 static int
7460 sparc_use_sched_lookahead (void)
7462 if (sparc_cpu == PROCESSOR_NIAGARA)
7463 return 0;
7464 if (sparc_cpu == PROCESSOR_ULTRASPARC
7465 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7466 return 4;
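/* The bitmask below is a set-membership test: it is nonzero exactly
   when sparc_cpu is one of the three processors named in the mask.  */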
7467 if ((1 << sparc_cpu) &
7468 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7469 (1 << PROCESSOR_SPARCLITE86X)))
7470 return 3;
7471 return 0;
7474 static int
7475 sparc_issue_rate (void)
7477 switch (sparc_cpu)
7479 case PROCESSOR_NIAGARA:
7480 default:
7481 return 1;
7482 case PROCESSOR_V9:
7483 /* Assume V9 processors are capable of at least dual-issue. */
7484 return 2;
7485 case PROCESSOR_SUPERSPARC:
7486 return 3;
7487 case PROCESSOR_HYPERSPARC:
7488 case PROCESSOR_SPARCLITE86X:
7489 return 2;
7490 case PROCESSOR_ULTRASPARC:
7491 case PROCESSOR_ULTRASPARC3:
7492 return 4;
7496 static int
7497 set_extends (rtx insn)
7499 register rtx pat = PATTERN (insn);
7501 switch (GET_CODE (SET_SRC (pat)))
7503 /* Load and some shift instructions zero extend. */
7504 case MEM:
7505 case ZERO_EXTEND:
7506 /* sethi clears the high bits */
7507 case HIGH:
7508 /* LO_SUM is used with sethi. sethi cleared the high
7509 bits and the values used with lo_sum are positive. */
7510 case LO_SUM:
7511 /* Store flag stores 0 or 1 */
7512 case LT: case LTU:
7513 case GT: case GTU:
7514 case LE: case LEU:
7515 case GE: case GEU:
7516 case EQ:
7517 case NE:
7518 return 1;
7519 case AND:
7521 rtx op0 = XEXP (SET_SRC (pat), 0);
7522 rtx op1 = XEXP (SET_SRC (pat), 1);
7523 if (GET_CODE (op1) == CONST_INT)
7524 return INTVAL (op1) >= 0;
7525 if (GET_CODE (op0) != REG)
7526 return 0;
7527 if (sparc_check_64 (op0, insn) == 1)
7528 return 1;
7529 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7531 case IOR:
7532 case XOR:
7534 rtx op0 = XEXP (SET_SRC (pat), 0);
7535 rtx op1 = XEXP (SET_SRC (pat), 1);
7536 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7537 return 0;
7538 if (GET_CODE (op1) == CONST_INT)
7539 return INTVAL (op1) >= 0;
7540 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7542 case LSHIFTRT:
7543 return GET_MODE (SET_SRC (pat)) == SImode;
7544 /* Positive integers leave the high bits zero. */
7545 case CONST_DOUBLE:
7546 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7547 case CONST_INT:
7548 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7549 case ASHIFTRT:
7550 case SIGN_EXTEND:
7551 return - (GET_MODE (SET_SRC (pat)) == SImode);
7552 case REG:
7553 return sparc_check_64 (SET_SRC (pat), insn);
7554 default:
7555 return 0;
7559 /* We _ought_ to have only one kind per function, but... */
7560 static GTY(()) rtx sparc_addr_diff_list;
7561 static GTY(()) rtx sparc_addr_list;
7563 void
7564 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7566 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7567 if (diff)
7568 sparc_addr_diff_list
7569 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7570 else
7571 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7574 static void
7575 sparc_output_addr_vec (rtx vec)
7577 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7578 int idx, vlen = XVECLEN (body, 0);
7580 #ifdef ASM_OUTPUT_ADDR_VEC_START
7581 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7582 #endif
7584 #ifdef ASM_OUTPUT_CASE_LABEL
7585 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7586 NEXT_INSN (lab));
7587 #else
7588 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7589 #endif
7591 for (idx = 0; idx < vlen; idx++)
7593 ASM_OUTPUT_ADDR_VEC_ELT
7594 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7597 #ifdef ASM_OUTPUT_ADDR_VEC_END
7598 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7599 #endif
7602 static void
7603 sparc_output_addr_diff_vec (rtx vec)
7605 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7606 rtx base = XEXP (XEXP (body, 0), 0);
7607 int idx, vlen = XVECLEN (body, 1);
7609 #ifdef ASM_OUTPUT_ADDR_VEC_START
7610 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7611 #endif
7613 #ifdef ASM_OUTPUT_CASE_LABEL
7614 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7615 NEXT_INSN (lab));
7616 #else
7617 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7618 #endif
7620 for (idx = 0; idx < vlen; idx++)
7622 ASM_OUTPUT_ADDR_DIFF_ELT
7623 (asm_out_file,
7624 body,
7625 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7626 CODE_LABEL_NUMBER (base));
7629 #ifdef ASM_OUTPUT_ADDR_VEC_END
7630 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7631 #endif
7634 static void
7635 sparc_output_deferred_case_vectors (void)
7637 rtx t;
7638 int align;
7640 if (sparc_addr_list == NULL_RTX
7641 && sparc_addr_diff_list == NULL_RTX)
7642 return;
7644 /* Align to cache line in the function's code section. */
7645 switch_to_section (current_function_section ());
7647 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7648 if (align > 0)
7649 ASM_OUTPUT_ALIGN (asm_out_file, align);
7651 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7652 sparc_output_addr_vec (XEXP (t, 0));
7653 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7654 sparc_output_addr_diff_vec (XEXP (t, 0));
7656 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7659 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7660 unknown. Return 1 if the high bits are zero, -1 if the register is
7661 sign extended. */
7662 int
7663 sparc_check_64 (rtx x, rtx insn)
7665 /* If a register is set only once it is safe to ignore insns this
7666 code does not know how to handle. The loop will either recognize
7667 the single set and return the correct value or fail to recognize
7668 it and return 0. */
7669 int set_once = 0;
7670 rtx y = x;
7672 gcc_assert (GET_CODE (x) == REG);
7674 if (GET_MODE (x) == DImode)
7675 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7677 if (flag_expensive_optimizations
7678 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
7679 set_once = 1;
7681 if (insn == 0)
7683 if (set_once)
7684 insn = get_last_insn_anywhere ();
7685 else
7686 return 0;
7689 while ((insn = PREV_INSN (insn)))
7691 switch (GET_CODE (insn))
7693 case JUMP_INSN:
7694 case NOTE:
7695 break;
7696 case CODE_LABEL:
7697 case CALL_INSN:
7698 default:
7699 if (! set_once)
7700 return 0;
7701 break;
7702 case INSN:
7704 rtx pat = PATTERN (insn);
7705 if (GET_CODE (pat) != SET)
7706 return 0;
7707 if (rtx_equal_p (x, SET_DEST (pat)))
7708 return set_extends (insn);
7709 if (y && rtx_equal_p (y, SET_DEST (pat)))
7710 return set_extends (insn);
7711 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7712 return 0;
7716 return 0;
7719 /* Returns assembly code to perform a DImode shift using
7720 a 64-bit global or out register on SPARC-V8+. */
7721 const char *
7722 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7724 static char asm_code[60];
7726 /* The scratch register is only required when the destination
7727 register is not a 64-bit global or out register. */
7728 if (which_alternative != 2)
7729 operands[3] = operands[0];
7731 /* We can only shift by constants <= 63. */
7732 if (GET_CODE (operands[2]) == CONST_INT)
7733 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7735 if (GET_CODE (operands[1]) == CONST_INT)
7737 output_asm_insn ("mov\t%1, %3", operands);
7739 else
7741 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7742 if (sparc_check_64 (operands[1], insn) <= 0)
7743 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7744 output_asm_insn ("or\t%L1, %3, %3", operands);
7747 strcpy (asm_code, opcode);
7749 if (which_alternative != 2)
7750 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7751 else
7752 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
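/* For illustration: with OPCODE "sllx" and a destination that is
   itself a 64-bit global or out register (so no scratch is needed),
   the returned template is

	"sllx\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0"

   i.e. perform the full 64-bit shift, then split the result back into
   its 32-bit high/low halves.  */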
7755 /* Output rtl to increment the profiler label LABELNO
7756 for profiling a function entry. */
7758 void
7759 sparc_profile_hook (int labelno)
7761 char buf[32];
7762 rtx lab, fun;
7764 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7765 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7766 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7768 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7771 #ifdef OBJECT_FORMAT_ELF
7772 static void
7773 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7774 tree decl)
7776 if (flags & SECTION_MERGE)
7778 /* entsize cannot be expressed in this section's attribute
7779 encoding style. */
7780 default_elf_asm_named_section (name, flags, decl);
7781 return;
7784 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7786 if (!(flags & SECTION_DEBUG))
7787 fputs (",#alloc", asm_out_file);
7788 if (flags & SECTION_WRITE)
7789 fputs (",#write", asm_out_file);
7790 if (flags & SECTION_TLS)
7791 fputs (",#tls", asm_out_file);
7792 if (flags & SECTION_CODE)
7793 fputs (",#execinstr", asm_out_file);
7795 /* ??? Handle SECTION_BSS. */
7797 fputc ('\n', asm_out_file);
7799 #endif /* OBJECT_FORMAT_ELF */
7801 /* We do not allow indirect calls to be optimized into sibling calls.
7803 We cannot use sibling calls when delayed branches are disabled
7804 because they will likely require the call delay slot to be filled.
7806 Also, on SPARC 32-bit we cannot emit a sibling call when the
7807 current function returns a structure. This is because the "unimp
7808 after call" convention would cause the callee to return to the
7809 wrong place. The generic code already disallows cases where the
7810 function being called returns a structure.
7812 It may seem strange how this last case could occur. Usually there
7813 is code after the call which jumps to epilogue code which dumps the
7814 return value into the struct return area. That ought to invalidate
7815 the sibling call right? Well, in the C++ case we can end up passing
7816 the pointer to the struct return area to a constructor (which returns
7817 void) and then nothing else happens. Such a sibling call would look
7818 valid without the added check here.
7820 VxWorks PIC PLT entries require the global pointer to be initialized
7821 on entry. We therefore can't emit sibling calls to them. */
7822 static bool
7823 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7825 return (decl
7826 && flag_delayed_branch
7827 && (TARGET_ARCH64 || ! current_function_returns_struct)
7828 && !(TARGET_VXWORKS_RTP
7829 && flag_pic
7830 && !targetm.binds_local_p (decl)));
7833 /* libfunc renaming. */
7834 #include "config/gofast.h"
7836 static void
7837 sparc_init_libfuncs (void)
7839 if (TARGET_ARCH32)
7841 /* Use the subroutines that Sun's library provides for integer
7842 multiply and divide. The `*' prevents an underscore from
7843 being prepended by the compiler. .umul is a little faster
7844 than .mul. */
7845 set_optab_libfunc (smul_optab, SImode, "*.umul");
7846 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7847 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7848 set_optab_libfunc (smod_optab, SImode, "*.rem");
7849 set_optab_libfunc (umod_optab, SImode, "*.urem");
7851 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
7852 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7853 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7854 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7855 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7856 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7858 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7859 is because with soft-float, the SFmode and DFmode sqrt
7860 instructions will be absent, and the compiler will notice and
7861 try to use the TFmode sqrt instruction for calls to the
7862 builtin function sqrt, but this fails. */
7863 if (TARGET_FPU)
7864 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7866 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7867 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7868 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7869 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7870 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7871 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7873 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7874 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7875 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7876 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7878 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7879 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7880 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7881 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7883 if (DITF_CONVERSION_LIBFUNCS)
7885 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7886 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7887 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7888 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7891 if (SUN_CONVERSION_LIBFUNCS)
7893 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7894 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7895 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7896 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7899 if (TARGET_ARCH64)
7901	      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7902 do not exist in the library. Make sure the compiler does not
7903 emit calls to them by accident. (It should always use the
7904 hardware instructions.) */
7905 set_optab_libfunc (smul_optab, SImode, 0);
7906 set_optab_libfunc (sdiv_optab, SImode, 0);
7907 set_optab_libfunc (udiv_optab, SImode, 0);
7908 set_optab_libfunc (smod_optab, SImode, 0);
7909 set_optab_libfunc (umod_optab, SImode, 0);
7911 if (SUN_INTEGER_MULTIPLY_64)
7913 set_optab_libfunc (smul_optab, DImode, "__mul64");
7914 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7915 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7916 set_optab_libfunc (smod_optab, DImode, "__rem64");
7917 set_optab_libfunc (umod_optab, DImode, "__urem64");
7920 if (SUN_CONVERSION_LIBFUNCS)
7922 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7923 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7924 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7925 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7929 gofast_maybe_init_libfuncs ();
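/* Illustrative sketch (not part of the original file; quad_mul_add is
   a placeholder name): under the 32-bit ABI the TFmode optabs
   registered above route long double arithmetic through the _Q_*
   libcalls.  */
#if 0
long double
quad_mul_add (long double x, long double y, long double z)
{
  return x * y + z;	/* compiles to calls to _Q_mul and _Q_add */
}
#endif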
7932 #define def_builtin(NAME, CODE, TYPE) \
7933 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7934 NULL_TREE)
7936 /* Implement the TARGET_INIT_BUILTINS target hook.
7937 Create builtin functions for special SPARC instructions. */
7939 static void
7940 sparc_init_builtins (void)
7942 if (TARGET_VIS)
7943 sparc_vis_init_builtins ();
7946 /* Create builtin functions for VIS 1.0 instructions. */
7948 static void
7949 sparc_vis_init_builtins (void)
7951 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7952 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7953 tree v4hi = build_vector_type (intHI_type_node, 4);
7954 tree v2hi = build_vector_type (intHI_type_node, 2);
7955 tree v2si = build_vector_type (intSI_type_node, 2);
7957 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7958 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7959 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7960 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7961 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7962 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7963 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7964 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7965 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7966 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7967 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7968 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7969 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7970 v8qi, v8qi,
7971 intDI_type_node, 0);
7972 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7973 intDI_type_node,
7974 intDI_type_node, 0);
7975 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7976 ptr_type_node,
7977 intSI_type_node, 0);
7978 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7979 ptr_type_node,
7980 intDI_type_node, 0);
7982 /* Packing and expanding vectors. */
7983 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7984 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7985 v8qi_ftype_v2si_v8qi);
7986 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7987 v2hi_ftype_v2si);
7988 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7989 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7990 v8qi_ftype_v4qi_v4qi);
7992 /* Multiplications. */
7993 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7994 v4hi_ftype_v4qi_v4hi);
7995 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7996 v4hi_ftype_v4qi_v2hi);
7997 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7998 v4hi_ftype_v4qi_v2hi);
7999 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8000 v4hi_ftype_v8qi_v4hi);
8001 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8002 v4hi_ftype_v8qi_v4hi);
8003 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8004 v2si_ftype_v4qi_v2hi);
8005 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8006 v2si_ftype_v4qi_v2hi);
8008 /* Data aligning. */
8009 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8010 v4hi_ftype_v4hi_v4hi);
8011 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8012 v8qi_ftype_v8qi_v8qi);
8013 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8014 v2si_ftype_v2si_v2si);
8015 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8016 di_ftype_di_di);
8017 if (TARGET_ARCH64)
8018 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8019 ptr_ftype_ptr_di);
8020 else
8021 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8022 ptr_ftype_ptr_si);
8024 /* Pixel distance. */
8025 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8026 di_ftype_v8qi_v8qi_di);
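/* Illustrative usage sketch (not part of the original file;
   saturating_pack is a placeholder name): code built with -mvis can
   call the builtins defined above on matching vector types.  */
#if 0
typedef short vec16 __attribute__ ((vector_size (8)));		/* v4hi */
typedef unsigned char vec8 __attribute__ ((vector_size (4)));	/* v4qi */

vec8
saturating_pack (vec16 v)
{
  return __builtin_vis_fpack16 (v);	/* a single fpack16 insn */
}
#endif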
8029	/* Handle the TARGET_EXPAND_BUILTIN target hook.
8030	   Expand builtin functions for SPARC intrinsics.  */
8032 static rtx
8033 sparc_expand_builtin (tree exp, rtx target,
8034 rtx subtarget ATTRIBUTE_UNUSED,
8035 enum machine_mode tmode ATTRIBUTE_UNUSED,
8036 int ignore ATTRIBUTE_UNUSED)
8038 tree arg;
8039 call_expr_arg_iterator iter;
8040 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8041 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8042 rtx pat, op[4];
8043 enum machine_mode mode[4];
8044 int arg_count = 0;
8046 mode[0] = insn_data[icode].operand[0].mode;
8047 if (!target
8048 || GET_MODE (target) != mode[0]
8049 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8050 op[0] = gen_reg_rtx (mode[0]);
8051 else
8052 op[0] = target;
8054 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8056 arg_count++;
8057 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8058 op[arg_count] = expand_normal (arg);
8060 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8061 mode[arg_count]))
8062 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8065 switch (arg_count)
8067 case 1:
8068 pat = GEN_FCN (icode) (op[0], op[1]);
8069 break;
8070 case 2:
8071 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8072 break;
8073 case 3:
8074 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8075 break;
8076 default:
8077 gcc_unreachable ();
8080 if (!pat)
8081 return NULL_RTX;
8083 emit_insn (pat);
8085 return op[0];
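/* Subroutine of sparc_handle_vis_mul8x16 below: multiply the 8-bit
   element E8 by the 16-bit element E16 the way fmul8x16 does, i.e.
   scale the product down by 256 with rounding.  For example, e8 = 128
   and e16 = 1000 give (128 * 1000 + 128) / 256 = 500.  */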
8088 static int
8089 sparc_vis_mul8x16 (int e8, int e16)
8091 return (e8 * e16 + 128) / 256;
8094	/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8095	   by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists must be integer
8096	   constants.  A tree list with the results of the multiplications is returned,
8097	   and each element in the list is of type INNER_TYPE.  */
8099 static tree
8100 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8102 tree n_elts = NULL_TREE;
8103 int scale;
8105 switch (fncode)
8107 case CODE_FOR_fmul8x16_vis:
8108 for (; elts0 && elts1;
8109 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8111 int val
8112 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8113 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8114 n_elts = tree_cons (NULL_TREE,
8115 build_int_cst (inner_type, val),
8116 n_elts);
8118 break;
8120 case CODE_FOR_fmul8x16au_vis:
8121 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8123 for (; elts0; elts0 = TREE_CHAIN (elts0))
8125 int val
8126 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8127 scale);
8128 n_elts = tree_cons (NULL_TREE,
8129 build_int_cst (inner_type, val),
8130 n_elts);
8132 break;
8134 case CODE_FOR_fmul8x16al_vis:
8135 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8137 for (; elts0; elts0 = TREE_CHAIN (elts0))
8139 int val
8140 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8141 scale);
8142 n_elts = tree_cons (NULL_TREE,
8143 build_int_cst (inner_type, val),
8144 n_elts);
8146 break;
8148 default:
8149 gcc_unreachable ();
8152 return nreverse (n_elts);
8155	/* Handle the TARGET_FOLD_BUILTIN target hook.
8156 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8157 result of the function call is ignored. NULL_TREE is returned if the
8158 function could not be folded. */
8160 static tree
8161 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8163 tree arg0, arg1, arg2;
8164 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8166 if (ignore
8167 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8168 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8169 return fold_convert (rtype, integer_zero_node);
8171 switch (DECL_FUNCTION_CODE (fndecl))
8173 case CODE_FOR_fexpand_vis:
8174 arg0 = TREE_VALUE (arglist);
8175 STRIP_NOPS (arg0);
8177 if (TREE_CODE (arg0) == VECTOR_CST)
8179 tree inner_type = TREE_TYPE (rtype);
8180 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8181 tree n_elts = NULL_TREE;
8183 for (; elts; elts = TREE_CHAIN (elts))
8185 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8186 n_elts = tree_cons (NULL_TREE,
8187 build_int_cst (inner_type, val),
8188 n_elts);
8190 return build_vector (rtype, nreverse (n_elts));
8192 break;
8194 case CODE_FOR_fmul8x16_vis:
8195 case CODE_FOR_fmul8x16au_vis:
8196 case CODE_FOR_fmul8x16al_vis:
8197 arg0 = TREE_VALUE (arglist);
8198 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8199 STRIP_NOPS (arg0);
8200 STRIP_NOPS (arg1);
8202 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8204 tree inner_type = TREE_TYPE (rtype);
8205 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8206 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8207 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8208 inner_type, elts0, elts1);
8210 return build_vector (rtype, n_elts);
8212 break;
8214 case CODE_FOR_fpmerge_vis:
8215 arg0 = TREE_VALUE (arglist);
8216 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8217 STRIP_NOPS (arg0);
8218 STRIP_NOPS (arg1);
8220 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8222 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8223 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8224 tree n_elts = NULL_TREE;
8226 for (; elts0 && elts1;
8227 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8229 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8230 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8233 return build_vector (rtype, nreverse (n_elts));
8235 break;
8237 case CODE_FOR_pdist_vis:
8238 arg0 = TREE_VALUE (arglist);
8239 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8240 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8241 STRIP_NOPS (arg0);
8242 STRIP_NOPS (arg1);
8243 STRIP_NOPS (arg2);
8245 if (TREE_CODE (arg0) == VECTOR_CST
8246 && TREE_CODE (arg1) == VECTOR_CST
8247 && TREE_CODE (arg2) == INTEGER_CST)
8249 int overflow = 0;
8250 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8251 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8252 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8253 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8255 for (; elts0 && elts1;
8256 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8258 unsigned HOST_WIDE_INT
8259 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8260 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8261 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8262 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8264 unsigned HOST_WIDE_INT l;
8265 HOST_WIDE_INT h;
8267 overflow |= neg_double (low1, high1, &l, &h);
8268 overflow |= add_double (low0, high0, l, h, &l, &h);
8269 if (h < 0)
8270 overflow |= neg_double (l, h, &l, &h);
8272 overflow |= add_double (low, high, l, h, &low, &high);
8275 gcc_assert (overflow == 0);
8277 return build_int_cst_wide (rtype, low, high);
8280 default:
8281 break;
8284 return NULL_TREE;
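/* Illustrative sketch (not part of the original file; folded_expand is
   a placeholder name): with constant operands and optimization
   enabled, the folder above evaluates the builtin at compile time.
   For fexpand each element is shifted left by 4, so the call below can
   fold to the constant vector { 16, 32, 48, 64 }.  */
#if 0
typedef short vec16 __attribute__ ((vector_size (8)));
typedef unsigned char vec8 __attribute__ ((vector_size (4)));

vec16
folded_expand (void)
{
  vec8 c = { 1, 2, 3, 4 };
  return __builtin_vis_fexpand (c);
}
#endif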
/* Return nonzero if OP satisfies the machine-specific extra constraint
   letter C; STRICT means apply the strict variant of the register and
   address checks.  */
8287	int
8288	sparc_extra_constraint_check (rtx op, int c, int strict)
8290 int reload_ok_mem;
8292 if (TARGET_ARCH64
8293 && (c == 'T' || c == 'U'))
8294 return 0;
8296 switch (c)
8298 case 'Q':
8299 return fp_sethi_p (op);
8301 case 'R':
8302 return fp_mov_p (op);
8304 case 'S':
8305 return fp_high_losum_p (op);
8307 case 'U':
8308 if (! strict
8309 || (GET_CODE (op) == REG
8310 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8311 || reg_renumber[REGNO (op)] >= 0)))
8312 return register_ok_for_ldd (op);
8314 return 0;
8316 case 'W':
8317 case 'T':
8318 break;
8320 case 'Y':
8321 return const_zero_operand (op, GET_MODE (op));
8323 default:
8324 return 0;
8327 /* Our memory extra constraints have to emulate the
8328 behavior of 'm' and 'o' in order for reload to work
8329 correctly. */
8330 if (GET_CODE (op) == MEM)
8332 reload_ok_mem = 0;
8333 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8334 && (! strict
8335 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8336 reload_ok_mem = 1;
8338 else
8340 reload_ok_mem = (reload_in_progress
8341 && GET_CODE (op) == REG
8342 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8343 && reg_renumber [REGNO (op)] < 0);
8346 return reload_ok_mem;
8349 /* ??? This duplicates information provided to the compiler by the
8350 ??? scheduler description. Some day, teach genautomata to output
8351 ??? the latencies and then CSE will just use that. */
8353 static bool
8354 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8356 enum machine_mode mode = GET_MODE (x);
8357 bool float_mode_p = FLOAT_MODE_P (mode);
8359 switch (code)
8361 case CONST_INT:
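/* Constants in the range -4096..4095 fit in the 13-bit signed
   immediate field of most SPARC instructions and are free.  */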
8362 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8364 *total = 0;
8365 return true;
8367 /* FALLTHRU */
8369 case HIGH:
8370 *total = 2;
8371 return true;
8373 case CONST:
8374 case LABEL_REF:
8375 case SYMBOL_REF:
8376 *total = 4;
8377 return true;
8379 case CONST_DOUBLE:
8380 if (GET_MODE (x) == VOIDmode
8381 && ((CONST_DOUBLE_HIGH (x) == 0
8382 && CONST_DOUBLE_LOW (x) < 0x1000)
8383 || (CONST_DOUBLE_HIGH (x) == -1
8384 && CONST_DOUBLE_LOW (x) < 0
8385 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8386 *total = 0;
8387 else
8388 *total = 8;
8389 return true;
8391 case MEM:
8392	      /* If the outer code was a sign or zero extension, a cost
8393	         of COSTS_N_INSNS (1) was already added in; this is
8394	         why we subtract it back out here.  */
8395 if (outer_code == ZERO_EXTEND)
8397 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8399 else if (outer_code == SIGN_EXTEND)
8401 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8403 else if (float_mode_p)
8405 *total = sparc_costs->float_load;
8407 else
8409 *total = sparc_costs->int_load;
8412 return true;
8414 case PLUS:
8415 case MINUS:
8416 if (float_mode_p)
8417 *total = sparc_costs->float_plusminus;
8418 else
8419 *total = COSTS_N_INSNS (1);
8420 return false;
8422 case MULT:
8423 if (float_mode_p)
8424 *total = sparc_costs->float_mul;
8425 else if (! TARGET_HARD_MUL)
8426 *total = COSTS_N_INSNS (25);
8427 else
8429 int bit_cost;
8431 bit_cost = 0;
8432 if (sparc_costs->int_mul_bit_factor)
8434 int nbits;
8436 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8438 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
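/* Count the set bits; each iteration clears the lowest one.  */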
8439 for (nbits = 0; value != 0; value &= value - 1)
8440 nbits++;
8442 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8443 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8445 rtx x1 = XEXP (x, 1);
8446 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8447 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8449 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8450 nbits++;
8451 for (; value2 != 0; value2 &= value2 - 1)
8452 nbits++;
8454 else
8455 nbits = 7;
8457 if (nbits < 3)
8458 nbits = 3;
8459 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8460 bit_cost = COSTS_N_INSNS (bit_cost);
8463 if (mode == DImode)
8464 *total = sparc_costs->int_mulX + bit_cost;
8465 else
8466 *total = sparc_costs->int_mul + bit_cost;
8468 return false;
8470 case ASHIFT:
8471 case ASHIFTRT:
8472 case LSHIFTRT:
8473 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8474 return false;
8476 case DIV:
8477 case UDIV:
8478 case MOD:
8479 case UMOD:
8480 if (float_mode_p)
8482 if (mode == DFmode)
8483 *total = sparc_costs->float_div_df;
8484 else
8485 *total = sparc_costs->float_div_sf;
8487 else
8489 if (mode == DImode)
8490 *total = sparc_costs->int_divX;
8491 else
8492 *total = sparc_costs->int_div;
8494 return false;
8496 case NEG:
8497 if (! float_mode_p)
8499 *total = COSTS_N_INSNS (1);
8500 return false;
8502 /* FALLTHRU */
8504 case ABS:
8505 case FLOAT:
8506 case UNSIGNED_FLOAT:
8507 case FIX:
8508 case UNSIGNED_FIX:
8509 case FLOAT_EXTEND:
8510 case FLOAT_TRUNCATE:
8511 *total = sparc_costs->float_move;
8512 return false;
8514 case SQRT:
8515 if (mode == DFmode)
8516 *total = sparc_costs->float_sqrt_df;
8517 else
8518 *total = sparc_costs->float_sqrt_sf;
8519 return false;
8521 case COMPARE:
8522 if (float_mode_p)
8523 *total = sparc_costs->float_cmp;
8524 else
8525 *total = COSTS_N_INSNS (1);
8526 return false;
8528 case IF_THEN_ELSE:
8529 if (float_mode_p)
8530 *total = sparc_costs->float_cmove;
8531 else
8532 *total = sparc_costs->int_cmove;
8533 return false;
8535 case IOR:
8536 /* Handle the NAND vector patterns. */
8537 if (sparc_vector_mode_supported_p (GET_MODE (x))
8538 && GET_CODE (XEXP (x, 0)) == NOT
8539 && GET_CODE (XEXP (x, 1)) == NOT)
8541 *total = COSTS_N_INSNS (1);
8542 return true;
8544 else
8545 return false;
8547 default:
8548 return false;
8552 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8553 This is achieved by means of a manual dynamic stack space allocation in
8554 the current frame. We make the assumption that SEQ doesn't contain any
8555 function calls, with the possible exception of calls to the PIC helper. */
8557 static void
8558 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8560 /* We must preserve the lowest 16 words for the register save area. */
8561 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8562 /* We really need only 2 words of fresh stack space. */
8563 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
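/* As a worked example (assuming the usual 8- and 16-byte stack
   alignments): this is SPARC_STACK_ALIGN (72) = 72 bytes on 32-bit
   and SPARC_STACK_ALIGN (144) = 144 bytes on 64-bit.  */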
8565 rtx slot
8566 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8567 SPARC_STACK_BIAS + offset));
8569 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8570 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8571 if (reg2)
8572 emit_insn (gen_rtx_SET (VOIDmode,
8573 adjust_address (slot, word_mode, UNITS_PER_WORD),
8574 reg2));
8575 emit_insn (seq);
8576 if (reg2)
8577 emit_insn (gen_rtx_SET (VOIDmode,
8578 reg2,
8579 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8580 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8581 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8584 /* Output the assembler code for a thunk function. THUNK_DECL is the
8585 declaration for the thunk function itself, FUNCTION is the decl for
8586 the target function. DELTA is an immediate constant offset to be
8587 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8588 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8590 static void
8591 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8592 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8593 tree function)
8595 rtx this, insn, funexp;
8596 unsigned int int_arg_first;
8598 reload_completed = 1;
8599 epilogue_completed = 1;
8601 emit_note (NOTE_INSN_PROLOGUE_END);
8603 if (flag_delayed_branch)
8605 /* We will emit a regular sibcall below, so we need to instruct
8606 output_sibcall that we are in a leaf function. */
8607 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8609 /* This will cause final.c to invoke leaf_renumber_regs so we
8610 must behave as if we were in a not-yet-leafified function. */
8611 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8613 else
8615 /* We will emit the sibcall manually below, so we will need to
8616 manually spill non-leaf registers. */
8617 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8619 /* We really are in a leaf function. */
8620 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8623 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8624 returns a structure, the structure return pointer is there instead. */
8625 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8626 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8627 else
8628 this = gen_rtx_REG (Pmode, int_arg_first);
8630 /* Add DELTA. When possible use a plain add, otherwise load it into
8631 a register first. */
8632 if (delta)
8634 rtx delta_rtx = GEN_INT (delta);
8636 if (! SPARC_SIMM13_P (delta))
8638 rtx scratch = gen_rtx_REG (Pmode, 1);
8639 emit_move_insn (scratch, delta_rtx);
8640 delta_rtx = scratch;
8643 /* THIS += DELTA. */
8644 emit_insn (gen_add2_insn (this, delta_rtx));
8647 /* Add the word at address (*THIS + VCALL_OFFSET). */
8648 if (vcall_offset)
8650 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8651 rtx scratch = gen_rtx_REG (Pmode, 1);
8653 gcc_assert (vcall_offset < 0);
8655 /* SCRATCH = *THIS. */
8656 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8658 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8659 may not have any available scratch register at this point. */
8660	      if (SPARC_SIMM13_P (vcall_offset))
8661	        ;
8662 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8663 else if (! fixed_regs[5]
8664 /* The below sequence is made up of at least 2 insns,
8665 while the default method may need only one. */
8666 && vcall_offset < -8192)
8668 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8669 emit_move_insn (scratch2, vcall_offset_rtx);
8670 vcall_offset_rtx = scratch2;
8672 else
8674 rtx increment = GEN_INT (-4096);
8676 /* VCALL_OFFSET is a negative number whose typical range can be
8677 estimated as -32768..0 in 32-bit mode. In almost all cases
8678 it is therefore cheaper to emit multiple add insns than
8679 spilling and loading the constant into a register (at least
8680 6 insns). */
8681 while (! SPARC_SIMM13_P (vcall_offset))
8683 emit_insn (gen_add2_insn (scratch, increment));
8684 vcall_offset += 4096;
8686 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8689 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8690 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8691 gen_rtx_PLUS (Pmode,
8692 scratch,
8693 vcall_offset_rtx)));
8695 /* THIS += *(*THIS + VCALL_OFFSET). */
8696 emit_insn (gen_add2_insn (this, scratch));
8699 /* Generate a tail call to the target function. */
8700 if (! TREE_USED (function))
8702 assemble_external (function);
8703 TREE_USED (function) = 1;
8705 funexp = XEXP (DECL_RTL (function), 0);
8707 if (flag_delayed_branch)
8709 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8710 insn = emit_call_insn (gen_sibcall (funexp));
8711 SIBLING_CALL_P (insn) = 1;
8713 else
8715 /* The hoops we have to jump through in order to generate a sibcall
8716 without using delay slots... */
8717 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8719 if (flag_pic)
8721 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8722 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8723 start_sequence ();
8724 /* Delay emitting the PIC helper function because it needs to
8725 change the section and we are emitting assembly code. */
8726 load_pic_register (true); /* clobbers %o7 */
8727 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8728 seq = get_insns ();
8729 end_sequence ();
8730 emit_and_preserve (seq, spill_reg, spill_reg2);
8732 else if (TARGET_ARCH32)
8734 emit_insn (gen_rtx_SET (VOIDmode,
8735 scratch,
8736 gen_rtx_HIGH (SImode, funexp)));
8737 emit_insn (gen_rtx_SET (VOIDmode,
8738 scratch,
8739 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8741 else /* TARGET_ARCH64 */
8743 switch (sparc_cmodel)
8745 case CM_MEDLOW:
8746 case CM_MEDMID:
8747 /* The destination can serve as a temporary. */
8748 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8749 break;
8751 case CM_MEDANY:
8752 case CM_EMBMEDANY:
8753 /* The destination cannot serve as a temporary. */
8754 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8755 start_sequence ();
8756 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8757 seq = get_insns ();
8758 end_sequence ();
8759 emit_and_preserve (seq, spill_reg, 0);
8760 break;
8762 default:
8763 gcc_unreachable ();
8767 emit_jump_insn (gen_indirect_jump (scratch));
8770 emit_barrier ();
8772 /* Run just enough of rest_of_compilation to get the insns emitted.
8773 There's not really enough bulk here to make other passes such as
8774	     instruction scheduling worthwhile.  Note that use_thunk calls
8775 assemble_start_function and assemble_end_function. */
8776 insn = get_insns ();
8777 insn_locators_alloc ();
8778 shorten_branches (insn);
8779 final_start_function (insn, file, 1);
8780 final (insn, file, 1);
8781 final_end_function ();
8783 reload_completed = 0;
8784 epilogue_completed = 0;
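/* Illustrative note (not part of the original file): such thunks
   typically arise from C++ multiple inheritance, e.g.

     struct A { virtual void f (); };
     struct B { virtual void g (); };
     struct C : A, B { virtual void g (); };

   where calling g through a B* reaches a thunk that adjusts the
   incoming this pointer by DELTA and then tail-calls C::g.  */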
8787 /* Return true if sparc_output_mi_thunk would be able to output the
8788 assembler code for the thunk function specified by the arguments
8789 it is passed, and false otherwise. */
8790 static bool
8791 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
8792 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8793 HOST_WIDE_INT vcall_offset,
8794 const_tree function ATTRIBUTE_UNUSED)
8796 /* Bound the loop used in the default method above. */
8797 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8800 /* How to allocate a 'struct machine_function'. */
8802 static struct machine_function *
8803 sparc_init_machine_status (void)
8805 return ggc_alloc_cleared (sizeof (struct machine_function));
8808 /* Locate some local-dynamic symbol still in use by this function
8809 so that we can print its name in local-dynamic base patterns. */
8811 static const char *
8812 get_some_local_dynamic_name (void)
8814 rtx insn;
8816 if (cfun->machine->some_ld_name)
8817 return cfun->machine->some_ld_name;
8819 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8820 if (INSN_P (insn)
8821 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8822 return cfun->machine->some_ld_name;
8824 gcc_unreachable ();
8827 static int
8828 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8830 rtx x = *px;
8832 if (x
8833 && GET_CODE (x) == SYMBOL_REF
8834 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8836 cfun->machine->some_ld_name = XSTR (x, 0);
8837 return 1;
8840 return 0;
8843 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8844 This is called from dwarf2out.c to emit call frame instructions
8845 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8846 static void
8847 sparc_dwarf_handle_frame_unspec (const char *label,
8848 rtx pattern ATTRIBUTE_UNUSED,
8849 int index ATTRIBUTE_UNUSED)
8851 gcc_assert (index == UNSPECV_SAVEW);
8852 dwarf2out_window_save (label);
8855 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8856 We need to emit DTP-relative relocations. */
8858 static void
8859 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8861 switch (size)
8863 case 4:
8864 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8865 break;
8866 case 8:
8867 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8868 break;
8869 default:
8870 gcc_unreachable ();
8872 output_addr_const (file, x);
8873 fputs (")", file);
8876 /* Do whatever processing is required at the end of a file. */
8878 static void
8879 sparc_file_end (void)
8881 /* If we haven't emitted the special PIC helper function, do so now. */
8882 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8883 emit_pic_helper ();
8885 if (NEED_INDICATE_EXEC_STACK)
8886 file_end_indicate_exec_stack ();
8889 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8890 /* Implement TARGET_MANGLE_TYPE. */
8892 static const char *
8893 sparc_mangle_type (const_tree type)
8895 if (!TARGET_64BIT
8896 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8897 && TARGET_LONG_DOUBLE_128)
8898 return "g";
8900 /* For all other types, use normal C++ mangling. */
8901 return NULL;
8903 #endif
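/* For example (illustrative): with 128-bit long double on 32-bit
   SPARC, a function void f (long double) mangles as _Z1fg instead of
   the default _Z1fe.  */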
8905	/* Expand code to perform an 8-bit or 16-bit compare and swap by doing a
8906	   32-bit compare and swap on the word containing the byte or half-word.  */
8908 void
8909 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8911 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8912 rtx addr = gen_reg_rtx (Pmode);
8913 rtx off = gen_reg_rtx (SImode);
8914 rtx oldv = gen_reg_rtx (SImode);
8915 rtx newv = gen_reg_rtx (SImode);
8916 rtx oldvalue = gen_reg_rtx (SImode);
8917 rtx newvalue = gen_reg_rtx (SImode);
8918 rtx res = gen_reg_rtx (SImode);
8919 rtx resv = gen_reg_rtx (SImode);
8920 rtx memsi, val, mask, end_label, loop_label, cc;
8922 emit_insn (gen_rtx_SET (VOIDmode, addr,
8923 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8925 if (Pmode != SImode)
8926 addr1 = gen_lowpart (SImode, addr1);
8927 emit_insn (gen_rtx_SET (VOIDmode, off,
8928 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8930 memsi = gen_rtx_MEM (SImode, addr);
8931 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8932 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8934 val = force_reg (SImode, memsi);
8936 emit_insn (gen_rtx_SET (VOIDmode, off,
8937 gen_rtx_XOR (SImode, off,
8938 GEN_INT (GET_MODE (mem) == QImode
8939 ? 3 : 2))));
8941 emit_insn (gen_rtx_SET (VOIDmode, off,
8942 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8944 if (GET_MODE (mem) == QImode)
8945 mask = force_reg (SImode, GEN_INT (0xff));
8946 else
8947 mask = force_reg (SImode, GEN_INT (0xffff));
8949 emit_insn (gen_rtx_SET (VOIDmode, mask,
8950 gen_rtx_ASHIFT (SImode, mask, off)));
8952 emit_insn (gen_rtx_SET (VOIDmode, val,
8953 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8954 val)));
8956 oldval = gen_lowpart (SImode, oldval);
8957 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8958 gen_rtx_ASHIFT (SImode, oldval, off)));
8960 newval = gen_lowpart_common (SImode, newval);
8961 emit_insn (gen_rtx_SET (VOIDmode, newv,
8962 gen_rtx_ASHIFT (SImode, newval, off)));
8964 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8965 gen_rtx_AND (SImode, oldv, mask)));
8967 emit_insn (gen_rtx_SET (VOIDmode, newv,
8968 gen_rtx_AND (SImode, newv, mask)));
8970 end_label = gen_label_rtx ();
8971 loop_label = gen_label_rtx ();
8972 emit_label (loop_label);
8974 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
8975 gen_rtx_IOR (SImode, oldv, val)));
8977 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
8978 gen_rtx_IOR (SImode, newv, val)));
8980 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
8982 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
8984 emit_insn (gen_rtx_SET (VOIDmode, resv,
8985 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8986 res)));
8988 sparc_compare_op0 = resv;
8989 sparc_compare_op1 = val;
8990 cc = gen_compare_reg (NE);
8992 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
8994 sparc_compare_emitted = cc;
8995 emit_jump_insn (gen_bne (loop_label));
8997 emit_label (end_label);
8999 emit_insn (gen_rtx_SET (VOIDmode, res,
9000 gen_rtx_AND (SImode, res, mask)));
9002 emit_insn (gen_rtx_SET (VOIDmode, res,
9003 gen_rtx_LSHIFTRT (SImode, res, off)));
9005 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
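/* Illustrative sketch (not part of the original file; cas_u16 is a
   placeholder name): this expansion is what backs the subword __sync
   builtins.  */
#if 0
unsigned short
cas_u16 (unsigned short *p, unsigned short oldv, unsigned short newv)
{
  /* Compiled through the 32-bit compare-and-swap loop built above.  */
  return __sync_val_compare_and_swap (p, oldv, newv);
}
#endif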
9008 #include "gt-sparc.h"