/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
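
/* Illustrative note (not from the original source): COSTS_N_INSNS is the
   generic rtl.h scale, believed to be defined as ((N) * 4), so each entry
   above expresses latency in whole-instruction units rather than raw
   cycles.  The field order must match struct processor_costs in sparc.h;
   the trailing comment labels are the only documentation of that order
   here.  */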
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) int struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
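
/* Illustrative note (not from the original source): rows one and four show
   the leaf-function renaming at work.  Hard regs 24..31 are the %i
   registers; their entries { 8, 9, 10, 11, 12, 13, -1, 15 } map %i0-%i5
   onto %o0-%o5 and %i7 onto %o7, while the %l registers and %fp get -1
   because a leaf function that skips the register window has nowhere to
   rename them.  */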
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_file_end (void);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
      /* Although insns using %y are deprecated, it is a clear win on current
	 ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;
  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    }
}
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
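
/* Illustrative example (not from the original source): for 1.0f the IEEE
   single-precision image is 0x3f800000.  That value is too large for a
   13-bit signed immediate (SPARC_SIMM13_P fails), but its low 10 bits are
   clear, so SPARC_SETHI_P accepts it and fp_sethi_p matches: a single
   "sethi %hi(0x3f800000), %reg" suffices.  A pattern such as 0x3f8003f8
   fails both immediate tests and falls through to fp_high_losum_p, i.e. a
   sethi/or pair.  */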
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }
  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      if (GET_CODE (operands[1]) == LABEL_REF && mode == SImode)
	{
	  emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	  return true;
	}

      if (GET_CODE (operands[1]) == LABEL_REF && mode == DImode)
	{
	  gcc_assert (TARGET_ARCH64);
	  emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	  return true;
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] :
						 NULL_RTX));
	  return false;
	}
    }
  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }
  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
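
/* Illustrative example (not from the original source): loading 0x12345678
   this way splits the value on the 10-bit boundary used by sethi/or:

     set 0x12345400, %temp      ! 0x12345678 & ~0x3ff
     or  %temp, 0x278, %op0     ! 0x12345678 &  0x3ff

   Emitting both halves as plain SETs, rather than HIGH/LO_SUM, is what
   lets CSE recognize and reuse the 0x12345400 intermediate.  */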
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;
    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;
    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;
    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and matches
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
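
/* Illustrative example (not from the original source) of the sethi/xor
   trick these helpers enable for negative constants: to build
   0xfffffffff0001234, quick1 below takes high_bits = ~0xf0001234
   = 0x0fffedcb, emits the equivalent of "sethi %hi(0x0fffec00)" via
   gen_safe_HIGH64, then XORs with (-0x400 | 0x234) = 0x...fffffe34; the
   XOR flips the upper 32 bits to all-ones and fixes the low 10 bits to
   0x234 in a single insn.  */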
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					     unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
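
/* Worked example (not from the original source): for the constant
   0x0000000000fff000, low_bits = 0x00fff000 and high_bits = 0.  The first
   scan finds lowest_bit_set = 12; the second pass (entered because
   high_bits has no set bit to terminate the first) finds
   highest_bit_set = 23; every bit in between is set.  The span
   23 - 12 = 11 is under 12, so sparc_emit_set_const64 below emits just
   "mov 0xfff, %reg" followed by "sllx %reg, 12, %reg".  */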
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							 unsigned HOST_WIDE_INT,
							 int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
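
/* Worked example (not from the original source): with high_bits = 0 and
   low_bits = 0x3ffff000 (bits 12..29 set, an 18-bit span), passing
   lowest_bit_set = 12 and shift = 10 gives (0x3ffff000 >> 12) << 10 =
   0x0ffffc00, a value SPARC_SETHI_P accepts; sparc_emit_set_const64 then
   finishes with "sllx %reg, 2, %reg" (2 = 12 - 10) to rebuild the
   constant.  */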
/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);
  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }
  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *    xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }
  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }
  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *    sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *    or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P(low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
	  high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
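
/* End-to-end example (not from the original source): for
   0x00000005deadbeef no quick form applies (the set bits span bits 0..34
   with holes, neither 32-bit half is all-zero or all-one, and low_bits
   fails the positive-simm13 test), so the longway path runs:

     mov   5, %t               ! high_bits fits simm13
     sllx  %t, 32, %t4
     sethi %hi(0xdeadbc00), %t2
     or    %t2, 0x2ef, %t3
     add   %t4, %t3, %op0

   five instructions in all.  */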
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
	{
	case EQ:
	case NE:
	case UNORDERED:
	case ORDERED:
	case UNLT:
	case UNLE:
	case UNGT:
	case UNGE:
	case UNEQ:
	case LTGT:
	  return CCFPmode;

	case LT:
	case LE:
	case GT:
	case GE:
	  return CCFPEmode;

	default:
	  gcc_unreachable ();
	}
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
	   || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCX_NOOVmode;
      else
	return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
	return CCXmode;
      else
	return CCmode;
    }
}
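
/* Illustrative note (not from the original source): the CCFP/CCFPE split
   mirrors the SPARC quiet vs. signaling fcmp distinction.  An ordered
   "a < b" must raise an exception on a NaN operand, so LT/LE/GT/GE get
   CCFPEmode (compare-and-signal), while EQ/NE and the UN* codes are quiet
   and get CCFPmode; e.g. (lt:CCFPE (reg:DF f0) (reg:DF f2)) would be the
   form a signaling double compare takes.  */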
1887 /* X and Y are two things to compare using CODE. Emit the compare insn and
1888 return the rtx for the cc reg in the proper mode. */
1891 gen_compare_reg (enum rtx_code code, rtx x, rtx y)
1893 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1894 rtx cc_reg;
1896 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1897 fcc regs (cse can't tell they're really call clobbered regs and will
1898 remove a duplicate comparison even if there is an intervening function
1899 call - it will then try to reload the cc reg via an int reg which is why
1900 we need the movcc patterns). It is possible to provide the movcc
1901 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
1902 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
1903 to tell cse that CCFPE mode registers (even pseudos) are call
1904 clobbered. */
1906 /* ??? This is an experiment. Rather than making changes to cse which may
1907 or may not be easy/clean, we do our own cse. This is possible because
1908 we will generate hard registers. Cse knows they're call clobbered (it
1909 doesn't know the same thing about pseudos). If we guess wrong, no big
1910 deal, but if we win, great! */
1912 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1913 #if 1 /* experiment */
1915 int reg;
1916 /* We cycle through the registers to ensure they're all exercised. */
1917 static int next_fcc_reg = 0;
1918 /* Previous x,y for each fcc reg. */
1919 static rtx prev_args[4][2];
1921 /* Scan prev_args for x,y. */
1922 for (reg = 0; reg < 4; reg++)
1923 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
1924 break;
1925 if (reg == 4)
1927 reg = next_fcc_reg;
1928 prev_args[reg][0] = x;
1929 prev_args[reg][1] = y;
1930 next_fcc_reg = (next_fcc_reg + 1) & 3;
1932 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
1934 #else
1935 cc_reg = gen_reg_rtx (mode);
1936 #endif /* ! experiment */
1937 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1938 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
1939 else
1940 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
1942 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
1943 gen_rtx_COMPARE (mode, x, y)));
1945 return cc_reg;
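/* Sketch of the intended effect of the caching above (V9 FP compares):
   two identical compares of the same x,y hit the same cached %fccN, e.g.

     fcmpd %fcc0, %f2, %f4
     ...
     fcmpd %fcc0, %f2, %f4   ! same operands -> same reg, cse can delete

   while a different operand pair rotates on to %fcc1, %fcc2, ...  */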
1948 /* This function is used for v9 only.
1949 CODE is the code for an Scc's comparison.
1950 OPERANDS[0] is the target of the Scc insn.
1951 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
1952 been generated yet).
1954 This function is needed to turn
1956 (set (reg:SI 110)
1957 (gt (reg:CCX 100 %icc)
1958 (const_int 0)))
1959 into
1960 (set (reg:SI 110)
1961 (gt:DI (reg:CCX 100 %icc)
1962 (const_int 0)))
1964 I.e. the instruction recognizer needs to see the mode of the comparison to
1965 find the right instruction. We could use "gt:DI" right in the
1966 define_expand, but leaving it out allows us to handle DI, SI, etc.
1968 We refer to the global sparc compare operands sparc_compare_op0 and
1969 sparc_compare_op1. */
1972 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
1974 rtx temp, op0, op1;
1976 if (! TARGET_ARCH64
1977 && (GET_MODE (sparc_compare_op0) == DImode
1978 || GET_MODE (operands[0]) == DImode))
1979 return 0;
1981 op0 = sparc_compare_op0;
1982 op1 = sparc_compare_op1;
1984 /* Try to use the movrCC insns. */
1985 if (TARGET_ARCH64
1986 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
1987 && op1 == const0_rtx
1988 && v9_regcmp_p (compare_code))
1990 /* Special case for op0 != 0. This can be done with one instruction if
1991 operands[0] == sparc_compare_op0. */
1993 if (compare_code == NE
1994 && GET_MODE (operands[0]) == DImode
1995 && rtx_equal_p (op0, operands[0]))
1997 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
1998 gen_rtx_IF_THEN_ELSE (DImode,
1999 gen_rtx_fmt_ee (compare_code, DImode,
2000 op0, const0_rtx),
2001 const1_rtx,
2002 operands[0])));
2003 return 1;
2006 if (reg_overlap_mentioned_p (operands[0], op0))
2008 /* Handle the case where operands[0] == sparc_compare_op0.
2009 We "early clobber" the result. */
2010 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2011 emit_move_insn (op0, sparc_compare_op0);
2014 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2015 if (GET_MODE (op0) != DImode)
2017 temp = gen_reg_rtx (DImode);
2018 convert_move (temp, op0, 0);
2020 else
2021 temp = op0;
2022 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2023 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2024 gen_rtx_fmt_ee (compare_code, DImode,
2025 temp, const0_rtx),
2026 const1_rtx,
2027 operands[0])));
2028 return 1;
2030 else
2032 operands[1] = gen_compare_reg (compare_code, op0, op1);
2034 switch (GET_MODE (operands[1]))
2036 case CCmode :
2037 case CCXmode :
2038 case CCFPEmode :
2039 case CCFPmode :
2040 break;
2041 default :
2042 gcc_unreachable ();
2044 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2045 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2046 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2047 gen_rtx_fmt_ee (compare_code,
2048 GET_MODE (operands[1]),
2049 operands[1], const0_rtx),
2050 const1_rtx, operands[0])));
2051 return 1;
2055 /* Emit a conditional jump insn for the v9 architecture using comparison code
2056 CODE and jump target LABEL.
2057 This function exists to take advantage of the v9 brxx insns. */
2059 void
2060 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2062 emit_jump_insn (gen_rtx_SET (VOIDmode,
2063 pc_rtx,
2064 gen_rtx_IF_THEN_ELSE (VOIDmode,
2065 gen_rtx_fmt_ee (code, GET_MODE (op0),
2066 op0, const0_rtx),
2067 gen_rtx_LABEL_REF (VOIDmode, label),
2068 pc_rtx)));
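/* A sketch of the correspondence: for code == NE and a DImode op0, the
   RTL built above matches a v9 branch-on-register instruction such as

     brnz,pt %o0, .Llabel
      nop

   which tests the register value directly, with no separate
   condition-code compare.  */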
2071 /* Generate a DFmode part of a hard TFmode register.
2072 REG is the TFmode hard register, LOW is 1 for the
2073 low 64 bits of the register and 0 otherwise. */
2076 gen_df_reg (rtx reg, int low)
2078 int regno = REGNO (reg);
2080 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2081 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2082 return gen_rtx_REG (DFmode, regno);
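/* Worked example (SPARC is big-endian, so WORDS_BIG_ENDIAN is 1): for
   a TFmode value in %f0..%f3, gen_df_reg (reg, 0) returns the DFmode
   register at %f0 and gen_df_reg (reg, 1) the one at %f2; for TFmode
   in integer registers on ARCH64 the low half is at regno + 1.  */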
2085 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2086 Unlike normal calls, TFmode operands are passed by reference. It is
2087 assumed that no more than 3 operands are required. */
2089 static void
2090 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2092 rtx ret_slot = NULL, arg[3], func_sym;
2093 int i;
2095 /* We only expect to be called for conversions, unary, and binary ops. */
2096 gcc_assert (nargs == 2 || nargs == 3);
2098 for (i = 0; i < nargs; ++i)
2100 rtx this_arg = operands[i];
2101 rtx this_slot;
2103 /* TFmode arguments and return values are passed by reference. */
2104 if (GET_MODE (this_arg) == TFmode)
2106 int force_stack_temp;
2108 force_stack_temp = 0;
2109 if (TARGET_BUGGY_QP_LIB && i == 0)
2110 force_stack_temp = 1;
2112 if (GET_CODE (this_arg) == MEM
2113 && ! force_stack_temp)
2114 this_arg = XEXP (this_arg, 0);
2115 else if (CONSTANT_P (this_arg)
2116 && ! force_stack_temp)
2118 this_slot = force_const_mem (TFmode, this_arg);
2119 this_arg = XEXP (this_slot, 0);
2121 else
2123 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2125 /* Operand 0 is the return value. We'll copy it out later. */
2126 if (i > 0)
2127 emit_move_insn (this_slot, this_arg);
2128 else
2129 ret_slot = this_slot;
2131 this_arg = XEXP (this_slot, 0);
2135 arg[i] = this_arg;
2138 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2140 if (GET_MODE (operands[0]) == TFmode)
2142 if (nargs == 2)
2143 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2144 arg[0], GET_MODE (arg[0]),
2145 arg[1], GET_MODE (arg[1]));
2146 else
2147 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2148 arg[0], GET_MODE (arg[0]),
2149 arg[1], GET_MODE (arg[1]),
2150 arg[2], GET_MODE (arg[2]));
2152 if (ret_slot)
2153 emit_move_insn (operands[0], ret_slot);
2155 else
2157 rtx ret;
2159 gcc_assert (nargs == 2);
2161 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2162 GET_MODE (operands[0]), 1,
2163 arg[1], GET_MODE (arg[1]));
2165 if (ret != operands[0])
2166 emit_move_insn (operands[0], ret);
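/* For reference, the entry points used here follow the SPARC quad-float
   library ABI; e.g. a TFmode addition is assumed to call roughly

     void _Qp_add (long double *result, const long double *a,
                   const long double *b);

   so the result slot and all TFmode operands go by reference, as
   arranged above.  */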
2170 /* Expand soft-float TFmode calls to sparc abi routines. */
2172 static void
2173 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2175 const char *func;
2177 switch (code)
2179 case PLUS:
2180 func = "_Qp_add";
2181 break;
2182 case MINUS:
2183 func = "_Qp_sub";
2184 break;
2185 case MULT:
2186 func = "_Qp_mul";
2187 break;
2188 case DIV:
2189 func = "_Qp_div";
2190 break;
2191 default:
2192 gcc_unreachable ();
2195 emit_soft_tfmode_libcall (func, 3, operands);
2198 static void
2199 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2201 const char *func;
2203 gcc_assert (code == SQRT);
2204 func = "_Qp_sqrt";
2206 emit_soft_tfmode_libcall (func, 2, operands);
2209 static void
2210 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2212 const char *func;
2214 switch (code)
2216 case FLOAT_EXTEND:
2217 switch (GET_MODE (operands[1]))
2219 case SFmode:
2220 func = "_Qp_stoq";
2221 break;
2222 case DFmode:
2223 func = "_Qp_dtoq";
2224 break;
2225 default:
2226 gcc_unreachable ();
2228 break;
2230 case FLOAT_TRUNCATE:
2231 switch (GET_MODE (operands[0]))
2233 case SFmode:
2234 func = "_Qp_qtos";
2235 break;
2236 case DFmode:
2237 func = "_Qp_qtod";
2238 break;
2239 default:
2240 gcc_unreachable ();
2242 break;
2244 case FLOAT:
2245 switch (GET_MODE (operands[1]))
2247 case SImode:
2248 func = "_Qp_itoq";
2249 break;
2250 case DImode:
2251 func = "_Qp_xtoq";
2252 break;
2253 default:
2254 gcc_unreachable ();
2256 break;
2258 case UNSIGNED_FLOAT:
2259 switch (GET_MODE (operands[1]))
2261 case SImode:
2262 func = "_Qp_uitoq";
2263 break;
2264 case DImode:
2265 func = "_Qp_uxtoq";
2266 break;
2267 default:
2268 gcc_unreachable ();
2270 break;
2272 case FIX:
2273 switch (GET_MODE (operands[0]))
2275 case SImode:
2276 func = "_Qp_qtoi";
2277 break;
2278 case DImode:
2279 func = "_Qp_qtox";
2280 break;
2281 default:
2282 gcc_unreachable ();
2284 break;
2286 case UNSIGNED_FIX:
2287 switch (GET_MODE (operands[0]))
2289 case SImode:
2290 func = "_Qp_qtoui";
2291 break;
2292 case DImode:
2293 func = "_Qp_qtoux";
2294 break;
2295 default:
2296 gcc_unreachable ();
2298 break;
2300 default:
2301 gcc_unreachable ();
2304 emit_soft_tfmode_libcall (func, 2, operands);
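/* E.g. (float:TF (reg:SI ...)) is routed to _Qp_itoq, whose assumed
   shape is roughly

     void _Qp_itoq (long double *result, int a);

   with only the TFmode side passed by reference.  */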
2307 /* Expand a hard-float tfmode operation. All arguments must be in
2308 registers. */
2310 static void
2311 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2313 rtx op, dest;
2315 if (GET_RTX_CLASS (code) == RTX_UNARY)
2317 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2318 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2320 else
2322 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2323 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2324 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2325 operands[1], operands[2]);
2328 if (register_operand (operands[0], VOIDmode))
2329 dest = operands[0];
2330 else
2331 dest = gen_reg_rtx (GET_MODE (operands[0]));
2333 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2335 if (dest != operands[0])
2336 emit_move_insn (operands[0], dest);
2339 void
2340 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2342 if (TARGET_HARD_QUAD)
2343 emit_hard_tfmode_operation (code, operands);
2344 else
2345 emit_soft_tfmode_binop (code, operands);
2348 void
2349 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2351 if (TARGET_HARD_QUAD)
2352 emit_hard_tfmode_operation (code, operands);
2353 else
2354 emit_soft_tfmode_unop (code, operands);
2357 void
2358 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2360 if (TARGET_HARD_QUAD)
2361 emit_hard_tfmode_operation (code, operands);
2362 else
2363 emit_soft_tfmode_cvt (code, operands);
2366 /* Return nonzero if a branch/jump/call instruction will be emitting a
2367 nop into its delay slot. */
2370 empty_delay_slot (rtx insn)
2372 rtx seq;
2374 /* If no previous instruction (should not happen), return true. */
2375 if (PREV_INSN (insn) == NULL)
2376 return 1;
2378 seq = NEXT_INSN (PREV_INSN (insn));
2379 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2380 return 0;
2382 return 1;
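/* Rationale sketch: after delayed-branch scheduling, a branch with a
   filled slot is wrapped together with its delay insn in a SEQUENCE,
   roughly (insn (sequence [(jump_insn ...) (insn ...)])), so finding
   no SEQUENCE around INSN means its slot will hold a nop.  */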
2385 /* Return nonzero if TRIAL can go into the call delay slot. */
2388 tls_call_delay (rtx trial)
2390 rtx pat, unspec;
2392 /* Binutils allows
2393 call __tls_get_addr, %tgd_call (foo)
2394 add %l7, %o0, %o0, %tgd_add (foo)
2395 while Sun as/ld does not. */
2396 if (TARGET_GNU_TLS || !TARGET_TLS)
2397 return 1;
2399 pat = PATTERN (trial);
2400 if (GET_CODE (pat) != SET || GET_CODE (SET_DEST (pat)) != PLUS)
2401 return 1;
2403 unspec = XEXP (SET_DEST (pat), 1);
2404 if (GET_CODE (unspec) != UNSPEC
2405 || (XINT (unspec, 1) != UNSPEC_TLSGD
2406 && XINT (unspec, 1) != UNSPEC_TLSLDM))
2407 return 1;
2409 return 0;
2412 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2413 instruction. RETURN_P is true if the v9 variant 'return' is to be
2414 considered in the test too.
2416 TRIAL must be a SET whose destination is a REG appropriate for the
2417 'restore' instruction or, if RETURN_P is true, for the 'return'
2418 instruction. */
2420 static int
2421 eligible_for_restore_insn (rtx trial, bool return_p)
2423 rtx pat = PATTERN (trial);
2424 rtx src = SET_SRC (pat);
2426 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2427 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2428 && arith_operand (src, GET_MODE (src)))
2430 if (TARGET_ARCH64)
2431 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2432 else
2433 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2436 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2437 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2438 && arith_double_operand (src, GET_MODE (src)))
2439 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2441 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2442 else if (! TARGET_FPU && register_operand (src, SFmode))
2443 return 1;
2445 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2446 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2447 return 1;
2449 /* If we have the 'return' instruction, anything that does not use
2450 local or output registers and can go into a delay slot wins. */
2451 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2452 && (get_attr_in_uncond_branch_delay (trial)
2453 == IN_UNCOND_BRANCH_DELAY_TRUE))
2454 return 1;
2456 /* The 'restore src1,src2,dest' pattern for SImode. */
2457 else if (GET_CODE (src) == PLUS
2458 && register_operand (XEXP (src, 0), SImode)
2459 && arith_operand (XEXP (src, 1), SImode))
2460 return 1;
2462 /* The 'restore src1,src2,dest' pattern for DImode. */
2463 else if (GET_CODE (src) == PLUS
2464 && register_operand (XEXP (src, 0), DImode)
2465 && arith_double_operand (XEXP (src, 1), DImode))
2466 return 1;
2468 /* The 'restore src1,%lo(src2),dest' pattern. */
2469 else if (GET_CODE (src) == LO_SUM
2470 && ! TARGET_CM_MEDMID
2471 && ((register_operand (XEXP (src, 0), SImode)
2472 && immediate_operand (XEXP (src, 1), SImode))
2473 || (TARGET_ARCH64
2474 && register_operand (XEXP (src, 0), DImode)
2475 && immediate_operand (XEXP (src, 1), DImode))))
2476 return 1;
2478 /* The 'restore src,src,dest' pattern. */
2479 else if (GET_CODE (src) == ASHIFT
2480 && (register_operand (XEXP (src, 0), SImode)
2481 || register_operand (XEXP (src, 0), DImode))
2482 && XEXP (src, 1) == const1_rtx)
2483 return 1;
2485 return 0;
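/* Illustrative folding enabled by the PLUS case above: a final

     add     %o0, %o1, %i0
     ret
     restore

   can instead be emitted as

     ret
     restore %o0, %o1, %o0

   since the callee's %i0 is the caller's %o0 once the window slides.  */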
2488 /* Return nonzero if TRIAL can go into the function return's
2489 delay slot. */
2492 eligible_for_return_delay (rtx trial)
2494 rtx pat;
2496 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2497 return 0;
2499 if (get_attr_length (trial) != 1)
2500 return 0;
2502 /* If there are any call-saved registers, we would need to scan TRIAL
2503 to check that it does not reference them. For now just make it easy. */
2504 if (num_gfregs)
2505 return 0;
2507 /* If the function uses __builtin_eh_return, the eh_return machinery
2508 occupies the delay slot. */
2509 if (current_function_calls_eh_return)
2510 return 0;
2512 /* In the case of a true leaf function, anything can go into the slot. */
2513 if (sparc_leaf_function_p)
2514 return get_attr_in_uncond_branch_delay (trial)
2515 == IN_UNCOND_BRANCH_DELAY_TRUE;
2517 pat = PATTERN (trial);
2519 /* Otherwise, only operations which can be done in tandem with
2520 a `restore' or `return' insn can go into the delay slot. */
2521 if (GET_CODE (SET_DEST (pat)) != REG
2522 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2523 return 0;
2525 /* If this instruction sets up a floating-point register and we have a
2526 return instruction, it can probably go in. But restore will not work
2527 with FP_REGS. */
2528 if (REGNO (SET_DEST (pat)) >= 32)
2529 return (TARGET_V9
2530 && ! epilogue_renumber (&pat, 1)
2531 && (get_attr_in_uncond_branch_delay (trial)
2532 == IN_UNCOND_BRANCH_DELAY_TRUE));
2534 return eligible_for_restore_insn (trial, true);
2537 /* Return nonzero if TRIAL can go into the sibling call's
2538 delay slot. */
2541 eligible_for_sibcall_delay (rtx trial)
2543 rtx pat;
2545 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2546 return 0;
2548 if (get_attr_length (trial) != 1)
2549 return 0;
2551 pat = PATTERN (trial);
2553 if (sparc_leaf_function_p)
2555 /* If the tail call is done using the call instruction,
2556 we have to restore %o7 in the delay slot. */
2557 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2558 return 0;
2560 /* %g1 is used to build the function address. */
2561 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2562 return 0;
2564 return 1;
2567 /* Otherwise, only operations which can be done in tandem with
2568 a `restore' insn can go into the delay slot. */
2569 if (GET_CODE (SET_DEST (pat)) != REG
2570 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2571 || REGNO (SET_DEST (pat)) >= 32)
2572 return 0;
2574 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2575 in most cases. */
2576 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2577 return 0;
2579 return eligible_for_restore_insn (trial, false);
2583 short_branch (int uid1, int uid2)
2585 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2587 /* Leave a few words of "slop". */
2588 if (delta >= -1023 && delta <= 1022)
2589 return 1;
2591 return 0;
2594 /* Return nonzero if REG is not used after INSN.
2595 We assume REG is a reload reg, and therefore does
2596 not live past labels or calls or jumps. */
2598 reg_unused_after (rtx reg, rtx insn)
2600 enum rtx_code code, prev_code = UNKNOWN;
2602 while ((insn = NEXT_INSN (insn)))
2604 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2605 return 1;
2607 code = GET_CODE (insn);
2608 if (GET_CODE (insn) == CODE_LABEL)
2609 return 1;
2611 if (INSN_P (insn))
2613 rtx set = single_set (insn);
2614 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2615 if (set && in_src)
2616 return 0;
2617 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2618 return 1;
2619 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2620 return 0;
2622 prev_code = code;
2624 return 1;
2627 /* Determine if it's legal to put X into the constant pool. This
2628 is not possible if X contains the address of a symbol that is
2629 not constant (TLS) or not known at final link time (PIC). */
2631 static bool
2632 sparc_cannot_force_const_mem (rtx x)
2634 switch (GET_CODE (x))
2636 case CONST_INT:
2637 case CONST_DOUBLE:
2638 case CONST_VECTOR:
2639 /* Accept all non-symbolic constants. */
2640 return false;
2642 case LABEL_REF:
2643 /* Labels are OK iff we are non-PIC. */
2644 return flag_pic != 0;
2646 case SYMBOL_REF:
2647 /* 'Naked' TLS symbol references are never OK,
2648 non-TLS symbols are OK iff we are non-PIC. */
2649 if (SYMBOL_REF_TLS_MODEL (x))
2650 return true;
2651 else
2652 return flag_pic != 0;
2654 case CONST:
2655 return sparc_cannot_force_const_mem (XEXP (x, 0));
2656 case PLUS:
2657 case MINUS:
2658 return sparc_cannot_force_const_mem (XEXP (x, 0))
2659 || sparc_cannot_force_const_mem (XEXP (x, 1));
2660 case UNSPEC:
2661 return true;
2662 default:
2663 gcc_unreachable ();
2667 /* PIC support. */
2668 static GTY(()) char pic_helper_symbol_name[256];
2669 static GTY(()) rtx pic_helper_symbol;
2670 static GTY(()) bool pic_helper_emitted_p = false;
2671 static GTY(()) rtx global_offset_table;
2673 /* Ensure that we are not using patterns that are not OK with PIC. */
2676 check_pic (int i)
2678 switch (flag_pic)
2680 case 1:
2681 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2682 && (GET_CODE (recog_data.operand[i]) != CONST
2683 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2684 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2685 == global_offset_table)
2686 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2687 == CONST))));
2688 case 2:
2689 default:
2690 return 1;
2694 /* Return true if X is an address which needs a temporary register when
2695 reloaded while generating PIC code. */
2698 pic_address_needs_scratch (rtx x)
2700 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2701 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2702 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2703 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2704 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2705 return 1;
2707 return 0;
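/* Example: (const (plus (symbol_ref "sym") (const_int 0x12345))) needs
   a scratch register, since 0x12345 does not fit the 13-bit signed
   immediate checked by SMALL_INT; an offset such as 64 does fit.  */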
2710 /* Determine if a given RTX is a valid constant. We already know this
2711 satisfies CONSTANT_P. */
2713 bool
2714 legitimate_constant_p (rtx x)
2716 rtx inner;
2718 switch (GET_CODE (x))
2720 case SYMBOL_REF:
2721 /* TLS symbols are not constant. */
2722 if (SYMBOL_REF_TLS_MODEL (x))
2723 return false;
2724 break;
2726 case CONST:
2727 inner = XEXP (x, 0);
2729 /* Offsets of TLS symbols are never valid.
2730 Discourage CSE from creating them. */
2731 if (GET_CODE (inner) == PLUS
2732 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2733 return false;
2734 break;
2736 case CONST_DOUBLE:
2737 if (GET_MODE (x) == VOIDmode)
2738 return true;
2740 /* Floating point constants are generally not ok.
2741 The only exception is 0.0 in VIS. */
2742 if (TARGET_VIS
2743 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2744 && const_zero_operand (x, GET_MODE (x)))
2745 return true;
2747 return false;
2749 case CONST_VECTOR:
2750 /* Vector constants are generally not ok.
2751 The only exception is 0 in VIS. */
2752 if (TARGET_VIS
2753 && const_zero_operand (x, GET_MODE (x)))
2754 return true;
2756 return false;
2758 default:
2759 break;
2762 return true;
2765 /* Determine if a given RTX is a valid constant address. */
2767 bool
2768 constant_address_p (rtx x)
2770 switch (GET_CODE (x))
2772 case LABEL_REF:
2773 case CONST_INT:
2774 case HIGH:
2775 return true;
2777 case CONST:
2778 if (flag_pic && pic_address_needs_scratch (x))
2779 return false;
2780 return legitimate_constant_p (x);
2782 case SYMBOL_REF:
2783 return !flag_pic && legitimate_constant_p (x);
2785 default:
2786 return false;
2790 /* Nonzero if the constant value X is a legitimate general operand
2791 when generating PIC code. It is given that flag_pic is on and
2792 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2794 bool
2795 legitimate_pic_operand_p (rtx x)
2797 if (pic_address_needs_scratch (x))
2798 return false;
2799 if (SPARC_SYMBOL_REF_TLS_P (x)
2800 || (GET_CODE (x) == CONST
2801 && GET_CODE (XEXP (x, 0)) == PLUS
2802 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2803 return false;
2804 return true;
2807 /* Return nonzero if ADDR is a valid memory address.
2808 STRICT specifies whether strict register checking applies. */
2811 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2813 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2815 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2816 rs1 = addr;
2817 else if (GET_CODE (addr) == PLUS)
2819 rs1 = XEXP (addr, 0);
2820 rs2 = XEXP (addr, 1);
2822 /* Canonicalize. REG comes first; if there are no regs,
2823 LO_SUM comes first. */
2824 if (!REG_P (rs1)
2825 && GET_CODE (rs1) != SUBREG
2826 && (REG_P (rs2)
2827 || GET_CODE (rs2) == SUBREG
2828 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2830 rs1 = XEXP (addr, 1);
2831 rs2 = XEXP (addr, 0);
2834 if ((flag_pic == 1
2835 && rs1 == pic_offset_table_rtx
2836 && !REG_P (rs2)
2837 && GET_CODE (rs2) != SUBREG
2838 && GET_CODE (rs2) != LO_SUM
2839 && GET_CODE (rs2) != MEM
2840 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2841 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2842 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2843 || ((REG_P (rs1)
2844 || GET_CODE (rs1) == SUBREG)
2845 && RTX_OK_FOR_OFFSET_P (rs2)))
2847 imm1 = rs2;
2848 rs2 = NULL;
2850 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2851 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2853 /* We prohibit REG + REG for TFmode when there are no quad move insns
2854 and we consequently need to split. We do this because REG+REG
2855 is not an offsettable address. If we get the situation in reload
2856 where source and destination of a movtf pattern are both MEMs with
2857 REG+REG address, then only one of them gets converted to an
2858 offsettable address. */
2859 if (mode == TFmode
2860 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2861 return 0;
2863 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
2864 optimizing, because then mem_min_alignment is likely to be zero
2865 after reload and the forced split would lack a matching splitter
2866 pattern. */
2867 if (TARGET_ARCH32 && !optimize
2868 && (mode == DFmode || mode == DImode))
2869 return 0;
2871 else if (USE_AS_OFFSETABLE_LO10
2872 && GET_CODE (rs1) == LO_SUM
2873 && TARGET_ARCH64
2874 && ! TARGET_CM_MEDMID
2875 && RTX_OK_FOR_OLO10_P (rs2))
2877 rs2 = NULL;
2878 imm1 = XEXP (rs1, 1);
2879 rs1 = XEXP (rs1, 0);
2880 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2881 return 0;
2884 else if (GET_CODE (addr) == LO_SUM)
2886 rs1 = XEXP (addr, 0);
2887 imm1 = XEXP (addr, 1);
2889 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2890 return 0;
2892 /* We can't allow TFmode in 32-bit mode, because an offset greater
2893 than the alignment (8) may cause the LO_SUM to overflow. */
2894 if (mode == TFmode && TARGET_ARCH32)
2895 return 0;
2897 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2898 return 1;
2899 else
2900 return 0;
2902 if (GET_CODE (rs1) == SUBREG)
2903 rs1 = SUBREG_REG (rs1);
2904 if (!REG_P (rs1))
2905 return 0;
2907 if (rs2)
2909 if (GET_CODE (rs2) == SUBREG)
2910 rs2 = SUBREG_REG (rs2);
2911 if (!REG_P (rs2))
2912 return 0;
2915 if (strict)
2917 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
2918 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
2919 return 0;
2921 else
2923 if ((REGNO (rs1) >= 32
2924 && REGNO (rs1) != FRAME_POINTER_REGNUM
2925 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
2926 || (rs2
2927 && (REGNO (rs2) >= 32
2928 && REGNO (rs2) != FRAME_POINTER_REGNUM
2929 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
2930 return 0;
2932 return 1;
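/* Summary sketch of the address forms accepted above (STRICT checking
   and the TFmode/PIC restrictions aside):

     reg                       ld  [%o0], ...
     reg + reg                 ld  [%o0 + %o1], ...
     reg + simm13              ld  [%o0 + 64], ...
     lo_sum (reg, imm)         ld  [%o0 + %lo(sym)], ...
     simm13 constant           ld  [64], ...
*/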
2935 /* Construct the SYMBOL_REF for the tls_get_offset function. */
2937 static GTY(()) rtx sparc_tls_symbol;
2939 static rtx
2940 sparc_tls_get_addr (void)
2942 if (!sparc_tls_symbol)
2943 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
2945 return sparc_tls_symbol;
2948 static rtx
2949 sparc_tls_got (void)
2951 rtx temp;
2952 if (flag_pic)
2954 current_function_uses_pic_offset_table = 1;
2955 return pic_offset_table_rtx;
2958 if (!global_offset_table)
2959 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2960 temp = gen_reg_rtx (Pmode);
2961 emit_move_insn (temp, global_offset_table);
2962 return temp;
2965 /* Return 1 if *X is a thread-local symbol. */
2967 static int
2968 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
2970 return SPARC_SYMBOL_REF_TLS_P (*x);
2973 /* Return 1 if X contains a thread-local symbol. */
2975 bool
2976 sparc_tls_referenced_p (rtx x)
2978 if (!TARGET_HAVE_TLS)
2979 return false;
2981 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
2984 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
2985 this (thread-local) address. */
2988 legitimize_tls_address (rtx addr)
2990 rtx temp1, temp2, temp3, ret, o0, got, insn;
2992 gcc_assert (! no_new_pseudos);
2994 if (GET_CODE (addr) == SYMBOL_REF)
2995 switch (SYMBOL_REF_TLS_MODEL (addr))
2997 case TLS_MODEL_GLOBAL_DYNAMIC:
2998 start_sequence ();
2999 temp1 = gen_reg_rtx (SImode);
3000 temp2 = gen_reg_rtx (SImode);
3001 ret = gen_reg_rtx (Pmode);
3002 o0 = gen_rtx_REG (Pmode, 8);
3003 got = sparc_tls_got ();
3004 emit_insn (gen_tgd_hi22 (temp1, addr));
3005 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3006 if (TARGET_ARCH32)
3008 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3009 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3010 addr, const1_rtx));
3012 else
3014 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3015 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3016 addr, const1_rtx));
3018 CALL_INSN_FUNCTION_USAGE (insn)
3019 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3020 CALL_INSN_FUNCTION_USAGE (insn));
3021 insn = get_insns ();
3022 end_sequence ();
3023 emit_libcall_block (insn, ret, o0, addr);
3024 break;
3026 case TLS_MODEL_LOCAL_DYNAMIC:
3027 start_sequence ();
3028 temp1 = gen_reg_rtx (SImode);
3029 temp2 = gen_reg_rtx (SImode);
3030 temp3 = gen_reg_rtx (Pmode);
3031 ret = gen_reg_rtx (Pmode);
3032 o0 = gen_rtx_REG (Pmode, 8);
3033 got = sparc_tls_got ();
3034 emit_insn (gen_tldm_hi22 (temp1));
3035 emit_insn (gen_tldm_lo10 (temp2, temp1));
3036 if (TARGET_ARCH32)
3038 emit_insn (gen_tldm_add32 (o0, got, temp2));
3039 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3040 const1_rtx));
3042 else
3044 emit_insn (gen_tldm_add64 (o0, got, temp2));
3045 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3046 const1_rtx));
3048 CALL_INSN_FUNCTION_USAGE (insn)
3049 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3050 CALL_INSN_FUNCTION_USAGE (insn));
3051 insn = get_insns ();
3052 end_sequence ();
3053 emit_libcall_block (insn, temp3, o0,
3054 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3055 UNSPEC_TLSLD_BASE));
3056 temp1 = gen_reg_rtx (SImode);
3057 temp2 = gen_reg_rtx (SImode);
3058 emit_insn (gen_tldo_hix22 (temp1, addr));
3059 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3060 if (TARGET_ARCH32)
3061 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3062 else
3063 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3064 break;
3066 case TLS_MODEL_INITIAL_EXEC:
3067 temp1 = gen_reg_rtx (SImode);
3068 temp2 = gen_reg_rtx (SImode);
3069 temp3 = gen_reg_rtx (Pmode);
3070 got = sparc_tls_got ();
3071 emit_insn (gen_tie_hi22 (temp1, addr));
3072 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3073 if (TARGET_ARCH32)
3074 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3075 else
3076 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3077 if (TARGET_SUN_TLS)
3079 ret = gen_reg_rtx (Pmode);
3080 if (TARGET_ARCH32)
3081 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3082 temp3, addr));
3083 else
3084 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3085 temp3, addr));
3087 else
3088 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3089 break;
3091 case TLS_MODEL_LOCAL_EXEC:
3092 temp1 = gen_reg_rtx (Pmode);
3093 temp2 = gen_reg_rtx (Pmode);
3094 if (TARGET_ARCH32)
3096 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3097 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3099 else
3101 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3102 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3104 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3105 break;
3107 default:
3108 gcc_unreachable ();
3111 else
3112 gcc_unreachable (); /* for now ... */
3114 return ret;
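/* For reference, the 32-bit global-dynamic sequence built above
   corresponds to assembly along these lines (sym hypothetical):

     sethi %tgd_hi22(sym), %l1
     add   %l1, %tgd_lo10(sym), %l2
     add   %l7, %l2, %o0, %tgd_add(sym)
     call  __tls_get_addr, %tgd_call(sym)
      nop

   with the address of sym returned in %o0.  */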
3118 /* Legitimize PIC addresses. If the address is already position-independent,
3119 we return ORIG. Newly generated position-independent addresses go into a
3120 reg. This is REG if nonzero, otherwise we allocate register(s) as
3121 necessary. */
3124 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3125 rtx reg)
3127 if (GET_CODE (orig) == SYMBOL_REF)
3129 rtx pic_ref, address;
3130 rtx insn;
3132 if (reg == 0)
3134 gcc_assert (! reload_in_progress && ! reload_completed);
3135 reg = gen_reg_rtx (Pmode);
3138 if (flag_pic == 2)
3140 /* If not during reload, allocate another temp reg here for loading
3141 in the address, so that these instructions can be optimized
3142 properly. */
3143 rtx temp_reg = ((reload_in_progress || reload_completed)
3144 ? reg : gen_reg_rtx (Pmode));
3146 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3147 won't get confused into thinking that these two instructions
3148 are loading in the true address of the symbol. If in the
3149 future a PIC rtx exists, that should be used instead. */
3150 if (TARGET_ARCH64)
3152 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3153 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3155 else
3157 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3158 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3160 address = temp_reg;
3162 else
3163 address = orig;
3165 pic_ref = gen_const_mem (Pmode,
3166 gen_rtx_PLUS (Pmode,
3167 pic_offset_table_rtx, address));
3168 current_function_uses_pic_offset_table = 1;
3169 insn = emit_move_insn (reg, pic_ref);
3170 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3171 by loop. */
3172 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3173 REG_NOTES (insn));
3174 return reg;
3176 else if (GET_CODE (orig) == CONST)
3178 rtx base, offset;
3180 if (GET_CODE (XEXP (orig, 0)) == PLUS
3181 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3182 return orig;
3184 if (reg == 0)
3186 gcc_assert (! reload_in_progress && ! reload_completed);
3187 reg = gen_reg_rtx (Pmode);
3190 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3191 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3192 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3193 base == reg ? 0 : reg);
3195 if (GET_CODE (offset) == CONST_INT)
3197 if (SMALL_INT (offset))
3198 return plus_constant (base, INTVAL (offset));
3199 else if (! reload_in_progress && ! reload_completed)
3200 offset = force_reg (Pmode, offset);
3201 else
3202 /* If we reach here, then something is seriously wrong. */
3203 gcc_unreachable ();
3205 return gen_rtx_PLUS (Pmode, base, offset);
3207 else if (GET_CODE (orig) == LABEL_REF)
3208 /* ??? Why do we do this? */
3209 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3210 the register is live instead, in case it is eliminated. */
3211 current_function_uses_pic_offset_table = 1;
3213 return orig;
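/* Rough sketch of the 32-bit code generated above for a SYMBOL_REF
   with flag_pic == 2 (sym hypothetical):

     sethi %hi(sym), %g1          ! movsi_high_pic, inside an UNSPEC
     or    %g1, %lo(sym), %g1     ! movsi_lo_sum_pic
     ld    [%l7 + %g1], %g1       ! load of the GOT entry

   With flag_pic == 1 the GOT offset fits in 13 bits and the sethi/or
   pair is omitted.  */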
3216 /* Try machine-dependent ways of modifying an illegitimate address X
3217 to be legitimate. If we find one, return the new, valid address.
3219 OLDX is the address as it was before break_out_memory_refs was called.
3220 In some cases it is useful to look at this to decide what needs to be done.
3222 MODE is the mode of the operand pointed to by X. */
3225 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3227 rtx orig_x = x;
3229 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3230 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3231 force_operand (XEXP (x, 0), NULL_RTX));
3232 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3233 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3234 force_operand (XEXP (x, 1), NULL_RTX));
3235 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3236 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3237 XEXP (x, 1));
3238 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3239 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3240 force_operand (XEXP (x, 1), NULL_RTX));
3242 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3243 return x;
3245 if (SPARC_SYMBOL_REF_TLS_P (x))
3246 x = legitimize_tls_address (x);
3247 else if (flag_pic)
3248 x = legitimize_pic_address (x, mode, 0);
3249 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3250 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3251 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3252 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3253 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3254 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3255 else if (GET_CODE (x) == SYMBOL_REF
3256 || GET_CODE (x) == CONST
3257 || GET_CODE (x) == LABEL_REF)
3258 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3259 return x;
3262 /* Emit the special PIC helper function. */
3264 static void
3265 emit_pic_helper (void)
3267 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3268 int align;
3270 text_section ();
3272 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3273 if (align > 0)
3274 ASM_OUTPUT_ALIGN (asm_out_file, align);
3275 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3276 if (flag_delayed_branch)
3277 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3278 pic_name, pic_name);
3279 else
3280 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3281 pic_name, pic_name);
3283 pic_helper_emitted_p = true;
3286 /* Emit code to load the PIC register. */
3288 static void
3289 load_pic_register (bool delay_pic_helper)
3291 int orig_flag_pic = flag_pic;
3293 /* If we haven't initialized the special PIC symbols, do so now. */
3294 if (!pic_helper_symbol_name[0])
3296 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3297 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3298 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3301 /* If we haven't emitted the special PIC helper function, do so now unless
3302 we are requested to delay it. */
3303 if (!delay_pic_helper && !pic_helper_emitted_p)
3304 emit_pic_helper ();
3306 flag_pic = 0;
3307 if (TARGET_ARCH64)
3308 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3309 pic_helper_symbol));
3310 else
3311 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3312 pic_helper_symbol));
3313 flag_pic = orig_flag_pic;
3315 /* Need to emit this whether or not we obey regdecls,
3316 since setjmp/longjmp can cause life info to screw up.
3317 ??? In the case where we don't obey regdecls, this is not sufficient
3318 since we may not fall out the bottom. */
3319 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
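/* The load_pcrel_sym pattern used above expands to something like

     sethi %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
     call  .LADDPC                ! the helper emitted above
      add  %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   leaving the GOT address in the PIC register after the helper adds
   in %o7.  */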
3322 /* Return 1 if RTX is a MEM which is known to be aligned to at
3323 least a DESIRED byte boundary. */
3326 mem_min_alignment (rtx mem, int desired)
3328 rtx addr, base, offset;
3330 /* If it's not a MEM we can't accept it. */
3331 if (GET_CODE (mem) != MEM)
3332 return 0;
3334 /* Obviously... */
3335 if (!TARGET_UNALIGNED_DOUBLES
3336 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3337 return 1;
3339 /* ??? The rest of the function predates MEM_ALIGN so
3340 there is probably a bit of redundancy. */
3341 addr = XEXP (mem, 0);
3342 base = offset = NULL_RTX;
3343 if (GET_CODE (addr) == PLUS)
3345 if (GET_CODE (XEXP (addr, 0)) == REG)
3347 base = XEXP (addr, 0);
3349 /* What we are saying here is that if the base
3350 REG is aligned properly, the compiler will make
3351 sure any REG based index upon it will be so
3352 as well. */
3353 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3354 offset = XEXP (addr, 1);
3355 else
3356 offset = const0_rtx;
3359 else if (GET_CODE (addr) == REG)
3361 base = addr;
3362 offset = const0_rtx;
3365 if (base != NULL_RTX)
3367 int regno = REGNO (base);
3369 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3371 /* Check if the compiler has recorded some information
3372 about the alignment of the base REG. If reload has
3373 completed, we already matched with proper alignments.
3374 If not running global_alloc, reload might give us an
3375 unaligned pointer to the local stack, though. */
3376 if (((cfun != 0
3377 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3378 || (optimize && reload_completed))
3379 && (INTVAL (offset) & (desired - 1)) == 0)
3380 return 1;
3382 else
3384 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3385 return 1;
3388 else if (! TARGET_UNALIGNED_DOUBLES
3389 || CONSTANT_P (addr)
3390 || GET_CODE (addr) == LO_SUM)
3392 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3393 is true, in which case we can only assume that an access is aligned if
3394 it is to a constant address, or the address involves a LO_SUM. */
3395 return 1;
3398 /* An obviously unaligned address. */
3399 return 0;
3403 /* Vectors to keep interesting information about registers where it can easily
3404 be got. We used to use the actual mode value as the bit number, but there
3405 are more than 32 modes now. Instead we use two tables: one indexed by
3406 hard register number, and one indexed by mode. */
3408 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3409 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
3410 mapped into one sparc_mode_class mode. */
3412 enum sparc_mode_class {
3413 S_MODE, D_MODE, T_MODE, O_MODE,
3414 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3415 CC_MODE, CCFP_MODE
3418 /* Modes for single-word and smaller quantities. */
3419 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3421 /* Modes for double-word and smaller quantities. */
3422 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3424 /* Modes for quad-word and smaller quantities. */
3425 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3427 /* Modes for 8-word and smaller quantities. */
3428 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3430 /* Modes for single-float quantities. We must allow any single word or
3431 smaller quantity. This is because the fix/float conversion instructions
3432 take integer inputs/outputs from the float registers. */
3433 #define SF_MODES (S_MODES)
3435 /* Modes for double-float and smaller quantities. */
3436 #define DF_MODES (S_MODES | D_MODES)
3438 /* Modes for double-float only quantities. */
3439 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3441 /* Modes for quad-float only quantities. */
3442 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3444 /* Modes for quad-float and smaller quantities. */
3445 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3447 /* Modes for quad-float and double-float quantities. */
3448 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3450 /* Modes for quad-float pair only quantities. */
3451 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3453 /* Modes for quad-float pairs and smaller quantities. */
3454 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3456 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3458 /* Modes for condition codes. */
3459 #define CC_MODES (1 << (int) CC_MODE)
3460 #define CCFP_MODES (1 << (int) CCFP_MODE)
3462 /* Value is 1 if register/mode pair is acceptable on sparc.
3463 The funny mixture of D and T modes is because integer operations
3464 do not specially operate on tetra quantities, so non-quad-aligned
3465 registers can hold quadword quantities (except %o4 and %i4 because
3466 they cross fixed registers). */
3468 /* This points to either the 32 bit or the 64 bit version. */
3469 const int *hard_regno_mode_classes;
3471 static const int hard_32bit_mode_classes[] = {
3472 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3473 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3474 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3475 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3477 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3478 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3479 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3480 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3482 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3483 and none can hold SFmode/SImode values. */
3484 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3485 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3486 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3487 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3489 /* %fcc[0123] */
3490 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3492 /* %icc */
3493 CC_MODES
3496 static const int hard_64bit_mode_classes[] = {
3497 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3498 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3499 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3500 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3502 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3503 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3504 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3505 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3507 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3508 and none can hold SFmode/SImode values. */
3509 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3510 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3511 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3512 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3514 /* %fcc[0123] */
3515 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3517 /* %icc */
3518 CC_MODES
3521 int sparc_mode_class [NUM_MACHINE_MODES];
3523 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3525 static void
3526 sparc_init_modes (void)
3528 int i;
3530 for (i = 0; i < NUM_MACHINE_MODES; i++)
3532 switch (GET_MODE_CLASS (i))
3534 case MODE_INT:
3535 case MODE_PARTIAL_INT:
3536 case MODE_COMPLEX_INT:
3537 if (GET_MODE_SIZE (i) <= 4)
3538 sparc_mode_class[i] = 1 << (int) S_MODE;
3539 else if (GET_MODE_SIZE (i) == 8)
3540 sparc_mode_class[i] = 1 << (int) D_MODE;
3541 else if (GET_MODE_SIZE (i) == 16)
3542 sparc_mode_class[i] = 1 << (int) T_MODE;
3543 else if (GET_MODE_SIZE (i) == 32)
3544 sparc_mode_class[i] = 1 << (int) O_MODE;
3545 else
3546 sparc_mode_class[i] = 0;
3547 break;
3548 case MODE_VECTOR_INT:
3549 if (GET_MODE_SIZE (i) <= 4)
3550 sparc_mode_class[i] = 1 << (int)SF_MODE;
3551 else if (GET_MODE_SIZE (i) == 8)
3552 sparc_mode_class[i] = 1 << (int)DF_MODE;
3553 break;
3554 case MODE_FLOAT:
3555 case MODE_COMPLEX_FLOAT:
3556 if (GET_MODE_SIZE (i) <= 4)
3557 sparc_mode_class[i] = 1 << (int) SF_MODE;
3558 else if (GET_MODE_SIZE (i) == 8)
3559 sparc_mode_class[i] = 1 << (int) DF_MODE;
3560 else if (GET_MODE_SIZE (i) == 16)
3561 sparc_mode_class[i] = 1 << (int) TF_MODE;
3562 else if (GET_MODE_SIZE (i) == 32)
3563 sparc_mode_class[i] = 1 << (int) OF_MODE;
3564 else
3565 sparc_mode_class[i] = 0;
3566 break;
3567 case MODE_CC:
3568 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3569 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3570 else
3571 sparc_mode_class[i] = 1 << (int) CC_MODE;
3572 break;
3573 default:
3574 sparc_mode_class[i] = 0;
3575 break;
3579 if (TARGET_ARCH64)
3580 hard_regno_mode_classes = hard_64bit_mode_classes;
3581 else
3582 hard_regno_mode_classes = hard_32bit_mode_classes;
3584 /* Initialize the array used by REGNO_REG_CLASS. */
3585 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3587 if (i < 16 && TARGET_V8PLUS)
3588 sparc_regno_reg_class[i] = I64_REGS;
3589 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3590 sparc_regno_reg_class[i] = GENERAL_REGS;
3591 else if (i < 64)
3592 sparc_regno_reg_class[i] = FP_REGS;
3593 else if (i < 96)
3594 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3595 else if (i < 100)
3596 sparc_regno_reg_class[i] = FPCC_REGS;
3597 else
3598 sparc_regno_reg_class[i] = NO_REGS;
3602 /* Compute the frame size required by the function. This function is called
3603 during the reload pass and also by sparc_expand_prologue. */
3605 HOST_WIDE_INT
3606 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3608 int outgoing_args_size = (current_function_outgoing_args_size
3609 + REG_PARM_STACK_SPACE (current_function_decl));
3610 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3611 int i;
3613 if (TARGET_ARCH64)
3615 for (i = 0; i < 8; i++)
3616 if (regs_ever_live[i] && ! call_used_regs[i])
3617 n_regs += 2;
3619 else
3621 for (i = 0; i < 8; i += 2)
3622 if ((regs_ever_live[i] && ! call_used_regs[i])
3623 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3624 n_regs += 2;
3627 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3628 if ((regs_ever_live[i] && ! call_used_regs[i])
3629 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3630 n_regs += 2;
3632 /* Set up values for use in prologue and epilogue. */
3633 num_gfregs = n_regs;
3635 if (leaf_function_p
3636 && n_regs == 0
3637 && size == 0
3638 && current_function_outgoing_args_size == 0)
3639 actual_fsize = apparent_fsize = 0;
3640 else
3642 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3643 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3644 apparent_fsize += n_regs * 4;
3645 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3648 /* Make sure nothing can clobber our register windows.
3649 If a SAVE must be done, or there is a stack-local variable,
3650 the register window area must be allocated. */
3651 if (! leaf_function_p || size > 0)
3652 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3654 return SPARC_STACK_ALIGN (actual_fsize);
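/* Worked sketch (hypothetical numbers): a 32-bit non-leaf function
   with 40 bytes of locals, two call-saved register pairs (n_regs == 4)
   and no outgoing args would get roughly

     apparent_fsize = round8 (40 - STARTING_FRAME_OFFSET) + 4*4
     actual_fsize   = apparent_fsize + FIRST_PARM_OFFSET (decl)

   before the final SPARC_STACK_ALIGN rounding.  */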
3657 /* Output any necessary .register pseudo-ops. */
3659 void
3660 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3662 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3663 int i;
3665 if (TARGET_ARCH32)
3666 return;
3668 /* Check if %g[2367] were used without
3669 .register being printed for them already. */
3670 for (i = 2; i < 8; i++)
3672 if (regs_ever_live [i]
3673 && ! sparc_hard_reg_printed [i])
3675 sparc_hard_reg_printed [i] = 1;
3676 fprintf (file, "\t.register\t%%g%d, #scratch\n", i);
3678 if (i == 3) i = 5;
3680 #endif
3683 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3684 as needed. LOW should be double-word aligned for 32-bit registers.
3685 Return the new OFFSET. */
3687 #define SORR_SAVE 0
3688 #define SORR_RESTORE 1
3690 static int
3691 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3693 rtx mem, insn;
3694 int i;
3696 if (TARGET_ARCH64 && high <= 32)
3698 for (i = low; i < high; i++)
3700 if (regs_ever_live[i] && ! call_used_regs[i])
3702 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3703 set_mem_alias_set (mem, sparc_sr_alias_set);
3704 if (action == SORR_SAVE)
3706 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3707 RTX_FRAME_RELATED_P (insn) = 1;
3709 else /* action == SORR_RESTORE */
3710 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3711 offset += 8;
3715 else
3717 for (i = low; i < high; i += 2)
3719 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
3720 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
3721 enum machine_mode mode;
3722 int regno;
3724 if (reg0 && reg1)
3726 mode = i < 32 ? DImode : DFmode;
3727 regno = i;
3729 else if (reg0)
3731 mode = i < 32 ? SImode : SFmode;
3732 regno = i;
3734 else if (reg1)
3736 mode = i < 32 ? SImode : SFmode;
3737 regno = i + 1;
3738 offset += 4;
3740 else
3741 continue;
3743 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3744 set_mem_alias_set (mem, sparc_sr_alias_set);
3745 if (action == SORR_SAVE)
3747 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3748 RTX_FRAME_RELATED_P (insn) = 1;
3750 else /* action == SORR_RESTORE */
3751 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3753 /* Always preserve double-word alignment. */
3754 offset = (offset + 7) & -8;
3758 return offset;
3761 /* Emit code to save call-saved registers. */
3763 static void
3764 emit_save_or_restore_regs (int action)
3766 HOST_WIDE_INT offset;
3767 rtx base;
3769 offset = frame_base_offset - apparent_fsize;
3771 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3773 /* ??? This might be optimized a little as %g1 might already have a
3774 value close enough that a single add insn will do. */
3775 /* ??? Although, all of this is probably only a temporary fix
3776 because if %g1 can hold a function result, then
3777 sparc_expand_epilogue will lose (the result will be
3778 clobbered). */
3779 base = gen_rtx_REG (Pmode, 1);
3780 emit_move_insn (base, GEN_INT (offset));
3781 emit_insn (gen_rtx_SET (VOIDmode,
3782 base,
3783 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3784 offset = 0;
3786 else
3787 base = frame_base_reg;
3789 offset = save_or_restore_regs (0, 8, base, offset, action);
3790 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3793 /* Generate a save_register_window insn. */
3795 static rtx
3796 gen_save_register_window (rtx increment)
3798 if (TARGET_ARCH64)
3799 return gen_save_register_windowdi (increment);
3800 else
3801 return gen_save_register_windowsi (increment);
3804 /* Generate an increment for the stack pointer. */
3806 static rtx
3807 gen_stack_pointer_inc (rtx increment)
3809 if (TARGET_ARCH64)
3810 return gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, increment);
3811 else
3812 return gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, increment);
3815 /* Generate a decrement for the stack pointer. */
3817 static rtx
3818 gen_stack_pointer_dec (rtx decrement)
3820 if (TARGET_ARCH64)
3821 return gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, decrement);
3822 else
3823 return gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, decrement);
3826 /* Expand the function prologue. The prologue is responsible for reserving
3827 storage for the frame, saving the call-saved registers and loading the
3828 PIC register if needed. */
3830 void
3831 sparc_expand_prologue (void)
3833 rtx insn;
3834 int i;
3836 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3837 on the final value of the flag means deferring the prologue/epilogue
3838 expansion until just before the second scheduling pass, which is too
3839 late to emit multiple epilogues or return insns.
3841 Of course we are making the assumption that the value of the flag
3842 will not change between now and its final value. Of the three parts
3843 of the formula, only the last one can reasonably vary. Let's take a
3844 closer look, after assuming that the first two ones are set to true
3845 (otherwise the last value is effectively silenced).
3847 If only_leaf_regs_used returns false, the global predicate will also
3848 be false so the actual frame size calculated below will be positive.
3849 As a consequence, the save_register_window insn will be emitted in
3850 the instruction stream; now this insn explicitly references %fp
3851 which is not a leaf register so only_leaf_regs_used will always
3852 return false subsequently.
3854 If only_leaf_regs_used returns true, we hope that the subsequent
3855 optimization passes won't cause non-leaf registers to pop up. For
3856 example, the regrename pass has special provisions to not rename to
3857 non-leaf registers in a leaf function. */
3858 sparc_leaf_function_p
3859 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3861 /* Need to use actual_fsize, since we are also allocating
3862 space for our callee (and our own register save area). */
3863 actual_fsize
3864 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
3866 /* Advertise that the data calculated just above are now valid. */
3867 sparc_prologue_data_valid_p = true;
3869 if (sparc_leaf_function_p)
3871 frame_base_reg = stack_pointer_rtx;
3872 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
3874 else
3876 frame_base_reg = hard_frame_pointer_rtx;
3877 frame_base_offset = SPARC_STACK_BIAS;
3880 if (actual_fsize == 0)
3881 /* do nothing. */ ;
3882 else if (sparc_leaf_function_p)
3884 if (actual_fsize <= 4096)
3885 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
3886 else if (actual_fsize <= 8192)
3888 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
3889 /* %sp is still the CFA register. */
3890 RTX_FRAME_RELATED_P (insn) = 1;
3891 insn
3892 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3894 else
3896 rtx reg = gen_rtx_REG (Pmode, 1);
3897 emit_move_insn (reg, GEN_INT (-actual_fsize));
3898 insn = emit_insn (gen_stack_pointer_inc (reg));
3899 REG_NOTES (insn) =
3900 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3901 PATTERN (gen_stack_pointer_inc (GEN_INT (-actual_fsize))),
3902 REG_NOTES (insn));
3905 RTX_FRAME_RELATED_P (insn) = 1;
3907 else
3909 if (actual_fsize <= 4096)
3910 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
3911 else if (actual_fsize <= 8192)
3913 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
3914 /* %sp is not the CFA register anymore. */
3915 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3917 else
3919 rtx reg = gen_rtx_REG (Pmode, 1);
3920 emit_move_insn (reg, GEN_INT (-actual_fsize));
3921 insn = emit_insn (gen_save_register_window (reg));
3924 RTX_FRAME_RELATED_P (insn) = 1;
3925 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
3926 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
3929 if (num_gfregs)
3930 emit_save_or_restore_regs (SORR_SAVE);
3932 /* Load the PIC register if needed. */
3933 if (flag_pic && current_function_uses_pic_offset_table)
3934 load_pic_register (false);
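/* A worked instance of the three-way split above, as a minimal sketch
   (the 'emit' helper is hypothetical, for illustration only).  SPARC
   arithmetic immediates are 13-bit signed, i.e. in [-4096, 4095], so a
   single "add %sp, -N, %sp" reaches at most N == 4096; frames up to
   8192 bytes fit in two increments, and anything larger goes through
   the %g1 scratch register, exactly as in sparc_expand_prologue.  */
#if 0
static void
sketch_allocate_frame (long fsize)
{
  if (fsize == 0)
    ;                                             /* nothing to do */
  else if (fsize <= 4096)
    emit ("add %%sp, %ld, %%sp", -fsize);         /* one immediate */
  else if (fsize <= 8192)
    {
      emit ("add %%sp, -4096, %%sp");             /* first chunk */
      emit ("add %%sp, %ld, %%sp", 4096 - fsize); /* remainder >= -4096 */
    }
  else
    {
      emit ("set %ld, %%g1", -fsize);             /* constant into scratch */
      emit ("add %%sp, %%g1, %%sp");
    }
}
#endif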
3937 /* This function generates the assembly code for function entry, which boils
3938 down to emitting the necessary .register directives. */
3940 static void
3941 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3943 /* Check that the assumption we made in sparc_expand_prologue is valid. */
3944 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
3946 sparc_output_scratch_registers (file);
3949 /* Expand the function epilogue, either normal or part of a sibcall.
3950 We emit all the instructions except the return or the call. */
3952 void
3953 sparc_expand_epilogue (void)
3955 if (num_gfregs)
3956 emit_save_or_restore_regs (SORR_RESTORE);
3958 if (actual_fsize == 0)
3959 /* do nothing. */ ;
3960 else if (sparc_leaf_function_p)
3962 if (actual_fsize <= 4096)
3963 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
3964 else if (actual_fsize <= 8192)
3966 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
3967 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
3969 else
3971 rtx reg = gen_rtx_REG (Pmode, 1);
3972 emit_move_insn (reg, GEN_INT (-actual_fsize));
3973 emit_insn (gen_stack_pointer_dec (reg));
3978 /* Return true if it is appropriate to emit `return' instructions in the
3979 body of a function. */
3981 bool
3982 sparc_can_use_return_insn_p (void)
3984 return sparc_prologue_data_valid_p
3985 && (actual_fsize == 0 || !sparc_leaf_function_p);
3988 /* This function generates the assembly code for function exit. */
3990 static void
3991 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3993    /* If code does not drop into the epilogue, we still have to output
3994 a dummy nop for the sake of sane backtraces. Otherwise, if the
3995 last two instructions of a function were "call foo; dslot;" this
3996 can make the return PC of foo (i.e. address of call instruction
3997 plus 8) point to the first instruction in the next function. */
3999 rtx insn, last_real_insn;
4001 insn = get_last_insn ();
4003 last_real_insn = prev_real_insn (insn);
4004 if (last_real_insn
4005 && GET_CODE (last_real_insn) == INSN
4006 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4007 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4009 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4010 fputs("\tnop\n", file);
4012 sparc_output_deferred_case_vectors ();
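/* To illustrate the scenario above: a call at address A writes A into
   %o7 and the callee comes back to %o7 + 8, the instruction after the
   delay slot.  If "call foo; nop" were the last 8 bytes of this
   function, the return address A + 8 would be the first instruction of
   the next function in the section; the extra nop emitted above keeps
   A + 8 inside the current function:

       call  foo        ! at A, so %o7 = A
        nop             ! delay slot, A + 4
       nop              ! padding, A + 8 -- foo returns here  */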
4015 /* Output a 'restore' instruction. */
4017 static void
4018 output_restore (rtx pat)
4020 rtx operands[3];
4022 if (! pat)
4024 fputs ("\t restore\n", asm_out_file);
4025 return;
4028 gcc_assert (GET_CODE (pat) == SET);
4030 operands[0] = SET_DEST (pat);
4031 pat = SET_SRC (pat);
4033 switch (GET_CODE (pat))
4035 case PLUS:
4036 operands[1] = XEXP (pat, 0);
4037 operands[2] = XEXP (pat, 1);
4038 output_asm_insn (" restore %r1, %2, %Y0", operands);
4039 break;
4040 case LO_SUM:
4041 operands[1] = XEXP (pat, 0);
4042 operands[2] = XEXP (pat, 1);
4043 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4044 break;
4045 case ASHIFT:
4046 operands[1] = XEXP (pat, 0);
4047 gcc_assert (XEXP (pat, 1) == const1_rtx);
4048 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4049 break;
4050 default:
4051 operands[1] = pat;
4052 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4053 break;
4057 /* Output a return. */
4059 const char *
4060 output_return (rtx insn)
4062 if (sparc_leaf_function_p)
4064 /* This is a leaf function so we don't have to bother restoring the
4065 register window, which frees us from dealing with the convoluted
4066 semantics of restore/return. We simply output the jump to the
4067 return address and the insn in the delay slot (if any). */
4069 gcc_assert (! current_function_calls_eh_return);
4071 return "jmp\t%%o7+%)%#";
4073 else
4075 /* This is a regular function so we have to restore the register window.
4076 We may have a pending insn for the delay slot, which will be either
4077 combined with the 'restore' instruction or put in the delay slot of
4078 the 'return' instruction. */
4080 if (current_function_calls_eh_return)
4082 /* If the function uses __builtin_eh_return, the eh_return
4083 machinery occupies the delay slot. */
4084 gcc_assert (! final_sequence);
4086 if (! flag_delayed_branch)
4087 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4089 if (TARGET_V9)
4090 fputs ("\treturn\t%i7+8\n", asm_out_file);
4091 else
4092 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4094 if (flag_delayed_branch)
4095 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4096 else
4097 fputs ("\t nop\n", asm_out_file);
4099 else if (final_sequence)
4101 rtx delay, pat;
4103 delay = NEXT_INSN (insn);
4104 gcc_assert (delay);
4106 pat = PATTERN (delay);
4108 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4110 epilogue_renumber (&pat, 0);
4111 return "return\t%%i7+%)%#";
4113 else
4115 output_asm_insn ("jmp\t%%i7+%)", NULL);
4116 output_restore (pat);
4117 PATTERN (delay) = gen_blockage ();
4118 INSN_CODE (delay) = -1;
4121 else
4123 /* The delay slot is empty. */
4124 if (TARGET_V9)
4125 return "return\t%%i7+%)\n\t nop";
4126 else if (flag_delayed_branch)
4127 return "jmp\t%%i7+%)\n\t restore";
4128 else
4129 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4133 return "";
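/* A gloss on the sequences above: after a 'restore', the caller's
   register window is active again, so the return address saved in %i7
   becomes visible as %o7.  That is why the V9 'return %i7+8' (which
   restores and jumps in a single insn) and the V8 pair
   'restore; jmp %o7+8' both target the same saved address.  */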
4136 /* Output a sibling call. */
4138 const char *
4139 output_sibcall (rtx insn, rtx call_operand)
4141 rtx operands[1];
4143 gcc_assert (flag_delayed_branch);
4145 operands[0] = call_operand;
4147 if (sparc_leaf_function_p)
4149 /* This is a leaf function so we don't have to bother restoring the
4150 register window. We simply output the jump to the function and
4151 the insn in the delay slot (if any). */
4153 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4155 if (final_sequence)
4156 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4157 operands);
4158 else
4159 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4160       it into a branch if possible.  */
4161 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4162 operands);
4164 else
4166 /* This is a regular function so we have to restore the register window.
4167 We may have a pending insn for the delay slot, which will be combined
4168 with the 'restore' instruction. */
4170 output_asm_insn ("call\t%a0, 0", operands);
4172 if (final_sequence)
4174 rtx delay = NEXT_INSN (insn);
4175 gcc_assert (delay);
4177 output_restore (PATTERN (delay));
4179 PATTERN (delay) = gen_blockage ();
4180 INSN_CODE (delay) = -1;
4182 else
4183 output_restore (NULL_RTX);
4186 return "";
4189 /* Functions for handling argument passing.
4191 For 32-bit, the first 6 args are normally in registers and the rest are
4192 pushed. Any arg that starts within the first 6 words is at least
4193    partially passed in a register unless its data type forbids it.
4195 For 64-bit, the argument registers are laid out as an array of 16 elements
4196 and arguments are added sequentially. The first 6 int args and up to the
4197 first 16 fp args (depending on size) are passed in regs.
4199    Slot    Stack      Integral   Float   Float in structure   Double   Long Double
4200    ----    -----      --------   -----   ------------------   ------   -----------
4201     15   [SP+248]                %f31       %f30,%f31          %d30
4202     14   [SP+240]                %f29       %f28,%f29          %d28       %q28
4203     13   [SP+232]                %f27       %f26,%f27          %d26
4204     12   [SP+224]                %f25       %f24,%f25          %d24       %q24
4205     11   [SP+216]                %f23       %f22,%f23          %d22
4206     10   [SP+208]                %f21       %f20,%f21          %d20       %q20
4207      9   [SP+200]                %f19       %f18,%f19          %d18
4208      8   [SP+192]                %f17       %f16,%f17          %d16       %q16
4209      7   [SP+184]                %f15       %f14,%f15          %d14
4210      6   [SP+176]                %f13       %f12,%f13          %d12       %q12
4211      5   [SP+168]      %o5       %f11       %f10,%f11          %d10
4212      4   [SP+160]      %o4       %f9        %f8,%f9            %d8        %q8
4213      3   [SP+152]      %o3       %f7        %f6,%f7            %d6
4214      2   [SP+144]      %o2       %f5        %f4,%f5            %d4        %q4
4215      1   [SP+136]      %o1       %f3        %f2,%f3            %d2
4216      0   [SP+128]      %o0       %f1        %f0,%f1            %d0        %q0
4218    Here SP = %sp if -mno-stack-bias, %sp+stack_bias otherwise.
4220 Integral arguments are always passed as 64-bit quantities appropriately
4221 extended.
4223 Passing of floating point values is handled as follows.
4224 If a prototype is in scope:
4225 If the value is in a named argument (i.e. not a stdarg function or a
4226 value not part of the `...') then the value is passed in the appropriate
4227 fp reg.
4228 If the value is part of the `...' and is passed in one of the first 6
4229 slots then the value is passed in the appropriate int reg.
4230 If the value is part of the `...' and is not passed in one of the first 6
4231 slots then the value is passed in memory.
4232 If a prototype is not in scope:
4233 If the value is one of the first 6 arguments the value is passed in the
4234 appropriate integer reg and the appropriate fp reg.
4235 If the value is not one of the first 6 arguments the value is passed in
4236 the appropriate fp reg and in memory.
4239 Summary of the calling conventions implemented by GCC on SPARC:
4241 32-bit ABI:
4242                               size      argument     return value
4244     small integer              <4       int. reg.      int. reg.
4245     word                        4       int. reg.      int. reg.
4246     double word                 8       int. reg.      int. reg.
4248     _Complex small integer     <8       int. reg.      int. reg.
4249     _Complex word               8       int. reg.      int. reg.
4250     _Complex double word       16        memory        int. reg.
4252     vector integer            <=8       int. reg.       FP reg.
4253     vector integer             >8        memory         memory
4255     float                       4       int. reg.       FP reg.
4256     double                      8       int. reg.       FP reg.
4257     long double                16        memory          memory
4259     _Complex float              8        memory          FP reg.
4260     _Complex double            16        memory          FP reg.
4261     _Complex long double       32        memory          FP reg.
4263     vector float              any        memory          memory
4265     aggregate                 any        memory          memory
4269 64-bit ABI:
4270                               size      argument     return value
4272     small integer              <8       int. reg.      int. reg.
4273     word                        8       int. reg.      int. reg.
4274     double word                16       int. reg.      int. reg.
4276     _Complex small integer    <16       int. reg.      int. reg.
4277     _Complex word              16       int. reg.      int. reg.
4278     _Complex double word       32        memory        int. reg.
4280     vector integer           <=16        FP reg.        FP reg.
4281     vector integer         16<s<=32      memory         FP reg.
4282     vector integer            >32        memory         memory
4284     float                       4        FP reg.        FP reg.
4285     double                      8        FP reg.        FP reg.
4286     long double                16        FP reg.        FP reg.
4288     _Complex float              8        FP reg.        FP reg.
4289     _Complex double            16        FP reg.        FP reg.
4290     _Complex long double       32        memory         FP reg.
4292     vector float             <=16        FP reg.        FP reg.
4293     vector float           16<s<=32      memory         FP reg.
4294     vector float              >32        memory         memory
4296     aggregate                <=16         reg.           reg.
4297     aggregate              16<s<=32      memory          reg.
4298     aggregate                 >32        memory         memory
4302 Note #1: complex floating-point types follow the extended SPARC ABIs as
4303 implemented by the Sun compiler.
4305    Note #2: integral vector types follow the conventions of the scalar
4306    floating-point types, to match what is implemented by the Sun VIS SDK.
4308    Note #3: floating-point vector types follow the conventions of
4309    aggregate types.  */
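/* To make the 64-bit tables concrete, here is a hypothetical example
   (illustrative declarations only, guarded out of the build):  */
#if 0
struct mixed { float f; int i; };     /* 8 bytes -> one argument slot */

extern void callee (struct mixed m);  /* aggregate <= 16: passed in regs;
                                         m.f lands in %f0 (offset 0 of
                                         slot 0), m.i is promoted into
                                         the slot's integer register at
                                         byte offset 4 */

extern void scalar (float x);         /* by contrast, a bare float in
                                         slot 0 is right-justified into
                                         %f1 (see the table above) */
#endif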
4312 /* Maximum number of int regs for args. */
4313 #define SPARC_INT_ARG_MAX 6
4314 /* Maximum number of fp regs for args. */
4315 #define SPARC_FP_ARG_MAX 16
4317 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
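/* For instance, with 8-byte words (TARGET_ARCH64), ROUND_ADVANCE (12)
   is (12 + 8 - 1) / 8 == 2: a 12-byte object consumes two slots.  */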
4319 /* Handle the INIT_CUMULATIVE_ARGS macro.
4320 Initialize a variable CUM of type CUMULATIVE_ARGS
4321 for a call to a function whose data type is FNTYPE.
4322 For a library call, FNTYPE is 0. */
4324 void
4325 init_cumulative_args (struct sparc_args *cum, tree fntype,
4326 rtx libname ATTRIBUTE_UNUSED,
4327 tree fndecl ATTRIBUTE_UNUSED)
4329 cum->words = 0;
4330 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4331 cum->libcall_p = fntype == 0;
4334 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4335 When a prototype says `char' or `short', really pass an `int'. */
4337 static bool
4338 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4340 return TARGET_ARCH32 ? true : false;
4343 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4345 static bool
4346 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4348 return TARGET_ARCH64 ? true : false;
4351 /* Scan the record type TYPE and return the following predicates:
4352 - INTREGS_P: the record contains at least one field or sub-field
4353 that is eligible for promotion in integer registers.
4354    - FPREGS_P: the record contains at least one field or sub-field
4355 that is eligible for promotion in floating-point registers.
4356 - PACKED_P: the record contains at least one field that is packed.
4358 Sub-fields are not taken into account for the PACKED_P predicate. */
4360 static void
4361 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4363 tree field;
4365 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4367 if (TREE_CODE (field) == FIELD_DECL)
4369 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4370 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4371 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4372 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4373 && TARGET_FPU)
4374 *fpregs_p = 1;
4375 else
4376 *intregs_p = 1;
4378 if (packed_p && DECL_PACKED (field))
4379 *packed_p = 1;
4384 /* Compute the slot number to pass an argument in.
4385 Return the slot number or -1 if passing on the stack.
4387 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4388 the preceding args and about the function being called.
4389 MODE is the argument's machine mode.
4390 TYPE is the data type of the argument (as a tree).
4391 This is null for libcalls where that information may
4392 not be available.
4393 NAMED is nonzero if this argument is a named parameter
4394 (otherwise it is an extra parameter matching an ellipsis).
4395 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4396 *PREGNO records the register number to use if scalar type.
4397 *PPADDING records the amount of padding needed in words. */
4399 static int
4400 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4401 tree type, int named, int incoming_p,
4402 int *pregno, int *ppadding)
4404 int regbase = (incoming_p
4405 ? SPARC_INCOMING_INT_ARG_FIRST
4406 : SPARC_OUTGOING_INT_ARG_FIRST);
4407 int slotno = cum->words;
4408 enum mode_class mclass;
4409 int regno;
4411 *ppadding = 0;
4413 if (type && TREE_ADDRESSABLE (type))
4414 return -1;
4416 if (TARGET_ARCH32
4417 && mode == BLKmode
4418 && type
4419 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4420 return -1;
4422 /* For SPARC64, objects requiring 16-byte alignment get it. */
4423 if (TARGET_ARCH64
4424 && GET_MODE_ALIGNMENT (mode) >= 2 * BITS_PER_WORD
4425 && (slotno & 1) != 0)
4426 slotno++, *ppadding = 1;
4428 mclass = GET_MODE_CLASS (mode);
4429 if (type && TREE_CODE (type) == VECTOR_TYPE)
4431 /* Vector types deserve special treatment because they are
4432 polymorphic wrt their mode, depending upon whether VIS
4433 instructions are enabled. */
4434 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4436 /* The SPARC port defines no floating-point vector modes. */
4437 gcc_assert (mode == BLKmode);
4439 else
4441 /* Integral vector types should either have a vector
4442 mode or an integral mode, because we are guaranteed
4443 by pass_by_reference that their size is not greater
4444 than 16 bytes and TImode is 16-byte wide. */
4445 gcc_assert (mode != BLKmode);
4447 /* Vector integers are handled like floats according to
4448 the Sun VIS SDK. */
4449 mclass = MODE_FLOAT;
4453 switch (mclass)
4455 case MODE_FLOAT:
4456 case MODE_COMPLEX_FLOAT:
4457 if (TARGET_ARCH64 && TARGET_FPU && named)
4459 if (slotno >= SPARC_FP_ARG_MAX)
4460 return -1;
4461 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4462 /* Arguments filling only one single FP register are
4463 right-justified in the outer double FP register. */
4464 if (GET_MODE_SIZE (mode) <= 4)
4465 regno++;
4466 break;
4468 /* fallthrough */
4470 case MODE_INT:
4471 case MODE_COMPLEX_INT:
4472 if (slotno >= SPARC_INT_ARG_MAX)
4473 return -1;
4474 regno = regbase + slotno;
4475 break;
4477 case MODE_RANDOM:
4478 if (mode == VOIDmode)
4479 /* MODE is VOIDmode when generating the actual call. */
4480 return -1;
4482 gcc_assert (mode == BLKmode);
4484 /* For SPARC64, objects requiring 16-byte alignment get it. */
4485 if (TARGET_ARCH64
4486 && type
4487 && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
4488 && (slotno & 1) != 0)
4489 slotno++, *ppadding = 1;
4491 if (TARGET_ARCH32 || !type || (TREE_CODE (type) == UNION_TYPE))
4493 if (slotno >= SPARC_INT_ARG_MAX)
4494 return -1;
4495 regno = regbase + slotno;
4497 else /* TARGET_ARCH64 && type */
4499 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4501 /* First see what kinds of registers we would need. */
4502 if (TREE_CODE (type) == VECTOR_TYPE)
4503 fpregs_p = 1;
4504 else
4505 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4507 /* The ABI obviously doesn't specify how packed structures
4508 are passed. These are defined to be passed in int regs
4509 if possible, otherwise memory. */
4510 if (packed_p || !named)
4511 fpregs_p = 0, intregs_p = 1;
4513 /* If all arg slots are filled, then must pass on stack. */
4514 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4515 return -1;
4517 /* If there are only int args and all int arg slots are filled,
4518 then must pass on stack. */
4519 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4520 return -1;
4522 /* Note that even if all int arg slots are filled, fp members may
4523 still be passed in regs if such regs are available.
4524 *PREGNO isn't set because there may be more than one, it's up
4525 to the caller to compute them. */
4526 return slotno;
4528 break;
4530 default :
4531 gcc_unreachable ();
4534 *pregno = regno;
4535 return slotno;
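/* A worked example of the slot arithmetic above, assuming TARGET_ARCH64
   and a prototyped call f (int a, double b, double c): all three
   arguments are named, so 'a' gets slot 0 (integer reg %o0 on the
   outgoing side), 'b' slot 1 and 'c' slot 2.  For 'c', mclass is
   MODE_FLOAT and regno = SPARC_FP_ARG_FIRST + 2 * 2, i.e. the pair
   %f4/%f5 holding %d4 -- matching row 2 of the table further up.  */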
4538 /* Handle recursive register counting for structure field layout. */
4540 struct function_arg_record_value_parms
4542 rtx ret; /* return expression being built. */
4543 int slotno; /* slot number of the argument. */
4544 int named; /* whether the argument is named. */
4545 int regbase; /* regno of the base register. */
4546 int stack; /* 1 if part of the argument is on the stack. */
4547 int intoffset; /* offset of the first pending integer field. */
4548 unsigned int nregs; /* number of words passed in registers. */
4551 static void function_arg_record_value_3
4552 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4553 static void function_arg_record_value_2
4554 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4555 static void function_arg_record_value_1
4556 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4557 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4558 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4560 /* A subroutine of function_arg_record_value. Traverse the structure
4561 recursively and determine how many registers will be required. */
4563 static void
4564 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4565 struct function_arg_record_value_parms *parms,
4566 bool packed_p)
4568 tree field;
4570 /* We need to compute how many registers are needed so we can
4571 allocate the PARALLEL but before we can do that we need to know
4572 whether there are any packed fields. The ABI obviously doesn't
4573 specify how structures are passed in this case, so they are
4574 defined to be passed in int regs if possible, otherwise memory,
4575 regardless of whether there are fp values present. */
4577 if (! packed_p)
4578 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4580 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4582 packed_p = true;
4583 break;
4587 /* Compute how many registers we need. */
4588 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4590 if (TREE_CODE (field) == FIELD_DECL)
4592 HOST_WIDE_INT bitpos = startbitpos;
4594 if (DECL_SIZE (field) != 0)
4596 if (integer_zerop (DECL_SIZE (field)))
4597 continue;
4599 if (host_integerp (bit_position (field), 1))
4600 bitpos += int_bit_position (field);
4603 /* ??? FIXME: else assume zero offset. */
4605 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4606 function_arg_record_value_1 (TREE_TYPE (field),
4607 bitpos,
4608 parms,
4609 packed_p);
4610 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4611 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4612 && TARGET_FPU
4613 && parms->named
4614 && ! packed_p)
4616 if (parms->intoffset != -1)
4618 unsigned int startbit, endbit;
4619 int intslots, this_slotno;
4621 startbit = parms->intoffset & -BITS_PER_WORD;
4622 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4624 intslots = (endbit - startbit) / BITS_PER_WORD;
4625 this_slotno = parms->slotno + parms->intoffset
4626 / BITS_PER_WORD;
4628 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4630 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4631 /* We need to pass this field on the stack. */
4632 parms->stack = 1;
4635 parms->nregs += intslots;
4636 parms->intoffset = -1;
4639            /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4640 If it wasn't true we wouldn't be here. */
4641 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4642 && DECL_MODE (field) == BLKmode)
4643 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4644 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4645 parms->nregs += 2;
4646 else
4647 parms->nregs += 1;
4649 else
4651 if (parms->intoffset == -1)
4652 parms->intoffset = bitpos;
4658 /* A subroutine of function_arg_record_value. Assign the bits of the
4659 structure between parms->intoffset and bitpos to integer registers. */
4661 static void
4662 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4663 struct function_arg_record_value_parms *parms)
4665 enum machine_mode mode;
4666 unsigned int regno;
4667 unsigned int startbit, endbit;
4668 int this_slotno, intslots, intoffset;
4669 rtx reg;
4671 if (parms->intoffset == -1)
4672 return;
4674 intoffset = parms->intoffset;
4675 parms->intoffset = -1;
4677 startbit = intoffset & -BITS_PER_WORD;
4678 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4679 intslots = (endbit - startbit) / BITS_PER_WORD;
4680 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4682 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4683 if (intslots <= 0)
4684 return;
4686 /* If this is the trailing part of a word, only load that much into
4687 the register. Otherwise load the whole register. Note that in
4688     the latter case we may pick up unwanted bits.  It's not a problem
4689     at the moment, but we may wish to revisit this.  */
4691 if (intoffset % BITS_PER_WORD != 0)
4692 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4693 MODE_INT);
4694 else
4695 mode = word_mode;
4697 intoffset /= BITS_PER_UNIT;
4700 regno = parms->regbase + this_slotno;
4701 reg = gen_rtx_REG (mode, regno);
4702 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4703 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4705 this_slotno += 1;
4706 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4707 mode = word_mode;
4708 parms->nregs += 1;
4709 intslots -= 1;
4711 while (intslots > 0);
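/* Checking the mask arithmetic above with BITS_PER_WORD == 64: for an
   integer region starting at bit 100 and ending before bit 200,
   startbit = 100 & -64 = 64 and endbit = (200 + 63) & -64 = 256, so
   (256 - 64) / 64 = 3 slots are assigned.  Since 100 % 64 != 0, the
   first load uses a narrower MODE_INT mode; the rest use word_mode.  */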
4714 /* A subroutine of function_arg_record_value. Traverse the structure
4715 recursively and assign bits to floating point registers. Track which
4716 bits in between need integer registers; invoke function_arg_record_value_3
4717 to make that happen. */
4719 static void
4720 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4721 struct function_arg_record_value_parms *parms,
4722 bool packed_p)
4724 tree field;
4726 if (! packed_p)
4727 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4729 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4731 packed_p = true;
4732 break;
4736 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4738 if (TREE_CODE (field) == FIELD_DECL)
4740 HOST_WIDE_INT bitpos = startbitpos;
4742 if (DECL_SIZE (field) != 0)
4744 if (integer_zerop (DECL_SIZE (field)))
4745 continue;
4747 if (host_integerp (bit_position (field), 1))
4748 bitpos += int_bit_position (field);
4751 /* ??? FIXME: else assume zero offset. */
4753 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4754 function_arg_record_value_2 (TREE_TYPE (field),
4755 bitpos,
4756 parms,
4757 packed_p);
4758 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4759 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4760 && TARGET_FPU
4761 && parms->named
4762 && ! packed_p)
4764 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4765 int regno, nregs, pos;
4766 enum machine_mode mode = DECL_MODE (field);
4767 rtx reg;
4769 function_arg_record_value_3 (bitpos, parms);
4771 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4772 && mode == BLKmode)
4774 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4775 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4777 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4779 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4780 nregs = 2;
4782 else
4783 nregs = 1;
4785 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4786 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4787 regno++;
4788 reg = gen_rtx_REG (mode, regno);
4789 pos = bitpos / BITS_PER_UNIT;
4790 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4791 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4792 parms->nregs += 1;
4793 while (--nregs > 0)
4795 regno += GET_MODE_SIZE (mode) / 4;
4796 reg = gen_rtx_REG (mode, regno);
4797 pos += GET_MODE_SIZE (mode);
4798 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4799 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4800 parms->nregs += 1;
4803 else
4805 if (parms->intoffset == -1)
4806 parms->intoffset = bitpos;
4812 /* Used by function_arg and function_value to implement the complex
4813 conventions of the 64-bit ABI for passing and returning structures.
4814 Return an expression valid as a return value for the two macros
4815 FUNCTION_ARG and FUNCTION_VALUE.
4817 TYPE is the data type of the argument (as a tree).
4818 This is null for libcalls where that information may
4819 not be available.
4820 MODE is the argument's machine mode.
4821 SLOTNO is the index number of the argument's slot in the parameter array.
4822 NAMED is nonzero if this argument is a named parameter
4823 (otherwise it is an extra parameter matching an ellipsis).
4824 REGBASE is the regno of the base register for the parameter array. */
4826 static rtx
4827 function_arg_record_value (tree type, enum machine_mode mode,
4828 int slotno, int named, int regbase)
4830 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4831 struct function_arg_record_value_parms parms;
4832 unsigned int nregs;
4834 parms.ret = NULL_RTX;
4835 parms.slotno = slotno;
4836 parms.named = named;
4837 parms.regbase = regbase;
4838 parms.stack = 0;
4840 /* Compute how many registers we need. */
4841 parms.nregs = 0;
4842 parms.intoffset = 0;
4843 function_arg_record_value_1 (type, 0, &parms, false);
4845 /* Take into account pending integer fields. */
4846 if (parms.intoffset != -1)
4848 unsigned int startbit, endbit;
4849 int intslots, this_slotno;
4851 startbit = parms.intoffset & -BITS_PER_WORD;
4852 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4853 intslots = (endbit - startbit) / BITS_PER_WORD;
4854 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4856 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4858 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4859 /* We need to pass this field on the stack. */
4860 parms.stack = 1;
4863 parms.nregs += intslots;
4865 nregs = parms.nregs;
4867 /* Allocate the vector and handle some annoying special cases. */
4868 if (nregs == 0)
4870 /* ??? Empty structure has no value? Duh? */
4871 if (typesize <= 0)
4873 /* Though there's nothing really to store, return a word register
4874 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
4875 leads to breakage due to the fact that there are zero bytes to
4876 load. */
4877 return gen_rtx_REG (mode, regbase);
4879 else
4881 /* ??? C++ has structures with no fields, and yet a size. Give up
4882 for now and pass everything back in integer registers. */
4883 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4885 if (nregs + slotno > SPARC_INT_ARG_MAX)
4886 nregs = SPARC_INT_ARG_MAX - slotno;
4888 gcc_assert (nregs != 0);
4890 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
4892 /* If at least one field must be passed on the stack, generate
4893 (parallel [(expr_list (nil) ...) ...]) so that all fields will
4894 also be passed on the stack. We can't do much better because the
4895 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
4896 of structures for which the fields passed exclusively in registers
4897 are not at the beginning of the structure. */
4898 if (parms.stack)
4899 XVECEXP (parms.ret, 0, 0)
4900 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4902 /* Fill in the entries. */
4903 parms.nregs = 0;
4904 parms.intoffset = 0;
4905 function_arg_record_value_2 (type, 0, &parms, false);
4906 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4908 gcc_assert (parms.nregs == nregs);
4910 return parms.ret;
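/* As an illustration, for a hypothetical struct { double d; long l; }
   passed in slot 0 of a 64-bit call, the function above builds

       (parallel:BLK [(expr_list (reg:DF %f0) (const_int 0))
                      (expr_list (reg:DI %o1) (const_int 8))])

   i.e. one EXPR_LIST per register, paired with the byte offset of the
   piece it carries; a leading (expr_list (nil) (const_int 0)) entry
   would additionally force the whole argument onto the stack.  */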
4913 /* Used by function_arg and function_value to implement the conventions
4914 of the 64-bit ABI for passing and returning unions.
4915 Return an expression valid as a return value for the two macros
4916 FUNCTION_ARG and FUNCTION_VALUE.
4918 SIZE is the size in bytes of the union.
4919 MODE is the argument's machine mode.
4920 REGNO is the hard register the union will be passed in. */
4922 static rtx
4923 function_arg_union_value (int size, enum machine_mode mode, int slotno,
4924 int regno)
4926 int nwords = ROUND_ADVANCE (size), i;
4927 rtx regs;
4929 /* See comment in previous function for empty structures. */
4930 if (nwords == 0)
4931 return gen_rtx_REG (mode, regno);
4933 if (slotno == SPARC_INT_ARG_MAX - 1)
4934 nwords = 1;
4936 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
4938 for (i = 0; i < nwords; i++)
4940 /* Unions are passed left-justified. */
4941 XVECEXP (regs, 0, i)
4942 = gen_rtx_EXPR_LIST (VOIDmode,
4943 gen_rtx_REG (word_mode, regno),
4944 GEN_INT (UNITS_PER_WORD * i));
4945 regno++;
4948 return regs;
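/* For example, a 16-byte union arriving in slot 0 on 64-bit becomes

       (parallel [(expr_list (reg:DI %o0) (const_int 0))
                  (expr_list (reg:DI %o1) (const_int 8))])

   -- left-justified, one word register per word, in contrast to the
   field-by-field promotion done for records above.  */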
4951 /* Used by function_arg and function_value to implement the conventions
4952 for passing and returning large (BLKmode) vectors.
4953 Return an expression valid as a return value for the two macros
4954 FUNCTION_ARG and FUNCTION_VALUE.
4956 SIZE is the size in bytes of the vector.
4957 BASE_MODE is the argument's base machine mode.
4958 REGNO is the FP hard register the vector will be passed in. */
4960 static rtx
4961 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
4963 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
4964 int nregs = size / base_mode_size, i;
4965 rtx regs;
4967 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
4969 for (i = 0; i < nregs; i++)
4971 XVECEXP (regs, 0, i)
4972 = gen_rtx_EXPR_LIST (VOIDmode,
4973 gen_rtx_REG (base_mode, regno),
4974 GEN_INT (base_mode_size * i));
4975 regno += base_mode_size / 4;
4978 return regs;
4981 /* Handle the FUNCTION_ARG macro.
4982 Determine where to put an argument to a function.
4983 Value is zero to push the argument on the stack,
4984 or a hard register in which to store the argument.
4986 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4987 the preceding args and about the function being called.
4988 MODE is the argument's machine mode.
4989 TYPE is the data type of the argument (as a tree).
4990 This is null for libcalls where that information may
4991 not be available.
4992 NAMED is nonzero if this argument is a named parameter
4993 (otherwise it is an extra parameter matching an ellipsis).
4994 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
4997 function_arg (const struct sparc_args *cum, enum machine_mode mode,
4998 tree type, int named, int incoming_p)
5000 int regbase = (incoming_p
5001 ? SPARC_INCOMING_INT_ARG_FIRST
5002 : SPARC_OUTGOING_INT_ARG_FIRST);
5003 int slotno, regno, padding;
5004 enum mode_class mclass = GET_MODE_CLASS (mode);
5005 rtx reg;
5007 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5008 &regno, &padding);
5010 if (slotno == -1)
5011 return 0;
5013 if (TARGET_ARCH32)
5015 reg = gen_rtx_REG (mode, regno);
5016 return reg;
5019 if (type && TREE_CODE (type) == RECORD_TYPE)
5021 /* Structures up to 16 bytes in size are passed in arg slots on the
5022 stack and are promoted to registers where possible. */
5024 gcc_assert (int_size_in_bytes (type) <= 16);
5026 return function_arg_record_value (type, mode, slotno, named, regbase);
5028 else if (type && TREE_CODE (type) == UNION_TYPE)
5030 HOST_WIDE_INT size = int_size_in_bytes (type);
5032 gcc_assert (size <= 16);
5034 return function_arg_union_value (size, mode, slotno, regno);
5036 else if (type && TREE_CODE (type) == VECTOR_TYPE)
5038 /* Vector types deserve special treatment because they are
5039 polymorphic wrt their mode, depending upon whether VIS
5040 instructions are enabled. */
5041 HOST_WIDE_INT size = int_size_in_bytes (type);
5043 gcc_assert (size <= 16);
5045 if (mode == BLKmode)
5046 return function_arg_vector_value (size,
5047 TYPE_MODE (TREE_TYPE (type)),
5048 SPARC_FP_ARG_FIRST + 2*slotno);
5049 else
5050 mclass = MODE_FLOAT;
5053 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5054 but also have the slot allocated for them.
5055 If no prototype is in scope fp values in register slots get passed
5056 in two places, either fp regs and int regs or fp regs and memory. */
5057 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5058 && SPARC_FP_REG_P (regno))
5060 reg = gen_rtx_REG (mode, regno);
5061 if (cum->prototype_p || cum->libcall_p)
5063 /* "* 2" because fp reg numbers are recorded in 4 byte
5064 quantities. */
5065 #if 0
5066 /* ??? This will cause the value to be passed in the fp reg and
5067 in the stack. When a prototype exists we want to pass the
5068 value in the reg but reserve space on the stack. That's an
5069 optimization, and is deferred [for a bit]. */
5070 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5071 return gen_rtx_PARALLEL (mode,
5072 gen_rtvec (2,
5073 gen_rtx_EXPR_LIST (VOIDmode,
5074 NULL_RTX, const0_rtx),
5075 gen_rtx_EXPR_LIST (VOIDmode,
5076 reg, const0_rtx)));
5077 else
5078 #else
5079 /* ??? It seems that passing back a register even when past
5080 the area declared by REG_PARM_STACK_SPACE will allocate
5081 space appropriately, and will not copy the data onto the
5082 stack, exactly as we desire.
5084 This is due to locate_and_pad_parm being called in
5085 expand_call whenever reg_parm_stack_space > 0, which
5086 while beneficial to our example here, would seem to be
5087 in error from what had been intended. Ho hum... -- r~ */
5088 #endif
5089 return reg;
5091 else
5093 rtx v0, v1;
5095 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5097 int intreg;
5099 /* On incoming, we don't need to know that the value
5100 is passed in %f0 and %i0, and it confuses other parts
5101             causing needless spillage even in the simplest cases.  */
5102 if (incoming_p)
5103 return reg;
5105 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5106 + (regno - SPARC_FP_ARG_FIRST) / 2);
5108 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5109 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5110 const0_rtx);
5111 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5113 else
5115 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5116 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5117 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5121 else
5123 /* Scalar or complex int. */
5124 reg = gen_rtx_REG (mode, regno);
5127 return reg;
5130 /* For an arg passed partly in registers and partly in memory,
5131 this is the number of bytes of registers used.
5132 For args passed entirely in registers or entirely in memory, zero.
5134 Any arg that starts in the first 6 regs but won't entirely fit in them
5135 needs partial registers on v8. On v9, structures with integer
5136 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5137 values that begin in the last fp reg [where "last fp reg" varies with the
5138 mode] will be split between that reg and memory. */
5140 static int
5141 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5142 tree type, bool named)
5144 int slotno, regno, padding;
5146    /* We pass 0 for incoming_p here; it doesn't matter.  */
5147 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5149 if (slotno == -1)
5150 return 0;
5152 if (TARGET_ARCH32)
5154 if ((slotno + (mode == BLKmode
5155 ? ROUND_ADVANCE (int_size_in_bytes (type))
5156 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5157 > SPARC_INT_ARG_MAX)
5158 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5160 else
5162 /* We are guaranteed by pass_by_reference that the size of the
5163 argument is not greater than 16 bytes, so we only need to return
5164 one word if the argument is partially passed in registers. */
5166 if (type && AGGREGATE_TYPE_P (type))
5168 int size = int_size_in_bytes (type);
5170 if (size > UNITS_PER_WORD
5171 && slotno == SPARC_INT_ARG_MAX - 1)
5172 return UNITS_PER_WORD;
5174 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5175 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5176 && ! (TARGET_FPU && named)))
5178 /* The complex types are passed as packed types. */
5179 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5180 && slotno == SPARC_INT_ARG_MAX - 1)
5181 return UNITS_PER_WORD;
5183 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5185 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5186 > SPARC_FP_ARG_MAX)
5187 return UNITS_PER_WORD;
5191 return 0;
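/* A concrete 32-bit case of the above: a DImode argument whose first
   word lands in slot 5 has slotno + ROUND_ADVANCE (8) == 7, which
   exceeds SPARC_INT_ARG_MAX, so (6 - 5) * UNITS_PER_WORD == 4 bytes
   travel in %o5 and the remaining 4 bytes go on the stack.  */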
5194 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5195 Specify whether to pass the argument by reference. */
5197 static bool
5198 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5199 enum machine_mode mode, tree type,
5200 bool named ATTRIBUTE_UNUSED)
5202 if (TARGET_ARCH32)
5204 /* Original SPARC 32-bit ABI says that structures and unions,
5205 and quad-precision floats are passed by reference. For Pascal,
5206 also pass arrays by reference. All other base types are passed
5207 in registers.
5209 Extended ABI (as implemented by the Sun compiler) says that all
5210 complex floats are passed by reference. Pass complex integers
5211 in registers up to 8 bytes. More generally, enforce the 2-word
5212 cap for passing arguments in registers.
5214 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5215 integers are passed like floats of the same size, that is in
5216 registers up to 8 bytes. Pass all vector floats by reference
5217 like structure and unions. */
5218 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5219 || mode == SCmode
5220 /* Catch CDImode, TFmode, DCmode and TCmode. */
5221 || GET_MODE_SIZE (mode) > 8
5222 || (type
5223 && TREE_CODE (type) == VECTOR_TYPE
5224 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5226 else
5228 /* Original SPARC 64-bit ABI says that structures and unions
5229 smaller than 16 bytes are passed in registers, as well as
5230 all other base types. For Pascal, pass arrays by reference.
5232 Extended ABI (as implemented by the Sun compiler) says that
5233 complex floats are passed in registers up to 16 bytes. Pass
5234 all complex integers in registers up to 16 bytes. More generally,
5235 enforce the 2-word cap for passing arguments in registers.
5237 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5238 integers are passed like floats of the same size, that is in
5239 registers (up to 16 bytes). Pass all vector floats like structure
5240 and unions. */
5241 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5242 || (type
5243 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5244 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5245 /* Catch CTImode and TCmode. */
5246 || GET_MODE_SIZE (mode) > 16);
5250 /* Handle the FUNCTION_ARG_ADVANCE macro.
5251 Update the data in CUM to advance over an argument
5252 of mode MODE and data type TYPE.
5253 TYPE is null for libcalls where that information may not be available. */
5255 void
5256 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5257 tree type, int named)
5259 int slotno, regno, padding;
5261    /* We pass 0 for incoming_p here; it doesn't matter.  */
5262 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5264 /* If register required leading padding, add it. */
5265 if (slotno != -1)
5266 cum->words += padding;
5268 if (TARGET_ARCH32)
5270 cum->words += (mode != BLKmode
5271 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5272 : ROUND_ADVANCE (int_size_in_bytes (type)));
5274 else
5276 if (type && AGGREGATE_TYPE_P (type))
5278 int size = int_size_in_bytes (type);
5280 if (size <= 8)
5281 ++cum->words;
5282 else if (size <= 16)
5283 cum->words += 2;
5284 else /* passed by reference */
5285 ++cum->words;
5287 else
5289 cum->words += (mode != BLKmode
5290 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5291 : ROUND_ADVANCE (int_size_in_bytes (type)));
5296 /* Handle the FUNCTION_ARG_PADDING macro.
5297    For the 64-bit ABI, structs are always stored left-shifted in their
5298    argument slot.  */
5300 enum direction
5301 function_arg_padding (enum machine_mode mode, tree type)
5303 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5304 return upward;
5306 /* Fall back to the default. */
5307 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5310 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5311 Specify whether to return the return value in memory. */
5313 static bool
5314 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5316 if (TARGET_ARCH32)
5317 /* Original SPARC 32-bit ABI says that structures and unions,
5318 and quad-precision floats are returned in memory. All other
5319 base types are returned in registers.
5321 Extended ABI (as implemented by the Sun compiler) says that
5322 all complex floats are returned in registers (8 FP registers
5323 at most for '_Complex long double'). Return all complex integers
5324 in registers (4 at most for '_Complex long long').
5326 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5327 integers are returned like floats of the same size, that is in
5328 registers up to 8 bytes and in memory otherwise. Return all
5329 vector floats in memory like structure and unions; note that
5330 they always have BLKmode like the latter. */
5331 return (TYPE_MODE (type) == BLKmode
5332 || TYPE_MODE (type) == TFmode
5333 || (TREE_CODE (type) == VECTOR_TYPE
5334 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5335 else
5336 /* Original SPARC 64-bit ABI says that structures and unions
5337 smaller than 32 bytes are returned in registers, as well as
5338 all other base types.
5340 Extended ABI (as implemented by the Sun compiler) says that all
5341 complex floats are returned in registers (8 FP registers at most
5342 for '_Complex long double'). Return all complex integers in
5343 registers (4 at most for '_Complex TItype').
5345 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5346 integers are returned like floats of the same size, that is in
5347 registers. Return all vector floats like structure and unions;
5348 note that they always have BLKmode like the latter. */
5349 return ((TYPE_MODE (type) == BLKmode
5350 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5353 /* Handle the TARGET_STRUCT_VALUE target hook.
5354 Return where to find the structure return value address. */
5356 static rtx
5357 sparc_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, int incoming)
5359 if (TARGET_ARCH64)
5360 return 0;
5361 else
5363 rtx mem;
5365 if (incoming)
5366 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5367 STRUCT_VALUE_OFFSET));
5368 else
5369 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5370 STRUCT_VALUE_OFFSET));
5372 set_mem_alias_set (mem, struct_value_alias_set);
5373 return mem;
5377 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5378 For v9, function return values are subject to the same rules as arguments,
5379 except that up to 32 bytes may be returned in registers. */
5382 function_value (tree type, enum machine_mode mode, int incoming_p)
5384 /* Beware that the two values are swapped here wrt function_arg. */
5385 int regbase = (incoming_p
5386 ? SPARC_OUTGOING_INT_ARG_FIRST
5387 : SPARC_INCOMING_INT_ARG_FIRST);
5388 enum mode_class mclass = GET_MODE_CLASS (mode);
5389 int regno;
5391 if (type && TREE_CODE (type) == VECTOR_TYPE)
5393 /* Vector types deserve special treatment because they are
5394 polymorphic wrt their mode, depending upon whether VIS
5395 instructions are enabled. */
5396 HOST_WIDE_INT size = int_size_in_bytes (type);
5398 gcc_assert ((TARGET_ARCH32 && size <= 8)
5399 || (TARGET_ARCH64 && size <= 32));
5401 if (mode == BLKmode)
5402 return function_arg_vector_value (size,
5403 TYPE_MODE (TREE_TYPE (type)),
5404 SPARC_FP_ARG_FIRST);
5405 else
5406 mclass = MODE_FLOAT;
5408 else if (type && TARGET_ARCH64)
5410 if (TREE_CODE (type) == RECORD_TYPE)
5412 /* Structures up to 32 bytes in size are passed in registers,
5413 promoted to fp registers where possible. */
5415 gcc_assert (int_size_in_bytes (type) <= 32);
5417 return function_arg_record_value (type, mode, 0, 1, regbase);
5419 else if (TREE_CODE (type) == UNION_TYPE)
5421 HOST_WIDE_INT size = int_size_in_bytes (type);
5423 gcc_assert (size <= 32);
5425 return function_arg_union_value (size, mode, 0, regbase);
5427 else if (AGGREGATE_TYPE_P (type))
5429 /* All other aggregate types are passed in an integer register
5430 in a mode corresponding to the size of the type. */
5431 HOST_WIDE_INT bytes = int_size_in_bytes (type);
5433 gcc_assert (bytes <= 32);
5435 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
5437 /* ??? We probably should have made the same ABI change in
5438 3.4.0 as the one we made for unions. The latter was
5439 required by the SCD though, while the former is not
5440 specified, so we favored compatibility and efficiency.
5442 Now we're stuck for aggregates larger than 16 bytes,
5443 because OImode vanished in the meantime. Let's not
5444 try to be unduly clever, and simply follow the ABI
5445 for unions in that case. */
5446 if (mode == BLKmode)
5447 return function_arg_union_value (bytes, mode, 0, regbase);
5448 else
5449 mclass = MODE_INT;
5451 else if (mclass == MODE_INT
5452 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5453 mode = word_mode;
5456 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5457 && TARGET_FPU)
5458 regno = SPARC_FP_ARG_FIRST;
5459 else
5460 regno = regbase;
5462 return gen_rtx_REG (mode, regno);
5465 /* Do what is necessary for `va_start'. We look at the current function
5466 to determine if stdarg or varargs is used and return the address of
5467 the first unnamed parameter. */
5469 static rtx
5470 sparc_builtin_saveregs (void)
5472 int first_reg = current_function_args_info.words;
5473 rtx address;
5474 int regno;
5476 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5477 emit_move_insn (gen_rtx_MEM (word_mode,
5478 gen_rtx_PLUS (Pmode,
5479 frame_pointer_rtx,
5480 GEN_INT (FIRST_PARM_OFFSET (0)
5481 + (UNITS_PER_WORD
5482 * regno)))),
5483 gen_rtx_REG (word_mode,
5484 SPARC_INCOMING_INT_ARG_FIRST + regno));
5486 address = gen_rtx_PLUS (Pmode,
5487 frame_pointer_rtx,
5488 GEN_INT (FIRST_PARM_OFFSET (0)
5489 + UNITS_PER_WORD * first_reg));
5491 return address;
5494 /* Implement `va_start' for stdarg. */
5496 void
5497 sparc_va_start (tree valist, rtx nextarg)
5499 nextarg = expand_builtin_saveregs ();
5500 std_expand_builtin_va_start (valist, nextarg);
5503 /* Implement `va_arg' for stdarg. */
5505 static tree
5506 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5508 HOST_WIDE_INT size, rsize, align;
5509 tree addr, incr;
5510 bool indirect;
5511 tree ptrtype = build_pointer_type (type);
5513 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5515 indirect = true;
5516 size = rsize = UNITS_PER_WORD;
5517 align = 0;
5519 else
5521 indirect = false;
5522 size = int_size_in_bytes (type);
5523 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5524 align = 0;
5526 if (TARGET_ARCH64)
5528 /* For SPARC64, objects requiring 16-byte alignment get it. */
5529 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5530 align = 2 * UNITS_PER_WORD;
5532 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5533 are left-justified in their slots. */
5534 if (AGGREGATE_TYPE_P (type))
5536 if (size == 0)
5537 size = rsize = UNITS_PER_WORD;
5538 else
5539 size = rsize;
5544 incr = valist;
5545 if (align)
5547 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5548 ssize_int (align - 1)));
5549 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5550 ssize_int (-align)));
5553 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5554 addr = incr;
5556 if (BYTES_BIG_ENDIAN && size < rsize)
5557 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5558 ssize_int (rsize - size)));
5560 if (indirect)
5562 addr = fold_convert (build_pointer_type (ptrtype), addr);
5563 addr = build_va_arg_indirect_ref (addr);
5565 /* If the address isn't aligned properly for the type,
5566 we may need to copy to a temporary.
5567 FIXME: This is inefficient. Usually we can do this
5568 in registers. */
5569 else if (align == 0
5570 && TYPE_ALIGN (type) > BITS_PER_WORD)
5572 tree tmp = create_tmp_var (type, "va_arg_tmp");
5573 tree dest_addr = build_fold_addr_expr (tmp);
5575 tree copy = build_function_call_expr
5576 (implicit_built_in_decls[BUILT_IN_MEMCPY],
5577 tree_cons (NULL_TREE, dest_addr,
5578 tree_cons (NULL_TREE, addr,
5579 tree_cons (NULL_TREE, size_int (rsize),
5580 NULL_TREE))));
5582 gimplify_and_add (copy, pre_p);
5583 addr = dest_addr;
5585 else
5586 addr = fold_convert (ptrtype, addr);
5588 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5589 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5590 gimplify_and_add (incr, post_p);
5592 return build_va_arg_indirect_ref (addr);
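/* A worked instance of the arithmetic above on 64-bit SPARC
   (big-endian, UNITS_PER_WORD == 8): for a 4-byte int, size == 4 and
   rsize == 8, so the value is read from incr + 4 -- the low half of
   its right-justified 8-byte slot -- while valist is advanced by the
   full rsize of 8.  */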
5595 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5596 Specify whether the vector mode is supported by the hardware. */
5598 static bool
5599 sparc_vector_mode_supported_p (enum machine_mode mode)
5601 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5604 /* Return the string to output an unconditional branch to LABEL, which is
5605 the operand number of the label.
5607 DEST is the destination insn (i.e. the label), INSN is the source. */
5609 const char *
5610 output_ubranch (rtx dest, int label, rtx insn)
5612 static char string[64];
5613 bool v9_form = false;
5614 char *p;
5616 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5618 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5619 - INSN_ADDRESSES (INSN_UID (insn)));
5620 /* Leave some instructions for "slop". */
5621 if (delta >= -260000 && delta < 260000)
5622 v9_form = true;
5625 if (v9_form)
5626 strcpy (string, "ba%*,pt\t%%xcc, ");
5627 else
5628 strcpy (string, "b%*\t");
5630 p = strchr (string, '\0');
5631 *p++ = '%';
5632 *p++ = 'l';
5633 *p++ = '0' + label;
5634 *p++ = '%';
5635 *p++ = '(';
5636 *p = '\0';
5638 return string;
5641 /* Return the string to output a conditional branch to LABEL, which is
5642 the operand number of the label. OP is the conditional expression.
5643 XEXP (OP, 0) is assumed to be a condition code register (integer or
5644 floating point) and its mode specifies what kind of comparison we made.
5646 DEST is the destination insn (i.e. the label), INSN is the source.
5648 REVERSED is nonzero if we should reverse the sense of the comparison.
5650 ANNUL is nonzero if we should generate an annulling branch. */
5652 const char *
5653 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5654 rtx insn)
5656 static char string[64];
5657 enum rtx_code code = GET_CODE (op);
5658 rtx cc_reg = XEXP (op, 0);
5659 enum machine_mode mode = GET_MODE (cc_reg);
5660 const char *labelno, *branch;
5661 int spaces = 8, far;
5662 char *p;
5664    /* v9 branches are limited to +-1MB.  If it is too far away,
5665       change
5667       bne,pt %xcc, .LC30
5669       to
5671       be,pn %xcc, .+12
5672        nop
5673       ba .LC30
5675       and
5677       fbne,a,pn %fcc2, .LC29
5679       to
5681       fbe,pt %fcc2, .+16
5682        nop
5683       ba .LC29  */
5685 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5686 if (reversed ^ far)
5688 /* Reversal of FP compares takes care -- an ordered compare
5689 becomes an unordered compare and vice versa. */
5690 if (mode == CCFPmode || mode == CCFPEmode)
5691 code = reverse_condition_maybe_unordered (code);
5692 else
5693 code = reverse_condition (code);
5696 /* Start by writing the branch condition. */
5697 if (mode == CCFPmode || mode == CCFPEmode)
5699 switch (code)
5701 case NE:
5702 branch = "fbne";
5703 break;
5704 case EQ:
5705 branch = "fbe";
5706 break;
5707 case GE:
5708 branch = "fbge";
5709 break;
5710 case GT:
5711 branch = "fbg";
5712 break;
5713 case LE:
5714 branch = "fble";
5715 break;
5716 case LT:
5717 branch = "fbl";
5718 break;
5719 case UNORDERED:
5720 branch = "fbu";
5721 break;
5722 case ORDERED:
5723 branch = "fbo";
5724 break;
5725 case UNGT:
5726 branch = "fbug";
5727 break;
5728 case UNLT:
5729 branch = "fbul";
5730 break;
5731 case UNEQ:
5732 branch = "fbue";
5733 break;
5734 case UNGE:
5735 branch = "fbuge";
5736 break;
5737 case UNLE:
5738 branch = "fbule";
5739 break;
5740 case LTGT:
5741 branch = "fblg";
5742 break;
5744 default:
5745 gcc_unreachable ();
5748 /* ??? !v9: FP branches cannot be preceded by another floating point
5749 insn. Because there is currently no concept of pre-delay slots,
5750 we can fix this only by always emitting a nop before a floating
5751 point branch. */
5753 string[0] = '\0';
5754 if (! TARGET_V9)
5755 strcpy (string, "nop\n\t");
5756 strcat (string, branch);
5758 else
5760 switch (code)
5762 case NE:
5763 branch = "bne";
5764 break;
5765 case EQ:
5766 branch = "be";
5767 break;
5768 case GE:
5769 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5770 branch = "bpos";
5771 else
5772 branch = "bge";
5773 break;
5774 case GT:
5775 branch = "bg";
5776 break;
5777 case LE:
5778 branch = "ble";
5779 break;
5780 case LT:
5781 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5782 branch = "bneg";
5783 else
5784 branch = "bl";
5785 break;
5786 case GEU:
5787 branch = "bgeu";
5788 break;
5789 case GTU:
5790 branch = "bgu";
5791 break;
5792 case LEU:
5793 branch = "bleu";
5794 break;
5795 case LTU:
5796 branch = "blu";
5797 break;
5799 default:
5800 gcc_unreachable ();
5802 strcpy (string, branch);
5804 spaces -= strlen (branch);
5805 p = strchr (string, '\0');
5807 /* Now add the annulling, the label, and a possible noop. */
5808 if (annul && ! far)
5810 strcpy (p, ",a");
5811 p += 2;
5812 spaces -= 2;
5815 if (TARGET_V9)
5817 rtx note;
5818 int v8 = 0;
5820 if (! far && insn && INSN_ADDRESSES_SET_P ())
5822 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5823 - INSN_ADDRESSES (INSN_UID (insn)));
5824 /* Leave some instructions for "slop". */
5825 if (delta < -260000 || delta >= 260000)
5826 v8 = 1;
5829 if (mode == CCFPmode || mode == CCFPEmode)
5831 static char v9_fcc_labelno[] = "%%fccX, ";
5832 /* Set the char indicating the number of the fcc reg to use. */
5833 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5834 labelno = v9_fcc_labelno;
5835 if (v8)
5837 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
5838 labelno = "";
5841 else if (mode == CCXmode || mode == CCX_NOOVmode)
5843 labelno = "%%xcc, ";
5844 gcc_assert (! v8);
5846 else
5848 labelno = "%%icc, ";
5849 if (v8)
5850 labelno = "";
5853 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
5855 strcpy (p,
5856 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
5857 ? ",pt" : ",pn");
5858 p += 3;
5859 spaces -= 3;
5862 else
5863 labelno = "";
5865 if (spaces > 0)
5866 *p++ = '\t';
5867 else
5868 *p++ = ' ';
5869 strcpy (p, labelno);
5870 p = strchr (p, '\0');
5871 if (far)
5873 strcpy (p, ".+12\n\t nop\n\tb\t");
5874 /* Skip the next insn if requested or
5875 if we know that it will be a nop. */
5876 if (annul || ! final_sequence)
5877 p[3] = '6';
5878 p += 14;
5880 *p++ = '%';
5881 *p++ = 'l';
5882 *p++ = label + '0';
5883 *p++ = '%';
5884 *p++ = '#';
5885 *p = '\0';
5887 return string;
5890 /* Emit a library call comparison between floating point X and Y.
5891 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
5892 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
5893 values as arguments instead of the TFmode registers themselves,
5894 that's why we cannot call emit_float_lib_cmp. */
5895 void
5896 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
5898 const char *qpfunc;
5899 rtx slot0, slot1, result, tem, tem2;
5900 enum machine_mode mode;
5902 switch (comparison)
5904 case EQ:
5905 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
5906 break;
5908 case NE:
5909 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
5910 break;
5912 case GT:
5913 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
5914 break;
5916 case GE:
5917 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
5918 break;
5920 case LT:
5921 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
5922 break;
5924 case LE:
5925 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
5926 break;
5928 case ORDERED:
5929 case UNORDERED:
5930 case UNGT:
5931 case UNLT:
5932 case UNEQ:
5933 case UNGE:
5934 case UNLE:
5935 case LTGT:
5936 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
5937 break;
5939 default:
5940 gcc_unreachable ();
5943 if (TARGET_ARCH64)
5945 if (GET_CODE (x) != MEM)
5947 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
5948 emit_move_insn (slot0, x);
5950 else
5951 slot0 = x;
5953 if (GET_CODE (y) != MEM)
5955 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
5956 emit_move_insn (slot1, y);
5958 else
5959 slot1 = y;
5961 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
5962 DImode, 2,
5963 XEXP (slot0, 0), Pmode,
5964 XEXP (slot1, 0), Pmode);
5966 mode = DImode;
5968 else
5970 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
5971 SImode, 2,
5972 x, TFmode, y, TFmode);
5974 mode = SImode;
5978 /* Immediately move the result of the libcall into a pseudo
5979 register so reload doesn't clobber the value if it needs
5980 the return register for a spill reg. */
5981 result = gen_reg_rtx (mode);
5982 emit_move_insn (result, hard_libcall_value (mode));
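/* The comparison routines return 0 for equal, 1 for less, 2 for greater
   and 3 for unordered (the fcc encoding); the tests below decode the
   unordered comparisons from that encoding.  */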
5984 switch (comparison)
5986 default:
5987 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
5988 break;
5989 case ORDERED:
5990 case UNORDERED:
5991 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
5992 NULL_RTX, mode, 0);
5993 break;
5994 case UNGT:
5995 case UNGE:
5996 emit_cmp_insn (result, const1_rtx,
5997 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
5998 break;
5999 case UNLE:
6000 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6001 break;
6002 case UNLT:
6003 tem = gen_reg_rtx (mode);
6004 if (TARGET_ARCH32)
6005 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6006 else
6007 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6008 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6009 break;
6010 case UNEQ:
6011 case LTGT:
6012 tem = gen_reg_rtx (mode);
6013 if (TARGET_ARCH32)
6014 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6015 else
6016 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6017 tem2 = gen_reg_rtx (mode);
6018 if (TARGET_ARCH32)
6019 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6020 else
6021 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6022 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6023 NULL_RTX, mode, 0);
6024 break;
6028 /* Generate an unsigned DImode to FP conversion. This is the same code
6029 optabs would emit if we didn't have TFmode patterns. */
6031 void
6032 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6034 rtx neglab, donelab, i0, i1, f0, in, out;
6036 out = operands[0];
6037 in = force_reg (DImode, operands[1]);
6038 neglab = gen_label_rtx ();
6039 donelab = gen_label_rtx ();
6040 i0 = gen_reg_rtx (DImode);
6041 i1 = gen_reg_rtx (DImode);
6042 f0 = gen_reg_rtx (mode);
6044 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6046 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6047 emit_jump_insn (gen_jump (donelab));
6048 emit_barrier ();
6050 emit_label (neglab);
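/* The value has its top bit set, so it cannot be converted directly.
   Halve it, keeping the low bit ORed in so rounding is not biased,
   convert, and then double the result.  */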
6052 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6053 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6054 emit_insn (gen_iordi3 (i0, i0, i1));
6055 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6056 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6058 emit_label (donelab);
6061 /* Generate an FP to unsigned DImode conversion. This is the same code
6062 optabs would emit if we didn't have TFmode patterns. */
6064 void
6065 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6067 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6069 out = operands[0];
6070 in = force_reg (mode, operands[1]);
6071 neglab = gen_label_rtx ();
6072 donelab = gen_label_rtx ();
6073 i0 = gen_reg_rtx (DImode);
6074 i1 = gen_reg_rtx (DImode);
6075 limit = gen_reg_rtx (mode);
6076 f0 = gen_reg_rtx (mode);
6078 emit_move_insn (limit,
6079 CONST_DOUBLE_FROM_REAL_VALUE (
6080 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6081 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6083 emit_insn (gen_rtx_SET (VOIDmode,
6084 out,
6085 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6086 emit_jump_insn (gen_jump (donelab));
6087 emit_barrier ();
6089 emit_label (neglab);
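/* The value is >= 2^63, which a signed conversion cannot handle.
   Subtract 2^63, convert, and set the high bit of the result by
   XORing in 1 << 63.  */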
6091 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6092 emit_insn (gen_rtx_SET (VOIDmode,
6094 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6095 emit_insn (gen_movdi (i1, const1_rtx));
6096 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6097 emit_insn (gen_xordi3 (out, i0, i1));
6099 emit_label (donelab);
6102 /* Return the string to output a conditional branch to LABEL, testing
6103 register REG. LABEL is the operand number of the label; REG is the
6104 operand number of the reg. OP is the conditional expression. The mode
6105 of REG says what kind of comparison we made.
6107 DEST is the destination insn (i.e. the label), INSN is the source.
6109 REVERSED is nonzero if we should reverse the sense of the comparison.
6111 ANNUL is nonzero if we should generate an annulling branch. */
6113 const char *
6114 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6115 int annul, rtx insn)
6117 static char string[64];
6118 enum rtx_code code = GET_CODE (op);
6119 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6120 rtx note;
6121 int far;
6122 char *p;
6124 /* Branch-on-register instructions are limited to +-128KB.  If the
6125 target is too far away, change
6127 brnz,pt %g1, .LC30
6131 into brz,pn %g1, .+12
6133 ba,pt %xcc, .LC30
6137 and change brgez,a,pn %o1, .LC29
6141 into brlz,pt %o1, .+16
6143 ba,pt %xcc, .LC29  */
6145 far = get_attr_length (insn) >= 3;
6147 /* If REVERSED or emitting a far branch, just reverse the condition.  */
6148 if (reversed ^ far)
6149 code = reverse_condition (code);
6151 /* Only 64 bit versions of these instructions exist. */
6152 gcc_assert (mode == DImode);
6154 /* Start by writing the branch condition. */
6156 switch (code)
6158 case NE:
6159 strcpy (string, "brnz");
6160 break;
6162 case EQ:
6163 strcpy (string, "brz");
6164 break;
6166 case GE:
6167 strcpy (string, "brgez");
6168 break;
6170 case LT:
6171 strcpy (string, "brlz");
6172 break;
6174 case LE:
6175 strcpy (string, "brlez");
6176 break;
6178 case GT:
6179 strcpy (string, "brgz");
6180 break;
6182 default:
6183 gcc_unreachable ();
6186 p = strchr (string, '\0');
6188 /* Now add the annulling, reg, label, and nop. */
6189 if (annul && ! far)
6191 strcpy (p, ",a");
6192 p += 2;
6195 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6197 strcpy (p,
6198 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6199 ? ",pt" : ",pn");
6200 p += 3;
6203 *p = p < string + 8 ? '\t' : ' ';
6204 p++;
6205 *p++ = '%';
6206 *p++ = '0' + reg;
6207 *p++ = ',';
6208 *p++ = ' ';
6209 if (far)
6211 int veryfar = 1, delta;
6213 if (INSN_ADDRESSES_SET_P ())
6215 delta = (INSN_ADDRESSES (INSN_UID (dest))
6216 - INSN_ADDRESSES (INSN_UID (insn)));
6217 /* Leave some instructions for "slop". */
6218 if (delta >= -260000 && delta < 260000)
6219 veryfar = 0;
6222 strcpy (p, ".+12\n\t nop\n\t");
6223 /* Skip the next insn if requested or
6224 if we know that it will be a nop. */
6225 if (annul || ! final_sequence)
6226 p[3] = '6';
6227 p += 12;
6228 if (veryfar)
6230 strcpy (p, "b\t");
6231 p += 2;
6233 else
6235 strcpy (p, "ba,pt\t%%xcc, ");
6236 p += 13;
6239 *p++ = '%';
6240 *p++ = 'l';
6241 *p++ = '0' + label;
6242 *p++ = '%';
6243 *p++ = '#';
6244 *p = '\0';
6246 return string;
6249 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6250 Such instructions cannot be used in the delay slot of a return insn on V9.
6251 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
6254 static int
6255 epilogue_renumber (register rtx *where, int test)
6257 register const char *fmt;
6258 register int i;
6259 register enum rtx_code code;
6261 if (*where == 0)
6262 return 0;
6264 code = GET_CODE (*where);
6266 switch (code)
6268 case REG:
6269 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6270 return 1;
6271 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6272 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
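/* FALLTHROUGH */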
6273 case SCRATCH:
6274 case CC0:
6275 case PC:
6276 case CONST_INT:
6277 case CONST_DOUBLE:
6278 return 0;
6280 /* Do not replace the frame pointer with the stack pointer because
6281 it can cause the delayed instruction to load below the stack.
6282 This occurs when instructions like:
6284 (set (reg/i:SI 24 %i0)
6285 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6286 (const_int -20 [0xffffffec])) 0))
6288 are in the return delayed slot. */
6289 case PLUS:
6290 if (GET_CODE (XEXP (*where, 0)) == REG
6291 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6292 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6293 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6294 return 1;
6295 break;
6297 case MEM:
6298 if (SPARC_STACK_BIAS
6299 && GET_CODE (XEXP (*where, 0)) == REG
6300 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6301 return 1;
6302 break;
6304 default:
6305 break;
6308 fmt = GET_RTX_FORMAT (code);
6310 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6312 if (fmt[i] == 'E')
6314 register int j;
6315 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6316 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6317 return 1;
6319 else if (fmt[i] == 'e'
6320 && epilogue_renumber (&(XEXP (*where, i)), test))
6321 return 1;
6323 return 0;
6326 /* Leaf functions and non-leaf functions have different needs. */
6328 static const int
6329 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6331 static const int
6332 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6334 static const int *const reg_alloc_orders[] = {
6335 reg_leaf_alloc_order,
6336 reg_nonleaf_alloc_order};
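/* regs_ever_live[15] tracks %o7, which call instructions clobber, so it
   effectively distinguishes non-leaf from leaf functions here.  */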
6338 void
6339 order_regs_for_local_alloc (void)
6341 static int last_order_nonleaf = 1;
6343 if (regs_ever_live[15] != last_order_nonleaf)
6345 last_order_nonleaf = !last_order_nonleaf;
6346 memcpy ((char *) reg_alloc_order,
6347 (const char *) reg_alloc_orders[last_order_nonleaf],
6348 FIRST_PSEUDO_REGISTER * sizeof (int));
6352 /* Return 1 if REG and MEM are legitimate enough to allow the various
6353 mem<-->reg splits to be run. */
6356 sparc_splitdi_legitimate (rtx reg, rtx mem)
6358 /* Punt if we are here by mistake. */
6359 gcc_assert (reload_completed);
6361 /* We must have an offsettable memory reference. */
6362 if (! offsettable_memref_p (mem))
6363 return 0;
6365 /* If we have legitimate args for ldd/std, we do not want
6366 the split to happen. */
6367 if ((REGNO (reg) % 2) == 0
6368 && mem_min_alignment (mem, 8))
6369 return 0;
6371 /* Success. */
6372 return 1;
6375 /* Return 1 if x and y are some kind of REG and they refer to
6376 different hard registers. This test is guaranteed to be
6377 run after reload. */
6380 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6382 if (GET_CODE (x) != REG)
6383 return 0;
6384 if (GET_CODE (y) != REG)
6385 return 0;
6386 if (REGNO (x) == REGNO (y))
6387 return 0;
6388 return 1;
6391 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6392 This makes them candidates for using ldd and std insns.
6394 Note reg1 and reg2 *must* be hard registers. */
6397 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6399 /* We might have been passed a SUBREG. */
6400 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6401 return 0;
6403 if (REGNO (reg1) % 2 != 0)
6404 return 0;
6406 /* Integer ldd is deprecated in SPARC V9.  */
6407 if (TARGET_V9 && REGNO (reg1) < 32)
6408 return 0;
6410 return (REGNO (reg1) == REGNO (reg2) - 1);
6413 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6414 an ldd or std insn.
6416 This can only happen when addr1 and addr2, the addresses in mem1
6417 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6418 addr1 must also be aligned on a 64-bit boundary.
6420 Also, if dependent_reg_rtx is not null, it must not be used to
6421 compute the address for mem1, i.e. we cannot optimize a sequence
6422 like:
6423 ld [%o0], %o0
6424 ld [%o0 + 4], %o1
6426 ldd [%o0], %o0
6427 nor:
6428 ld [%g3 + 4], %g3
6429 ld [%g3], %g2
6431 ldd [%g3], %g2
6433 But, note that the transformation from:
6434 ld [%g2 + 4], %g3
6435 ld [%g2], %g2
6437 ldd [%g2], %g2
6438 is perfectly fine. Thus, the peephole2 patterns always pass us
6439 the destination register of the first load, never the second one.
6441 For stores we don't have a similar problem, so dependent_reg_rtx is
6442 NULL_RTX. */
6445 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6447 rtx addr1, addr2;
6448 unsigned int reg1;
6449 HOST_WIDE_INT offset1;
6451 /* The mems cannot be volatile. */
6452 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6453 return 0;
6455 /* MEM1 should be aligned on a 64-bit boundary. */
6456 if (MEM_ALIGN (mem1) < 64)
6457 return 0;
6459 addr1 = XEXP (mem1, 0);
6460 addr2 = XEXP (mem2, 0);
6462 /* Extract a register number and offset (if used) from the first addr. */
6463 if (GET_CODE (addr1) == PLUS)
6465 /* If not a REG, return zero. */
6466 if (GET_CODE (XEXP (addr1, 0)) != REG)
6467 return 0;
6468 else
6470 reg1 = REGNO (XEXP (addr1, 0));
6471 /* The offset must be constant! */
6472 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6473 return 0;
6474 offset1 = INTVAL (XEXP (addr1, 1));
6477 else if (GET_CODE (addr1) != REG)
6478 return 0;
6479 else
6481 reg1 = REGNO (addr1);
6482 /* This was a simple (mem (reg)) expression. Offset is 0. */
6483 offset1 = 0;
6486 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
6487 if (GET_CODE (addr2) != PLUS)
6488 return 0;
6490 if (GET_CODE (XEXP (addr2, 0)) != REG
6491 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6492 return 0;
6494 if (reg1 != REGNO (XEXP (addr2, 0)))
6495 return 0;
6497 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6498 return 0;
6500 /* The first offset must be evenly divisible by 8 to ensure the
6501 address is 64 bit aligned. */
6502 if (offset1 % 8 != 0)
6503 return 0;
6505 /* The offset for the second addr must be 4 more than the first addr. */
6506 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6507 return 0;
6509 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6510 instructions. */
6511 return 1;
6514 /* Return 1 if reg is a pseudo, or is the first register in
6515 a hard register pair. This makes it a candidate for use in
6516 ldd and std insns. */
6519 register_ok_for_ldd (rtx reg)
6521 /* We might have been passed a SUBREG. */
6522 if (GET_CODE (reg) != REG)
6523 return 0;
6525 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6526 return (REGNO (reg) % 2 == 0);
6527 else
6528 return 1;
6531 /* Print operand X (an rtx) in assembler syntax to file FILE.
6532 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6533 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6535 void
6536 print_operand (FILE *file, rtx x, int code)
6538 switch (code)
6540 case '#':
6541 /* Output an insn in a delay slot. */
6542 if (final_sequence)
6543 sparc_indent_opcode = 1;
6544 else
6545 fputs ("\n\t nop", file);
6546 return;
6547 case '*':
6548 /* Output an annul flag if there's nothing for the delay slot and we
6549 are optimizing. This is always used with '(' below.
6550 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6551 this is a dbx bug. So, we only do this when optimizing.
6552 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6553 Always emit a nop in case the next instruction is a branch. */
6554 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6555 fputs (",a", file);
6556 return;
6557 case '(':
6558 /* Output a 'nop' if there's nothing for the delay slot and we are
6559 not optimizing. This is always used with '*' above. */
6560 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6561 fputs ("\n\t nop", file);
6562 else if (final_sequence)
6563 sparc_indent_opcode = 1;
6564 return;
6565 case ')':
6566 /* Output the right displacement from the saved PC on function return.
6567 The caller may have placed an "unimp" insn immediately after the call
6568 so we have to account for it. This insn is used in the 32-bit ABI
6569 when calling a function that returns a non zero-sized structure. The
6570 64-bit ABI doesn't have it. Be careful to have this test be the same
6571 as that used on the call. */
6572 if (! TARGET_ARCH64
6573 && current_function_returns_struct
6574 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6575 == INTEGER_CST)
6576 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6577 fputs ("12", file);
6578 else
6579 fputc ('8', file);
6580 return;
6581 case '_':
6582 /* Output the Embedded Medium/Anywhere code model base register. */
6583 fputs (EMBMEDANY_BASE_REG, file);
6584 return;
6585 case '&':
6586 /* Print some local dynamic TLS name. */
6587 assemble_name (file, get_some_local_dynamic_name ());
6588 return;
6590 case 'Y':
6591 /* Adjust the operand to take into account a RESTORE operation. */
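/* In a RESTORE, destination registers are named in the caller's window,
   where the callee's %i0-%i7 (regs 24-31) appear as %o0-%o7 (regs
   8-15); hence the shift by 16 below.  */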
6592 if (GET_CODE (x) == CONST_INT)
6593 break;
6594 else if (GET_CODE (x) != REG)
6595 output_operand_lossage ("invalid %%Y operand");
6596 else if (REGNO (x) < 8)
6597 fputs (reg_names[REGNO (x)], file);
6598 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6599 fputs (reg_names[REGNO (x)-16], file);
6600 else
6601 output_operand_lossage ("invalid %%Y operand");
6602 return;
6603 case 'L':
6604 /* Print out the low order register name of a register pair. */
6605 if (WORDS_BIG_ENDIAN)
6606 fputs (reg_names[REGNO (x)+1], file);
6607 else
6608 fputs (reg_names[REGNO (x)], file);
6609 return;
6610 case 'H':
6611 /* Print out the high order register name of a register pair. */
6612 if (WORDS_BIG_ENDIAN)
6613 fputs (reg_names[REGNO (x)], file);
6614 else
6615 fputs (reg_names[REGNO (x)+1], file);
6616 return;
6617 case 'R':
6618 /* Print out the second register name of a register pair or quad.
6619 I.e., R (%o0) => %o1. */
6620 fputs (reg_names[REGNO (x)+1], file);
6621 return;
6622 case 'S':
6623 /* Print out the third register name of a register quad.
6624 I.e., S (%o0) => %o2. */
6625 fputs (reg_names[REGNO (x)+2], file);
6626 return;
6627 case 'T':
6628 /* Print out the fourth register name of a register quad.
6629 I.e., T (%o0) => %o3. */
6630 fputs (reg_names[REGNO (x)+3], file);
6631 return;
6632 case 'x':
6633 /* Print a condition code register. */
6634 if (REGNO (x) == SPARC_ICC_REG)
6636 /* We don't handle CC[X]_NOOVmode because they're not supposed
6637 to occur here. */
6638 if (GET_MODE (x) == CCmode)
6639 fputs ("%icc", file);
6640 else if (GET_MODE (x) == CCXmode)
6641 fputs ("%xcc", file);
6642 else
6643 gcc_unreachable ();
6645 else
6646 /* %fccN register */
6647 fputs (reg_names[REGNO (x)], file);
6648 return;
6649 case 'm':
6650 /* Print the operand's address only. */
6651 output_address (XEXP (x, 0));
6652 return;
6653 case 'r':
6654 /* In this case we need a register. Use %g0 if the
6655 operand is const0_rtx. */
6656 if (x == const0_rtx
6657 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6659 fputs ("%g0", file);
6660 return;
6662 else
6663 break;
6665 case 'A':
6666 switch (GET_CODE (x))
6668 case IOR: fputs ("or", file); break;
6669 case AND: fputs ("and", file); break;
6670 case XOR: fputs ("xor", file); break;
6671 default: output_operand_lossage ("invalid %%A operand");
6673 return;
6675 case 'B':
6676 switch (GET_CODE (x))
6678 case IOR: fputs ("orn", file); break;
6679 case AND: fputs ("andn", file); break;
6680 case XOR: fputs ("xnor", file); break;
6681 default: output_operand_lossage ("invalid %%B operand");
6683 return;
6685 /* These are used by the conditional move instructions. */
6686 case 'c' :
6687 case 'C':
6689 enum rtx_code rc = GET_CODE (x);
6691 if (code == 'c')
6693 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6694 if (mode == CCFPmode || mode == CCFPEmode)
6695 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6696 else
6697 rc = reverse_condition (GET_CODE (x));
6699 switch (rc)
6701 case NE: fputs ("ne", file); break;
6702 case EQ: fputs ("e", file); break;
6703 case GE: fputs ("ge", file); break;
6704 case GT: fputs ("g", file); break;
6705 case LE: fputs ("le", file); break;
6706 case LT: fputs ("l", file); break;
6707 case GEU: fputs ("geu", file); break;
6708 case GTU: fputs ("gu", file); break;
6709 case LEU: fputs ("leu", file); break;
6710 case LTU: fputs ("lu", file); break;
6711 case LTGT: fputs ("lg", file); break;
6712 case UNORDERED: fputs ("u", file); break;
6713 case ORDERED: fputs ("o", file); break;
6714 case UNLT: fputs ("ul", file); break;
6715 case UNLE: fputs ("ule", file); break;
6716 case UNGT: fputs ("ug", file); break;
6717 case UNGE: fputs ("uge", file); break;
6718 case UNEQ: fputs ("ue", file); break;
6719 default: output_operand_lossage (code == 'c'
6720 ? "invalid %%c operand"
6721 : "invalid %%C operand");
6723 return;
6726 /* These are used by the movr instruction pattern. */
6727 case 'd':
6728 case 'D':
6730 enum rtx_code rc = (code == 'd'
6731 ? reverse_condition (GET_CODE (x))
6732 : GET_CODE (x));
6733 switch (rc)
6735 case NE: fputs ("ne", file); break;
6736 case EQ: fputs ("e", file); break;
6737 case GE: fputs ("gez", file); break;
6738 case LT: fputs ("lz", file); break;
6739 case LE: fputs ("lez", file); break;
6740 case GT: fputs ("gz", file); break;
6741 default: output_operand_lossage (code == 'd'
6742 ? "invalid %%d operand"
6743 : "invalid %%D operand");
6745 return;
6748 case 'b':
6750 /* Print a sign-extended character. */
6751 int i = trunc_int_for_mode (INTVAL (x), QImode);
6752 fprintf (file, "%d", i);
6753 return;
6756 case 'f':
6757 /* Operand must be a MEM; write its address. */
6758 if (GET_CODE (x) != MEM)
6759 output_operand_lossage ("invalid %%f operand");
6760 output_address (XEXP (x, 0));
6761 return;
6763 case 's':
6765 /* Print a sign-extended 32-bit value. */
6766 HOST_WIDE_INT i;
6767 if (GET_CODE(x) == CONST_INT)
6768 i = INTVAL (x);
6769 else if (GET_CODE(x) == CONST_DOUBLE)
6770 i = CONST_DOUBLE_LOW (x);
6771 else
6773 output_operand_lossage ("invalid %%s operand");
6774 return;
6776 i = trunc_int_for_mode (i, SImode);
6777 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6778 return;
6781 case 0:
6782 /* Do nothing special. */
6783 break;
6785 default:
6786 /* Undocumented flag. */
6787 output_operand_lossage ("invalid operand output code");
6790 if (GET_CODE (x) == REG)
6791 fputs (reg_names[REGNO (x)], file);
6792 else if (GET_CODE (x) == MEM)
6794 fputc ('[', file);
6795 /* Poor Sun assembler doesn't understand absolute addressing. */
6796 if (CONSTANT_P (XEXP (x, 0)))
6797 fputs ("%g0+", file);
6798 output_address (XEXP (x, 0));
6799 fputc (']', file);
6801 else if (GET_CODE (x) == HIGH)
6803 fputs ("%hi(", file);
6804 output_addr_const (file, XEXP (x, 0));
6805 fputc (')', file);
6807 else if (GET_CODE (x) == LO_SUM)
6809 print_operand (file, XEXP (x, 0), 0);
6810 if (TARGET_CM_MEDMID)
6811 fputs ("+%l44(", file);
6812 else
6813 fputs ("+%lo(", file);
6814 output_addr_const (file, XEXP (x, 1));
6815 fputc (')', file);
6817 else if (GET_CODE (x) == CONST_DOUBLE
6818 && (GET_MODE (x) == VOIDmode
6819 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6821 if (CONST_DOUBLE_HIGH (x) == 0)
6822 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6823 else if (CONST_DOUBLE_HIGH (x) == -1
6824 && CONST_DOUBLE_LOW (x) < 0)
6825 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
6826 else
6827 output_operand_lossage ("long long constant not a valid immediate operand");
6829 else if (GET_CODE (x) == CONST_DOUBLE)
6830 output_operand_lossage ("floating point constant not a valid immediate operand");
6831 else { output_addr_const (file, x); }
6834 /* Target hook for assembling integer objects. The sparc version has
6835 special handling for aligned DI-mode objects. */
6837 static bool
6838 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
6840 /* ??? We only output .xword directives for symbols, and only in
6841 environments where the assembler can handle them.  */
6842 if (aligned_p && size == 8
6843 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
6845 if (TARGET_V9)
6847 assemble_integer_with_op ("\t.xword\t", x);
6848 return true;
6850 else
6852 assemble_aligned_integer (4, const0_rtx);
6853 assemble_aligned_integer (4, x);
6854 return true;
6857 return default_assemble_integer (x, size, aligned_p);
6860 /* Return the value of a code used in the .proc pseudo-op that says
6861 what kind of result this function returns. For non-C types, we pick
6862 the closest C type. */
6864 #ifndef SHORT_TYPE_SIZE
6865 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
6866 #endif
6868 #ifndef INT_TYPE_SIZE
6869 #define INT_TYPE_SIZE BITS_PER_WORD
6870 #endif
6872 #ifndef LONG_TYPE_SIZE
6873 #define LONG_TYPE_SIZE BITS_PER_WORD
6874 #endif
6876 #ifndef LONG_LONG_TYPE_SIZE
6877 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
6878 #endif
6880 #ifndef FLOAT_TYPE_SIZE
6881 #define FLOAT_TYPE_SIZE BITS_PER_WORD
6882 #endif
6884 #ifndef DOUBLE_TYPE_SIZE
6885 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6886 #endif
6888 #ifndef LONG_DOUBLE_TYPE_SIZE
6889 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6890 #endif
6892 unsigned long
6893 sparc_type_code (register tree type)
6895 register unsigned long qualifiers = 0;
6896 register unsigned shift;
6898 /* Only the first 30 bits of the qualifier are valid. We must refrain from
6899 setting more, since some assemblers will give an error for this. Also,
6900 we must be careful to avoid shifts of 32 bits or more to avoid getting
6901 unpredictable results. */
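/* For example, "unsigned char **" yields 12 | (1 << 6) | (1 << 8): the
   base code 12 for unsigned char plus one pointer qualifier in each of
   the first two 2-bit fields.  */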
6903 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
6905 switch (TREE_CODE (type))
6907 case ERROR_MARK:
6908 return qualifiers;
6910 case ARRAY_TYPE:
6911 qualifiers |= (3 << shift);
6912 break;
6914 case FUNCTION_TYPE:
6915 case METHOD_TYPE:
6916 qualifiers |= (2 << shift);
6917 break;
6919 case POINTER_TYPE:
6920 case REFERENCE_TYPE:
6921 case OFFSET_TYPE:
6922 qualifiers |= (1 << shift);
6923 break;
6925 case RECORD_TYPE:
6926 return (qualifiers | 8);
6928 case UNION_TYPE:
6929 case QUAL_UNION_TYPE:
6930 return (qualifiers | 9);
6932 case ENUMERAL_TYPE:
6933 return (qualifiers | 10);
6935 case VOID_TYPE:
6936 return (qualifiers | 16);
6938 case INTEGER_TYPE:
6939 /* If this is a range type, consider it to be the underlying
6940 type. */
6941 if (TREE_TYPE (type) != 0)
6942 break;
6944 /* Carefully distinguish all the standard types of C,
6945 without messing up if the language is not C. We do this by
6946 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
6947 look at both the names and the above fields, but that's redundant.
6948 Any type whose size is between two C types will be considered
6949 to be the wider of the two types. Also, we do not have a
6950 special code to use for "long long", so anything wider than
6951 long is treated the same. Note that we can't distinguish
6952 between "int" and "long" in this code if they are the same
6953 size, but that's fine, since neither can the assembler. */
6955 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
6956 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
6958 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
6959 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
6961 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
6962 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
6964 else
6965 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
6967 case REAL_TYPE:
6968 /* If this is a range type, consider it to be the underlying
6969 type. */
6970 if (TREE_TYPE (type) != 0)
6971 break;
6973 /* Carefully distinguish all the standard types of C,
6974 without messing up if the language is not C. */
6976 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
6977 return (qualifiers | 6);
6979 else
6980 return (qualifiers | 7);
6982 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
6983 /* ??? We need to distinguish between double and float complex types,
6984 but I don't know how yet because I can't reach this code from
6985 existing front-ends. */
6986 return (qualifiers | 7); /* Who knows? */
6988 case VECTOR_TYPE:
6989 case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
6990 case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
6991 case LANG_TYPE: /* ? */
6992 return qualifiers;
6994 default:
6995 gcc_unreachable (); /* Not a type! */
6999 return qualifiers;
7002 /* Nested function support. */
7004 /* Emit RTL insns to initialize the variable parts of a trampoline.
7005 FNADDR is an RTX for the address of the function's pure code.
7006 CXT is an RTX for the static chain value for the function.
7008 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7009 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7010 (to store insns). This is a bit excessive. Perhaps a different
7011 mechanism would be better here.
7013 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7015 void
7016 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7018 /* SPARC 32-bit trampoline:
7020 sethi %hi(fn), %g1
7021 sethi %hi(static), %g2
7022 jmp %g1+%lo(fn)
7023 or %g2, %lo(static), %g2
7025 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7026 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
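so instruction word 0 below is 0x03000000 | (fn >> 10), word 1 is
0x05000000 | (cxt >> 10), word 2 is 0x81c06000 | (fn & 0x3ff) and
word 3 is 0x8410a000 | (cxt & 0x3ff), which is exactly what the
expand_binop calls assemble at run time.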
7029 emit_move_insn
7030 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7031 expand_binop (SImode, ior_optab,
7032 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7033 size_int (10), 0, 1),
7034 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7035 NULL_RTX, 1, OPTAB_DIRECT));
7037 emit_move_insn
7038 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7039 expand_binop (SImode, ior_optab,
7040 expand_shift (RSHIFT_EXPR, SImode, cxt,
7041 size_int (10), 0, 1),
7042 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7043 NULL_RTX, 1, OPTAB_DIRECT));
7045 emit_move_insn
7046 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7047 expand_binop (SImode, ior_optab,
7048 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7049 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7050 NULL_RTX, 1, OPTAB_DIRECT));
7052 emit_move_insn
7053 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7054 expand_binop (SImode, ior_optab,
7055 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7056 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7057 NULL_RTX, 1, OPTAB_DIRECT));
7059 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7060 aligned on a 16 byte boundary so one flush clears it all. */
7061 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7062 if (sparc_cpu != PROCESSOR_ULTRASPARC
7063 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7064 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7065 plus_constant (tramp, 8)))));
7067 /* Call __enable_execute_stack after writing onto the stack to make sure
7068 the stack address is accessible. */
7069 #ifdef ENABLE_EXECUTE_STACK
7070 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7071 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7072 #endif
7076 /* The 64-bit version is simpler because it makes more sense to load the
7077 values as "immediate" data out of the trampoline. It's also easier since
7078 we can read the PC without clobbering a register. */
7080 void
7081 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7083 /* SPARC 64-bit trampoline:
7085 rd %pc, %g1
7086 ldx [%g1+24], %g5
7087 jmp %g5
7088 ldx [%g1+16], %g5
7089 +16 bytes data
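The four SImode constants stored below are these instructions
pre-assembled; only the two DImode data words at offsets 16 and 24
(the static chain and the function address) are filled in per
trampoline.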
7092 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7093 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7094 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7095 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7096 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7097 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7098 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7099 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7100 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7101 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7102 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7104 if (sparc_cpu != PROCESSOR_ULTRASPARC
7105 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7106 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7108 /* Call __enable_execute_stack after writing onto the stack to make sure
7109 the stack address is accessible. */
7110 #ifdef ENABLE_EXECUTE_STACK
7111 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7112 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7113 #endif
7116 /* Adjust the cost of a scheduling dependency. Return the new cost of
7117 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7119 static int
7120 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7122 enum attr_type insn_type;
7124 if (! recog_memoized (insn))
7125 return 0;
7127 insn_type = get_attr_type (insn);
7129 if (REG_NOTE_KIND (link) == 0)
7131 /* Data dependency; DEP_INSN writes a register that INSN reads some
7132 cycles later. */
7134 /* If a load, then the dependence must be on the memory address;
7135 add an extra "cycle".  Note that the cost could be two cycles
7136 if the reg was written late in an instruction group; we cannot tell
7137 here.  */
7138 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7139 return cost + 3;
7141 /* Get the delay only if the address of the store is the dependence. */
7142 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7144 rtx pat = PATTERN(insn);
7145 rtx dep_pat = PATTERN (dep_insn);
7147 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7148 return cost; /* This should not happen! */
7150 /* The dependency between the two instructions was on the data that
7151 is being stored. Assume that this implies that the address of the
7152 store is not dependent. */
7153 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7154 return cost;
7156 return cost + 3; /* An approximation. */
7159 /* A shift instruction cannot receive its data from an instruction
7160 in the same cycle; add a one cycle penalty. */
7161 if (insn_type == TYPE_SHIFT)
7162 return cost + 3; /* Split before cascade into shift. */
7164 else
7166 /* Anti- or output dependency; DEP_INSN reads/writes a register that
7167 INSN writes some cycles later. */
7169 /* These are only significant for the fpu unit; writing a fp reg before
7170 the fpu has finished with it stalls the processor. */
7172 /* Reusing an integer register causes no problems. */
7173 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7174 return 0;
7177 return cost;
7180 static int
7181 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7183 enum attr_type insn_type, dep_type;
7184 rtx pat = PATTERN(insn);
7185 rtx dep_pat = PATTERN (dep_insn);
7187 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7188 return cost;
7190 insn_type = get_attr_type (insn);
7191 dep_type = get_attr_type (dep_insn);
7193 switch (REG_NOTE_KIND (link))
7195 case 0:
7196 /* Data dependency; DEP_INSN writes a register that INSN reads some
7197 cycles later. */
7199 switch (insn_type)
7201 case TYPE_STORE:
7202 case TYPE_FPSTORE:
7203 /* Get the delay iff the address of the store is the dependence. */
7204 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7205 return cost;
7207 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7208 return cost;
7209 return cost + 3;
7211 case TYPE_LOAD:
7212 case TYPE_SLOAD:
7213 case TYPE_FPLOAD:
7214 /* If a load, then the dependence must be on the memory address. If
7215 the addresses aren't equal, then it might be a false dependency.  */
7216 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7218 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7219 || GET_CODE (SET_DEST (dep_pat)) != MEM
7220 || GET_CODE (SET_SRC (pat)) != MEM
7221 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7222 XEXP (SET_SRC (pat), 0)))
7223 return cost + 2;
7225 return cost + 8;
7227 break;
7229 case TYPE_BRANCH:
7230 /* Compare to branch latency is 0. There is no benefit from
7231 separating compare and branch. */
7232 if (dep_type == TYPE_COMPARE)
7233 return 0;
7234 /* Floating point compare to branch latency is less than
7235 compare to conditional move. */
7236 if (dep_type == TYPE_FPCMP)
7237 return cost - 1;
7238 break;
7239 default:
7240 break;
7242 break;
7244 case REG_DEP_ANTI:
7245 /* Anti-dependencies only penalize the fpu unit. */
7246 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7247 return 0;
7248 break;
7250 default:
7251 break;
7254 return cost;
7257 static int
7258 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
7260 switch (sparc_cpu)
7262 case PROCESSOR_SUPERSPARC:
7263 cost = supersparc_adjust_cost (insn, link, dep, cost);
7264 break;
7265 case PROCESSOR_HYPERSPARC:
7266 case PROCESSOR_SPARCLITE86X:
7267 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7268 break;
7269 default:
7270 break;
7272 return cost;
7275 static void
7276 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7277 int sched_verbose ATTRIBUTE_UNUSED,
7278 int max_ready ATTRIBUTE_UNUSED)
7282 static int
7283 sparc_use_sched_lookahead (void)
7285 if (sparc_cpu == PROCESSOR_ULTRASPARC
7286 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7287 return 4;
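/* This bit mask is a compact membership test over the processor
   enumeration.  */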
7288 if ((1 << sparc_cpu) &
7289 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7290 (1 << PROCESSOR_SPARCLITE86X)))
7291 return 3;
7292 return 0;
7295 static int
7296 sparc_issue_rate (void)
7298 switch (sparc_cpu)
7300 default:
7301 return 1;
7302 case PROCESSOR_V9:
7303 /* Assume V9 processors are capable of at least dual-issue. */
7304 return 2;
7305 case PROCESSOR_SUPERSPARC:
7306 return 3;
7307 case PROCESSOR_HYPERSPARC:
7308 case PROCESSOR_SPARCLITE86X:
7309 return 2;
7310 case PROCESSOR_ULTRASPARC:
7311 case PROCESSOR_ULTRASPARC3:
7312 return 4;
7316 static int
7317 set_extends (rtx insn)
7319 register rtx pat = PATTERN (insn);
7321 switch (GET_CODE (SET_SRC (pat)))
7323 /* Load and some shift instructions zero extend. */
7324 case MEM:
7325 case ZERO_EXTEND:
7326 /* sethi clears the high bits.  */
7327 case HIGH:
7328 /* LO_SUM is used with sethi; sethi clears the high bits, and
7329 the values used with lo_sum are positive.  */
7330 case LO_SUM:
7331 /* A store-flag insn stores 0 or 1.  */
7332 case LT: case LTU:
7333 case GT: case GTU:
7334 case LE: case LEU:
7335 case GE: case GEU:
7336 case EQ:
7337 case NE:
7338 return 1;
7339 case AND:
7341 rtx op0 = XEXP (SET_SRC (pat), 0);
7342 rtx op1 = XEXP (SET_SRC (pat), 1);
7343 if (GET_CODE (op1) == CONST_INT)
7344 return INTVAL (op1) >= 0;
7345 if (GET_CODE (op0) != REG)
7346 return 0;
7347 if (sparc_check_64 (op0, insn) == 1)
7348 return 1;
7349 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7351 case IOR:
7352 case XOR:
7354 rtx op0 = XEXP (SET_SRC (pat), 0);
7355 rtx op1 = XEXP (SET_SRC (pat), 1);
7356 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7357 return 0;
7358 if (GET_CODE (op1) == CONST_INT)
7359 return INTVAL (op1) >= 0;
7360 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7362 case LSHIFTRT:
7363 return GET_MODE (SET_SRC (pat)) == SImode;
7364 /* Positive integers leave the high bits zero. */
7365 case CONST_DOUBLE:
7366 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7367 case CONST_INT:
7368 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7369 case ASHIFTRT:
7370 case SIGN_EXTEND:
7371 return - (GET_MODE (SET_SRC (pat)) == SImode);
7372 case REG:
7373 return sparc_check_64 (SET_SRC (pat), insn);
7374 default:
7375 return 0;
7379 /* We _ought_ to have only one kind per function, but... */
7380 static GTY(()) rtx sparc_addr_diff_list;
7381 static GTY(()) rtx sparc_addr_list;
7383 void
7384 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7386 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7387 if (diff)
7388 sparc_addr_diff_list
7389 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7390 else
7391 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7394 static void
7395 sparc_output_addr_vec (rtx vec)
7397 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7398 int idx, vlen = XVECLEN (body, 0);
7400 #ifdef ASM_OUTPUT_ADDR_VEC_START
7401 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7402 #endif
7404 #ifdef ASM_OUTPUT_CASE_LABEL
7405 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7406 NEXT_INSN (lab));
7407 #else
7408 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7409 #endif
7411 for (idx = 0; idx < vlen; idx++)
7413 ASM_OUTPUT_ADDR_VEC_ELT
7414 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7417 #ifdef ASM_OUTPUT_ADDR_VEC_END
7418 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7419 #endif
7422 static void
7423 sparc_output_addr_diff_vec (rtx vec)
7425 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7426 rtx base = XEXP (XEXP (body, 0), 0);
7427 int idx, vlen = XVECLEN (body, 1);
7429 #ifdef ASM_OUTPUT_ADDR_VEC_START
7430 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7431 #endif
7433 #ifdef ASM_OUTPUT_CASE_LABEL
7434 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7435 NEXT_INSN (lab));
7436 #else
7437 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7438 #endif
7440 for (idx = 0; idx < vlen; idx++)
7442 ASM_OUTPUT_ADDR_DIFF_ELT
7443 (asm_out_file,
7444 body,
7445 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7446 CODE_LABEL_NUMBER (base));
7449 #ifdef ASM_OUTPUT_ADDR_VEC_END
7450 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7451 #endif
7454 static void
7455 sparc_output_deferred_case_vectors (void)
7457 rtx t;
7458 int align;
7460 if (sparc_addr_list == NULL_RTX
7461 && sparc_addr_diff_list == NULL_RTX)
7462 return;
7464 /* Align to cache line in the function's code section. */
7465 current_function_section (current_function_decl);
7467 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7468 if (align > 0)
7469 ASM_OUTPUT_ALIGN (asm_out_file, align);
7471 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7472 sparc_output_addr_vec (XEXP (t, 0));
7473 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7474 sparc_output_addr_diff_vec (XEXP (t, 0));
7476 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7479 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7480 unknown. Return 1 if the high bits are zero, -1 if the register is
7481 sign extended. */
7483 sparc_check_64 (rtx x, rtx insn)
7485 /* If a register is set only once it is safe to ignore insns this
7486 code does not know how to handle. The loop will either recognize
7487 the single set and return the correct value or fail to recognize
7488 it and return 0. */
7489 int set_once = 0;
7490 rtx y = x;
7492 gcc_assert (GET_CODE (x) == REG);
7494 if (GET_MODE (x) == DImode)
7495 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7497 if (flag_expensive_optimizations
7498 && REG_N_SETS (REGNO (y)) == 1)
7499 set_once = 1;
7501 if (insn == 0)
7503 if (set_once)
7504 insn = get_last_insn_anywhere ();
7505 else
7506 return 0;
7509 while ((insn = PREV_INSN (insn)))
7511 switch (GET_CODE (insn))
7513 case JUMP_INSN:
7514 case NOTE:
7515 break;
7516 case CODE_LABEL:
7517 case CALL_INSN:
7518 default:
7519 if (! set_once)
7520 return 0;
7521 break;
7522 case INSN:
7524 rtx pat = PATTERN (insn);
7525 if (GET_CODE (pat) != SET)
7526 return 0;
7527 if (rtx_equal_p (x, SET_DEST (pat)))
7528 return set_extends (insn);
7529 if (y && rtx_equal_p (y, SET_DEST (pat)))
7530 return set_extends (insn);
7531 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7532 return 0;
7536 return 0;
7539 /* Returns assembly code to perform a DImode shift using
7540 a 64-bit global or out register on SPARC-V8+. */
7541 const char *
7542 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7544 static char asm_code[60];
7546 /* The scratch register is only required when the destination
7547 register is not a 64-bit global or out register. */
7548 if (which_alternative != 2)
7549 operands[3] = operands[0];
7551 /* We can only shift by constants <= 63. */
7552 if (GET_CODE (operands[2]) == CONST_INT)
7553 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7555 if (GET_CODE (operands[1]) == CONST_INT)
7557 output_asm_insn ("mov\t%1, %3", operands);
7559 else
7561 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7562 if (sparc_check_64 (operands[1], insn) <= 0)
7563 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7564 output_asm_insn ("or\t%L1, %3, %3", operands);
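/* %3 now holds the full 64-bit value: the high word shifted into the
   upper half with the zero-extended low word ORed in.  */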
7567 strcpy(asm_code, opcode);
7569 if (which_alternative != 2)
7570 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7571 else
7572 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7575 /* Output rtl to increment the profiler label LABELNO
7576 for profiling a function entry. */
7578 void
7579 sparc_profile_hook (int labelno)
7581 char buf[32];
7582 rtx lab, fun;
7584 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7585 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7586 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7588 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7591 #ifdef OBJECT_FORMAT_ELF
7592 static void
7593 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7594 tree decl)
7596 if (flags & SECTION_MERGE)
7598 /* The entry size cannot be expressed in this section-attributes
7599 encoding style.  */
7600 default_elf_asm_named_section (name, flags, decl);
7601 return;
7604 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7606 if (!(flags & SECTION_DEBUG))
7607 fputs (",#alloc", asm_out_file);
7608 if (flags & SECTION_WRITE)
7609 fputs (",#write", asm_out_file);
7610 if (flags & SECTION_TLS)
7611 fputs (",#tls", asm_out_file);
7612 if (flags & SECTION_CODE)
7613 fputs (",#execinstr", asm_out_file);
7615 /* ??? Handle SECTION_BSS. */
7617 fputc ('\n', asm_out_file);
7619 #endif /* OBJECT_FORMAT_ELF */
7621 /* We do not allow indirect calls to be optimized into sibling calls.
7623 We cannot use sibling calls when delayed branches are disabled
7624 because they will likely require the call delay slot to be filled.
7626 Also, on SPARC 32-bit we cannot emit a sibling call when the
7627 current function returns a structure. This is because the "unimp
7628 after call" convention would cause the callee to return to the
7629 wrong place. The generic code already disallows cases where the
7630 function being called returns a structure.
7632 It may seem strange how this last case could occur. Usually there
7633 is code after the call which jumps to epilogue code which dumps the
7634 return value into the struct return area. That ought to invalidate
7635 the sibling call, right? Well, in the C++ case we can end up passing
7636 the pointer to the struct return area to a constructor (which returns
7637 void) and then nothing else happens. Such a sibling call would look
7638 valid without the added check here. */
7639 static bool
7640 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7642 return (decl
7643 && flag_delayed_branch
7644 && (TARGET_ARCH64 || ! current_function_returns_struct));
7647 /* libfunc renaming. */
7648 #include "config/gofast.h"
7650 static void
7651 sparc_init_libfuncs (void)
7653 if (TARGET_ARCH32)
7655 /* Use the subroutines that Sun's library provides for integer
7656 multiply and divide. The `*' prevents an underscore from
7657 being prepended by the compiler. .umul is a little faster
7658 than .mul. */
7659 set_optab_libfunc (smul_optab, SImode, "*.umul");
7660 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7661 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7662 set_optab_libfunc (smod_optab, SImode, "*.rem");
7663 set_optab_libfunc (umod_optab, SImode, "*.urem");
7665 /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
7666 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7667 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7668 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7669 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7670 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7672 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7673 is because with soft-float, the SFmode and DFmode sqrt
7674 instructions will be absent, and the compiler will notice and
7675 try to use the TFmode sqrt instruction for calls to the
7676 builtin function sqrt, but this fails. */
7677 if (TARGET_FPU)
7678 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7680 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7681 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7682 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7683 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7684 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7685 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7687 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7688 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7689 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7690 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7692 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7693 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7694 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7696 if (DITF_CONVERSION_LIBFUNCS)
7698 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7699 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7700 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7703 if (SUN_CONVERSION_LIBFUNCS)
7705 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7706 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7707 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7708 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7711 if (TARGET_ARCH64)
7713 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7714 do not exist in the library. Make sure the compiler does not
7715 emit calls to them by accident. (It should always use the
7716 hardware instructions.) */
7717 set_optab_libfunc (smul_optab, SImode, 0);
7718 set_optab_libfunc (sdiv_optab, SImode, 0);
7719 set_optab_libfunc (udiv_optab, SImode, 0);
7720 set_optab_libfunc (smod_optab, SImode, 0);
7721 set_optab_libfunc (umod_optab, SImode, 0);
7723 if (SUN_INTEGER_MULTIPLY_64)
7725 set_optab_libfunc (smul_optab, DImode, "__mul64");
7726 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7727 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7728 set_optab_libfunc (smod_optab, DImode, "__rem64");
7729 set_optab_libfunc (umod_optab, DImode, "__urem64");
7732 if (SUN_CONVERSION_LIBFUNCS)
7734 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7735 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7736 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7737 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7741 gofast_maybe_init_libfuncs ();
7744 #define def_builtin(NAME, CODE, TYPE) \
7745 lang_hooks.builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7746 NULL_TREE)
7748 /* Implement the TARGET_INIT_BUILTINS target hook.
7749 Create builtin functions for special SPARC instructions. */
7751 static void
7752 sparc_init_builtins (void)
7754 if (TARGET_VIS)
7755 sparc_vis_init_builtins ();
7758 /* Create builtin functions for VIS 1.0 instructions. */
7760 static void
7761 sparc_vis_init_builtins (void)
7763 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7764 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7765 tree v4hi = build_vector_type (intHI_type_node, 4);
7766 tree v2hi = build_vector_type (intHI_type_node, 2);
7767 tree v2si = build_vector_type (intSI_type_node, 2);
7769 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7770 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7771 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7772 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7773 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7774 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7775 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7776 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7777 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7778 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7779 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7780 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7781 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7782 v8qi, v8qi,
7783 intDI_type_node, 0);
7784 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7785 intDI_type_node,
7786 intDI_type_node, 0);
7787 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7788 ptr_type_node,
7789 intSI_type_node, 0);
7790 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7791 ptr_type_node,
7792 intDI_type_node, 0);
7794 /* Packing and expanding vectors. */
7795 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7796 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7797 v8qi_ftype_v2si_v8qi);
7798 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7799 v2hi_ftype_v2si);
7800 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7801 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7802 v8qi_ftype_v4qi_v4qi);
7804 /* Multiplications. */
7805 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7806 v4hi_ftype_v4qi_v4hi);
7807 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7808 v4hi_ftype_v4qi_v2hi);
7809 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7810 v4hi_ftype_v4qi_v2hi);
7811 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
7812 v4hi_ftype_v8qi_v4hi);
7813 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
7814 v4hi_ftype_v8qi_v4hi);
7815 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
7816 v2si_ftype_v4qi_v2hi);
7817 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
7818 v2si_ftype_v4qi_v2hi);
7820 /* Data aligning. */
7821 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
7822 v4hi_ftype_v4hi_v4hi);
7823 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
7824 v8qi_ftype_v8qi_v8qi);
7825 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
7826 v2si_ftype_v2si_v2si);
7827 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
7828 di_ftype_di_di);
7829 if (TARGET_ARCH64)
7830 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
7831 ptr_ftype_ptr_di);
7832 else
7833 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
7834 ptr_ftype_ptr_si);
7836 /* Pixel distance. */
7837 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
7838 di_ftype_v8qi_v8qi_di);
7841 /* Implement the TARGET_EXPAND_BUILTIN target hook.
7842 Expand builtin functions for SPARC intrinsics.  */
7844 static rtx
7845 sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7846 enum machine_mode tmode, int ignore ATTRIBUTE_UNUSED)
7848 tree arglist;
7849 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7850 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
7851 rtx pat, op[4];
7852 enum machine_mode mode[4];
7853 int arg_count = 0;
7855 mode[arg_count] = tmode;
7857 if (target == 0
7858 || GET_MODE (target) != tmode
7859 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7860 op[arg_count] = gen_reg_rtx (tmode);
7861 else
7862 op[arg_count] = target;
7864 for (arglist = TREE_OPERAND (exp, 1); arglist;
7865 arglist = TREE_CHAIN (arglist))
7867 tree arg = TREE_VALUE (arglist);
7869 arg_count++;
7870 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
7871 op[arg_count] = expand_expr (arg, NULL_RTX, VOIDmode, 0);
7873 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
7874 mode[arg_count]))
7875 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
7878 switch (arg_count)
7880 case 1:
7881 pat = GEN_FCN (icode) (op[0], op[1]);
7882 break;
7883 case 2:
7884 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
7885 break;
7886 case 3:
7887 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
7888 break;
7889 default:
7890 gcc_unreachable ();
7893 if (!pat)
7894 return NULL_RTX;
7896 emit_insn (pat);
7898 return op[0];
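
/* Illustrative note (an editorial sketch): op[0] always holds the output
   register and op[1..3] the expanded arguments, so for a two-argument
   builtin such as __builtin_vis_fpmerge the loop leaves arg_count == 2
   and the switch emits GEN_FCN (icode) (op[0], op[1], op[2]).  */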

int
sparc_extra_constraint_check (rtx op, int c, int strict)
{
  int reload_ok_mem;

  if (TARGET_ARCH64
      && (c == 'T' || c == 'U'))
    return 0;

  switch (c)
    {
    case 'Q':
      return fp_sethi_p (op);

    case 'R':
      return fp_mov_p (op);

    case 'S':
      return fp_high_losum_p (op);

    case 'U':
      if (! strict
          || (GET_CODE (op) == REG
              && (REGNO (op) < FIRST_PSEUDO_REGISTER
                  || reg_renumber[REGNO (op)] >= 0)))
        return register_ok_for_ldd (op);

      return 0;

    case 'W':
    case 'T':
      break;

    case 'Y':
      return const_zero_operand (op, GET_MODE (op));

    default:
      return 0;
    }

  /* Our memory extra constraints have to emulate the
     behavior of 'm' and 'o' in order for reload to work
     correctly.  */
  if (GET_CODE (op) == MEM)
    {
      reload_ok_mem = 0;
      if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
          && (! strict
              || strict_memory_address_p (Pmode, XEXP (op, 0))))
        reload_ok_mem = 1;
    }
  else
    {
      reload_ok_mem = (reload_in_progress
                       && GET_CODE (op) == REG
                       && REGNO (op) >= FIRST_PSEUDO_REGISTER
                       && reg_renumber [REGNO (op)] < 0);
    }

  return reload_ok_mem;
}
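
/* Illustrative note (an editorial sketch): these letters are the SPARC
   extra constraints referenced from the machine description, reached via
   EXTRA_CONSTRAINT in sparc.h.  Judging from register_ok_for_ldd and the
   mem_min_alignment (op, 8) test, 'U' and 'T' accept operands usable by
   the 8-byte ldd/std instructions, which is why both are rejected
   outright on TARGET_ARCH64 above.  */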

/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */

static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode
          && ((CONST_DOUBLE_HIGH (x) == 0
               && CONST_DOUBLE_LOW (x) < 0x1000)
              || (CONST_DOUBLE_HIGH (x) == -1
                  && CONST_DOUBLE_LOW (x) < 0
                  && CONST_DOUBLE_LOW (x) >= -0x1000)))
        *total = 0;
      else
        *total = 8;
      return true;

    case MEM:
      /* If outer-code was a sign or zero extension, a cost
         of COSTS_N_INSNS (1) was already added in.  This is
         why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
        {
          *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
        }
      else if (outer_code == SIGN_EXTEND)
        {
          *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
        }
      else if (float_mode_p)
        {
          *total = sparc_costs->float_load;
        }
      else
        {
          *total = sparc_costs->int_load;
        }

      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = sparc_costs->float_plusminus;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (float_mode_p)
        *total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
        *total = COSTS_N_INSNS (25);
      else
        {
          int bit_cost;

          bit_cost = 0;
          if (sparc_costs->int_mul_bit_factor)
            {
              int nbits;

              if (GET_CODE (XEXP (x, 1)) == CONST_INT)
                {
                  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
                  for (nbits = 0; value != 0; value &= value - 1)
                    nbits++;
                }
              else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
                       && GET_MODE (XEXP (x, 1)) == VOIDmode)
                {
                  rtx x1 = XEXP (x, 1);
                  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
                  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

                  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
                    nbits++;
                  for (; value2 != 0; value2 &= value2 - 1)
                    nbits++;
                }
              else
                nbits = 7;

              if (nbits < 3)
                nbits = 3;
              bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
              bit_cost = COSTS_N_INSNS (bit_cost);
            }

          if (mode == DImode)
            *total = sparc_costs->int_mulX + bit_cost;
          else
            *total = sparc_costs->int_mul + bit_cost;
        }
      return false;
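
    /* Illustrative note (an editorial sketch): the "value &= value - 1"
       loops above are the classic Kernighan bit count, clearing one set
       bit per iteration; e.g. a multiply by 0x90001 (three bits set)
       yields nbits == 3 and hence zero extra bit_cost.  */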

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
        {
          if (mode == DFmode)
            *total = sparc_costs->float_div_df;
          else
            *total = sparc_costs->float_div_sf;
        }
      else
        {
          if (mode == DImode)
            *total = sparc_costs->int_divX;
          else
            *total = sparc_costs->int_div;
        }
      return false;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
        *total = sparc_costs->float_sqrt_df;
      else
        *total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
        *total = sparc_costs->float_cmp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = sparc_costs->float_cmove;
      else
        *total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else
        return false;

    default:
      return false;
    }
}
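
/* Illustrative note (an editorial sketch): per the rtx_costs hook
   convention, returning true means *total is final for the whole
   expression, while returning false lets the caller recurse and add in
   the costs of the operands; that is why the constant and MEM cases
   above return true but PLUS, MULT, DIV, etc. do not.  */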

/* Emit the sequence of insns SEQ while preserving the register REG.  */

static void
emit_and_preserve (rtx seq, rtx reg)
{
  rtx slot = gen_rtx_MEM (word_mode,
                          plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));

  emit_insn (gen_stack_pointer_dec (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  emit_insn (seq);
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
}
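
/* Illustrative note (an editorial sketch): the decrement reserves
   STACK_BOUNDARY/BITS_PER_UNIT bytes of fresh stack space (8 bytes when
   STACK_BOUNDARY is 64), so REG is stored there, survives whatever SEQ
   emits, and is reloaded before the stack pointer is restored.  */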

/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at address
   (*THIS + VCALL_OFFSET) should be additionally added to THIS.  */

static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
{
  rtx this, insn, funexp;
  unsigned int int_arg_first;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  emit_note (NOTE_INSN_PROLOGUE_END);

  if (flag_delayed_branch)
    {
      /* We will emit a regular sibcall below, so we need to instruct
         output_sibcall that we are in a leaf function.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;

      /* This will cause final.c to invoke leaf_renumber_regs so we
         must behave as if we were in a not-yet-leafified function.  */
      int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
    }
  else
    {
      /* We will emit the sibcall manually below, so we will need to
         manually spill non-leaf registers.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;

      /* We really are in a leaf function.  */
      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }

  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the
     function returns a structure, the structure return pointer is there
     instead.  */
  if (TARGET_ARCH64
      && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
        {
          rtx scratch = gen_rtx_REG (Pmode, 1);
          emit_move_insn (scratch, delta_rtx);
          delta_rtx = scratch;
        }

      /* THIS += DELTA.  */
      emit_insn (gen_add2_insn (this, delta_rtx));
    }

  /* Add the word at address (*THIS + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
         may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
        ;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
               /* The below sequence is made up of at least 2 insns,
                  while the default method may need only one.  */
               && vcall_offset < -8192)
        {
          rtx scratch2 = gen_rtx_REG (Pmode, 5);
          emit_move_insn (scratch2, vcall_offset_rtx);
          vcall_offset_rtx = scratch2;
        }
      else
        {
          rtx increment = GEN_INT (-4096);

          /* VCALL_OFFSET is a negative number whose typical range can be
             estimated as -32768..0 in 32-bit mode.  In almost all cases
             it is therefore cheaper to emit multiple add insns than
             spilling and loading the constant into a register (at least
             6 insns).  */
          while (! SPARC_SIMM13_P (vcall_offset))
            {
              emit_insn (gen_add2_insn (scratch, increment));
              vcall_offset += 4096;
            }
          vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0  */
        }
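
      /* Illustrative note (an editorial sketch): a 32-bit VCALL_OFFSET of
         -20000 shrinks as -20000 -> -15904 -> -11808 -> -7712 -> -3616,
         i.e. four add insns plus the final SIMM13 displacement, still
         cheaper than the spill-and-load alternative estimated above.  */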

      /* SCRATCH = *(*THIS + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
                                            gen_rtx_PLUS (Pmode,
                                                          scratch,
                                                          vcall_offset_rtx)));

      /* THIS += *(*THIS + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this, scratch));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
         without using delay slots...  */
      rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
        {
          spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
          start_sequence ();
          /* Delay emitting the PIC helper function because it needs to
             change the section and we are emitting assembly code.  */
          load_pic_register (true);  /* clobbers %o7 */
          scratch = legitimize_pic_address (funexp, Pmode, scratch);
          seq = get_insns ();
          end_sequence ();
          emit_and_preserve (seq, spill_reg);
        }
      else if (TARGET_ARCH32)
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_HIGH (SImode, funexp)));
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_LO_SUM (SImode, scratch, funexp)));
        }
      else  /* TARGET_ARCH64 */
        {
          switch (sparc_cmodel)
            {
            case CM_MEDLOW:
            case CM_MEDMID:
              /* The destination can serve as a temporary.  */
              sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
              break;

            case CM_MEDANY:
            case CM_EMBMEDANY:
              /* The destination cannot serve as a temporary.  */
              spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
              start_sequence ();
              sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
              seq = get_insns ();
              end_sequence ();
              emit_and_preserve (seq, spill_reg);
              break;

            default:
              gcc_unreachable ();
            }
        }

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_initialize ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}

/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT vcall_offset,
                           tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}
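
/* Illustrative note (an editorial sketch): the -32768 bound keeps the
   add-insn loop in sparc_output_mi_thunk to at most seven iterations,
   since -32768 + 7 * 4096 == -4096 is already a valid SIMM13.  */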

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}
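
/* Illustrative note (an editorial sketch): for_each_rtx stops walking as
   soon as the callback returns nonzero, so the first local-dynamic
   SYMBOL_REF found in any insn pattern is the one cached in
   some_ld_name.  */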

/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}

/* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
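
/* Illustrative note (an editorial sketch): for a symbol "foo" and
   size 4 the function above emits

       .word %r_tls_dtpoff32(foo)

   which the assembler turns into a DTP-relative TLS relocation
   against foo.  */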

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}

#include "gt-sparc.h"