/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "dwarf2out.h"
/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};
static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};
static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
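
/* For orientation: COSTS_N_INSNS (N) is GCC's standard cost scale from
   rtl.h, which at this vintage is (assuming the stock definition)

     #define COSTS_N_INSNS(N) ((N) * 4)

   so each entry above rates an operation as a multiple of one simple
   insn; e.g. COSTS_N_INSNS (37) for fdivs on Cypress charges a
   single-precision divide as 37 simple instructions.  */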
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   something branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static bool sparc_handle_option (size_t, const char *, int);
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_pic_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (CUMULATIVE_ARGS *,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (const CUMULATIVE_ARGS *,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (CUMULATIVE_ARGS *,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (CUMULATIVE_ARGS *,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
				    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
static const struct default_options sparc_option_optimization_table[] =
{
  { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
  { OPT_LEVELS_NONE, 0, NULL, 0 }
};
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE sparc_option_optimization_table

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

struct gcc_target targetm = TARGET_INITIALIZER;
/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_leon, "leon" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { "leon", PROCESSOR_LEON, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T2 */
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
}
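
/* As a concrete illustration of the selection logic above (hypothetical
   command lines, independent of any particular configuration):
   "-mcpu=v9" reaches the cpu_table loop with both set_tune_p and
   set_arch_p, so it clears the MASK_ISA bits and sets MASK_V9 in
   addition to retuning, whereas "-mtune=ultrasparc" has set_arch_p == 0
   in sparc_select and therefore only changes sparc_cpu (and thus
   sparc_costs), leaving the ISA flags untouched.  */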
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partitioning into
     hot and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }

	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
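
/* For context, the move expanders in sparc.md invoke this function
   along these lines (a sketch, not a verbatim quote of the pattern):

     if (sparc_expand_move (SImode, operands))
       DONE;

   A true return means all insns were already emitted here; a false
   return lets the expander fall through to the pattern's template
   with the (possibly rewritten) operands.  */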
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
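
/* A worked example (hypothetical input, not compiler output): for the
   CONST_INT 0x12345678, the two SETs above amount to

     sethi %hi(0x12345678), %temp   ! temp = 0x12345400 (low 10 bits clear)
     or    %temp, 0x278, %reg       ! 0x278 = 0x12345678 & 0x3ff

   and because the first SET is a plain move of the masked value, CSE
   can reuse the intermediate 0x12345400 for nearby constants that
   share the same upper 22 bits.  */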
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
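
/* A worked example of the is_neg path (hypothetical input): for the
   constant 0xffffffff80001234, low_bits is 0x80001234, so high_bits
   = ~0x80001234 & 0xffffffff = 0x7fffedcb and we emit the equivalent of

     sethi %hi(0x7fffedcb), %temp   ! temp = 0x000000007fffec00
     xor   %temp, -0x1cc, %reg      ! -0x1cc = -0x400 | 0x234

   where the sign-extended 13-bit XOR immediate restores the low 10
   bits and flips the upper 32 bits back to all-ones.  */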
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					   unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
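
/* A worked example (hypothetical input): for the 64-bit constant
   0x00000003ffff0000 we have high_bits = 0x00000003 and low_bits
   = 0xffff0000, so the scans above produce lowest_bit_set = 16,
   highest_bit_set = 33, and all_bits_between_are_set = 1, since the
   constant is one contiguous run of 18 set bits.  */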
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
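
/* Continuing the example above: 0x00000003ffff0000 spans bits 16..33,
   so highest_bit_set - lowest_bit_set = 17 < 21 and const64_is_2insns
   returns 1; the sethi+sllx path in sparc_emit_set_const64 handles it.  */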
1789 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1790 unsigned HOST_WIDE_INT,
1791 int, int);
1793 static unsigned HOST_WIDE_INT
1794 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1795 unsigned HOST_WIDE_INT low_bits,
1796 int lowest_bit_set, int shift)
1798 HOST_WIDE_INT hi, lo;
1800 if (lowest_bit_set < 32)
1802 lo = (low_bits >> lowest_bit_set) << shift;
1803 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1805 else
1807 lo = 0;
1808 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1810 gcc_assert (! (hi & lo));
1811 return (hi | lo);
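/* For instance (illustrative): with high_bits == 0x00000ff0,
   low_bits == 0, lowest_bit_set == 36 and shift == 0, the result is
   0xff: the set-bit run has been moved down to start at bit 0.  */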
1814 /* Here we know we are generating arch64 code and that an integer
1815 constant is being loaded into a register. Emit the most efficient
1816 insn sequence possible. Detection of all the 1-insn cases
1817 has been done already. */
1818 static void
1819 sparc_emit_set_const64 (rtx op0, rtx op1)
1821 unsigned HOST_WIDE_INT high_bits, low_bits;
1822 int lowest_bit_set, highest_bit_set;
1823 int all_bits_between_are_set;
1824 rtx temp = 0;
1826 /* Sanity check that we know what we are working with. */
1827 gcc_assert (TARGET_ARCH64
1828 && (GET_CODE (op0) == SUBREG
1829 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1831 if (reload_in_progress || reload_completed)
1832 temp = op0;
1834 if (GET_CODE (op1) != CONST_INT)
1836 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1837 return;
1840 if (! temp)
1841 temp = gen_reg_rtx (DImode);
1843 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1844 low_bits = (INTVAL (op1) & 0xffffffff);
1846 /* low_bits bits 0 --> 31
1847 high_bits bits 32 --> 63 */
1849 analyze_64bit_constant (high_bits, low_bits,
1850 &highest_bit_set, &lowest_bit_set,
1851 &all_bits_between_are_set);
1853 /* First try for a 2-insn sequence. */
1855 /* These situations are preferred because the optimizer can
1856 * do more things with them:
1857 * 1) mov -1, %reg
1858 * sllx %reg, shift, %reg
1859 * 2) mov -1, %reg
1860 * srlx %reg, shift, %reg
1861 * 3) mov some_small_const, %reg
1862 * sllx %reg, shift, %reg
1863 */
1864 if (((highest_bit_set == 63
1865 || lowest_bit_set == 0)
1866 && all_bits_between_are_set != 0)
1867 || ((highest_bit_set - lowest_bit_set) < 12))
1869 HOST_WIDE_INT the_const = -1;
1870 int shift = lowest_bit_set;
1872 if ((highest_bit_set != 63
1873 && lowest_bit_set != 0)
1874 || all_bits_between_are_set == 0)
1876 the_const =
1877 create_simple_focus_bits (high_bits, low_bits,
1878 lowest_bit_set, 0);
1880 else if (lowest_bit_set == 0)
1881 shift = -(63 - highest_bit_set);
1883 gcc_assert (SPARC_SIMM13_P (the_const));
1884 gcc_assert (shift != 0);
1886 emit_insn (gen_safe_SET64 (temp, the_const));
1887 if (shift > 0)
1888 emit_insn (gen_rtx_SET (VOIDmode,
1889 op0,
1890 gen_rtx_ASHIFT (DImode,
1891 temp,
1892 GEN_INT (shift))));
1893 else if (shift < 0)
1894 emit_insn (gen_rtx_SET (VOIDmode,
1895 op0,
1896 gen_rtx_LSHIFTRT (DImode,
1897 temp,
1898 GEN_INT (-shift))));
1899 return;
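/* E.g. (sketch): 0xfffffffffffffc00 has every bit from 10 through 63
   set, so it is emitted as
	mov	-1, %reg
	sllx	%reg, 10, %reg
   which the optimizer can reason about more easily than a longer
   sequence.  */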
1902 /* Now a range of 22 or fewer bits set somewhere.
1903 * 1) sethi %hi(focus_bits), %reg
1904 * sllx %reg, shift, %reg
1905 * 2) sethi %hi(focus_bits), %reg
1906 * srlx %reg, shift, %reg
1907 */
1908 if ((highest_bit_set - lowest_bit_set) < 21)
1910 unsigned HOST_WIDE_INT focus_bits =
1911 create_simple_focus_bits (high_bits, low_bits,
1912 lowest_bit_set, 10);
1914 gcc_assert (SPARC_SETHI_P (focus_bits));
1915 gcc_assert (lowest_bit_set != 10);
1917 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1919 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1920 if (lowest_bit_set < 10)
1921 emit_insn (gen_rtx_SET (VOIDmode,
1922 op0,
1923 gen_rtx_LSHIFTRT (DImode, temp,
1924 GEN_INT (10 - lowest_bit_set))));
1925 else if (lowest_bit_set > 10)
1926 emit_insn (gen_rtx_SET (VOIDmode,
1927 op0,
1928 gen_rtx_ASHIFT (DImode, temp,
1929 GEN_INT (lowest_bit_set - 10))));
1930 return;
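/* E.g. (sketch): the constant 0x00000007ffffc000 (bits 14..34 set)
   is emitted as
	sethi	%hi(0x7ffffc00), %reg
	sllx	%reg, 4, %reg
   the sethi places the run at bits 10..30 and the shift moves it up
   into place.  */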
1933 /* 1) sethi %hi(low_bits), %reg
1934 * or %reg, %lo(low_bits), %reg
1935 * 2) sethi %hi(~low_bits), %reg
1936 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1937 */
1938 if (high_bits == 0
1939 || high_bits == 0xffffffff)
1941 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1942 (high_bits == 0xffffffff));
1943 return;
1946 /* Now, try 3-insn sequences. */
1948 /* 1) sethi %hi(high_bits), %reg
1949 * or %reg, %lo(high_bits), %reg
1950 * sllx %reg, 32, %reg
1951 */
1952 if (low_bits == 0)
1954 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1955 return;
1958 /* We may be able to do something quick
1959 when the constant is negated, so try that. */
1960 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1961 (~low_bits) & 0xfffffc00))
1963 /* NOTE: The trailing bits get XOR'd so we need the
1964 non-negated bits, not the negated ones. */
1965 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1967 if ((((~high_bits) & 0xffffffff) == 0
1968 && ((~low_bits) & 0x80000000) == 0)
1969 || (((~high_bits) & 0xffffffff) == 0xffffffff
1970 && ((~low_bits) & 0x80000000) != 0))
1972 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1974 if ((SPARC_SETHI_P (fast_int)
1975 && (~high_bits & 0xffffffff) == 0)
1976 || SPARC_SIMM13_P (fast_int))
1977 emit_insn (gen_safe_SET64 (temp, fast_int));
1978 else
1979 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1981 else
1983 rtx negated_const;
1984 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1985 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1986 sparc_emit_set_const64 (temp, negated_const);
1989 /* If we are XOR'ing with -1, then we should emit a one's complement
1990 instead. This way the combiner will notice logical operations
1991 such as ANDN later on and substitute. */
1992 if (trailing_bits == 0x3ff)
1994 emit_insn (gen_rtx_SET (VOIDmode, op0,
1995 gen_rtx_NOT (DImode, temp)));
1997 else
1999 emit_insn (gen_rtx_SET (VOIDmode,
2000 op0,
2001 gen_safe_XOR64 (temp,
2002 (-0x400 | trailing_bits))));
2004 return;
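/* E.g. (sketch): 0xfffffe00000fffff is the complement of the
   contiguous run 0x000001fffff00000, so the code above emits roughly
	sethi	%hi(0x7ffffc00), %reg
	sllx	%reg, 10, %reg
	xnor	%reg, %g0, %reg
   three insns instead of a full decomposition.  */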
2007 /* 1) sethi %hi(xxx), %reg
2008 * or %reg, %lo(xxx), %reg
2009 * sllx %reg, yyy, %reg
2011 * ??? This is just a generalized version of the low_bits==0
2012 * thing above, FIXME...
2013 */
2014 if ((highest_bit_set - lowest_bit_set) < 32)
2016 unsigned HOST_WIDE_INT focus_bits =
2017 create_simple_focus_bits (high_bits, low_bits,
2018 lowest_bit_set, 0);
2020 /* Any other distribution of set bits was handled by the cases above. */
2021 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2023 /* So what we know is that the set bits straddle the
2024 middle of the 64-bit word. */
2025 sparc_emit_set_const64_quick2 (op0, temp,
2026 focus_bits, 0,
2027 lowest_bit_set);
2028 return;
2031 /* 1) sethi %hi(high_bits), %reg
2032 * or %reg, %lo(high_bits), %reg
2033 * sllx %reg, 32, %reg
2034 * or %reg, low_bits, %reg
2035 */
2036 if (SPARC_SIMM13_P(low_bits)
2037 && ((int)low_bits > 0))
2039 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2040 return;
2043 /* The easiest way when all else fails, is full decomposition. */
2044 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2046 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2048 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2049 return the mode to be used for the comparison. For floating-point,
2050 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2051 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2052 processing is needed. */
2054 enum machine_mode
2055 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2057 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2059 switch (op)
2061 case EQ:
2062 case NE:
2063 case UNORDERED:
2064 case ORDERED:
2065 case UNLT:
2066 case UNLE:
2067 case UNGT:
2068 case UNGE:
2069 case UNEQ:
2070 case LTGT:
2071 return CCFPmode;
2073 case LT:
2074 case LE:
2075 case GT:
2076 case GE:
2077 return CCFPEmode;
2079 default:
2080 gcc_unreachable ();
2083 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2084 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2086 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2087 return CCX_NOOVmode;
2088 else
2089 return CC_NOOVmode;
2091 else
2093 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2094 return CCXmode;
2095 else
2096 return CCmode;
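/* Examples (informal): comparing (minus:DI a b) against zero on
   TARGET_ARCH64 yields CCX_NOOVmode, so the flags can be produced by
   the subtraction itself; a plain DImode register comparison yields
   CCXmode; a float LT comparison yields CCFPEmode since the ordered
   predicates must signal on NaN operands.  */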
2100 /* Emit the compare insn and return the CC reg for a CODE comparison
2101 with operands X and Y. */
2103 static rtx
2104 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2106 enum machine_mode mode;
2107 rtx cc_reg;
2109 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2110 return x;
2112 mode = SELECT_CC_MODE (code, x, y);
2114 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2115 fcc regs (cse can't tell they're really call clobbered regs and will
2116 remove a duplicate comparison even if there is an intervening function
2117 call - it will then try to reload the cc reg via an int reg which is why
2118 we need the movcc patterns). It is possible to provide the movcc
2119 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2120 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2121 to tell cse that CCFPE mode registers (even pseudos) are call
2122 clobbered. */
2124 /* ??? This is an experiment. Rather than making changes to cse which may
2125 or may not be easy/clean, we do our own cse. This is possible because
2126 we will generate hard registers. Cse knows they're call clobbered (it
2127 doesn't know the same thing about pseudos). If we guess wrong, no big
2128 deal, but if we win, great! */
2130 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2131 #if 1 /* experiment */
2133 int reg;
2134 /* We cycle through the registers to ensure they're all exercised. */
2135 static int next_fcc_reg = 0;
2136 /* Previous x,y for each fcc reg. */
2137 static rtx prev_args[4][2];
2139 /* Scan prev_args for x,y. */
2140 for (reg = 0; reg < 4; reg++)
2141 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2142 break;
2143 if (reg == 4)
2145 reg = next_fcc_reg;
2146 prev_args[reg][0] = x;
2147 prev_args[reg][1] = y;
2148 next_fcc_reg = (next_fcc_reg + 1) & 3;
2150 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2152 #else
2153 cc_reg = gen_reg_rtx (mode);
2154 #endif /* ! experiment */
2155 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2156 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2157 else
2158 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2160 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2161 will only result in an unrecognizable insn so no point in asserting. */
2162 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2164 return cc_reg;
2168 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2170 rtx
2171 gen_compare_reg (rtx cmp)
2173 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2176 /* This function is used for v9 only.
2177 DEST is the target of the Scc insn.
2178 CODE is the code for an Scc's comparison.
2179 X and Y are the values we compare.
2181 This function is needed to turn
2183 (set (reg:SI 110)
2184 (gt (reg:CCX 100 %icc)
2185 (const_int 0)))
2186 into
2187 (set (reg:SI 110)
2188 (gt:DI (reg:CCX 100 %icc)
2189 (const_int 0)))
2191 I.e. the instruction recognizer needs to see the mode of the comparison to
2192 find the right instruction. We could use "gt:DI" right in the
2193 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2195 static int
2196 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2198 if (! TARGET_ARCH64
2199 && (GET_MODE (x) == DImode
2200 || GET_MODE (dest) == DImode))
2201 return 0;
2203 /* Try to use the movrCC insns. */
2204 if (TARGET_ARCH64
2205 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2206 && y == const0_rtx
2207 && v9_regcmp_p (compare_code))
2209 rtx op0 = x;
2210 rtx temp;
2212 /* Special case for op0 != 0. This can be done with one instruction if
2213 dest == x. */
2215 if (compare_code == NE
2216 && GET_MODE (dest) == DImode
2217 && rtx_equal_p (op0, dest))
2219 emit_insn (gen_rtx_SET (VOIDmode, dest,
2220 gen_rtx_IF_THEN_ELSE (DImode,
2221 gen_rtx_fmt_ee (compare_code, DImode,
2222 op0, const0_rtx),
2223 const1_rtx,
2224 dest)));
2225 return 1;
2228 if (reg_overlap_mentioned_p (dest, op0))
2230 /* Handle the case where dest == x.
2231 We "early clobber" the result. */
2232 op0 = gen_reg_rtx (GET_MODE (x));
2233 emit_move_insn (op0, x);
2236 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2237 if (GET_MODE (op0) != DImode)
2239 temp = gen_reg_rtx (DImode);
2240 convert_move (temp, op0, 0);
2242 else
2243 temp = op0;
2244 emit_insn (gen_rtx_SET (VOIDmode, dest,
2245 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2246 gen_rtx_fmt_ee (compare_code, DImode,
2247 temp, const0_rtx),
2248 const1_rtx,
2249 dest)));
2250 return 1;
2252 else
2254 x = gen_compare_reg_1 (compare_code, x, y);
2255 y = const0_rtx;
2257 gcc_assert (GET_MODE (x) != CC_NOOVmode
2258 && GET_MODE (x) != CCX_NOOVmode);
2260 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2261 emit_insn (gen_rtx_SET (VOIDmode, dest,
2262 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2263 gen_rtx_fmt_ee (compare_code,
2264 GET_MODE (x), x, y),
2265 const1_rtx, dest)));
2266 return 1;
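/* E.g. (sketch): for "dest = (x != 0)" with x in a DImode register,
   the code above emits roughly
	mov	0, %dest
	movrnz	%x, 1, %dest
   using the v9 conditional move on register contents.  */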
2271 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2272 without jumps using the addx/subx instructions. */
2274 bool
2275 emit_scc_insn (rtx operands[])
2277 rtx tem;
2278 rtx x;
2279 rtx y;
2280 enum rtx_code code;
2282 /* The quad-word fp compare library routines all return nonzero to indicate
2283 true, which is different from the equivalent libgcc routines, so we must
2284 handle them specially here. */
2285 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2287 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2288 GET_CODE (operands[1]));
2289 operands[2] = XEXP (operands[1], 0);
2290 operands[3] = XEXP (operands[1], 1);
2293 code = GET_CODE (operands[1]);
2294 x = operands[2];
2295 y = operands[3];
2297 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2298 more applications). The exception to this is "reg != 0" which can
2299 be done in one instruction on v9 (so we do it). */
2300 if (code == EQ)
2302 if (GET_MODE (x) == SImode)
2304 rtx pat = gen_seqsi_special (operands[0], x, y);
2305 emit_insn (pat);
2306 return true;
2308 else if (GET_MODE (x) == DImode)
2310 rtx pat = gen_seqdi_special (operands[0], x, y);
2311 emit_insn (pat);
2312 return true;
2316 if (code == NE)
2318 if (GET_MODE (x) == SImode)
2320 rtx pat = gen_snesi_special (operands[0], x, y);
2321 emit_insn (pat);
2322 return true;
2324 else if (GET_MODE (x) == DImode)
2326 rtx pat = gen_snedi_special (operands[0], x, y);
2327 emit_insn (pat);
2328 return true;
2332 /* For the rest, on v9 we can use conditional moves. */
2334 if (TARGET_V9)
2336 if (gen_v9_scc (operands[0], code, x, y))
2337 return true;
2340 /* We can do LTU and GEU using the addx/subx instructions too. And
2341 for GTU/LEU, if both operands are registers swap them and fall
2342 back to the easy case. */
2343 if (code == GTU || code == LEU)
2345 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2346 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2348 tem = x;
2349 x = y;
2350 y = tem;
2351 code = swap_condition (code);
2355 if (code == LTU || code == GEU)
2357 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2358 gen_rtx_fmt_ee (code, SImode,
2359 gen_compare_reg_1 (code, x, y),
2360 const0_rtx)));
2361 return true;
2364 /* Nope, do branches. */
2365 return false;
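/* E.g. (sketch): "dest = (x < y)" with an unsigned comparison (LTU)
   needs no branch; it becomes roughly
	subcc	%x, %y, %g0
	addx	%g0, 0, %dest
   where the carry left by the subtraction is the 0/1 result.  */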
2368 /* Emit a conditional jump insn for the v9 architecture using comparison code
2369 CODE and jump target LABEL.
2370 This function exists to take advantage of the v9 brxx insns. */
2372 static void
2373 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2375 emit_jump_insn (gen_rtx_SET (VOIDmode,
2376 pc_rtx,
2377 gen_rtx_IF_THEN_ELSE (VOIDmode,
2378 gen_rtx_fmt_ee (code, GET_MODE (op0),
2379 op0, const0_rtx),
2380 gen_rtx_LABEL_REF (VOIDmode, label),
2381 pc_rtx)));
2384 void
2385 emit_conditional_branch_insn (rtx operands[])
2387 /* The quad-word fp compare library routines all return nonzero to indicate
2388 true, which is different from the equivalent libgcc routines, so we must
2389 handle them specially here. */
2390 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2392 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2393 GET_CODE (operands[0]));
2394 operands[1] = XEXP (operands[0], 0);
2395 operands[2] = XEXP (operands[0], 1);
2398 if (TARGET_ARCH64 && operands[2] == const0_rtx
2399 && GET_CODE (operands[1]) == REG
2400 && GET_MODE (operands[1]) == DImode)
2402 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2403 return;
2406 operands[1] = gen_compare_reg (operands[0]);
2407 operands[2] = const0_rtx;
2408 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2409 operands[1], operands[2]);
2410 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2411 operands[3]));
2415 /* Generate a DFmode part of a hard TFmode register.
2416 REG is the TFmode hard register, LOW is 1 for the
2417 low 64-bit half of the register and 0 otherwise.
2418 */
2419 rtx
2420 gen_df_reg (rtx reg, int low)
2422 int regno = REGNO (reg);
2424 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2425 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2426 return gen_rtx_REG (DFmode, regno);
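/* E.g. (illustrative): SPARC is big-endian, so for a TFmode value in
   %f0 the high DFmode half lives in %f0 and the low half in %f2;
   gen_df_reg (reg, 1) therefore returns the REG for %f2.  */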
2429 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2430 Unlike normal calls, TFmode operands are passed by reference. It is
2431 assumed that no more than 3 operands are required. */
2433 static void
2434 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2436 rtx ret_slot = NULL, arg[3], func_sym;
2437 int i;
2439 /* We only expect to be called for conversions, unary, and binary ops. */
2440 gcc_assert (nargs == 2 || nargs == 3);
2442 for (i = 0; i < nargs; ++i)
2444 rtx this_arg = operands[i];
2445 rtx this_slot;
2447 /* TFmode arguments and return values are passed by reference. */
2448 if (GET_MODE (this_arg) == TFmode)
2450 int force_stack_temp;
2452 force_stack_temp = 0;
2453 if (TARGET_BUGGY_QP_LIB && i == 0)
2454 force_stack_temp = 1;
2456 if (GET_CODE (this_arg) == MEM
2457 && ! force_stack_temp)
2458 this_arg = XEXP (this_arg, 0);
2459 else if (CONSTANT_P (this_arg)
2460 && ! force_stack_temp)
2462 this_slot = force_const_mem (TFmode, this_arg);
2463 this_arg = XEXP (this_slot, 0);
2465 else
2467 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2469 /* Operand 0 is the return value. We'll copy it out later. */
2470 if (i > 0)
2471 emit_move_insn (this_slot, this_arg);
2472 else
2473 ret_slot = this_slot;
2475 this_arg = XEXP (this_slot, 0);
2479 arg[i] = this_arg;
2482 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2484 if (GET_MODE (operands[0]) == TFmode)
2486 if (nargs == 2)
2487 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2488 arg[0], GET_MODE (arg[0]),
2489 arg[1], GET_MODE (arg[1]));
2490 else
2491 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2492 arg[0], GET_MODE (arg[0]),
2493 arg[1], GET_MODE (arg[1]),
2494 arg[2], GET_MODE (arg[2]));
2496 if (ret_slot)
2497 emit_move_insn (operands[0], ret_slot);
2499 else
2501 rtx ret;
2503 gcc_assert (nargs == 2);
2505 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2506 GET_MODE (operands[0]), 1,
2507 arg[1], GET_MODE (arg[1]));
2509 if (ret != operands[0])
2510 emit_move_insn (operands[0], ret);
2514 /* Expand soft-float TFmode calls to sparc abi routines. */
2516 static void
2517 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2519 const char *func;
2521 switch (code)
2523 case PLUS:
2524 func = "_Qp_add";
2525 break;
2526 case MINUS:
2527 func = "_Qp_sub";
2528 break;
2529 case MULT:
2530 func = "_Qp_mul";
2531 break;
2532 case DIV:
2533 func = "_Qp_div";
2534 break;
2535 default:
2536 gcc_unreachable ();
2539 emit_soft_tfmode_libcall (func, 3, operands);
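/* E.g. (sketch): a soft TFmode addition a = b + c expands to the
   equivalent of
	_Qp_add (&a, &b, &c);
   with all three quad values passed by reference, as required by
   the SPARC quad-precision ABI.  */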
2542 static void
2543 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2545 const char *func;
2547 gcc_assert (code == SQRT);
2548 func = "_Qp_sqrt";
2550 emit_soft_tfmode_libcall (func, 2, operands);
2553 static void
2554 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2556 const char *func;
2558 switch (code)
2560 case FLOAT_EXTEND:
2561 switch (GET_MODE (operands[1]))
2563 case SFmode:
2564 func = "_Qp_stoq";
2565 break;
2566 case DFmode:
2567 func = "_Qp_dtoq";
2568 break;
2569 default:
2570 gcc_unreachable ();
2572 break;
2574 case FLOAT_TRUNCATE:
2575 switch (GET_MODE (operands[0]))
2577 case SFmode:
2578 func = "_Qp_qtos";
2579 break;
2580 case DFmode:
2581 func = "_Qp_qtod";
2582 break;
2583 default:
2584 gcc_unreachable ();
2586 break;
2588 case FLOAT:
2589 switch (GET_MODE (operands[1]))
2591 case SImode:
2592 func = "_Qp_itoq";
2593 if (TARGET_ARCH64)
2594 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2595 break;
2596 case DImode:
2597 func = "_Qp_xtoq";
2598 break;
2599 default:
2600 gcc_unreachable ();
2602 break;
2604 case UNSIGNED_FLOAT:
2605 switch (GET_MODE (operands[1]))
2607 case SImode:
2608 func = "_Qp_uitoq";
2609 if (TARGET_ARCH64)
2610 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2611 break;
2612 case DImode:
2613 func = "_Qp_uxtoq";
2614 break;
2615 default:
2616 gcc_unreachable ();
2618 break;
2620 case FIX:
2621 switch (GET_MODE (operands[0]))
2623 case SImode:
2624 func = "_Qp_qtoi";
2625 break;
2626 case DImode:
2627 func = "_Qp_qtox";
2628 break;
2629 default:
2630 gcc_unreachable ();
2632 break;
2634 case UNSIGNED_FIX:
2635 switch (GET_MODE (operands[0]))
2637 case SImode:
2638 func = "_Qp_qtoui";
2639 break;
2640 case DImode:
2641 func = "_Qp_qtoux";
2642 break;
2643 default:
2644 gcc_unreachable ();
2646 break;
2648 default:
2649 gcc_unreachable ();
2652 emit_soft_tfmode_libcall (func, 2, operands);
2655 /* Expand a hard-float tfmode operation. All arguments must be in
2656 registers. */
2658 static void
2659 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2661 rtx op, dest;
2663 if (GET_RTX_CLASS (code) == RTX_UNARY)
2665 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2666 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2668 else
2670 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2671 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2672 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2673 operands[1], operands[2]);
2676 if (register_operand (operands[0], VOIDmode))
2677 dest = operands[0];
2678 else
2679 dest = gen_reg_rtx (GET_MODE (operands[0]));
2681 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2683 if (dest != operands[0])
2684 emit_move_insn (operands[0], dest);
2687 void
2688 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2690 if (TARGET_HARD_QUAD)
2691 emit_hard_tfmode_operation (code, operands);
2692 else
2693 emit_soft_tfmode_binop (code, operands);
2696 void
2697 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2699 if (TARGET_HARD_QUAD)
2700 emit_hard_tfmode_operation (code, operands);
2701 else
2702 emit_soft_tfmode_unop (code, operands);
2705 void
2706 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2708 if (TARGET_HARD_QUAD)
2709 emit_hard_tfmode_operation (code, operands);
2710 else
2711 emit_soft_tfmode_cvt (code, operands);
2714 /* Return nonzero if a branch/jump/call instruction will emit a
2715 nop into its delay slot. */
2717 int
2718 empty_delay_slot (rtx insn)
2720 rtx seq;
2722 /* If no previous instruction (should not happen), return true. */
2723 if (PREV_INSN (insn) == NULL)
2724 return 1;
2726 seq = NEXT_INSN (PREV_INSN (insn));
2727 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2728 return 0;
2730 return 1;
2733 /* Return nonzero if TRIAL can go into the call delay slot. */
2735 int
2736 tls_call_delay (rtx trial)
2738 rtx pat;
2740 /* Binutils allows
2741 call __tls_get_addr, %tgd_call (foo)
2742 add %l7, %o0, %o0, %tgd_add (foo)
2743 while Sun as/ld does not. */
2744 if (TARGET_GNU_TLS || !TARGET_TLS)
2745 return 1;
2747 pat = PATTERN (trial);
2749 /* We must reject tgd_add{32|64}, i.e.
2750 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2751 and tldm_add{32|64}, i.e.
2752 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2753 for Sun as/ld. */
2754 if (GET_CODE (pat) == SET
2755 && GET_CODE (SET_SRC (pat)) == PLUS)
2757 rtx unspec = XEXP (SET_SRC (pat), 1);
2759 if (GET_CODE (unspec) == UNSPEC
2760 && (XINT (unspec, 1) == UNSPEC_TLSGD
2761 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2762 return 0;
2765 return 1;
2768 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2769 instruction. RETURN_P is true if the v9 variant 'return' is to be
2770 considered in the test too.
2772 TRIAL must be a SET whose destination is a REG appropriate for the
2773 'restore' instruction or, if RETURN_P is true, for the 'return'
2774 instruction. */
2776 static int
2777 eligible_for_restore_insn (rtx trial, bool return_p)
2779 rtx pat = PATTERN (trial);
2780 rtx src = SET_SRC (pat);
2782 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2783 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2784 && arith_operand (src, GET_MODE (src)))
2786 if (TARGET_ARCH64)
2787 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2788 else
2789 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2792 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2793 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2794 && arith_double_operand (src, GET_MODE (src)))
2795 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2797 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2798 else if (! TARGET_FPU && register_operand (src, SFmode))
2799 return 1;
2801 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2802 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2803 return 1;
2805 /* If we have the 'return' instruction, anything that does not use
2806 local or output registers and can go into a delay slot wins. */
2807 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2808 && (get_attr_in_uncond_branch_delay (trial)
2809 == IN_UNCOND_BRANCH_DELAY_TRUE))
2810 return 1;
2812 /* The 'restore src1,src2,dest' pattern for SImode. */
2813 else if (GET_CODE (src) == PLUS
2814 && register_operand (XEXP (src, 0), SImode)
2815 && arith_operand (XEXP (src, 1), SImode))
2816 return 1;
2818 /* The 'restore src1,src2,dest' pattern for DImode. */
2819 else if (GET_CODE (src) == PLUS
2820 && register_operand (XEXP (src, 0), DImode)
2821 && arith_double_operand (XEXP (src, 1), DImode))
2822 return 1;
2824 /* The 'restore src1,%lo(src2),dest' pattern. */
2825 else if (GET_CODE (src) == LO_SUM
2826 && ! TARGET_CM_MEDMID
2827 && ((register_operand (XEXP (src, 0), SImode)
2828 && immediate_operand (XEXP (src, 1), SImode))
2829 || (TARGET_ARCH64
2830 && register_operand (XEXP (src, 0), DImode)
2831 && immediate_operand (XEXP (src, 1), DImode))))
2832 return 1;
2834 /* The 'restore src,src,dest' pattern. */
2835 else if (GET_CODE (src) == ASHIFT
2836 && (register_operand (XEXP (src, 0), SImode)
2837 || register_operand (XEXP (src, 0), DImode))
2838 && XEXP (src, 1) == const1_rtx)
2839 return 1;
2841 return 0;
2844 /* Return nonzero if TRIAL can go into the function return's
2845 delay slot. */
2847 int
2848 eligible_for_return_delay (rtx trial)
2850 rtx pat;
2852 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2853 return 0;
2855 if (get_attr_length (trial) != 1)
2856 return 0;
2858 /* If there are any call-saved registers, we should scan TRIAL to
2859 check that it does not reference them. For now, just punt. */
2860 if (num_gfregs)
2861 return 0;
2863 /* If the function uses __builtin_eh_return, the eh_return machinery
2864 occupies the delay slot. */
2865 if (crtl->calls_eh_return)
2866 return 0;
2868 /* In the case of a true leaf function, anything can go into the slot. */
2869 if (sparc_leaf_function_p)
2870 return get_attr_in_uncond_branch_delay (trial)
2871 == IN_UNCOND_BRANCH_DELAY_TRUE;
2873 pat = PATTERN (trial);
2875 /* Otherwise, only operations which can be done in tandem with
2876 a `restore' or `return' insn can go into the delay slot. */
2877 if (GET_CODE (SET_DEST (pat)) != REG
2878 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2879 return 0;
2881 /* If this instruction sets up a floating-point register and we have a
2882 return instruction, it can probably go in. But a restore will not
2883 work with FP_REGS. */
2884 if (REGNO (SET_DEST (pat)) >= 32)
2885 return (TARGET_V9
2886 && ! epilogue_renumber (&pat, 1)
2887 && (get_attr_in_uncond_branch_delay (trial)
2888 == IN_UNCOND_BRANCH_DELAY_TRUE));
2890 return eligible_for_restore_insn (trial, true);
2893 /* Return nonzero if TRIAL can go into the sibling call's
2894 delay slot. */
2896 int
2897 eligible_for_sibcall_delay (rtx trial)
2899 rtx pat;
2901 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2902 return 0;
2904 if (get_attr_length (trial) != 1)
2905 return 0;
2907 pat = PATTERN (trial);
2909 if (sparc_leaf_function_p)
2911 /* If the tail call is done using the call instruction,
2912 we have to restore %o7 in the delay slot. */
2913 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2914 return 0;
2916 /* %g1 is used to build the function address. */
2917 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2918 return 0;
2920 return 1;
2923 /* Otherwise, only operations which can be done in tandem with
2924 a `restore' insn can go into the delay slot. */
2925 if (GET_CODE (SET_DEST (pat)) != REG
2926 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2927 || REGNO (SET_DEST (pat)) >= 32)
2928 return 0;
2930 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2931 in most cases. */
2932 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2933 return 0;
2935 return eligible_for_restore_insn (trial, false);
2938 int
2939 short_branch (int uid1, int uid2)
2941 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2943 /* Leave a few words of "slop". */
2944 if (delta >= -1023 && delta <= 1022)
2945 return 1;
2947 return 0;
2950 /* Return nonzero if REG is not used after INSN.
2951 We assume REG is a reload reg, and therefore does
2952 not live past labels or calls or jumps. */
2953 int
2954 reg_unused_after (rtx reg, rtx insn)
2956 enum rtx_code code, prev_code = UNKNOWN;
2958 while ((insn = NEXT_INSN (insn)))
2960 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2961 return 1;
2963 code = GET_CODE (insn);
2964 if (GET_CODE (insn) == CODE_LABEL)
2965 return 1;
2967 if (INSN_P (insn))
2969 rtx set = single_set (insn);
2970 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2971 if (set && in_src)
2972 return 0;
2973 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2974 return 1;
2975 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2976 return 0;
2978 prev_code = code;
2980 return 1;
2983 /* Determine if it's legal to put X into the constant pool. This
2984 is not possible if X contains the address of a symbol that is
2985 not constant (TLS) or not known at final link time (PIC). */
2987 static bool
2988 sparc_cannot_force_const_mem (rtx x)
2990 switch (GET_CODE (x))
2992 case CONST_INT:
2993 case CONST_DOUBLE:
2994 case CONST_VECTOR:
2995 /* Accept all non-symbolic constants. */
2996 return false;
2998 case LABEL_REF:
2999 /* Labels are OK iff we are non-PIC. */
3000 return flag_pic != 0;
3002 case SYMBOL_REF:
3003 /* 'Naked' TLS symbol references are never OK,
3004 non-TLS symbols are OK iff we are non-PIC. */
3005 if (SYMBOL_REF_TLS_MODEL (x))
3006 return true;
3007 else
3008 return flag_pic != 0;
3010 case CONST:
3011 return sparc_cannot_force_const_mem (XEXP (x, 0));
3012 case PLUS:
3013 case MINUS:
3014 return sparc_cannot_force_const_mem (XEXP (x, 0))
3015 || sparc_cannot_force_const_mem (XEXP (x, 1));
3016 case UNSPEC:
3017 return true;
3018 default:
3019 gcc_unreachable ();
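/* Informal examples: a CONST_INT is always poolable; under -fpic a
   SYMBOL_REF to a global is not, since its value is unknown until
   dynamic link time; the address of a TLS variable is never
   poolable because it is not a link-time constant at all.  */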
3023 /* PIC support. */
3024 static GTY(()) bool pic_helper_needed = false;
3025 static GTY(()) rtx pic_helper_symbol;
3026 static GTY(()) rtx global_offset_table;
3028 /* Ensure that we are not using patterns that are not OK with PIC. */
3030 int
3031 check_pic (int i)
3033 switch (flag_pic)
3035 case 1:
3036 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
3037 && (GET_CODE (recog_data.operand[i]) != CONST
3038 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
3039 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
3040 == global_offset_table)
3041 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
3042 == CONST))));
3043 case 2:
3044 default:
3045 return 1;
3049 /* Return true if X is an address which needs a temporary register when
3050 reloaded while generating PIC code. */
3052 int
3053 pic_address_needs_scratch (rtx x)
3055 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3056 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3057 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3058 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3059 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3060 return 1;
3062 return 0;
3065 /* Determine if a given RTX is a valid constant. We already know this
3066 satisfies CONSTANT_P. */
3068 bool
3069 legitimate_constant_p (rtx x)
3071 switch (GET_CODE (x))
3073 case CONST:
3074 case SYMBOL_REF:
3075 if (sparc_tls_referenced_p (x))
3076 return false;
3077 break;
3079 case CONST_DOUBLE:
3080 if (GET_MODE (x) == VOIDmode)
3081 return true;
3083 /* Floating point constants are generally not ok.
3084 The only exception is 0.0 in VIS. */
3085 if (TARGET_VIS
3086 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3087 && const_zero_operand (x, GET_MODE (x)))
3088 return true;
3090 return false;
3092 case CONST_VECTOR:
3093 /* Vector constants are generally not ok.
3094 The only exception is 0 in VIS. */
3095 if (TARGET_VIS
3096 && const_zero_operand (x, GET_MODE (x)))
3097 return true;
3099 return false;
3101 default:
3102 break;
3105 return true;
3108 /* Determine if a given RTX is a valid constant address. */
3110 bool
3111 constant_address_p (rtx x)
3113 switch (GET_CODE (x))
3115 case LABEL_REF:
3116 case CONST_INT:
3117 case HIGH:
3118 return true;
3120 case CONST:
3121 if (flag_pic && pic_address_needs_scratch (x))
3122 return false;
3123 return legitimate_constant_p (x);
3125 case SYMBOL_REF:
3126 return !flag_pic && legitimate_constant_p (x);
3128 default:
3129 return false;
3133 /* Nonzero if the constant value X is a legitimate general operand
3134 when generating PIC code. It is given that flag_pic is on and
3135 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3137 bool
3138 legitimate_pic_operand_p (rtx x)
3140 if (pic_address_needs_scratch (x))
3141 return false;
3142 if (sparc_tls_referenced_p (x))
3143 return false;
3144 return true;
3147 /* Return nonzero if ADDR is a valid memory address.
3148 STRICT specifies whether strict register checking applies. */
3150 static bool
3151 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3153 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3155 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3156 rs1 = addr;
3157 else if (GET_CODE (addr) == PLUS)
3159 rs1 = XEXP (addr, 0);
3160 rs2 = XEXP (addr, 1);
3162 /* Canonicalize: REG comes first; if there are no regs,
3163 LO_SUM comes first. */
3164 if (!REG_P (rs1)
3165 && GET_CODE (rs1) != SUBREG
3166 && (REG_P (rs2)
3167 || GET_CODE (rs2) == SUBREG
3168 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3170 rs1 = XEXP (addr, 1);
3171 rs2 = XEXP (addr, 0);
3174 if ((flag_pic == 1
3175 && rs1 == pic_offset_table_rtx
3176 && !REG_P (rs2)
3177 && GET_CODE (rs2) != SUBREG
3178 && GET_CODE (rs2) != LO_SUM
3179 && GET_CODE (rs2) != MEM
3180 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3181 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3182 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3183 || ((REG_P (rs1)
3184 || GET_CODE (rs1) == SUBREG)
3185 && RTX_OK_FOR_OFFSET_P (rs2)))
3187 imm1 = rs2;
3188 rs2 = NULL;
3190 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3191 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3193 /* We prohibit REG + REG for TFmode when there are no quad move insns
3194 and we consequently need to split. We do this because REG+REG
3195 is not an offsettable address. If we get the situation in reload
3196 where source and destination of a movtf pattern are both MEMs with
3197 REG+REG address, then only one of them gets converted to an
3198 offsettable address. */
3199 if (mode == TFmode
3200 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3201 return 0;
3203 /* We prohibit REG + REG on ARCH32 if not optimizing for
3204 DFmode/DImode because then mem_min_alignment is likely to be zero
3205 after reload and the forced split would lack a matching splitter
3206 pattern. */
3207 if (TARGET_ARCH32 && !optimize
3208 && (mode == DFmode || mode == DImode))
3209 return 0;
3211 else if (USE_AS_OFFSETABLE_LO10
3212 && GET_CODE (rs1) == LO_SUM
3213 && TARGET_ARCH64
3214 && ! TARGET_CM_MEDMID
3215 && RTX_OK_FOR_OLO10_P (rs2))
3217 rs2 = NULL;
3218 imm1 = XEXP (rs1, 1);
3219 rs1 = XEXP (rs1, 0);
3220 if (!CONSTANT_P (imm1)
3221 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3222 return 0;
3225 else if (GET_CODE (addr) == LO_SUM)
3227 rs1 = XEXP (addr, 0);
3228 imm1 = XEXP (addr, 1);
3230 if (!CONSTANT_P (imm1)
3231 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3232 return 0;
3234 /* We can't allow TFmode in 32-bit mode, because an offset greater
3235 than the alignment (8) may cause the LO_SUM to overflow. */
3236 if (mode == TFmode && TARGET_ARCH32)
3237 return 0;
3239 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3240 return 1;
3241 else
3242 return 0;
3244 if (GET_CODE (rs1) == SUBREG)
3245 rs1 = SUBREG_REG (rs1);
3246 if (!REG_P (rs1))
3247 return 0;
3249 if (rs2)
3251 if (GET_CODE (rs2) == SUBREG)
3252 rs2 = SUBREG_REG (rs2);
3253 if (!REG_P (rs2))
3254 return 0;
3257 if (strict)
3259 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3260 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3261 return 0;
3263 else
3265 if ((REGNO (rs1) >= 32
3266 && REGNO (rs1) != FRAME_POINTER_REGNUM
3267 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3268 || (rs2
3269 && (REGNO (rs2) >= 32
3270 && REGNO (rs2) != FRAME_POINTER_REGNUM
3271 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3272 return 0;
3274 return 1;
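/* Examples of addresses accepted above (informal):
	(plus (reg) (reg))		register + register
	(plus (reg) (const_int 64))	register + 13-bit immediate
	(lo_sum (reg) (symbol_ref))	register + %lo() of a symbol
   whereas (plus (reg) (const_int 8192)) is rejected because 8192
   does not fit in the signed 13-bit immediate field.  */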
3277 /* Construct the SYMBOL_REF for the tls_get_offset function. */
3279 static GTY(()) rtx sparc_tls_symbol;
3281 static rtx
3282 sparc_tls_get_addr (void)
3284 if (!sparc_tls_symbol)
3285 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3287 return sparc_tls_symbol;
3290 static rtx
3291 sparc_tls_got (void)
3293 rtx temp;
3294 if (flag_pic)
3296 crtl->uses_pic_offset_table = 1;
3297 return pic_offset_table_rtx;
3300 if (!global_offset_table)
3301 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3302 temp = gen_reg_rtx (Pmode);
3303 emit_move_insn (temp, global_offset_table);
3304 return temp;
3307 /* Return true if X contains a thread-local symbol. */
3309 static bool
3310 sparc_tls_referenced_p (rtx x)
3312 if (!TARGET_HAVE_TLS)
3313 return false;
3315 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3316 x = XEXP (XEXP (x, 0), 0);
3318 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3319 return true;
3321 /* That's all we handle in sparc_legitimize_tls_address for now. */
3322 return false;
3325 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3326 this (thread-local) address. */
3328 static rtx
3329 sparc_legitimize_tls_address (rtx addr)
3331 rtx temp1, temp2, temp3, ret, o0, got, insn;
3333 gcc_assert (can_create_pseudo_p ());
3335 if (GET_CODE (addr) == SYMBOL_REF)
3336 switch (SYMBOL_REF_TLS_MODEL (addr))
3338 case TLS_MODEL_GLOBAL_DYNAMIC:
3339 start_sequence ();
3340 temp1 = gen_reg_rtx (SImode);
3341 temp2 = gen_reg_rtx (SImode);
3342 ret = gen_reg_rtx (Pmode);
3343 o0 = gen_rtx_REG (Pmode, 8);
3344 got = sparc_tls_got ();
3345 emit_insn (gen_tgd_hi22 (temp1, addr));
3346 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3347 if (TARGET_ARCH32)
3349 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3350 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3351 addr, const1_rtx));
3353 else
3355 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3356 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3357 addr, const1_rtx));
3359 CALL_INSN_FUNCTION_USAGE (insn)
3360 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3361 CALL_INSN_FUNCTION_USAGE (insn));
3362 insn = get_insns ();
3363 end_sequence ();
3364 emit_libcall_block (insn, ret, o0, addr);
3365 break;
3367 case TLS_MODEL_LOCAL_DYNAMIC:
3368 start_sequence ();
3369 temp1 = gen_reg_rtx (SImode);
3370 temp2 = gen_reg_rtx (SImode);
3371 temp3 = gen_reg_rtx (Pmode);
3372 ret = gen_reg_rtx (Pmode);
3373 o0 = gen_rtx_REG (Pmode, 8);
3374 got = sparc_tls_got ();
3375 emit_insn (gen_tldm_hi22 (temp1));
3376 emit_insn (gen_tldm_lo10 (temp2, temp1));
3377 if (TARGET_ARCH32)
3379 emit_insn (gen_tldm_add32 (o0, got, temp2));
3380 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3381 const1_rtx));
3383 else
3385 emit_insn (gen_tldm_add64 (o0, got, temp2));
3386 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3387 const1_rtx));
3389 CALL_INSN_FUNCTION_USAGE (insn)
3390 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3391 CALL_INSN_FUNCTION_USAGE (insn));
3392 insn = get_insns ();
3393 end_sequence ();
3394 emit_libcall_block (insn, temp3, o0,
3395 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3396 UNSPEC_TLSLD_BASE));
3397 temp1 = gen_reg_rtx (SImode);
3398 temp2 = gen_reg_rtx (SImode);
3399 emit_insn (gen_tldo_hix22 (temp1, addr));
3400 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3401 if (TARGET_ARCH32)
3402 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3403 else
3404 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3405 break;
3407 case TLS_MODEL_INITIAL_EXEC:
3408 temp1 = gen_reg_rtx (SImode);
3409 temp2 = gen_reg_rtx (SImode);
3410 temp3 = gen_reg_rtx (Pmode);
3411 got = sparc_tls_got ();
3412 emit_insn (gen_tie_hi22 (temp1, addr));
3413 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3414 if (TARGET_ARCH32)
3415 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3416 else
3417 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3418 if (TARGET_SUN_TLS)
3420 ret = gen_reg_rtx (Pmode);
3421 if (TARGET_ARCH32)
3422 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3423 temp3, addr));
3424 else
3425 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3426 temp3, addr));
3428 else
3429 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3430 break;
3432 case TLS_MODEL_LOCAL_EXEC:
3433 temp1 = gen_reg_rtx (Pmode);
3434 temp2 = gen_reg_rtx (Pmode);
3435 if (TARGET_ARCH32)
3437 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3438 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3440 else
3442 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3443 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3445 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3446 break;
3448 default:
3449 gcc_unreachable ();
3452 else if (GET_CODE (addr) == CONST)
3454 rtx base, offset;
3456 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3458 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3459 offset = XEXP (XEXP (addr, 0), 1);
3461 base = force_operand (base, NULL_RTX);
3462 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3463 offset = force_reg (Pmode, offset);
3464 ret = gen_rtx_PLUS (Pmode, base, offset);
3467 else
3468 gcc_unreachable (); /* for now ... */
3470 return ret;
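/* E.g. (sketch): the global-dynamic case above produces, for a
   symbol foo, roughly
	sethi	%tgd_hi22(foo), %t1
	add	%t1, %tgd_lo10(foo), %t2
	add	%l7, %t2, %o0, %tgd_add(foo)
	call	__tls_get_addr, %tgd_call(foo)
   with the address of foo returned in %o0.  */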
3473 /* Legitimize PIC addresses. If the address is already position-independent,
3474 we return ORIG. Newly generated position-independent addresses go into a
3475 reg. This is REG if nonzero, otherwise we allocate register(s) as
3476 necessary. */
3478 static rtx
3479 sparc_legitimize_pic_address (rtx orig, rtx reg)
3481 bool gotdata_op = false;
3483 if (GET_CODE (orig) == SYMBOL_REF
3484 /* See the comment in sparc_expand_move. */
3485 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3487 rtx pic_ref, address;
3488 rtx insn;
3490 if (reg == 0)
3492 gcc_assert (! reload_in_progress && ! reload_completed);
3493 reg = gen_reg_rtx (Pmode);
3496 if (flag_pic == 2)
3498 /* If not during reload, allocate another temp reg here for loading
3499 in the address, so that these instructions can be optimized
3500 properly. */
3501 rtx temp_reg = ((reload_in_progress || reload_completed)
3502 ? reg : gen_reg_rtx (Pmode));
3504 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3505 won't get confused into thinking that these two instructions
3506 are loading in the true address of the symbol. If in the
3507 future a PIC rtx exists, that should be used instead. */
3508 if (TARGET_ARCH64)
3510 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3511 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3513 else
3515 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3516 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3518 address = temp_reg;
3519 gotdata_op = true;
3521 else
3522 address = orig;
3524 crtl->uses_pic_offset_table = 1;
3525 if (gotdata_op)
3527 if (TARGET_ARCH64)
3528 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3529 pic_offset_table_rtx,
3530 address, orig));
3531 else
3532 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3533 pic_offset_table_rtx,
3534 address, orig));
3536 else
3538 pic_ref
3539 = gen_const_mem (Pmode,
3540 gen_rtx_PLUS (Pmode,
3541 pic_offset_table_rtx, address));
3542 insn = emit_move_insn (reg, pic_ref);
3545 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3546 by the loop optimizer. */
3547 set_unique_reg_note (insn, REG_EQUAL, orig);
3548 return reg;
3550 else if (GET_CODE (orig) == CONST)
3552 rtx base, offset;
3554 if (GET_CODE (XEXP (orig, 0)) == PLUS
3555 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3556 return orig;
3558 if (reg == 0)
3560 gcc_assert (! reload_in_progress && ! reload_completed);
3561 reg = gen_reg_rtx (Pmode);
3564 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3565 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3566 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3567 base == reg ? NULL_RTX : reg);
3569 if (GET_CODE (offset) == CONST_INT)
3571 if (SMALL_INT (offset))
3572 return plus_constant (base, INTVAL (offset));
3573 else if (! reload_in_progress && ! reload_completed)
3574 offset = force_reg (Pmode, offset);
3575 else
3576 /* If we reach here, then something is seriously wrong. */
3577 gcc_unreachable ();
3579 return gen_rtx_PLUS (Pmode, base, offset);
3581 else if (GET_CODE (orig) == LABEL_REF)
3582 /* ??? We ought to be checking that the register is live instead, in case
3583 it is eliminated. */
3584 crtl->uses_pic_offset_table = 1;
3586 return orig;
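/* E.g. (sketch): with flag_pic == 2 a reference to a global "foo"
   is loaded from its GOT slot, roughly
	sethi	%hi(foo), %t	! wrapped in an UNSPEC as noted above
	or	%t, %lo(foo), %t
	ld	[%l7 + %t], %reg
   relative to the PIC register %l7.  */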
3589 /* Try machine-dependent ways of modifying an illegitimate address X
3590 to be legitimate. If we find one, return the new, valid address.
3592 OLDX is the address as it was before break_out_memory_refs was called.
3593 In some cases it is useful to look at this to decide what needs to be done.
3595 MODE is the mode of the operand pointed to by X.
3597 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3599 static rtx
3600 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3601 enum machine_mode mode)
3603 rtx orig_x = x;
3605 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3606 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3607 force_operand (XEXP (x, 0), NULL_RTX));
3608 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3609 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3610 force_operand (XEXP (x, 1), NULL_RTX));
3611 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3612 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3613 XEXP (x, 1));
3614 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3615 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3616 force_operand (XEXP (x, 1), NULL_RTX));
3618 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3619 return x;
3621 if (sparc_tls_referenced_p (x))
3622 x = sparc_legitimize_tls_address (x);
3623 else if (flag_pic)
3624 x = sparc_legitimize_pic_address (x, NULL_RTX);
3625 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3626 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3627 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3628 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3629 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3630 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3631 else if (GET_CODE (x) == SYMBOL_REF
3632 || GET_CODE (x) == CONST
3633 || GET_CODE (x) == LABEL_REF)
3634 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3636 return x;
3639 /* Delegitimize an address that was legitimized by the above function. */
3641 static rtx
3642 sparc_delegitimize_address (rtx x)
3644 x = delegitimize_mem_from_attrs (x);
3646 if (GET_CODE (x) == LO_SUM
3647 && GET_CODE (XEXP (x, 1)) == UNSPEC
3648 && XINT (XEXP (x, 1), 1) == UNSPEC_TLSLE)
3650 x = XVECEXP (XEXP (x, 1), 0, 0);
3651 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3654 return x;
3657 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3658 replace the input X, or the original X if no replacement is called for.
3659 The output parameter *WIN is 1 if the calling macro should goto WIN,
3660 0 if it should not.
3662 For SPARC, we wish to handle addresses by splitting them into
3663 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3664 This cuts the number of extra insns by one.
3666 Do nothing when generating PIC code and the address is a symbolic
3667 operand or requires a scratch register. */
3669 rtx
3670 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3671 int opnum, int type,
3672 int ind_levels ATTRIBUTE_UNUSED, int *win)
3674 /* Decompose SImode constants into HIGH+LO_SUM. */
3675 if (CONSTANT_P (x)
3676 && (mode != TFmode || TARGET_ARCH64)
3677 && GET_MODE (x) == SImode
3678 && GET_CODE (x) != LO_SUM
3679 && GET_CODE (x) != HIGH
3680 && sparc_cmodel <= CM_MEDLOW
3681 && !(flag_pic
3682 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3684 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3685 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3686 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3687 opnum, (enum reload_type)type);
3688 *win = 1;
3689 return x;
3692 /* We have to recognize what we have already generated above. */
3693 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3695 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3696 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3697 opnum, (enum reload_type)type);
3698 *win = 1;
3699 return x;
3702 *win = 0;
3703 return x;
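/* E.g. (sketch): when reload sees the bare address of a static
   variable "x" here (non-PIC, sparc_cmodel <= CM_MEDLOW), it is
   rewritten as (lo_sum (high (symbol_ref "x")) (symbol_ref "x"));
   the HIGH part is reloaded with a sethi and the final access is a
   single ld [%reg + %lo(x)].  */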
3706 /* Return true if ADDR (a legitimate address expression)
3707 has an effect that depends on the machine mode it is used for.
3709 In PIC mode,
3711 (mem:HI [%l7+a])
3713 is not equivalent to
3715 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3717 because [%l7+a+1] is interpreted as the address of (a+1). */
3720 static bool
3721 sparc_mode_dependent_address_p (const_rtx addr)
3723 if (flag_pic && GET_CODE (addr) == PLUS)
3725 rtx op0 = XEXP (addr, 0);
3726 rtx op1 = XEXP (addr, 1);
3727 if (op0 == pic_offset_table_rtx
3728 && SYMBOLIC_CONST (op1))
3729 return true;
3732 return false;
3735 #ifdef HAVE_GAS_HIDDEN
3736 # define USE_HIDDEN_LINKONCE 1
3737 #else
3738 # define USE_HIDDEN_LINKONCE 0
3739 #endif
3741 static void
3742 get_pc_thunk_name (char name[32], unsigned int regno)
3744 const char *pic_name = reg_names[regno];
3746 /* Skip the leading '%' as that cannot be used in a
3747 symbol name. */
3748 pic_name += 1;
3750 if (USE_HIDDEN_LINKONCE)
3751 sprintf (name, "__sparc_get_pc_thunk.%s", pic_name);
3752 else
3753 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3756 /* Emit code to load the PIC register. */
3758 static void
3759 load_pic_register (void)
3761 int orig_flag_pic = flag_pic;
3763 if (TARGET_VXWORKS_RTP)
3765 emit_insn (gen_vxworks_load_got ());
3766 emit_use (pic_offset_table_rtx);
3767 return;
3770 /* If we haven't initialized the special PIC symbols, do so now. */
3771 if (!pic_helper_needed)
3773 char name[32];
3775 pic_helper_needed = true;
3777 get_pc_thunk_name (name, REGNO (pic_offset_table_rtx));
3778 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3780 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3783 flag_pic = 0;
3784 if (TARGET_ARCH64)
3785 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3786 pic_helper_symbol));
3787 else
3788 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3789 pic_helper_symbol));
3790 flag_pic = orig_flag_pic;
3792 /* Need to emit this whether or not we obey regdecls,
3793 since setjmp/longjmp can cause life info to screw up.
3794 ??? In the case where we don't obey regdecls, this is not sufficient
3795 since we may not fall out the bottom. */
3796 emit_use (pic_offset_table_rtx);
3799 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3800 address of the call target. */
3802 void
3803 sparc_emit_call_insn (rtx pat, rtx addr)
3805 rtx insn;
3807 insn = emit_call_insn (pat);
3809 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3810 if (TARGET_VXWORKS_RTP
3811 && flag_pic
3812 && GET_CODE (addr) == SYMBOL_REF
3813 && (SYMBOL_REF_DECL (addr)
3814 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3815 : !SYMBOL_REF_LOCAL_P (addr)))
3817 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3818 crtl->uses_pic_offset_table = 1;
3822 /* Return 1 if RTX is a MEM which is known to be aligned to at
3823 least a DESIRED byte boundary. */
3825 int
3826 mem_min_alignment (rtx mem, int desired)
3828 rtx addr, base, offset;
3830 /* If it's not a MEM we can't accept it. */
3831 if (GET_CODE (mem) != MEM)
3832 return 0;
3834 /* Obviously... */
3835 if (!TARGET_UNALIGNED_DOUBLES
3836 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3837 return 1;
3839 /* ??? The rest of the function predates MEM_ALIGN so
3840 there is probably a bit of redundancy. */
3841 addr = XEXP (mem, 0);
3842 base = offset = NULL_RTX;
3843 if (GET_CODE (addr) == PLUS)
3845 if (GET_CODE (XEXP (addr, 0)) == REG)
3847 base = XEXP (addr, 0);
3849 /* The point here is that if the base REG is
3850 aligned properly, the compiler will ensure that
3851 any REG-based index off of it is aligned as
3852 well. */
3853 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3854 offset = XEXP (addr, 1);
3855 else
3856 offset = const0_rtx;
3859 else if (GET_CODE (addr) == REG)
3861 base = addr;
3862 offset = const0_rtx;
3865 if (base != NULL_RTX)
3867 int regno = REGNO (base);
3869 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3871 /* Check if the compiler has recorded some information
3872 about the alignment of the base REG. If reload has
3873 completed, we already matched with proper alignments.
3874 If not running global_alloc, reload might give us
3875 an unaligned pointer to the local stack, though. */
3876 if (((cfun != 0
3877 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3878 || (optimize && reload_completed))
3879 && (INTVAL (offset) & (desired - 1)) == 0)
3880 return 1;
3882 else
3884 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3885 return 1;
3888 else if (! TARGET_UNALIGNED_DOUBLES
3889 || CONSTANT_P (addr)
3890 || GET_CODE (addr) == LO_SUM)
3892 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3893 is true, in which case we can only assume that an access is aligned if
3894 it is to a constant address, or the address involves a LO_SUM. */
3895 return 1;
3898 /* An obviously unaligned address. */
3899 return 0;
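/* E.g. (informal): for (mem:DF (plus (reg %fp) (const_int 16))) and
   desired == 8, the offset test above subtracts SPARC_STACK_BIAS
   first: with the 32-bit ABI (bias 0) the access passes, while with
   the 64-bit ABI the 2047-byte bias makes the raw %fp value odd, so
   only offsets congruent to 2047 modulo 8 pass.  */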
3903 /* Vectors to keep interesting information about registers where it can easily
3904 be found. We used to use the actual mode value as the bit number, but there
3905 are more than 32 modes now. Instead we use two tables: one indexed by
3906 hard register number, and one indexed by mode. */
3908 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3909 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3910 mapped into one sparc_mode_class mode. */
3912 enum sparc_mode_class {
3913 S_MODE, D_MODE, T_MODE, O_MODE,
3914 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3915 CC_MODE, CCFP_MODE
3918 /* Modes for single-word and smaller quantities. */
3919 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3921 /* Modes for double-word and smaller quantities. */
3922 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
3924 /* Modes for quad-word and smaller quantities. */
3925 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3927 /* Modes for 8-word and smaller quantities. */
3928 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3930 /* Modes for single-float quantities. We must allow any single word or
3931 smaller quantity. This is because the fix/float conversion instructions
3932 take integer inputs/outputs from the float registers. */
3933 #define SF_MODES (S_MODES)
3935 /* Modes for double-float and smaller quantities. */
3936 #define DF_MODES (D_MODES)
3938 /* Modes for quad-float and smaller quantities. */
3939 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3941 /* Modes for quad-float pairs and smaller quantities. */
3942 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3944 /* Modes for double-float only quantities. */
3945 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3947 /* Modes for quad-float and double-float only quantities. */
3948 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3950 /* Modes for quad-float pairs and double-float only quantities. */
3951 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3953 /* Modes for condition codes. */
3954 #define CC_MODES (1 << (int) CC_MODE)
3955 #define CCFP_MODES (1 << (int) CCFP_MODE)
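/* For concreteness (editorial note), with the enum above running from
   S_MODE == 0 to CCFP_MODE == 9, the masks evaluate to:

     S_MODES  = 0x011   D_MODES  = 0x033   T_MODES  = 0x077   O_MODES  = 0x0ff
     SF_MODES = 0x011   DF_MODES = 0x033   TF_MODES = 0x073   OF_MODES = 0x0f3
     DF_MODES_NO_S = 0x022   TF_MODES_NO_S = 0x062   OF_MODES_NO_S = 0x0e2
     CC_MODES = 0x100   CCFP_MODES = 0x200

   so every entry in the tables below is simply a bitset over
   enum sparc_mode_class.  */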
3957 /* Value is 1 if register/mode pair is acceptable on sparc.
3958 The funny mixture of D and T modes is because integer operations
3959 do not specially operate on tetra quantities, so non-quad-aligned
3960 registers can hold quadword quantities (except %o4 and %i4 because
3961 they cross fixed registers). */
3963 /* This points to either the 32 bit or the 64 bit version. */
3964 const int *hard_regno_mode_classes;
3966 static const int hard_32bit_mode_classes[] = {
3967 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3968 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3969 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3970 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3972 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3973 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3974 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3975 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3977 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3978 and none can hold SFmode/SImode values. */
3979 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3980 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3981 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3982 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3984 /* %fcc[0123] */
3985 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3987 /* %icc */
3988 CC_MODES
3991 static const int hard_64bit_mode_classes[] = {
3992 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3993 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3994 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3995 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3997 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3998 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3999 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4000 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4002 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4003 and none can hold SFmode/SImode values. */
4004 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4005 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4006 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4007 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4009 /* %fcc[0123] */
4010 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4012 /* %icc */
4013 CC_MODES
4016 int sparc_mode_class [NUM_MACHINE_MODES];
4018 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4020 static void
4021 sparc_init_modes (void)
4023 int i;
4025 for (i = 0; i < NUM_MACHINE_MODES; i++)
4027 switch (GET_MODE_CLASS (i))
4029 case MODE_INT:
4030 case MODE_PARTIAL_INT:
4031 case MODE_COMPLEX_INT:
4032 if (GET_MODE_SIZE (i) <= 4)
4033 sparc_mode_class[i] = 1 << (int) S_MODE;
4034 else if (GET_MODE_SIZE (i) == 8)
4035 sparc_mode_class[i] = 1 << (int) D_MODE;
4036 else if (GET_MODE_SIZE (i) == 16)
4037 sparc_mode_class[i] = 1 << (int) T_MODE;
4038 else if (GET_MODE_SIZE (i) == 32)
4039 sparc_mode_class[i] = 1 << (int) O_MODE;
4040 else
4041 sparc_mode_class[i] = 0;
4042 break;
4043 case MODE_VECTOR_INT:
4044 if (GET_MODE_SIZE (i) <= 4)
4045 sparc_mode_class[i] = 1 << (int)SF_MODE;
4046 else if (GET_MODE_SIZE (i) == 8)
4047 sparc_mode_class[i] = 1 << (int)DF_MODE;
4048 break;
4049 case MODE_FLOAT:
4050 case MODE_COMPLEX_FLOAT:
4051 if (GET_MODE_SIZE (i) <= 4)
4052 sparc_mode_class[i] = 1 << (int) SF_MODE;
4053 else if (GET_MODE_SIZE (i) == 8)
4054 sparc_mode_class[i] = 1 << (int) DF_MODE;
4055 else if (GET_MODE_SIZE (i) == 16)
4056 sparc_mode_class[i] = 1 << (int) TF_MODE;
4057 else if (GET_MODE_SIZE (i) == 32)
4058 sparc_mode_class[i] = 1 << (int) OF_MODE;
4059 else
4060 sparc_mode_class[i] = 0;
4061 break;
4062 case MODE_CC:
4063 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4064 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4065 else
4066 sparc_mode_class[i] = 1 << (int) CC_MODE;
4067 break;
4068 default:
4069 sparc_mode_class[i] = 0;
4070 break;
4074 if (TARGET_ARCH64)
4075 hard_regno_mode_classes = hard_64bit_mode_classes;
4076 else
4077 hard_regno_mode_classes = hard_32bit_mode_classes;
4079 /* Initialize the array used by REGNO_REG_CLASS. */
4080 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4082 if (i < 16 && TARGET_V8PLUS)
4083 sparc_regno_reg_class[i] = I64_REGS;
4084 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4085 sparc_regno_reg_class[i] = GENERAL_REGS;
4086 else if (i < 64)
4087 sparc_regno_reg_class[i] = FP_REGS;
4088 else if (i < 96)
4089 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4090 else if (i < 100)
4091 sparc_regno_reg_class[i] = FPCC_REGS;
4092 else
4093 sparc_regno_reg_class[i] = NO_REGS;
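/* A minimal sketch of how the two tables cooperate (editorial note,
   assuming the usual definition of HARD_REGNO_MODE_OK in sparc.h):

     HARD_REGNO_MODE_OK (REGNO, MODE)
       == ((hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0)

   For example, on 32-bit, sparc_mode_class[DImode] is 1 << D_MODE, so
   DImode is only acceptable in hard registers whose mask above includes
   the D_MODE bit.  */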
4097 /* Compute the frame size required by the function. This function is called
4098 during the reload pass and also by sparc_expand_prologue. */
4100 HOST_WIDE_INT
4101 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4103 int outgoing_args_size = (crtl->outgoing_args_size
4104 + REG_PARM_STACK_SPACE (current_function_decl));
4105 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4106 int i;
4108 if (TARGET_ARCH64)
4110 for (i = 0; i < 8; i++)
4111 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4112 n_regs += 2;
4114 else
4116 for (i = 0; i < 8; i += 2)
4117 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4118 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4119 n_regs += 2;
4122 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4123 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4124 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4125 n_regs += 2;
4127 /* Set up values for use in prologue and epilogue. */
4128 num_gfregs = n_regs;
4130 if (leaf_function_p
4131 && n_regs == 0
4132 && size == 0
4133 && crtl->outgoing_args_size == 0)
4134 actual_fsize = apparent_fsize = 0;
4135 else
4137 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4138 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4139 apparent_fsize += n_regs * 4;
4140 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4143 /* Make sure nothing can clobber our register windows.
4144 If a SAVE must be done, or there is a stack-local variable,
4145 the register window area must be allocated. */
4146 if (! leaf_function_p || size > 0)
4147 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
4149 return SPARC_STACK_ALIGN (actual_fsize);
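/* Worked example (editorial sketch): for a 32-bit non-leaf function with
   40 bytes of locals, no call-saved registers live and no outgoing
   arguments, apparent_fsize is (40 - STARTING_FRAME_OFFSET + 7) & -8,
   and actual_fsize additionally gains the register window save area via
   FIRST_PARM_OFFSET because the function is not a leaf.  The
   "(x + 7) & -8" idiom rounds x up to the next multiple of 8,
   e.g. 41 -> 48.  */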
4152 /* Output any necessary .register pseudo-ops. */
4154 void
4155 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4157 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4158 int i;
4160 if (TARGET_ARCH32)
4161 return;
4163 /* Check if %g[2367] were used without
4164 .register being printed for them already. */
4165 for (i = 2; i < 8; i++)
4167 if (df_regs_ever_live_p (i)
4168 && ! sparc_hard_reg_printed [i])
4170 sparc_hard_reg_printed [i] = 1;
4171 /* %g7 is used as TLS base register, use #ignore
4172 for it instead of #scratch. */
4173 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4174 i == 7 ? "ignore" : "scratch");
4176 if (i == 3) i = 5;
4178 #endif
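/* For example (editorial note), if %g2 and %g7 are both live in a
   64-bit function, the loop above emits:

	.register	%g2, #scratch
	.register	%g7, #ignore

   The "if (i == 3) i = 5;" at the bottom skips %g4 and %g5, which take
   no .register directive.  */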
4181 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4183 #if PROBE_INTERVAL > 4096
4184 #error Cannot use indexed addressing mode for stack probing
4185 #endif
4187 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4188 inclusive. These are offsets from the current stack pointer.
4190 Note that we don't use the REG+REG addressing mode for the probes because
4191 of the stack bias in 64-bit mode. And it doesn't really buy us anything,
4192 so the advantage of having a single code path wins here. */
4194 static void
4195 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4197 rtx g1 = gen_rtx_REG (Pmode, 1);
4199 /* See if we have a constant small number of probes to generate. If so,
4200 that's the easy case. */
4201 if (size <= PROBE_INTERVAL)
4203 emit_move_insn (g1, GEN_INT (first));
4204 emit_insn (gen_rtx_SET (VOIDmode, g1,
4205 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4206 emit_stack_probe (plus_constant (g1, -size));
4209 /* The run-time loop is made up of 10 insns in the generic case while the
4210 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4211 else if (size <= 5 * PROBE_INTERVAL)
4213 HOST_WIDE_INT i;
4215 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4216 emit_insn (gen_rtx_SET (VOIDmode, g1,
4217 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4218 emit_stack_probe (g1);
4220 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4221 it exceeds SIZE. If only two probes are needed, this will not
4222 generate any code. Then probe at FIRST + SIZE. */
4223 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4225 emit_insn (gen_rtx_SET (VOIDmode, g1,
4226 plus_constant (g1, -PROBE_INTERVAL)));
4227 emit_stack_probe (g1);
4230 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4233 /* Otherwise, do the same as above, but in a loop. Note that we must be
4234 extra careful with variables wrapping around because we might be at
4235 the very top (or the very bottom) of the address space and we have
4236 to be able to handle this case properly; in particular, we use an
4237 equality test for the loop condition. */
4238 else
4240 HOST_WIDE_INT rounded_size;
4241 rtx g4 = gen_rtx_REG (Pmode, 4);
4243 emit_move_insn (g1, GEN_INT (first));
4246 /* Step 1: round SIZE to the previous multiple of the interval. */
4248 rounded_size = size & -PROBE_INTERVAL;
4249 emit_move_insn (g4, GEN_INT (rounded_size));
4252 /* Step 2: compute initial and final value of the loop counter. */
4254 /* TEST_ADDR = SP + FIRST. */
4255 emit_insn (gen_rtx_SET (VOIDmode, g1,
4256 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4258 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4259 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4262 /* Step 3: the loop
4264 while (TEST_ADDR != LAST_ADDR)
4266 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4267 probe at TEST_ADDR
4270 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4271 until it is equal to ROUNDED_SIZE. */
4273 if (TARGET_ARCH64)
4274 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4275 else
4276 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4279 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4280 that SIZE is equal to ROUNDED_SIZE. */
4282 if (size != rounded_size)
4283 emit_stack_probe (plus_constant (g4, rounded_size - size));
4286 /* Make sure nothing is scheduled before we are done. */
4287 emit_insn (gen_blockage ());
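/* Worked example (editorial sketch, ignoring the stack bias): with
   PROBE_INTERVAL == 4096, FIRST == 0 and SIZE == 10000, the unrolled
   branch above emits probes at SP - 4096 and SP - 8192, and, since
   10000 is not a multiple of 4096, a trailing probe at SP - 10000.
   Beyond 5 * PROBE_INTERVAL the same probes are generated by the
   run-time loop between SP - FIRST and SP - FIRST - ROUNDED_SIZE.  */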
4290 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4291 absolute addresses. */
4293 const char *
4294 output_probe_stack_range (rtx reg1, rtx reg2)
4296 static int labelno = 0;
4297 char loop_lab[32], end_lab[32];
4298 rtx xops[2];
4300 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4301 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4303 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4305 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4306 xops[0] = reg1;
4307 xops[1] = reg2;
4308 output_asm_insn ("cmp\t%0, %1", xops);
4309 if (TARGET_ARCH64)
4310 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4311 else
4312 fputs ("\tbe\t", asm_out_file);
4313 assemble_name_raw (asm_out_file, end_lab);
4314 fputc ('\n', asm_out_file);
4316 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4317 xops[1] = GEN_INT (-PROBE_INTERVAL);
4318 output_asm_insn (" add\t%0, %1, %0", xops);
4320 /* Probe at TEST_ADDR and branch. */
4321 if (TARGET_ARCH64)
4322 fputs ("\tba,pt\t%xcc,", asm_out_file);
4323 else
4324 fputs ("\tba\t", asm_out_file);
4325 assemble_name_raw (asm_out_file, loop_lab);
4326 fputc ('\n', asm_out_file);
4327 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4328 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4330 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4332 return "";
4335 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4336 as needed. LOW should be double-word aligned for 32-bit registers.
4337 Return the new OFFSET. */
4339 #define SORR_SAVE 0
4340 #define SORR_RESTORE 1
4342 static int
4343 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4345 rtx mem, insn;
4346 int i;
4348 if (TARGET_ARCH64 && high <= 32)
4350 for (i = low; i < high; i++)
4352 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4354 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4355 set_mem_alias_set (mem, sparc_sr_alias_set);
4356 if (action == SORR_SAVE)
4358 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4359 RTX_FRAME_RELATED_P (insn) = 1;
4361 else /* action == SORR_RESTORE */
4362 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4363 offset += 8;
4367 else
4369 for (i = low; i < high; i += 2)
4371 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4372 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4373 enum machine_mode mode;
4374 int regno;
4376 if (reg0 && reg1)
4378 mode = i < 32 ? DImode : DFmode;
4379 regno = i;
4381 else if (reg0)
4383 mode = i < 32 ? SImode : SFmode;
4384 regno = i;
4386 else if (reg1)
4388 mode = i < 32 ? SImode : SFmode;
4389 regno = i + 1;
4390 offset += 4;
4392 else
4393 continue;
4395 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4396 set_mem_alias_set (mem, sparc_sr_alias_set);
4397 if (action == SORR_SAVE)
4399 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4400 RTX_FRAME_RELATED_P (insn) = 1;
4402 else /* action == SORR_RESTORE */
4403 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4405 /* Always preserve double-word alignment. */
4406 offset = (offset + 7) & -8;
4410 return offset;
4413 /* Emit code to save call-saved registers. */
4415 static void
4416 emit_save_or_restore_regs (int action)
4418 HOST_WIDE_INT offset;
4419 rtx base;
4421 offset = frame_base_offset - apparent_fsize;
4423 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4425 /* ??? This might be optimized a little as %g1 might already have a
4426 value close enough that a single add insn will do. */
4427 /* ??? Although, all of this is probably only a temporary fix
4428 because if %g1 can hold a function result, then
4429 sparc_expand_epilogue will lose (the result will be
4430 clobbered). */
4431 base = gen_rtx_REG (Pmode, 1);
4432 emit_move_insn (base, GEN_INT (offset));
4433 emit_insn (gen_rtx_SET (VOIDmode,
4434 base,
4435 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4436 offset = 0;
4438 else
4439 base = frame_base_reg;
4441 offset = save_or_restore_regs (0, 8, base, offset, action);
4442 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4445 /* Generate a save_register_window insn. */
4447 static rtx
4448 gen_save_register_window (rtx increment)
4450 if (TARGET_ARCH64)
4451 return gen_save_register_windowdi (increment);
4452 else
4453 return gen_save_register_windowsi (increment);
4456 /* Generate an increment for the stack pointer. */
4458 static rtx
4459 gen_stack_pointer_inc (rtx increment)
4461 return gen_rtx_SET (VOIDmode,
4462 stack_pointer_rtx,
4463 gen_rtx_PLUS (Pmode,
4464 stack_pointer_rtx,
4465 increment));
4468 /* Generate a decrement for the stack pointer. */
4470 static rtx
4471 gen_stack_pointer_dec (rtx decrement)
4473 return gen_rtx_SET (VOIDmode,
4474 stack_pointer_rtx,
4475 gen_rtx_MINUS (Pmode,
4476 stack_pointer_rtx,
4477 decrement));
4480 /* Expand the function prologue. The prologue is responsible for reserving
4481 storage for the frame, saving the call-saved registers and loading the
4482 PIC register if needed. */
4484 void
4485 sparc_expand_prologue (void)
4487 rtx insn;
4488 int i;
4490 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4491 on the final value of the flag means deferring the prologue/epilogue
4492 expansion until just before the second scheduling pass, which is too
4493 late to emit multiple epilogues or return insns.
4495 Of course we are making the assumption that the value of the flag
4496 will not change between now and its final value. Of the three parts
4497 of the formula, only the last one can reasonably vary. Let's take a
4498 closer look, after assuming that the first two are set to true
4499 (otherwise the last value is effectively silenced).
4501 If only_leaf_regs_used returns false, the global predicate will also
4502 be false so the actual frame size calculated below will be positive.
4503 As a consequence, the save_register_window insn will be emitted in
4504 the instruction stream; now this insn explicitly references %fp
4505 which is not a leaf register so only_leaf_regs_used will always
4506 return false subsequently.
4508 If only_leaf_regs_used returns true, we hope that the subsequent
4509 optimization passes won't cause non-leaf registers to pop up. For
4510 example, the regrename pass has special provisions to not rename to
4511 non-leaf registers in a leaf function. */
4512 sparc_leaf_function_p
4513 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4515 /* Need to use actual_fsize, since we are also allocating
4516 space for our callee (and our own register save area). */
4517 actual_fsize
4518 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4520 /* Advertise that the data calculated just above are now valid. */
4521 sparc_prologue_data_valid_p = true;
4523 if (flag_stack_usage)
4524 current_function_static_stack_size = actual_fsize;
4526 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && actual_fsize)
4527 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, actual_fsize);
4529 if (sparc_leaf_function_p)
4531 frame_base_reg = stack_pointer_rtx;
4532 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4534 else
4536 frame_base_reg = hard_frame_pointer_rtx;
4537 frame_base_offset = SPARC_STACK_BIAS;
4540 if (actual_fsize == 0)
4541 /* do nothing. */ ;
4542 else if (sparc_leaf_function_p)
4544 if (actual_fsize <= 4096)
4545 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4546 else if (actual_fsize <= 8192)
4548 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4549 /* %sp is still the CFA register. */
4550 RTX_FRAME_RELATED_P (insn) = 1;
4551 insn
4552 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4554 else
4556 rtx reg = gen_rtx_REG (Pmode, 1);
4557 emit_move_insn (reg, GEN_INT (-actual_fsize));
4558 insn = emit_insn (gen_stack_pointer_inc (reg));
4559 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4560 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4563 RTX_FRAME_RELATED_P (insn) = 1;
4565 else
4567 if (actual_fsize <= 4096)
4568 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4569 else if (actual_fsize <= 8192)
4571 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4572 /* %sp is not the CFA register anymore. */
4573 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4575 else
4577 rtx reg = gen_rtx_REG (Pmode, 1);
4578 emit_move_insn (reg, GEN_INT (-actual_fsize));
4579 insn = emit_insn (gen_save_register_window (reg));
4582 RTX_FRAME_RELATED_P (insn) = 1;
4583 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4584 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4587 if (num_gfregs)
4588 emit_save_or_restore_regs (SORR_SAVE);
4590 /* Load the PIC register if needed. */
4591 if (flag_pic && crtl->uses_pic_offset_table)
4592 load_pic_register ();
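/* Summary of the code emitted above for a non-leaf function (editorial
   sketch; the save_register_window insn prints as a "save"):

     actual_fsize <= 4096:	save	%sp, -actual_fsize, %sp
     actual_fsize <= 8192:	save	%sp, -4096, %sp
				add	%sp, 4096 - actual_fsize, %sp
     otherwise:			mov	-actual_fsize, %g1 (or sethi/or)
				save	%sp, %g1, %sp

   Leaf functions emit plain stack pointer adjustments instead of a
   save, since no new register window is needed.  */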
4595 /* This function generates the assembly code for function entry, which boils
4596 down to emitting the necessary .register directives. */
4598 static void
4599 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4601 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4602 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4604 sparc_output_scratch_registers (file);
4607 /* Expand the function epilogue, either normal or part of a sibcall.
4608 We emit all the instructions except the return or the call. */
4610 void
4611 sparc_expand_epilogue (void)
4613 if (num_gfregs)
4614 emit_save_or_restore_regs (SORR_RESTORE);
4616 if (actual_fsize == 0)
4617 /* do nothing. */ ;
4618 else if (sparc_leaf_function_p)
4620 if (actual_fsize <= 4096)
4621 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4622 else if (actual_fsize <= 8192)
4624 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4625 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4627 else
4629 rtx reg = gen_rtx_REG (Pmode, 1);
4630 emit_move_insn (reg, GEN_INT (-actual_fsize));
4631 emit_insn (gen_stack_pointer_dec (reg));
4636 /* Return true if it is appropriate to emit `return' instructions in the
4637 body of a function. */
4639 bool
4640 sparc_can_use_return_insn_p (void)
4642 return sparc_prologue_data_valid_p
4643 && (actual_fsize == 0 || !sparc_leaf_function_p);
4646 /* This function generates the assembly code for function exit. */
4648 static void
4649 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4651 /* If the last two instructions of a function are "call foo; dslot;"
4652 the return address might point to the first instruction in the next
4653 function and we have to output a dummy nop for the sake of sane
4654 backtraces in such cases. This is pointless for sibling calls since
4655 the return address is explicitly adjusted. */
4657 rtx insn, last_real_insn;
4659 insn = get_last_insn ();
4661 last_real_insn = prev_real_insn (insn);
4662 if (last_real_insn
4663 && GET_CODE (last_real_insn) == INSN
4664 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4665 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4667 if (last_real_insn
4668 && CALL_P (last_real_insn)
4669 && !SIBLING_CALL_P (last_real_insn))
4670 fputs("\tnop\n", file);
4672 sparc_output_deferred_case_vectors ();
4675 /* Output a 'restore' instruction. */
4677 static void
4678 output_restore (rtx pat)
4680 rtx operands[3];
4682 if (! pat)
4684 fputs ("\t restore\n", asm_out_file);
4685 return;
4688 gcc_assert (GET_CODE (pat) == SET);
4690 operands[0] = SET_DEST (pat);
4691 pat = SET_SRC (pat);
4693 switch (GET_CODE (pat))
4695 case PLUS:
4696 operands[1] = XEXP (pat, 0);
4697 operands[2] = XEXP (pat, 1);
4698 output_asm_insn (" restore %r1, %2, %Y0", operands);
4699 break;
4700 case LO_SUM:
4701 operands[1] = XEXP (pat, 0);
4702 operands[2] = XEXP (pat, 1);
4703 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4704 break;
4705 case ASHIFT:
4706 operands[1] = XEXP (pat, 0);
4707 gcc_assert (XEXP (pat, 1) == const1_rtx);
4708 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4709 break;
4710 default:
4711 operands[1] = pat;
4712 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4713 break;
4717 /* Output a return. */
4719 const char *
4720 output_return (rtx insn)
4722 if (sparc_leaf_function_p)
4724 /* This is a leaf function so we don't have to bother restoring the
4725 register window, which frees us from dealing with the convoluted
4726 semantics of restore/return. We simply output the jump to the
4727 return address and the insn in the delay slot (if any). */
4729 gcc_assert (! crtl->calls_eh_return);
4731 return "jmp\t%%o7+%)%#";
4733 else
4735 /* This is a regular function so we have to restore the register window.
4736 We may have a pending insn for the delay slot, which will be either
4737 combined with the 'restore' instruction or put in the delay slot of
4738 the 'return' instruction. */
4740 if (crtl->calls_eh_return)
4742 /* If the function uses __builtin_eh_return, the eh_return
4743 machinery occupies the delay slot. */
4744 gcc_assert (! final_sequence);
4746 if (! flag_delayed_branch)
4747 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4749 if (TARGET_V9)
4750 fputs ("\treturn\t%i7+8\n", asm_out_file);
4751 else
4752 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4754 if (flag_delayed_branch)
4755 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4756 else
4757 fputs ("\t nop\n", asm_out_file);
4759 else if (final_sequence)
4761 rtx delay, pat;
4763 delay = NEXT_INSN (insn);
4764 gcc_assert (delay);
4766 pat = PATTERN (delay);
4768 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4770 epilogue_renumber (&pat, 0);
4771 return "return\t%%i7+%)%#";
4773 else
4775 output_asm_insn ("jmp\t%%i7+%)", NULL);
4776 output_restore (pat);
4777 PATTERN (delay) = gen_blockage ();
4778 INSN_CODE (delay) = -1;
4781 else
4783 /* The delay slot is empty. */
4784 if (TARGET_V9)
4785 return "return\t%%i7+%)\n\t nop";
4786 else if (flag_delayed_branch)
4787 return "jmp\t%%i7+%)\n\t restore";
4788 else
4789 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4793 return "";
4796 /* Output a sibling call. */
4798 const char *
4799 output_sibcall (rtx insn, rtx call_operand)
4801 rtx operands[1];
4803 gcc_assert (flag_delayed_branch);
4805 operands[0] = call_operand;
4807 if (sparc_leaf_function_p)
4809 /* This is a leaf function so we don't have to bother restoring the
4810 register window. We simply output the jump to the function and
4811 the insn in the delay slot (if any). */
4813 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4815 if (final_sequence)
4816 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4817 operands);
4818 else
4819 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4820 it into a branch if possible. */
4821 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4822 operands);
4824 else
4826 /* This is a regular function so we have to restore the register window.
4827 We may have a pending insn for the delay slot, which will be combined
4828 with the 'restore' instruction. */
4830 output_asm_insn ("call\t%a0, 0", operands);
4832 if (final_sequence)
4834 rtx delay = NEXT_INSN (insn);
4835 gcc_assert (delay);
4837 output_restore (PATTERN (delay));
4839 PATTERN (delay) = gen_blockage ();
4840 INSN_CODE (delay) = -1;
4842 else
4843 output_restore (NULL_RTX);
4846 return "";
4849 /* Functions for handling argument passing.
4851 For 32-bit, the first 6 args are normally in registers and the rest are
4852 pushed. Any arg that starts within the first 6 words is at least
4853 partially passed in a register unless its data type forbids it.
4855 For 64-bit, the argument registers are laid out as an array of 16 elements
4856 and arguments are added sequentially. The first 6 int args and up to the
4857 first 16 fp args (depending on size) are passed in regs.
4859    Slot    Stack   Integral   Float   Float in structure   Double   Long Double
4860    ----    -----   --------   -----   ------------------   ------   -----------
4861     15   [SP+248]              %f31       %f30,%f31         %d30
4862     14   [SP+240]              %f29       %f28,%f29         %d28       %q28
4863     13   [SP+232]              %f27       %f26,%f27         %d26
4864     12   [SP+224]              %f25       %f24,%f25         %d24       %q24
4865     11   [SP+216]              %f23       %f22,%f23         %d22
4866     10   [SP+208]              %f21       %f20,%f21         %d20       %q20
4867      9   [SP+200]              %f19       %f18,%f19         %d18
4868      8   [SP+192]              %f17       %f16,%f17         %d16       %q16
4869      7   [SP+184]              %f15       %f14,%f15         %d14
4870      6   [SP+176]              %f13       %f12,%f13         %d12       %q12
4871      5   [SP+168]     %o5      %f11       %f10,%f11         %d10
4872      4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
4873      3   [SP+152]     %o3       %f7        %f6,%f7           %d6
4874      2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
4875      1   [SP+136]     %o1       %f3        %f2,%f3           %d2
4876      0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0
4878 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4880 Integral arguments are always passed as 64-bit quantities appropriately
4881 extended.
4883 Passing of floating point values is handled as follows.
4884 If a prototype is in scope:
4885 If the value is in a named argument (i.e. not a stdarg function or a
4886 value not part of the `...') then the value is passed in the appropriate
4887 fp reg.
4888 If the value is part of the `...' and is passed in one of the first 6
4889 slots then the value is passed in the appropriate int reg.
4890 If the value is part of the `...' and is not passed in one of the first 6
4891 slots then the value is passed in memory.
4892 If a prototype is not in scope:
4893 If the value is one of the first 6 arguments the value is passed in the
4894 appropriate integer reg and the appropriate fp reg.
4895 If the value is not one of the first 6 arguments the value is passed in
4896 the appropriate fp reg and in memory.
4899 Summary of the calling conventions implemented by GCC on the SPARC:
4901    32-bit ABI:
4902                                 size      argument     return value
4904    small integer              <4       int. reg.      int. reg.
4905    word                        4       int. reg.      int. reg.
4906    double word                 8       int. reg.      int. reg.
4908    _Complex small integer     <8       int. reg.      int. reg.
4909    _Complex word               8       int. reg.      int. reg.
4910    _Complex double word       16       memory         int. reg.
4912    vector integer            <=8       int. reg.      FP reg.
4913    vector integer             >8       memory         memory
4915    float                       4       int. reg.      FP reg.
4916    double                      8       int. reg.      FP reg.
4917    long double                16       memory         memory
4919    _Complex float              8       memory         FP reg.
4920    _Complex double            16       memory         FP reg.
4921    _Complex long double       32       memory         FP reg.
4923    vector float              any       memory         memory
4925    aggregate                 any       memory         memory
4929    64-bit ABI:
4930                                 size      argument     return value
4932    small integer              <8       int. reg.      int. reg.
4933    word                        8       int. reg.      int. reg.
4934    double word                16       int. reg.      int. reg.
4936    _Complex small integer    <16       int. reg.      int. reg.
4937    _Complex word              16       int. reg.      int. reg.
4938    _Complex double word       32       memory         int. reg.
4940    vector integer           <=16       FP reg.        FP reg.
4941    vector integer       16<s<=32       memory         FP reg.
4942    vector integer            >32       memory         memory
4944    float                       4       FP reg.        FP reg.
4945    double                      8       FP reg.        FP reg.
4946    long double                16       FP reg.        FP reg.
4948    _Complex float              8       FP reg.        FP reg.
4949    _Complex double            16       FP reg.        FP reg.
4950    _Complex long double       32       memory         FP reg.
4952    vector float             <=16       FP reg.        FP reg.
4953    vector float         16<s<=32       memory         FP reg.
4954    vector float              >32       memory         memory
4956    aggregate                <=16       reg.           reg.
4957    aggregate            16<s<=32       memory         reg.
4958    aggregate                 >32       memory         memory
4962 Note #1: complex floating-point types follow the extended SPARC ABIs as
4963 implemented by the Sun compiler.
4965 Note #2: integral vector types follow the conventions of the scalar
4966 floating-point types to match what is implemented by the Sun VIS SDK.
4968 Note #3: floating-point vector types follow the conventions of the
4969 aggregate types. */
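/* Worked example of the 64-bit scheme (editorial note): for a
   prototyped function f (int i, double d, float f), slot 0 is the int
   in %o0, slot 1 is the double in %d2 and slot 2 is the float in %f5,
   per the table further up; a lone float is right-justified in its
   8-byte slot, hence the odd register number.  */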
4972 /* Maximum number of int regs for args. */
4973 #define SPARC_INT_ARG_MAX 6
4974 /* Maximum number of fp regs for args. */
4975 #define SPARC_FP_ARG_MAX 16
4977 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4979 /* Handle the INIT_CUMULATIVE_ARGS macro.
4980 Initialize a variable CUM of type CUMULATIVE_ARGS
4981 for a call to a function whose data type is FNTYPE.
4982 For a library call, FNTYPE is 0. */
4984 void
4985 init_cumulative_args (struct sparc_args *cum, tree fntype,
4986 rtx libname ATTRIBUTE_UNUSED,
4987 tree fndecl ATTRIBUTE_UNUSED)
4989 cum->words = 0;
4990 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4991 cum->libcall_p = fntype == 0;
4994 /* Handle promotion of pointer and integer arguments. */
4996 static enum machine_mode
4997 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4998 enum machine_mode mode,
4999 int *punsignedp ATTRIBUTE_UNUSED,
5000 const_tree fntype ATTRIBUTE_UNUSED,
5001 int for_return ATTRIBUTE_UNUSED)
5003 if (POINTER_TYPE_P (type))
5005 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5006 return Pmode;
5009 /* Integral arguments are passed as full words, as per the ABI. */
5010 if (GET_MODE_CLASS (mode) == MODE_INT
5011 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5012 return word_mode;
5014 return mode;
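/* E.g. (editorial note) a 'short' argument in HImode is promoted to a
   full word: SImode on 32-bit, DImode on 64-bit.  Pointers are likewise
   widened to Pmode, with the signedness dictated by
   POINTERS_EXTEND_UNSIGNED.  */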
5017 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5019 static bool
5020 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
5022 return TARGET_ARCH64 ? true : false;
5025 /* Scan the record type TYPE and return the following predicates:
5026 - INTREGS_P: the record contains at least one field or sub-field
5027 that is eligible for promotion in integer registers.
5028 - FP_REGS_P: the record contains at least one field or sub-field
5029 that is eligible for promotion in floating-point registers.
5030 - PACKED_P: the record contains at least one field that is packed.
5032 Sub-fields are not taken into account for the PACKED_P predicate. */
5034 static void
5035 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5036 int *packed_p)
5038 tree field;
5040 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5042 if (TREE_CODE (field) == FIELD_DECL)
5044 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5045 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5046 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5047 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5048 && TARGET_FPU)
5049 *fpregs_p = 1;
5050 else
5051 *intregs_p = 1;
5053 if (packed_p && DECL_PACKED (field))
5054 *packed_p = 1;
5059 /* Compute the slot number to pass an argument in.
5060 Return the slot number or -1 if passing on the stack.
5062 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5063 the preceding args and about the function being called.
5064 MODE is the argument's machine mode.
5065 TYPE is the data type of the argument (as a tree).
5066 This is null for libcalls where that information may
5067 not be available.
5068 NAMED is nonzero if this argument is a named parameter
5069 (otherwise it is an extra parameter matching an ellipsis).
5070 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5071 *PREGNO records the register number to use if scalar type.
5072 *PPADDING records the amount of padding needed in words. */
5074 static int
5075 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5076 const_tree type, bool named, bool incoming_p,
5077 int *pregno, int *ppadding)
5079 int regbase = (incoming_p
5080 ? SPARC_INCOMING_INT_ARG_FIRST
5081 : SPARC_OUTGOING_INT_ARG_FIRST);
5082 int slotno = cum->words;
5083 enum mode_class mclass;
5084 int regno;
5086 *ppadding = 0;
5088 if (type && TREE_ADDRESSABLE (type))
5089 return -1;
5091 if (TARGET_ARCH32
5092 && mode == BLKmode
5093 && type
5094 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5095 return -1;
5097 /* For SPARC64, objects requiring 16-byte alignment get it. */
5098 if (TARGET_ARCH64
5099 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5100 && (slotno & 1) != 0)
5101 slotno++, *ppadding = 1;
5103 mclass = GET_MODE_CLASS (mode);
5104 if (type && TREE_CODE (type) == VECTOR_TYPE)
5106 /* Vector types deserve special treatment because they are
5107 polymorphic wrt their mode, depending upon whether VIS
5108 instructions are enabled. */
5109 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5111 /* The SPARC port defines no floating-point vector modes. */
5112 gcc_assert (mode == BLKmode);
5114 else
5116 /* Integral vector types should either have a vector
5117 mode or an integral mode, because we are guaranteed
5118 by pass_by_reference that their size is not greater
5119 than 16 bytes and TImode is 16-byte wide. */
5120 gcc_assert (mode != BLKmode);
5122 /* Vector integers are handled like floats according to
5123 the Sun VIS SDK. */
5124 mclass = MODE_FLOAT;
5128 switch (mclass)
5130 case MODE_FLOAT:
5131 case MODE_COMPLEX_FLOAT:
5132 case MODE_VECTOR_INT:
5133 if (TARGET_ARCH64 && TARGET_FPU && named)
5135 if (slotno >= SPARC_FP_ARG_MAX)
5136 return -1;
5137 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5138 /* Arguments filling only a single FP register are
5139 right-justified in the outer double FP register. */
5140 if (GET_MODE_SIZE (mode) <= 4)
5141 regno++;
5142 break;
5144 /* fallthrough */
5146 case MODE_INT:
5147 case MODE_COMPLEX_INT:
5148 if (slotno >= SPARC_INT_ARG_MAX)
5149 return -1;
5150 regno = regbase + slotno;
5151 break;
5153 case MODE_RANDOM:
5154 if (mode == VOIDmode)
5155 /* MODE is VOIDmode when generating the actual call. */
5156 return -1;
5158 gcc_assert (mode == BLKmode);
5160 if (TARGET_ARCH32
5161 || !type
5162 || (TREE_CODE (type) != VECTOR_TYPE
5163 && TREE_CODE (type) != RECORD_TYPE))
5165 if (slotno >= SPARC_INT_ARG_MAX)
5166 return -1;
5167 regno = regbase + slotno;
5169 else /* TARGET_ARCH64 && type */
5171 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5173 /* First see what kinds of registers we would need. */
5174 if (TREE_CODE (type) == VECTOR_TYPE)
5175 fpregs_p = 1;
5176 else
5177 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5179 /* The ABI obviously doesn't specify how packed structures
5180 are passed. These are defined to be passed in int regs
5181 if possible, otherwise memory. */
5182 if (packed_p || !named)
5183 fpregs_p = 0, intregs_p = 1;
5185 /* If all arg slots are filled, then must pass on stack. */
5186 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5187 return -1;
5189 /* If there are only int args and all int arg slots are filled,
5190 then must pass on stack. */
5191 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5192 return -1;
5194 /* Note that even if all int arg slots are filled, fp members may
5195 still be passed in regs if such regs are available.
5196 *PREGNO isn't set because there may be more than one register; it's
5197 up to the caller to compute them. */
5198 return slotno;
5200 break;
5202 default :
5203 gcc_unreachable ();
5206 *pregno = regno;
5207 return slotno;
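/* Worked example (editorial sketch, TARGET_ARCH64): for the third
   argument of f (int, int, double), CUM->words is 2 on entry, so
   SLOTNO == 2 and the MODE_FLOAT case above yields
   *PREGNO == SPARC_FP_ARG_FIRST + 4, i.e. %f4, placing the double in
   %d4 as in the argument passing table further up.  */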
5210 /* Handle recursive register counting for structure field layout. */
5212 struct function_arg_record_value_parms
5214 rtx ret; /* return expression being built. */
5215 int slotno; /* slot number of the argument. */
5216 int named; /* whether the argument is named. */
5217 int regbase; /* regno of the base register. */
5218 int stack; /* 1 if part of the argument is on the stack. */
5219 int intoffset; /* offset of the first pending integer field. */
5220 unsigned int nregs; /* number of words passed in registers. */
5223 static void function_arg_record_value_3
5224 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5225 static void function_arg_record_value_2
5226 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5227 static void function_arg_record_value_1
5228 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5229 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5230 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5232 /* A subroutine of function_arg_record_value. Traverse the structure
5233 recursively and determine how many registers will be required. */
5235 static void
5236 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5237 struct function_arg_record_value_parms *parms,
5238 bool packed_p)
5240 tree field;
5242 /* We need to compute how many registers are needed so we can
5243 allocate the PARALLEL but before we can do that we need to know
5244 whether there are any packed fields. The ABI obviously doesn't
5245 specify how structures are passed in this case, so they are
5246 defined to be passed in int regs if possible, otherwise memory,
5247 regardless of whether there are fp values present. */
5249 if (! packed_p)
5250 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5252 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5254 packed_p = true;
5255 break;
5259 /* Compute how many registers we need. */
5260 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5262 if (TREE_CODE (field) == FIELD_DECL)
5264 HOST_WIDE_INT bitpos = startbitpos;
5266 if (DECL_SIZE (field) != 0)
5268 if (integer_zerop (DECL_SIZE (field)))
5269 continue;
5271 if (host_integerp (bit_position (field), 1))
5272 bitpos += int_bit_position (field);
5275 /* ??? FIXME: else assume zero offset. */
5277 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5278 function_arg_record_value_1 (TREE_TYPE (field),
5279 bitpos,
5280 parms,
5281 packed_p);
5282 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5283 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5284 && TARGET_FPU
5285 && parms->named
5286 && ! packed_p)
5288 if (parms->intoffset != -1)
5290 unsigned int startbit, endbit;
5291 int intslots, this_slotno;
5293 startbit = parms->intoffset & -BITS_PER_WORD;
5294 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5296 intslots = (endbit - startbit) / BITS_PER_WORD;
5297 this_slotno = parms->slotno + parms->intoffset
5298 / BITS_PER_WORD;
5300 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5302 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5303 /* We need to pass this field on the stack. */
5304 parms->stack = 1;
5307 parms->nregs += intslots;
5308 parms->intoffset = -1;
5311 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5312 If it wasn't true we wouldn't be here. */
5313 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5314 && DECL_MODE (field) == BLKmode)
5315 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5316 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5317 parms->nregs += 2;
5318 else
5319 parms->nregs += 1;
5321 else
5323 if (parms->intoffset == -1)
5324 parms->intoffset = bitpos;
5330 /* A subroutine of function_arg_record_value. Assign the bits of the
5331 structure between parms->intoffset and bitpos to integer registers. */
5333 static void
5334 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5335 struct function_arg_record_value_parms *parms)
5337 enum machine_mode mode;
5338 unsigned int regno;
5339 unsigned int startbit, endbit;
5340 int this_slotno, intslots, intoffset;
5341 rtx reg;
5343 if (parms->intoffset == -1)
5344 return;
5346 intoffset = parms->intoffset;
5347 parms->intoffset = -1;
5349 startbit = intoffset & -BITS_PER_WORD;
5350 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5351 intslots = (endbit - startbit) / BITS_PER_WORD;
5352 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5354 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5355 if (intslots <= 0)
5356 return;
5358 /* If this is the trailing part of a word, only load that much into
5359 the register. Otherwise load the whole register. Note that in
5360 the latter case we may pick up unwanted bits. It's not a problem
5361 at the moment, but we may wish to revisit this. */
5363 if (intoffset % BITS_PER_WORD != 0)
5364 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5365 MODE_INT);
5366 else
5367 mode = word_mode;
5369 intoffset /= BITS_PER_UNIT;
5372 regno = parms->regbase + this_slotno;
5373 reg = gen_rtx_REG (mode, regno);
5374 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5375 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5377 this_slotno += 1;
5378 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5379 mode = word_mode;
5380 parms->nregs += 1;
5381 intslots -= 1;
5383 while (intslots > 0);
5386 /* A subroutine of function_arg_record_value. Traverse the structure
5387 recursively and assign bits to floating point registers. Track which
5388 bits in between need integer registers; invoke function_arg_record_value_3
5389 to make that happen. */
5391 static void
5392 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5393 struct function_arg_record_value_parms *parms,
5394 bool packed_p)
5396 tree field;
5398 if (! packed_p)
5399 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5401 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5403 packed_p = true;
5404 break;
5408 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5410 if (TREE_CODE (field) == FIELD_DECL)
5412 HOST_WIDE_INT bitpos = startbitpos;
5414 if (DECL_SIZE (field) != 0)
5416 if (integer_zerop (DECL_SIZE (field)))
5417 continue;
5419 if (host_integerp (bit_position (field), 1))
5420 bitpos += int_bit_position (field);
5423 /* ??? FIXME: else assume zero offset. */
5425 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5426 function_arg_record_value_2 (TREE_TYPE (field),
5427 bitpos,
5428 parms,
5429 packed_p);
5430 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5431 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5432 && TARGET_FPU
5433 && parms->named
5434 && ! packed_p)
5436 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5437 int regno, nregs, pos;
5438 enum machine_mode mode = DECL_MODE (field);
5439 rtx reg;
5441 function_arg_record_value_3 (bitpos, parms);
5443 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5444 && mode == BLKmode)
5446 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5447 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5449 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5451 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5452 nregs = 2;
5454 else
5455 nregs = 1;
5457 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5458 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5459 regno++;
5460 reg = gen_rtx_REG (mode, regno);
5461 pos = bitpos / BITS_PER_UNIT;
5462 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5463 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5464 parms->nregs += 1;
5465 while (--nregs > 0)
5467 regno += GET_MODE_SIZE (mode) / 4;
5468 reg = gen_rtx_REG (mode, regno);
5469 pos += GET_MODE_SIZE (mode);
5470 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5471 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5472 parms->nregs += 1;
5475 else
5477 if (parms->intoffset == -1)
5478 parms->intoffset = bitpos;
5484 /* Used by function_arg and sparc_function_value_1 to implement the complex
5485 conventions of the 64-bit ABI for passing and returning structures.
5486 Return an expression valid as a return value for the FUNCTION_ARG
5487 and TARGET_FUNCTION_VALUE.
5489 TYPE is the data type of the argument (as a tree).
5490 This is null for libcalls where that information may
5491 not be available.
5492 MODE is the argument's machine mode.
5493 SLOTNO is the index number of the argument's slot in the parameter array.
5494 NAMED is nonzero if this argument is a named parameter
5495 (otherwise it is an extra parameter matching an ellipsis).
5496 REGBASE is the regno of the base register for the parameter array. */
5498 static rtx
5499 function_arg_record_value (const_tree type, enum machine_mode mode,
5500 int slotno, int named, int regbase)
5502 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5503 struct function_arg_record_value_parms parms;
5504 unsigned int nregs;
5506 parms.ret = NULL_RTX;
5507 parms.slotno = slotno;
5508 parms.named = named;
5509 parms.regbase = regbase;
5510 parms.stack = 0;
5512 /* Compute how many registers we need. */
5513 parms.nregs = 0;
5514 parms.intoffset = 0;
5515 function_arg_record_value_1 (type, 0, &parms, false);
5517 /* Take into account pending integer fields. */
5518 if (parms.intoffset != -1)
5520 unsigned int startbit, endbit;
5521 int intslots, this_slotno;
5523 startbit = parms.intoffset & -BITS_PER_WORD;
5524 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5525 intslots = (endbit - startbit) / BITS_PER_WORD;
5526 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5528 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5530 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5531 /* We need to pass this field on the stack. */
5532 parms.stack = 1;
5535 parms.nregs += intslots;
5537 nregs = parms.nregs;
5539 /* Allocate the vector and handle some annoying special cases. */
5540 if (nregs == 0)
5542 /* ??? Empty structure has no value? Duh? */
5543 if (typesize <= 0)
5545 /* Though there's nothing really to store, return a word register
5546 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5547 leads to breakage due to the fact that there are zero bytes to
5548 load. */
5549 return gen_rtx_REG (mode, regbase);
5551 else
5553 /* ??? C++ has structures with no fields, and yet a size. Give up
5554 for now and pass everything back in integer registers. */
5555 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5557 if (nregs + slotno > SPARC_INT_ARG_MAX)
5558 nregs = SPARC_INT_ARG_MAX - slotno;
5560 gcc_assert (nregs != 0);
5562 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5564 /* If at least one field must be passed on the stack, generate
5565 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5566 also be passed on the stack. We can't do much better because the
5567 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5568 of structures for which the fields passed exclusively in registers
5569 are not at the beginning of the structure. */
5570 if (parms.stack)
5571 XVECEXP (parms.ret, 0, 0)
5572 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5574 /* Fill in the entries. */
5575 parms.nregs = 0;
5576 parms.intoffset = 0;
5577 function_arg_record_value_2 (type, 0, &parms, false);
5578 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5580 gcc_assert (parms.nregs == nregs);
5582 return parms.ret;
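/* Worked example (editorial sketch): for struct { float x; float y; }
   passed named in slot 0, function_arg_record_value_2 assigns X to %f0
   at offset 0 and Y to %f1 at offset 4 (bit 32 of BITPOS is set, so
   the register number is bumped to the odd half of the pair), giving

     (parallel [(expr_list (reg:SF %f0) (const_int 0))
		(expr_list (reg:SF %f1) (const_int 4))])  */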
5585 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5586 of the 64-bit ABI for passing and returning unions.
5587 Return an expression valid as a return value for the FUNCTION_ARG
5588 and TARGET_FUNCTION_VALUE.
5590 SIZE is the size in bytes of the union.
5591 MODE is the argument's machine mode.
5592 REGNO is the hard register the union will be passed in. */
5594 static rtx
5595 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5596 int regno)
5598 int nwords = ROUND_ADVANCE (size), i;
5599 rtx regs;
5601 /* See comment in previous function for empty structures. */
5602 if (nwords == 0)
5603 return gen_rtx_REG (mode, regno);
5605 if (slotno == SPARC_INT_ARG_MAX - 1)
5606 nwords = 1;
5608 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5610 for (i = 0; i < nwords; i++)
5612 /* Unions are passed left-justified. */
5613 XVECEXP (regs, 0, i)
5614 = gen_rtx_EXPR_LIST (VOIDmode,
5615 gen_rtx_REG (word_mode, regno),
5616 GEN_INT (UNITS_PER_WORD * i));
5617 regno++;
5620 return regs;
5623 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5624 for passing and returning large (BLKmode) vectors.
5625 Return an expression valid as a return value for the FUNCTION_ARG
5626 and TARGET_FUNCTION_VALUE.
5628 SIZE is the size in bytes of the vector (at least 8 bytes).
5629 REGNO is the FP hard register the vector will be passed in. */
5631 static rtx
5632 function_arg_vector_value (int size, int regno)
5634 int i, nregs = size / 8;
5635 rtx regs;
5637 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5639 for (i = 0; i < nregs; i++)
5641 XVECEXP (regs, 0, i)
5642 = gen_rtx_EXPR_LIST (VOIDmode,
5643 gen_rtx_REG (DImode, regno + 2*i),
5644 GEN_INT (i*8));
5647 return regs;
5650 /* Determine where to put an argument to a function.
5651 Value is zero to push the argument on the stack,
5652 or a hard register in which to store the argument.
5654 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5655 the preceding args and about the function being called.
5656 MODE is the argument's machine mode.
5657 TYPE is the data type of the argument (as a tree).
5658 This is null for libcalls where that information may
5659 not be available.
5660 NAMED is true if this argument is a named parameter
5661 (otherwise it is an extra parameter matching an ellipsis).
5662 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
5663 TARGET_FUNCTION_INCOMING_ARG. */
5665 static rtx
5666 sparc_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
5667 const_tree type, bool named, bool incoming_p)
5669 int regbase = (incoming_p
5670 ? SPARC_INCOMING_INT_ARG_FIRST
5671 : SPARC_OUTGOING_INT_ARG_FIRST);
5672 int slotno, regno, padding;
5673 enum mode_class mclass = GET_MODE_CLASS (mode);
5675 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5676 &regno, &padding);
5677 if (slotno == -1)
5678 return 0;
5680 /* Vector types deserve special treatment because they are polymorphic wrt
5681 their mode, depending upon whether VIS instructions are enabled. */
5682 if (type && TREE_CODE (type) == VECTOR_TYPE)
5684 HOST_WIDE_INT size = int_size_in_bytes (type);
5685 gcc_assert ((TARGET_ARCH32 && size <= 8)
5686 || (TARGET_ARCH64 && size <= 16));
5688 if (mode == BLKmode)
5689 return function_arg_vector_value (size,
5690 SPARC_FP_ARG_FIRST + 2*slotno);
5691 else
5692 mclass = MODE_FLOAT;
5695 if (TARGET_ARCH32)
5696 return gen_rtx_REG (mode, regno);
5698 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5699 and are promoted to registers if possible. */
5700 if (type && TREE_CODE (type) == RECORD_TYPE)
5702 HOST_WIDE_INT size = int_size_in_bytes (type);
5703 gcc_assert (size <= 16);
5705 return function_arg_record_value (type, mode, slotno, named, regbase);
5708 /* Unions up to 16 bytes in size are passed in integer registers. */
5709 else if (type && TREE_CODE (type) == UNION_TYPE)
5711 HOST_WIDE_INT size = int_size_in_bytes (type);
5712 gcc_assert (size <= 16);
5714 return function_arg_union_value (size, mode, slotno, regno);
5717 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5718 but also have the slot allocated for them.
5719 If no prototype is in scope fp values in register slots get passed
5720 in two places, either fp regs and int regs or fp regs and memory. */
5721 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5722 && SPARC_FP_REG_P (regno))
5724 rtx reg = gen_rtx_REG (mode, regno);
5725 if (cum->prototype_p || cum->libcall_p)
5727 /* "* 2" because fp reg numbers are recorded in 4 byte
5728 quantities. */
5729 #if 0
5730 /* ??? This will cause the value to be passed in the fp reg and
5731 in the stack. When a prototype exists we want to pass the
5732 value in the reg but reserve space on the stack. That's an
5733 optimization, and is deferred [for a bit]. */
5734 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5735 return gen_rtx_PARALLEL (mode,
5736 gen_rtvec (2,
5737 gen_rtx_EXPR_LIST (VOIDmode,
5738 NULL_RTX, const0_rtx),
5739 gen_rtx_EXPR_LIST (VOIDmode,
5740 reg, const0_rtx)));
5741 else
5742 #else
5743 /* ??? It seems that passing back a register even when past
5744 the area declared by REG_PARM_STACK_SPACE will allocate
5745 space appropriately, and will not copy the data onto the
5746 stack, exactly as we desire.
5748 This is due to locate_and_pad_parm being called in
5749 expand_call whenever reg_parm_stack_space > 0, which
5750 while beneficial to our example here, would seem to be
5751 in error from what had been intended. Ho hum... -- r~ */
5752 #endif
5753 return reg;
5755 else
5757 rtx v0, v1;
5759 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5761 int intreg;
5763 /* On incoming, we don't need to know that the value
5764 is passed in %f0 and %i0, and it confuses other parts
5765 causing needless spillage even on the simplest cases. */
5766 if (incoming_p)
5767 return reg;
5769 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5770 + (regno - SPARC_FP_ARG_FIRST) / 2);
5772 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5773 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5774 const0_rtx);
5775 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5777 else
5779 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5780 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5781 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5786 /* All other aggregate types are passed in an integer register in a mode
5787 corresponding to the size of the type. */
5788 else if (type && AGGREGATE_TYPE_P (type))
5790 HOST_WIDE_INT size = int_size_in_bytes (type);
5791 gcc_assert (size <= 16);
5793 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5796 return gen_rtx_REG (mode, regno);
5799 /* Handle the TARGET_FUNCTION_ARG target hook. */
5801 static rtx
5802 sparc_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5803 const_tree type, bool named)
5805 return sparc_function_arg_1 (cum, mode, type, named, false);
5808 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
5810 static rtx
5811 sparc_function_incoming_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5812 const_tree type, bool named)
5814 return sparc_function_arg_1 (cum, mode, type, named, true);
5817 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
5819 static unsigned int
5820 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
5822 return ((TARGET_ARCH64
5823 && (GET_MODE_ALIGNMENT (mode) == 128
5824 || (type && TYPE_ALIGN (type) == 128)))
5825 ? 128
5826 : PARM_BOUNDARY);
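/* So, for instance, a 'long double' argument (TFmode, 128-bit alignment)
   gets a 16-byte slot boundary on TARGET_ARCH64, while everything else
   keeps the default PARM_BOUNDARY.  Illustrative reading of the hook
   above, not an additional rule.  */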
5829 /* For an arg passed partly in registers and partly in memory,
5830 this is the number of bytes of registers used.
5831 For args passed entirely in registers or entirely in memory, zero.
5833 Any arg that starts in the first 6 regs but won't entirely fit in them
5834 needs partial registers on v8. On v9, structures with integer
5835 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5836 values that begin in the last fp reg [where "last fp reg" varies with the
5837 mode] will be split between that reg and memory. */
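/* A worked example, assuming TARGET_ARCH32 with UNITS_PER_WORD == 4 and
   SPARC_INT_ARG_MAX == 6: a DImode argument assigned to slot 5 needs
   slots 5 and 6, but only slots 0-5 are registers, so the hook below
   returns (6 - 5) * 4 == 4 bytes in %o5, with the remaining 4 bytes
   in memory.  */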
5839 static int
5840 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5841 tree type, bool named)
5843 int slotno, regno, padding;
5845 /* We pass false for incoming_p here; it doesn't matter. */
5846 slotno = function_arg_slotno (cum, mode, type, named, false,
5847 &regno, &padding);
5849 if (slotno == -1)
5850 return 0;
5852 if (TARGET_ARCH32)
5854 if ((slotno + (mode == BLKmode
5855 ? ROUND_ADVANCE (int_size_in_bytes (type))
5856 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5857 > SPARC_INT_ARG_MAX)
5858 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5860 else
5862 /* We are guaranteed by pass_by_reference that the size of the
5863 argument is not greater than 16 bytes, so we only need to return
5864 one word if the argument is partially passed in registers. */
5866 if (type && AGGREGATE_TYPE_P (type))
5868 int size = int_size_in_bytes (type);
5870 if (size > UNITS_PER_WORD
5871 && slotno == SPARC_INT_ARG_MAX - 1)
5872 return UNITS_PER_WORD;
5874 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5875 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5876 && ! (TARGET_FPU && named)))
5878 /* The complex types are passed as packed types. */
5879 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5880 && slotno == SPARC_INT_ARG_MAX - 1)
5881 return UNITS_PER_WORD;
5883 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5885 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5886 > SPARC_FP_ARG_MAX)
5887 return UNITS_PER_WORD;
5891 return 0;
5894 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5895 Specify whether to pass the argument by reference. */
5897 static bool
5898 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5899 enum machine_mode mode, const_tree type,
5900 bool named ATTRIBUTE_UNUSED)
5902 if (TARGET_ARCH32)
5903 /* Original SPARC 32-bit ABI says that structures and unions,
5904 and quad-precision floats are passed by reference. For Pascal,
5905 also pass arrays by reference. All other base types are passed
5906 in registers.
5908 Extended ABI (as implemented by the Sun compiler) says that all
5909 complex floats are passed by reference. Pass complex integers
5910 in registers up to 8 bytes. More generally, enforce the 2-word
5911 cap for passing arguments in registers.
5913 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5914 integers are passed like floats of the same size, that is in
5915 registers up to 8 bytes. Pass all vector floats by reference
5916 like structure and unions. */
5917 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5918 || mode == SCmode
5919 /* Catch CDImode, TFmode, DCmode and TCmode. */
5920 || GET_MODE_SIZE (mode) > 8
5921 || (type
5922 && TREE_CODE (type) == VECTOR_TYPE
5923 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5924 else
5925 /* Original SPARC 64-bit ABI says that structures and unions
5926 smaller than 16 bytes are passed in registers, as well as
5927 all other base types.
5929 Extended ABI (as implemented by the Sun compiler) says that
5930 complex floats are passed in registers up to 16 bytes. Pass
5931 all complex integers in registers up to 16 bytes. More generally,
5932 enforce the 2-word cap for passing arguments in registers.
5934 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5935 integers are passed like floats of the same size, that is in
5936 registers (up to 16 bytes). Pass all vector floats like structure
5937 and unions. */
5938 return ((type
5939 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5940 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5941 /* Catch CTImode and TCmode. */
5942 || GET_MODE_SIZE (mode) > 16);
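/* Illustrative consequences of the rules above (assuming the usual type
   sizes): in 32-bit mode a 'struct { int a, b; }', a '_Complex float'
   (SCmode) and a 'long double' (TFmode) all go by reference, while a
   'long long' (8 bytes, not an aggregate) still goes in registers.
   In 64-bit mode a 24-byte struct goes by reference, but a 16-byte one
   is passed in registers.  */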
5945 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
5946 Update the data in CUM to advance over an argument
5947 of mode MODE and data type TYPE.
5948 TYPE is null for libcalls where that information may not be available. */
5950 static void
5951 sparc_function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5952 const_tree type, bool named)
5954 int regno, padding;
5956 /* We pass false for incoming_p here; it doesn't matter. */
5957 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
5959 /* If the argument requires leading padding, add it. */
5960 cum->words += padding;
5962 if (TARGET_ARCH32)
5964 cum->words += (mode != BLKmode
5965 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5966 : ROUND_ADVANCE (int_size_in_bytes (type)));
5968 else
5970 if (type && AGGREGATE_TYPE_P (type))
5972 int size = int_size_in_bytes (type);
5974 if (size <= 8)
5975 ++cum->words;
5976 else if (size <= 16)
5977 cum->words += 2;
5978 else /* passed by reference */
5979 ++cum->words;
5981 else
5983 cum->words += (mode != BLKmode
5984 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5985 : ROUND_ADVANCE (int_size_in_bytes (type)));
5990 /* Handle the FUNCTION_ARG_PADDING macro.
5991 For the 64 bit ABI structs are always stored left shifted in their
5992 argument slot. */
5994 enum direction
5995 function_arg_padding (enum machine_mode mode, const_tree type)
5997 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5998 return upward;
6000 /* Fall back to the default. */
6001 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6004 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6005 Specify whether to return the return value in memory. */
6007 static bool
6008 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6010 if (TARGET_ARCH32)
6011 /* Original SPARC 32-bit ABI says that structures and unions,
6012 and quad-precision floats are returned in memory. All other
6013 base types are returned in registers.
6015 Extended ABI (as implemented by the Sun compiler) says that
6016 all complex floats are returned in registers (8 FP registers
6017 at most for '_Complex long double'). Return all complex integers
6018 in registers (4 at most for '_Complex long long').
6020 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6021 integers are returned like floats of the same size, that is in
6022 registers up to 8 bytes and in memory otherwise. Return all
6023 vector floats in memory like structure and unions; note that
6024 they always have BLKmode like the latter. */
6025 return (TYPE_MODE (type) == BLKmode
6026 || TYPE_MODE (type) == TFmode
6027 || (TREE_CODE (type) == VECTOR_TYPE
6028 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6029 else
6030 /* Original SPARC 64-bit ABI says that structures and unions
6031 smaller than 32 bytes are returned in registers, as well as
6032 all other base types.
6034 Extended ABI (as implemented by the Sun compiler) says that all
6035 complex floats are returned in registers (8 FP registers at most
6036 for '_Complex long double'). Return all complex integers in
6037 registers (4 at most for '_Complex TItype').
6039 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6040 integers are returned like floats of the same size, that is in
6041 registers. Return all vector floats like structure and unions;
6042 note that they always have BLKmode like the latter. */
6043 return (TYPE_MODE (type) == BLKmode
6044 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
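/* For example (usual type sizes assumed): in 32-bit mode a 'long double'
   (TFmode) and any struct (BLKmode) are returned in memory, whereas in
   64-bit mode a 24-byte struct is still returned in registers and only
   aggregates larger than 32 bytes go to memory.  */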
6047 /* Handle the TARGET_STRUCT_VALUE target hook.
6048 Return where to find the structure return value address. */
6050 static rtx
6051 sparc_struct_value_rtx (tree fndecl, int incoming)
6053 if (TARGET_ARCH64)
6054 return 0;
6055 else
6057 rtx mem;
6059 if (incoming)
6060 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
6061 STRUCT_VALUE_OFFSET));
6062 else
6063 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
6064 STRUCT_VALUE_OFFSET));
6066 /* Only follow the SPARC ABI for fixed-size structure returns.
6067 Variable-size structure returns are handled per the normal
6068 procedures in GCC. This is enabled by -mstd-struct-return. */
6069 if (incoming == 2
6070 && sparc_std_struct_return
6071 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6072 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6074 /* We must check and adjust the return address, as it is
6075 optional whether the return object is really
6076 provided. */
6077 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
6078 rtx scratch = gen_reg_rtx (SImode);
6079 rtx endlab = gen_label_rtx ();
6081 /* Calculate the return object size. */
6082 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6083 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6084 /* Construct a temporary return value. */
6085 rtx temp_val
6086 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6088 /* Implement SPARC 32-bit psABI callee return struct checking:
6090 Fetch the instruction where we will return to and see if
6091 it's an unimp instruction (the most significant 10 bits
6092 will be zero). */
6093 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6094 plus_constant (ret_rtx, 8)));
6095 /* Assume the size is valid and pre-adjust. */
6096 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
6097 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6098 0, endlab);
6099 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
6100 /* Write the address of the memory pointed to by temp_val into
6101 the memory pointed to by mem. */
6102 emit_move_insn (mem, XEXP (temp_val, 0));
6103 emit_label (endlab);
6106 set_mem_alias_set (mem, struct_value_alias_set);
6107 return mem;
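/* Caller-side picture of the protocol checked above (illustrative):

       call  foo
        nop               ! delay slot
       unimp 8            ! low 12 bits hold the struct size

   A callee that really returns a struct comes back to the call address
   + 12, hopping over the unimp word; the code above pre-adjusts the
   return address that way, and backs the adjustment out again when no
   matching unimp word is found, substituting a local temporary for the
   missing return object.  */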
6111 /* Handle the TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE target hooks.
6112 For v9, function return values are subject to the same rules as arguments,
6113 except that up to 32 bytes may be returned in registers. */
6115 static rtx
6116 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6117 bool outgoing)
6119 /* Beware that the two values are swapped here wrt function_arg. */
6120 int regbase = (outgoing
6121 ? SPARC_INCOMING_INT_ARG_FIRST
6122 : SPARC_OUTGOING_INT_ARG_FIRST);
6123 enum mode_class mclass = GET_MODE_CLASS (mode);
6124 int regno;
6126 /* Vector types deserve special treatment because they are polymorphic wrt
6127 their mode, depending upon whether VIS instructions are enabled. */
6128 if (type && TREE_CODE (type) == VECTOR_TYPE)
6130 HOST_WIDE_INT size = int_size_in_bytes (type);
6131 gcc_assert ((TARGET_ARCH32 && size <= 8)
6132 || (TARGET_ARCH64 && size <= 32));
6134 if (mode == BLKmode)
6135 return function_arg_vector_value (size,
6136 SPARC_FP_ARG_FIRST);
6137 else
6138 mclass = MODE_FLOAT;
6141 if (TARGET_ARCH64 && type)
6143 /* Structures up to 32 bytes in size are returned in registers. */
6144 if (TREE_CODE (type) == RECORD_TYPE)
6146 HOST_WIDE_INT size = int_size_in_bytes (type);
6147 gcc_assert (size <= 32);
6149 return function_arg_record_value (type, mode, 0, 1, regbase);
6152 /* Unions up to 32 bytes in size are returned in integer registers. */
6153 else if (TREE_CODE (type) == UNION_TYPE)
6155 HOST_WIDE_INT size = int_size_in_bytes (type);
6156 gcc_assert (size <= 32);
6158 return function_arg_union_value (size, mode, 0, regbase);
6161 /* Objects that require it are returned in FP registers. */
6162 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6165 /* All other aggregate types are returned in an integer register in a
6166 mode corresponding to the size of the type. */
6167 else if (AGGREGATE_TYPE_P (type))
6171 HOST_WIDE_INT size = int_size_in_bytes (type);
6172 gcc_assert (size <= 32);
6174 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6176 /* ??? We probably should have made the same ABI change in
6177 3.4.0 as the one we made for unions. The latter was
6178 required by the SCD though, while the former is not
6179 specified, so we favored compatibility and efficiency.
6181 Now we're stuck for aggregates larger than 16 bytes,
6182 because OImode vanished in the meantime. Let's not
6183 try to be unduly clever, and simply follow the ABI
6184 for unions in that case. */
6185 if (mode == BLKmode)
6186 return function_arg_union_value (size, mode, 0, regbase);
6187 else
6188 mclass = MODE_INT;
6191 /* We should only have pointer and integer types at this point. This
6192 must match sparc_promote_function_mode. */
6193 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6194 mode = word_mode;
6197 /* We should only have pointer and integer types at this point. This must
6198 match sparc_promote_function_mode. */
6199 else if (TARGET_ARCH32
6200 && mclass == MODE_INT
6201 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6202 mode = word_mode;
6204 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6205 regno = SPARC_FP_ARG_FIRST;
6206 else
6207 regno = regbase;
6209 return gen_rtx_REG (mode, regno);
6212 /* Handle TARGET_FUNCTION_VALUE.
6213 On the SPARC, the value is found in the first "output" register, but the
6214 called function leaves it in the first "input" register. */
6216 static rtx
6217 sparc_function_value (const_tree valtype,
6218 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6219 bool outgoing)
6221 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6224 /* Handle TARGET_LIBCALL_VALUE. */
6226 static rtx
6227 sparc_libcall_value (enum machine_mode mode,
6228 const_rtx fun ATTRIBUTE_UNUSED)
6230 return sparc_function_value_1 (NULL_TREE, mode, false);
6233 /* Handle FUNCTION_VALUE_REGNO_P.
6234 On the SPARC, the first "output" reg is used for integer values, and the
6235 first floating point register is used for floating point values. */
6237 static bool
6238 sparc_function_value_regno_p (const unsigned int regno)
6240 return (regno == 8 || regno == 32);
6243 /* Do what is necessary for `va_start'. We look at the current function
6244 to determine if stdarg or varargs is used and return the address of
6245 the first unnamed parameter. */
6247 static rtx
6248 sparc_builtin_saveregs (void)
6250 int first_reg = crtl->args.info.words;
6251 rtx address;
6252 int regno;
6254 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6255 emit_move_insn (gen_rtx_MEM (word_mode,
6256 gen_rtx_PLUS (Pmode,
6257 frame_pointer_rtx,
6258 GEN_INT (FIRST_PARM_OFFSET (0)
6259 + (UNITS_PER_WORD
6260 * regno)))),
6261 gen_rtx_REG (word_mode,
6262 SPARC_INCOMING_INT_ARG_FIRST + regno));
6264 address = gen_rtx_PLUS (Pmode,
6265 frame_pointer_rtx,
6266 GEN_INT (FIRST_PARM_OFFSET (0)
6267 + UNITS_PER_WORD * first_reg));
6269 return address;
6272 /* Implement `va_start' for stdarg. */
6274 static void
6275 sparc_va_start (tree valist, rtx nextarg)
6277 nextarg = expand_builtin_saveregs ();
6278 std_expand_builtin_va_start (valist, nextarg);
6281 /* Implement `va_arg' for stdarg. */
6283 static tree
6284 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6285 gimple_seq *post_p)
6287 HOST_WIDE_INT size, rsize, align;
6288 tree addr, incr;
6289 bool indirect;
6290 tree ptrtype = build_pointer_type (type);
6292 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6294 indirect = true;
6295 size = rsize = UNITS_PER_WORD;
6296 align = 0;
6298 else
6300 indirect = false;
6301 size = int_size_in_bytes (type);
6302 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6303 align = 0;
6305 if (TARGET_ARCH64)
6307 /* For SPARC64, objects requiring 16-byte alignment get it. */
6308 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6309 align = 2 * UNITS_PER_WORD;
6311 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6312 are left-justified in their slots. */
6313 if (AGGREGATE_TYPE_P (type))
6315 if (size == 0)
6316 size = rsize = UNITS_PER_WORD;
6317 else
6318 size = rsize;
6323 incr = valist;
6324 if (align)
6326 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6327 size_int (align - 1));
6328 incr = fold_convert (sizetype, incr);
6329 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6330 size_int (-align));
6331 incr = fold_convert (ptr_type_node, incr);
6334 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6335 addr = incr;
6337 if (BYTES_BIG_ENDIAN && size < rsize)
6338 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6339 size_int (rsize - size));
6341 if (indirect)
6343 addr = fold_convert (build_pointer_type (ptrtype), addr);
6344 addr = build_va_arg_indirect_ref (addr);
6347 /* If the address isn't aligned properly for the type, we need a temporary.
6348 FIXME: This is inefficient; usually we can do this in registers. */
6349 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6351 tree tmp = create_tmp_var (type, "va_arg_tmp");
6352 tree dest_addr = build_fold_addr_expr (tmp);
6353 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6354 3, dest_addr, addr, size_int (rsize));
6355 TREE_ADDRESSABLE (tmp) = 1;
6356 gimplify_and_add (copy, pre_p);
6357 addr = dest_addr;
6360 else
6361 addr = fold_convert (ptrtype, addr);
6363 incr
6364 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
6365 gimplify_assign (valist, incr, post_p);
6367 return build_va_arg_indirect_ref (addr);
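/* As a sketch of the lowering above, for a 16-byte type with 16-byte
   alignment on TARGET_ARCH64 (pseudo-C, names invented):

       incr = (ap + 15) & -16;     -- align == 2 * UNITS_PER_WORD
       addr = incr;
       ap   = incr + 16;           -- rsize
       ...use *addr...

   with the big-endian offset and the pass-by-reference indirection
   layered on top as in the code.  */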
6370 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6371 Specify whether the vector mode is supported by the hardware. */
6373 static bool
6374 sparc_vector_mode_supported_p (enum machine_mode mode)
6376 return TARGET_VIS && VECTOR_MODE_P (mode);
6379 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6381 static enum machine_mode
6382 sparc_preferred_simd_mode (enum machine_mode mode)
6384 if (TARGET_VIS)
6385 switch (mode)
6387 case SImode:
6388 return V2SImode;
6389 case HImode:
6390 return V4HImode;
6391 case QImode:
6392 return V8QImode;
6394 default:;
6397 return word_mode;
6400 /* Return the string to output an unconditional branch to LABEL, which is
6401 the operand number of the label.
6403 DEST is the destination insn (i.e. the label), INSN is the source. */
6405 const char *
6406 output_ubranch (rtx dest, int label, rtx insn)
6408 static char string[64];
6409 bool v9_form = false;
6410 char *p;
6412 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6414 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6415 - INSN_ADDRESSES (INSN_UID (insn)));
6416 /* Leave some instructions for "slop". */
6417 if (delta >= -260000 && delta < 260000)
6418 v9_form = true;
6421 if (v9_form)
6422 strcpy (string, "ba%*,pt\t%%xcc, ");
6423 else
6424 strcpy (string, "b%*\t");
6426 p = strchr (string, '\0');
6427 *p++ = '%';
6428 *p++ = 'l';
6429 *p++ = '0' + label;
6430 *p++ = '%';
6431 *p++ = '(';
6432 *p = '\0';
6434 return string;
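/* The returned template is thus "b%*\t%l0%(" or "ba%*,pt\t%%xcc, %l0%(".
   %l0 is operand 0 (the label); the %* and %( sequences are handled by
   print_operand below: %* adds the ",a" annul flag when the delay slot
   is unfilled and we are optimizing for a pre-V9 CPU, and %( emits the
   "\n\t nop" in the remaining unfilled cases.  */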
6437 /* Return the string to output a conditional branch to LABEL, which is
6438 the operand number of the label. OP is the conditional expression.
6439 XEXP (OP, 0) is assumed to be a condition code register (integer or
6440 floating point) and its mode specifies what kind of comparison we made.
6442 DEST is the destination insn (i.e. the label), INSN is the source.
6444 REVERSED is nonzero if we should reverse the sense of the comparison.
6446 ANNUL is nonzero if we should generate an annulling branch. */
6448 const char *
6449 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6450 rtx insn)
6452 static char string[64];
6453 enum rtx_code code = GET_CODE (op);
6454 rtx cc_reg = XEXP (op, 0);
6455 enum machine_mode mode = GET_MODE (cc_reg);
6456 const char *labelno, *branch;
6457 int spaces = 8, far;
6458 char *p;
6460 /* v9 branches are limited to +-1MB. If it is too far away,
6461 change
6463 bne,pt %xcc, .LC30
6465 to
6467 be,pn %xcc, .+12
6468 nop
6469 ba .LC30
6471 and
6473 fbne,a,pn %fcc2, .LC29
6475 to
6477 fbe,pt %fcc2, .+16
6478 nop
6479 ba .LC29 */
6481 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6482 if (reversed ^ far)
6484 /* Reversal of FP compares takes care -- an ordered compare
6485 becomes an unordered compare and vice versa. */
6486 if (mode == CCFPmode || mode == CCFPEmode)
6487 code = reverse_condition_maybe_unordered (code);
6488 else
6489 code = reverse_condition (code);
6492 /* Start by writing the branch condition. */
6493 if (mode == CCFPmode || mode == CCFPEmode)
6495 switch (code)
6497 case NE:
6498 branch = "fbne";
6499 break;
6500 case EQ:
6501 branch = "fbe";
6502 break;
6503 case GE:
6504 branch = "fbge";
6505 break;
6506 case GT:
6507 branch = "fbg";
6508 break;
6509 case LE:
6510 branch = "fble";
6511 break;
6512 case LT:
6513 branch = "fbl";
6514 break;
6515 case UNORDERED:
6516 branch = "fbu";
6517 break;
6518 case ORDERED:
6519 branch = "fbo";
6520 break;
6521 case UNGT:
6522 branch = "fbug";
6523 break;
6524 case UNLT:
6525 branch = "fbul";
6526 break;
6527 case UNEQ:
6528 branch = "fbue";
6529 break;
6530 case UNGE:
6531 branch = "fbuge";
6532 break;
6533 case UNLE:
6534 branch = "fbule";
6535 break;
6536 case LTGT:
6537 branch = "fblg";
6538 break;
6540 default:
6541 gcc_unreachable ();
6544 /* ??? !v9: FP branches cannot be preceded by another floating point
6545 insn. Because there is currently no concept of pre-delay slots,
6546 we can fix this only by always emitting a nop before a floating
6547 point branch. */
6549 string[0] = '\0';
6550 if (! TARGET_V9)
6551 strcpy (string, "nop\n\t");
6552 strcat (string, branch);
6554 else
6556 switch (code)
6558 case NE:
6559 branch = "bne";
6560 break;
6561 case EQ:
6562 branch = "be";
6563 break;
6564 case GE:
6565 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6566 branch = "bpos";
6567 else
6568 branch = "bge";
6569 break;
6570 case GT:
6571 branch = "bg";
6572 break;
6573 case LE:
6574 branch = "ble";
6575 break;
6576 case LT:
6577 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6578 branch = "bneg";
6579 else
6580 branch = "bl";
6581 break;
6582 case GEU:
6583 branch = "bgeu";
6584 break;
6585 case GTU:
6586 branch = "bgu";
6587 break;
6588 case LEU:
6589 branch = "bleu";
6590 break;
6591 case LTU:
6592 branch = "blu";
6593 break;
6595 default:
6596 gcc_unreachable ();
6598 strcpy (string, branch);
6600 spaces -= strlen (branch);
6601 p = strchr (string, '\0');
6603 /* Now add the annulling, the label, and a possible noop. */
6604 if (annul && ! far)
6606 strcpy (p, ",a");
6607 p += 2;
6608 spaces -= 2;
6611 if (TARGET_V9)
6613 rtx note;
6614 int v8 = 0;
6616 if (! far && insn && INSN_ADDRESSES_SET_P ())
6618 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6619 - INSN_ADDRESSES (INSN_UID (insn)));
6620 /* Leave some instructions for "slop". */
6621 if (delta < -260000 || delta >= 260000)
6622 v8 = 1;
6625 if (mode == CCFPmode || mode == CCFPEmode)
6627 static char v9_fcc_labelno[] = "%%fccX, ";
6628 /* Set the char indicating the number of the fcc reg to use. */
6629 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6630 labelno = v9_fcc_labelno;
6631 if (v8)
6633 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6634 labelno = "";
6637 else if (mode == CCXmode || mode == CCX_NOOVmode)
6639 labelno = "%%xcc, ";
6640 gcc_assert (! v8);
6642 else
6644 labelno = "%%icc, ";
6645 if (v8)
6646 labelno = "";
6649 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6651 strcpy (p,
6652 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6653 ? ",pt" : ",pn");
6654 p += 3;
6655 spaces -= 3;
6658 else
6659 labelno = "";
6661 if (spaces > 0)
6662 *p++ = '\t';
6663 else
6664 *p++ = ' ';
6665 strcpy (p, labelno);
6666 p = strchr (p, '\0');
6667 if (far)
6669 strcpy (p, ".+12\n\t nop\n\tb\t");
6670 /* Skip the next insn if requested or
6671 if we know that it will be a nop. */
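/* (Storing '6' over p[3] below turns the ".+12" just written into
   ".+16", so the inverted branch also steps over that extra insn.)  */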
6672 if (annul || ! final_sequence)
6673 p[3] = '6';
6674 p += 14;
6676 *p++ = '%';
6677 *p++ = 'l';
6678 *p++ = label + '0';
6679 *p++ = '%';
6680 *p++ = '#';
6681 *p = '\0';
6683 return string;
6686 /* Emit a library call comparison between floating point X and Y.
6687 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6688 Return the new operator to be used in the comparison sequence.
6690 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6691 values as arguments instead of the TFmode registers themselves,
6692 that's why we cannot call emit_float_lib_cmp. */
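/* For the unordered comparisons the _Q_cmp/_Qp_cmp routine is used; it
   returns a condition value rather than a boolean.  The decoding switch
   at the end of this function assumes the usual convention 0 = equal,
   1 = less, 2 = greater, 3 = unordered, so that e.g. UNGT is
   "result > 1", UNLT is "result & 1" and UNEQ is
   "((result + 1) & 2) == 0".  */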
6694 rtx
6695 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6697 const char *qpfunc;
6698 rtx slot0, slot1, result, tem, tem2, libfunc;
6699 enum machine_mode mode;
6700 enum rtx_code new_comparison;
6702 switch (comparison)
6704 case EQ:
6705 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6706 break;
6708 case NE:
6709 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6710 break;
6712 case GT:
6713 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6714 break;
6716 case GE:
6717 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6718 break;
6720 case LT:
6721 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6722 break;
6724 case LE:
6725 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6726 break;
6728 case ORDERED:
6729 case UNORDERED:
6730 case UNGT:
6731 case UNLT:
6732 case UNEQ:
6733 case UNGE:
6734 case UNLE:
6735 case LTGT:
6736 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6737 break;
6739 default:
6740 gcc_unreachable ();
6743 if (TARGET_ARCH64)
6745 if (MEM_P (x))
6746 slot0 = x;
6747 else
6749 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6750 emit_move_insn (slot0, x);
6753 if (MEM_P (y))
6754 slot1 = y;
6755 else
6757 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6758 emit_move_insn (slot1, y);
6761 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6762 emit_library_call (libfunc, LCT_NORMAL,
6763 DImode, 2,
6764 XEXP (slot0, 0), Pmode,
6765 XEXP (slot1, 0), Pmode);
6766 mode = DImode;
6768 else
6770 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6771 emit_library_call (libfunc, LCT_NORMAL,
6772 SImode, 2,
6773 x, TFmode, y, TFmode);
6774 mode = SImode;
6778 /* Immediately move the result of the libcall into a pseudo
6779 register so reload doesn't clobber the value if it needs
6780 the return register for a spill reg. */
6781 result = gen_reg_rtx (mode);
6782 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6784 switch (comparison)
6786 default:
6787 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6788 case ORDERED:
6789 case UNORDERED:
6790 new_comparison = (comparison == UNORDERED ? EQ : NE);
6791 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6792 case UNGT:
6793 case UNGE:
6794 new_comparison = (comparison == UNGT ? GT : NE);
6795 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6796 case UNLE:
6797 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6798 case UNLT:
6799 tem = gen_reg_rtx (mode);
6800 if (TARGET_ARCH32)
6801 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6802 else
6803 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6804 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6805 case UNEQ:
6806 case LTGT:
6807 tem = gen_reg_rtx (mode);
6808 if (TARGET_ARCH32)
6809 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6810 else
6811 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6812 tem2 = gen_reg_rtx (mode);
6813 if (TARGET_ARCH32)
6814 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6815 else
6816 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6817 new_comparison = (comparison == UNEQ ? EQ : NE);
6818 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6821 gcc_unreachable ();
6824 /* Generate an unsigned DImode to FP conversion. This is the same code
6825 optabs would emit if we didn't have TFmode patterns. */
6827 void
6828 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6830 rtx neglab, donelab, i0, i1, f0, in, out;
6832 out = operands[0];
6833 in = force_reg (DImode, operands[1]);
6834 neglab = gen_label_rtx ();
6835 donelab = gen_label_rtx ();
6836 i0 = gen_reg_rtx (DImode);
6837 i1 = gen_reg_rtx (DImode);
6838 f0 = gen_reg_rtx (mode);
6840 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6842 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6843 emit_jump_insn (gen_jump (donelab));
6844 emit_barrier ();
6846 emit_label (neglab);
6848 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6849 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6850 emit_insn (gen_iordi3 (i0, i0, i1));
6851 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6852 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6854 emit_label (donelab);
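/* The expansion above, as a C-level sketch (illustrative only; the name
   below is made up and the block is never compiled):  */
#if 0
static double
sketch_floatunsdi (unsigned long long in)
{
  unsigned long long i0;

  if ((long long) in >= 0)
    return (double) (long long) in;

  /* Halve with the low bit folded back in (round to odd), convert,
     then double; the single final rounding step cannot double-round.  */
  i0 = (in >> 1) | (in & 1);
  return (double) (long long) i0 * 2.0;
}
#endif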
6857 /* Generate an FP to unsigned DImode conversion. This is the same code
6858 optabs would emit if we didn't have TFmode patterns. */
6860 void
6861 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6863 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6865 out = operands[0];
6866 in = force_reg (mode, operands[1]);
6867 neglab = gen_label_rtx ();
6868 donelab = gen_label_rtx ();
6869 i0 = gen_reg_rtx (DImode);
6870 i1 = gen_reg_rtx (DImode);
6871 limit = gen_reg_rtx (mode);
6872 f0 = gen_reg_rtx (mode);
6874 emit_move_insn (limit,
6875 CONST_DOUBLE_FROM_REAL_VALUE (
6876 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6877 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6879 emit_insn (gen_rtx_SET (VOIDmode,
6880 out,
6881 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6882 emit_jump_insn (gen_jump (donelab));
6883 emit_barrier ();
6885 emit_label (neglab);
6887 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6888 emit_insn (gen_rtx_SET (VOIDmode,
6889 i0,
6890 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6891 emit_insn (gen_movdi (i1, const1_rtx));
6892 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6893 emit_insn (gen_xordi3 (out, i0, i1));
6895 emit_label (donelab);
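/* C-level sketch of the expansion above (illustrative only; the name
   below is made up and the block is never compiled):  */
#if 0
static unsigned long long
sketch_fixunsdi (double in)
{
  const double limit = 9223372036854775808.0;   /* 2^63 */

  if (in < limit)
    return (unsigned long long) (long long) in;

  /* Bias down by 2^63 so the signed conversion is in range, then
     put the top bit back with an xor.  */
  return ((unsigned long long) (long long) (in - limit)) ^ (1ULL << 63);
}
#endif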
6898 /* Return the string to output a conditional branch to LABEL, testing
6899 register REG. LABEL is the operand number of the label; REG is the
6900 operand number of the reg. OP is the conditional expression. The mode
6901 of REG says what kind of comparison we made.
6903 DEST is the destination insn (i.e. the label), INSN is the source.
6905 REVERSED is nonzero if we should reverse the sense of the comparison.
6907 ANNUL is nonzero if we should generate an annulling branch. */
6909 const char *
6910 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6911 int annul, rtx insn)
6913 static char string[64];
6914 enum rtx_code code = GET_CODE (op);
6915 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6916 rtx note;
6917 int far;
6918 char *p;
6920 /* Branches on registers are limited to +-128KB. If it is too far away,
6921 change
6923 brnz,pt %g1, .LC30
6925 to
6927 brz,pn %g1, .+12
6928 nop
6929 ba,pt %xcc, .LC30
6931 and
6933 brgez,a,pn %o1, .LC29
6935 to
6937 brlz,pt %o1, .+16
6938 nop
6939 ba,pt %xcc, .LC29 */
6941 far = get_attr_length (insn) >= 3;
6943 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6944 if (reversed ^ far)
6945 code = reverse_condition (code);
6947 /* Only 64 bit versions of these instructions exist. */
6948 gcc_assert (mode == DImode);
6950 /* Start by writing the branch condition. */
6952 switch (code)
6954 case NE:
6955 strcpy (string, "brnz");
6956 break;
6958 case EQ:
6959 strcpy (string, "brz");
6960 break;
6962 case GE:
6963 strcpy (string, "brgez");
6964 break;
6966 case LT:
6967 strcpy (string, "brlz");
6968 break;
6970 case LE:
6971 strcpy (string, "brlez");
6972 break;
6974 case GT:
6975 strcpy (string, "brgz");
6976 break;
6978 default:
6979 gcc_unreachable ();
6982 p = strchr (string, '\0');
6984 /* Now add the annulling, reg, label, and nop. */
6985 if (annul && ! far)
6987 strcpy (p, ",a");
6988 p += 2;
6991 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6993 strcpy (p,
6994 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6995 ? ",pt" : ",pn");
6996 p += 3;
6999 *p = p < string + 8 ? '\t' : ' ';
7000 p++;
7001 *p++ = '%';
7002 *p++ = '0' + reg;
7003 *p++ = ',';
7004 *p++ = ' ';
7005 if (far)
7007 int veryfar = 1, delta;
7009 if (INSN_ADDRESSES_SET_P ())
7011 delta = (INSN_ADDRESSES (INSN_UID (dest))
7012 - INSN_ADDRESSES (INSN_UID (insn)));
7013 /* Leave some instructions for "slop". */
7014 if (delta >= -260000 && delta < 260000)
7015 veryfar = 0;
7018 strcpy (p, ".+12\n\t nop\n\t");
7019 /* Skip the next insn if requested or
7020 if we know that it will be a nop. */
7021 if (annul || ! final_sequence)
7022 p[3] = '6';
7023 p += 12;
7024 if (veryfar)
7026 strcpy (p, "b\t");
7027 p += 2;
7029 else
7031 strcpy (p, "ba,pt\t%%xcc, ");
7032 p += 13;
7035 *p++ = '%';
7036 *p++ = 'l';
7037 *p++ = '0' + label;
7038 *p++ = '%';
7039 *p++ = '#';
7040 *p = '\0';
7042 return string;
7045 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7046 Such instructions cannot be used in the delay slot of a return insn on v9.
7047 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
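/* For example, a return-delay-slot candidate such as
   (set (reg:SI %i0) (reg:SI %l3)) is rejected because it uses %l3,
   while (set (reg:SI %i0) (const_int 0)) has its %i0 renamed to %o0
   so the insn remains correct after the register window shifts.  */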
7050 static int
7051 epilogue_renumber (register rtx *where, int test)
7053 register const char *fmt;
7054 register int i;
7055 register enum rtx_code code;
7057 if (*where == 0)
7058 return 0;
7060 code = GET_CODE (*where);
7062 switch (code)
7064 case REG:
7065 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7066 return 1;
7067 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7068 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7069 case SCRATCH:
7070 case CC0:
7071 case PC:
7072 case CONST_INT:
7073 case CONST_DOUBLE:
7074 return 0;
7076 /* Do not replace the frame pointer with the stack pointer because
7077 it can cause the delayed instruction to load below the stack.
7078 This occurs when instructions like:
7080 (set (reg/i:SI 24 %i0)
7081 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7082 (const_int -20 [0xffffffec])) 0))
7084 are in the return delay slot. */
7085 case PLUS:
7086 if (GET_CODE (XEXP (*where, 0)) == REG
7087 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7088 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7089 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7090 return 1;
7091 break;
7093 case MEM:
7094 if (SPARC_STACK_BIAS
7095 && GET_CODE (XEXP (*where, 0)) == REG
7096 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7097 return 1;
7098 break;
7100 default:
7101 break;
7104 fmt = GET_RTX_FORMAT (code);
7106 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7108 if (fmt[i] == 'E')
7110 register int j;
7111 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7112 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7113 return 1;
7115 else if (fmt[i] == 'e'
7116 && epilogue_renumber (&(XEXP (*where, i)), test))
7117 return 1;
7119 return 0;
7122 /* Leaf functions and non-leaf functions have different needs. */
7124 static const int
7125 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7127 static const int
7128 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7130 static const int *const reg_alloc_orders[] = {
7131 reg_leaf_alloc_order,
7132 reg_nonleaf_alloc_order};
7134 void
7135 order_regs_for_local_alloc (void)
7137 static int last_order_nonleaf = 1;
7139 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7141 last_order_nonleaf = !last_order_nonleaf;
7142 memcpy ((char *) reg_alloc_order,
7143 (const char *) reg_alloc_orders[last_order_nonleaf],
7144 FIRST_PSEUDO_REGISTER * sizeof (int));
7148 /* Return 1 if REG and MEM are legitimate enough to allow the various
7149 mem<-->reg splits to be run. */
7151 int
7152 sparc_splitdi_legitimate (rtx reg, rtx mem)
7154 /* Punt if we are here by mistake. */
7155 gcc_assert (reload_completed);
7157 /* We must have an offsettable memory reference. */
7158 if (! offsettable_memref_p (mem))
7159 return 0;
7161 /* If we have legitimate args for ldd/std, we do not want
7162 the split to happen. */
7163 if ((REGNO (reg) % 2) == 0
7164 && mem_min_alignment (mem, 8))
7165 return 0;
7167 /* Success. */
7168 return 1;
7171 /* Return 1 if x and y are some kind of REG and they refer to
7172 different hard registers. This test is guaranteed to be
7173 run after reload. */
7175 int
7176 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7178 if (GET_CODE (x) != REG)
7179 return 0;
7180 if (GET_CODE (y) != REG)
7181 return 0;
7182 if (REGNO (x) == REGNO (y))
7183 return 0;
7184 return 1;
7187 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7188 This makes them candidates for using ldd and std insns.
7190 Note reg1 and reg2 *must* be hard registers. */
7192 int
7193 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7195 /* We might have been passed a SUBREG. */
7196 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7197 return 0;
7199 if (REGNO (reg1) % 2 != 0)
7200 return 0;
7202 /* Integer ldd is deprecated in SPARC V9. */
7203 if (TARGET_V9 && REGNO (reg1) < 32)
7204 return 0;
7206 return (REGNO (reg1) == REGNO (reg2) - 1);
7209 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7210 an ldd or std insn.
7212 This can only happen when addr1 and addr2, the addresses in mem1
7213 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7214 addr1 must also be aligned on a 64-bit boundary.
7216 Also iff dependent_reg_rtx is not null it should not be used to
7217 compute the address for mem1, i.e. we cannot optimize a sequence
7218 like:
7219 ld [%o0], %o0
7220 ld [%o0 + 4], %o1
7222 ldd [%o0], %o0
7223 nor:
7224 ld [%g3 + 4], %g3
7225 ld [%g3], %g2
7227 ldd [%g3], %g2
7229 But, note that the transformation from:
7230 ld [%g2 + 4], %g3
7231 ld [%g2], %g2
7233 ldd [%g2], %g2
7234 is perfectly fine. Thus, the peephole2 patterns always pass us
7235 the destination register of the first load, never the second one.
7237 For stores we don't have a similar problem, so dependent_reg_rtx is
7238 NULL_RTX. */
7240 int
7241 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7243 rtx addr1, addr2;
7244 unsigned int reg1;
7245 HOST_WIDE_INT offset1;
7247 /* The mems cannot be volatile. */
7248 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7249 return 0;
7251 /* MEM1 should be aligned on a 64-bit boundary. */
7252 if (MEM_ALIGN (mem1) < 64)
7253 return 0;
7255 addr1 = XEXP (mem1, 0);
7256 addr2 = XEXP (mem2, 0);
7258 /* Extract a register number and offset (if used) from the first addr. */
7259 if (GET_CODE (addr1) == PLUS)
7261 /* If not a REG, return zero. */
7262 if (GET_CODE (XEXP (addr1, 0)) != REG)
7263 return 0;
7264 else
7266 reg1 = REGNO (XEXP (addr1, 0));
7267 /* The offset must be constant! */
7268 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7269 return 0;
7270 offset1 = INTVAL (XEXP (addr1, 1));
7273 else if (GET_CODE (addr1) != REG)
7274 return 0;
7275 else
7277 reg1 = REGNO (addr1);
7278 /* This was a simple (mem (reg)) expression. Offset is 0. */
7279 offset1 = 0;
7282 /* Make sure the second address has the form (plus (reg) (const_int)). */
7283 if (GET_CODE (addr2) != PLUS)
7284 return 0;
7286 if (GET_CODE (XEXP (addr2, 0)) != REG
7287 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7288 return 0;
7290 if (reg1 != REGNO (XEXP (addr2, 0)))
7291 return 0;
7293 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7294 return 0;
7296 /* The first offset must be evenly divisible by 8 to ensure the
7297 address is 64 bit aligned. */
7298 if (offset1 % 8 != 0)
7299 return 0;
7301 /* The offset for the second addr must be 4 more than the first addr. */
7302 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7303 return 0;
7305 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7306 instructions. */
7307 return 1;
7310 /* Return 1 if reg is a pseudo, or is the first register in
7311 a hard register pair. This makes it suitable for use in
7312 ldd and std insns. */
7314 int
7315 register_ok_for_ldd (rtx reg)
7317 /* We might have been passed a SUBREG. */
7318 if (!REG_P (reg))
7319 return 0;
7321 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7322 return (REGNO (reg) % 2 == 0);
7324 return 1;
7327 /* Return 1 if OP is a memory whose address is known to be
7328 aligned to an 8-byte boundary, or a pseudo during reload.
7329 This makes it suitable for use in ldd and std insns. */
7331 int
7332 memory_ok_for_ldd (rtx op)
7334 if (MEM_P (op))
7336 /* In 64-bit mode, we assume that the address is word-aligned. */
7337 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7338 return 0;
7340 if ((reload_in_progress || reload_completed)
7341 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7342 return 0;
7344 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7346 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7347 return 0;
7349 else
7350 return 0;
7352 return 1;
7355 /* Print operand X (an rtx) in assembler syntax to file FILE.
7356 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7357 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7359 void
7360 print_operand (FILE *file, rtx x, int code)
7362 switch (code)
7364 case '#':
7365 /* Output an insn in a delay slot. */
7366 if (final_sequence)
7367 sparc_indent_opcode = 1;
7368 else
7369 fputs ("\n\t nop", file);
7370 return;
7371 case '*':
7372 /* Output an annul flag if there's nothing for the delay slot and we
7373 are optimizing. This is always used with '(' below.
7374 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7375 this is a dbx bug. So, we only do this when optimizing.
7376 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7377 Always emit a nop in case the next instruction is a branch. */
7378 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7379 fputs (",a", file);
7380 return;
7381 case '(':
7382 /* Output a 'nop' if there's nothing for the delay slot and we are
7383 not optimizing. This is always used with '*' above. */
7384 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7385 fputs ("\n\t nop", file);
7386 else if (final_sequence)
7387 sparc_indent_opcode = 1;
7388 return;
7389 case ')':
7390 /* Output the right displacement from the saved PC on function return.
7391 The caller may have placed an "unimp" insn immediately after the call
7392 so we have to account for it. This insn is used in the 32-bit ABI
7393 when calling a function that returns a non-zero-sized structure. The
7394 64-bit ABI doesn't have it. Be careful to have this test be the same
7395 as that for the call. The exception is when sparc_std_struct_return
7396 is enabled, the psABI is followed exactly and the adjustment is made
7397 by the code in sparc_struct_value_rtx. The call emitted is the same
7398 when sparc_std_struct_return is enabled. */
7399 if (!TARGET_ARCH64
7400 && cfun->returns_struct
7401 && !sparc_std_struct_return
7402 && DECL_SIZE (DECL_RESULT (current_function_decl))
7403 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7404 == INTEGER_CST
7405 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7406 fputs ("12", file);
7407 else
7408 fputc ('8', file);
7409 return;
7410 case '_':
7411 /* Output the Embedded Medium/Anywhere code model base register. */
7412 fputs (EMBMEDANY_BASE_REG, file);
7413 return;
7414 case '&':
7415 /* Print some local dynamic TLS name. */
7416 assemble_name (file, get_some_local_dynamic_name ());
7417 return;
7419 case 'Y':
7420 /* Adjust the operand to take into account a RESTORE operation. */
7421 if (GET_CODE (x) == CONST_INT)
7422 break;
7423 else if (GET_CODE (x) != REG)
7424 output_operand_lossage ("invalid %%Y operand");
7425 else if (REGNO (x) < 8)
7426 fputs (reg_names[REGNO (x)], file);
7427 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7428 fputs (reg_names[REGNO (x)-16], file);
7429 else
7430 output_operand_lossage ("invalid %%Y operand");
7431 return;
7432 case 'L':
7433 /* Print out the low order register name of a register pair. */
7434 if (WORDS_BIG_ENDIAN)
7435 fputs (reg_names[REGNO (x)+1], file);
7436 else
7437 fputs (reg_names[REGNO (x)], file);
7438 return;
7439 case 'H':
7440 /* Print out the high order register name of a register pair. */
7441 if (WORDS_BIG_ENDIAN)
7442 fputs (reg_names[REGNO (x)], file);
7443 else
7444 fputs (reg_names[REGNO (x)+1], file);
7445 return;
7446 case 'R':
7447 /* Print out the second register name of a register pair or quad.
7448 I.e., R (%o0) => %o1. */
7449 fputs (reg_names[REGNO (x)+1], file);
7450 return;
7451 case 'S':
7452 /* Print out the third register name of a register quad.
7453 I.e., S (%o0) => %o2. */
7454 fputs (reg_names[REGNO (x)+2], file);
7455 return;
7456 case 'T':
7457 /* Print out the fourth register name of a register quad.
7458 I.e., T (%o0) => %o3. */
7459 fputs (reg_names[REGNO (x)+3], file);
7460 return;
7461 case 'x':
7462 /* Print a condition code register. */
7463 if (REGNO (x) == SPARC_ICC_REG)
7465 /* We don't handle CC[X]_NOOVmode because they're not supposed
7466 to occur here. */
7467 if (GET_MODE (x) == CCmode)
7468 fputs ("%icc", file);
7469 else if (GET_MODE (x) == CCXmode)
7470 fputs ("%xcc", file);
7471 else
7472 gcc_unreachable ();
7474 else
7475 /* %fccN register */
7476 fputs (reg_names[REGNO (x)], file);
7477 return;
7478 case 'm':
7479 /* Print the operand's address only. */
7480 output_address (XEXP (x, 0));
7481 return;
7482 case 'r':
7483 /* In this case we need a register. Use %g0 if the
7484 operand is const0_rtx. */
7485 if (x == const0_rtx
7486 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7488 fputs ("%g0", file);
7489 return;
7491 else
7492 break;
7494 case 'A':
7495 switch (GET_CODE (x))
7497 case IOR: fputs ("or", file); break;
7498 case AND: fputs ("and", file); break;
7499 case XOR: fputs ("xor", file); break;
7500 default: output_operand_lossage ("invalid %%A operand");
7502 return;
7504 case 'B':
7505 switch (GET_CODE (x))
7507 case IOR: fputs ("orn", file); break;
7508 case AND: fputs ("andn", file); break;
7509 case XOR: fputs ("xnor", file); break;
7510 default: output_operand_lossage ("invalid %%B operand");
7512 return;
7514 /* These are used by the conditional move instructions. */
7515 case 'c' :
7516 case 'C':
7518 enum rtx_code rc = GET_CODE (x);
7520 if (code == 'c')
7522 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7523 if (mode == CCFPmode || mode == CCFPEmode)
7524 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7525 else
7526 rc = reverse_condition (GET_CODE (x));
7528 switch (rc)
7530 case NE: fputs ("ne", file); break;
7531 case EQ: fputs ("e", file); break;
7532 case GE: fputs ("ge", file); break;
7533 case GT: fputs ("g", file); break;
7534 case LE: fputs ("le", file); break;
7535 case LT: fputs ("l", file); break;
7536 case GEU: fputs ("geu", file); break;
7537 case GTU: fputs ("gu", file); break;
7538 case LEU: fputs ("leu", file); break;
7539 case LTU: fputs ("lu", file); break;
7540 case LTGT: fputs ("lg", file); break;
7541 case UNORDERED: fputs ("u", file); break;
7542 case ORDERED: fputs ("o", file); break;
7543 case UNLT: fputs ("ul", file); break;
7544 case UNLE: fputs ("ule", file); break;
7545 case UNGT: fputs ("ug", file); break;
7546 case UNGE: fputs ("uge", file); break;
7547 case UNEQ: fputs ("ue", file); break;
7548 default: output_operand_lossage (code == 'c'
7549 ? "invalid %%c operand"
7550 : "invalid %%C operand");
7552 return;
7555 /* These are used by the movr instruction pattern. */
7556 case 'd':
7557 case 'D':
7559 enum rtx_code rc = (code == 'd'
7560 ? reverse_condition (GET_CODE (x))
7561 : GET_CODE (x));
7562 switch (rc)
7564 case NE: fputs ("ne", file); break;
7565 case EQ: fputs ("e", file); break;
7566 case GE: fputs ("gez", file); break;
7567 case LT: fputs ("lz", file); break;
7568 case LE: fputs ("lez", file); break;
7569 case GT: fputs ("gz", file); break;
7570 default: output_operand_lossage (code == 'd'
7571 ? "invalid %%d operand"
7572 : "invalid %%D operand");
7574 return;
7577 case 'b':
7579 /* Print a sign-extended character. */
7580 int i = trunc_int_for_mode (INTVAL (x), QImode);
7581 fprintf (file, "%d", i);
7582 return;
7585 case 'f':
7586 /* Operand must be a MEM; write its address. */
7587 if (GET_CODE (x) != MEM)
7588 output_operand_lossage ("invalid %%f operand");
7589 output_address (XEXP (x, 0));
7590 return;
7592 case 's':
7594 /* Print a sign-extended 32-bit value. */
7595 HOST_WIDE_INT i;
7596 if (GET_CODE(x) == CONST_INT)
7597 i = INTVAL (x);
7598 else if (GET_CODE(x) == CONST_DOUBLE)
7599 i = CONST_DOUBLE_LOW (x);
7600 else
7602 output_operand_lossage ("invalid %%s operand");
7603 return;
7605 i = trunc_int_for_mode (i, SImode);
7606 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7607 return;
7610 case 0:
7611 /* Do nothing special. */
7612 break;
7614 default:
7615 /* Undocumented flag. */
7616 output_operand_lossage ("invalid operand output code");
7619 if (GET_CODE (x) == REG)
7620 fputs (reg_names[REGNO (x)], file);
7621 else if (GET_CODE (x) == MEM)
7623 fputc ('[', file);
7624 /* Poor Sun assembler doesn't understand absolute addressing. */
7625 if (CONSTANT_P (XEXP (x, 0)))
7626 fputs ("%g0+", file);
7627 output_address (XEXP (x, 0));
7628 fputc (']', file);
7630 else if (GET_CODE (x) == HIGH)
7632 fputs ("%hi(", file);
7633 output_addr_const (file, XEXP (x, 0));
7634 fputc (')', file);
7636 else if (GET_CODE (x) == LO_SUM)
7638 print_operand (file, XEXP (x, 0), 0);
7639 if (TARGET_CM_MEDMID)
7640 fputs ("+%l44(", file);
7641 else
7642 fputs ("+%lo(", file);
7643 output_addr_const (file, XEXP (x, 1));
7644 fputc (')', file);
7646 else if (GET_CODE (x) == CONST_DOUBLE
7647 && (GET_MODE (x) == VOIDmode
7648 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7650 if (CONST_DOUBLE_HIGH (x) == 0)
7651 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7652 else if (CONST_DOUBLE_HIGH (x) == -1
7653 && CONST_DOUBLE_LOW (x) < 0)
7654 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7655 else
7656 output_operand_lossage ("long long constant not a valid immediate operand");
7658 else if (GET_CODE (x) == CONST_DOUBLE)
7659 output_operand_lossage ("floating point constant not a valid immediate operand");
7660 else { output_addr_const (file, x); }
7663 /* Target hook for assembling integer objects. The sparc version has
7664 special handling for aligned DI-mode objects. */
7666 static bool
7667 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7669 /* ??? We only output .xword's for symbols and only then in environments
7670 where the assembler can handle them. */
7671 if (aligned_p && size == 8
7672 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7674 if (TARGET_V9)
7676 assemble_integer_with_op ("\t.xword\t", x);
7677 return true;
7679 else
7681 assemble_aligned_integer (4, const0_rtx);
7682 assemble_aligned_integer (4, x);
7683 return true;
7686 return default_assemble_integer (x, size, aligned_p);
7689 /* Return the value of a code used in the .proc pseudo-op that says
7690 what kind of result this function returns. For non-C types, we pick
7691 the closest C type. */
7693 #ifndef SHORT_TYPE_SIZE
7694 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7695 #endif
7697 #ifndef INT_TYPE_SIZE
7698 #define INT_TYPE_SIZE BITS_PER_WORD
7699 #endif
7701 #ifndef LONG_TYPE_SIZE
7702 #define LONG_TYPE_SIZE BITS_PER_WORD
7703 #endif
7705 #ifndef LONG_LONG_TYPE_SIZE
7706 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7707 #endif
7709 #ifndef FLOAT_TYPE_SIZE
7710 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7711 #endif
7713 #ifndef DOUBLE_TYPE_SIZE
7714 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7715 #endif
7717 #ifndef LONG_DOUBLE_TYPE_SIZE
7718 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7719 #endif
7721 unsigned long
7722 sparc_type_code (register tree type)
7724 register unsigned long qualifiers = 0;
7725 register unsigned shift;
7727 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7728 setting more, since some assemblers will give an error for this. Also,
7729 we must be careful to avoid shifts of 32 bits or more to avoid getting
7730 unpredictable results. */
7732 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7734 switch (TREE_CODE (type))
7736 case ERROR_MARK:
7737 return qualifiers;
7739 case ARRAY_TYPE:
7740 qualifiers |= (3 << shift);
7741 break;
7743 case FUNCTION_TYPE:
7744 case METHOD_TYPE:
7745 qualifiers |= (2 << shift);
7746 break;
7748 case POINTER_TYPE:
7749 case REFERENCE_TYPE:
7750 case OFFSET_TYPE:
7751 qualifiers |= (1 << shift);
7752 break;
7754 case RECORD_TYPE:
7755 return (qualifiers | 8);
7757 case UNION_TYPE:
7758 case QUAL_UNION_TYPE:
7759 return (qualifiers | 9);
7761 case ENUMERAL_TYPE:
7762 return (qualifiers | 10);
7764 case VOID_TYPE:
7765 return (qualifiers | 16);
7767 case INTEGER_TYPE:
7768 /* If this is a range type, consider it to be the underlying
7769 type. */
7770 if (TREE_TYPE (type) != 0)
7771 break;
7773 /* Carefully distinguish all the standard types of C,
7774 without messing up if the language is not C. We do this by
7775 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7776 look at both the names and the above fields, but that's redundant.
7777 Any type whose size is between two C types will be considered
7778 to be the wider of the two types. Also, we do not have a
7779 special code to use for "long long", so anything wider than
7780 long is treated the same. Note that we can't distinguish
7781 between "int" and "long" in this code if they are the same
7782 size, but that's fine, since neither can the assembler. */
7784 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7785 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7787 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7788 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7790 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7791 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7793 else
7794 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7796 case REAL_TYPE:
7797 /* If this is a range type, consider it to be the underlying
7798 type. */
7799 if (TREE_TYPE (type) != 0)
7800 break;
7802 /* Carefully distinguish all the standard types of C,
7803 without messing up if the language is not C. */
7805 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7806 return (qualifiers | 6);
7808 else
7809 return (qualifiers | 7);
7811 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7812 /* ??? We need to distinguish between double and float complex types,
7813 but I don't know how yet because I can't reach this code from
7814 existing front-ends. */
7815 return (qualifiers | 7); /* Who knows? */
7817 case VECTOR_TYPE:
7818 case BOOLEAN_TYPE: /* Boolean truth value type. */
7819 case LANG_TYPE:
7820 case NULLPTR_TYPE:
7821 return qualifiers;
7823 default:
7824 gcc_unreachable (); /* Not a type! */
7828 return qualifiers;
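/* A worked example of the encoding: for "int **" the two pointer levels
   contribute (1 << 6) and (1 << 8), and the underlying signed int
   contributes the base code 4, giving 0x144.  (Assumes a precision of
   INT_TYPE_SIZE; purely illustrative.)  */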
7831 /* Nested function support. */
7833 /* Emit RTL insns to initialize the variable parts of a trampoline.
7834 FNADDR is an RTX for the address of the function's pure code.
7835 CXT is an RTX for the static chain value for the function.
7837 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7838 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7839 (to store insns). This is a bit excessive. Perhaps a different
7840 mechanism would be better here.
7842 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7844 static void
7845 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7847 /* SPARC 32-bit trampoline:
7849 sethi %hi(fn), %g1
7850 sethi %hi(static), %g2
7851 jmp %g1+%lo(fn)
7852 or %g2, %lo(static), %g2
7854 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7855 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7856 */
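/* Decoding the constants used below: 0x03000000 is the SETHI pattern
   with rd = %g1 and a zero imm22, so the first word becomes
   0x03000000 | (fnaddr >> 10), i.e. "sethi %hi(fn), %g1"; 0x05000000
   is the same with rd = %g2.  0x81c06000 is "jmpl %g1+simm13, %g0"
   and 0x8410a000 is "or %g2, simm13, %g2", each ORed with the low
   10 bits of the respective address.  */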
7858 emit_move_insn
7859 (adjust_address (m_tramp, SImode, 0),
7860 expand_binop (SImode, ior_optab,
7861 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7862 size_int (10), 0, 1),
7863 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7864 NULL_RTX, 1, OPTAB_DIRECT));
7866 emit_move_insn
7867 (adjust_address (m_tramp, SImode, 4),
7868 expand_binop (SImode, ior_optab,
7869 expand_shift (RSHIFT_EXPR, SImode, cxt,
7870 size_int (10), 0, 1),
7871 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7872 NULL_RTX, 1, OPTAB_DIRECT));
7874 emit_move_insn
7875 (adjust_address (m_tramp, SImode, 8),
7876 expand_binop (SImode, ior_optab,
7877 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7878 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7879 NULL_RTX, 1, OPTAB_DIRECT));
7881 emit_move_insn
7882 (adjust_address (m_tramp, SImode, 12),
7883 expand_binop (SImode, ior_optab,
7884 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7885 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7886 NULL_RTX, 1, OPTAB_DIRECT));
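/* An illustrative decoding of the four words stored above (a sketch
   based on the standard SPARC instruction formats, not extra code in
   this port): 0x03000000 is "sethi 0, %g1" and 0x05000000 is
   "sethi 0, %g2", so or-ing in fnaddr >> 10 and cxt >> 10 fills in
   their 22-bit immediate fields; 0x81c06000 is "jmp %g1+0" and
   0x8410a000 is "or %g2, 0, %g2", so or-ing in the low 10 bits of
   each address completes the template shown at the top of this
   function.  */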
7888 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7889 aligned on a 16-byte boundary, so one flush clears it all. */
7890 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
7891 if (sparc_cpu != PROCESSOR_ULTRASPARC
7892 && sparc_cpu != PROCESSOR_ULTRASPARC3
7893 && sparc_cpu != PROCESSOR_NIAGARA
7894 && sparc_cpu != PROCESSOR_NIAGARA2)
7895 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
7897 /* Call __enable_execute_stack after writing onto the stack to make sure
7898 the stack address is accessible. */
7899 #ifdef ENABLE_EXECUTE_STACK
7900 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7901 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7902 #endif
7906 /* The 64-bit version is simpler because it makes more sense to load the
7907 values as "immediate" data out of the trampoline. It's also easier since
7908 we can read the PC without clobbering a register. */
7910 static void
7911 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7913 /* SPARC 64-bit trampoline:
7915 rd %pc, %g1
7916 ldx [%g1+24], %g5
7917 jmp %g5
7918 ldx [%g1+16], %g5
7919 +16 bytes data */
7922 emit_move_insn (adjust_address (m_tramp, SImode, 0),
7923 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7924 emit_move_insn (adjust_address (m_tramp, SImode, 4),
7925 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7926 emit_move_insn (adjust_address (m_tramp, SImode, 8),
7927 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7928 emit_move_insn (adjust_address (m_tramp, SImode, 12),
7929 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7930 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
7931 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
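/* Illustrative walk-through of the template above (a sketch, not a
   normative description): "rd %pc, %g1" makes %g1 point at the
   trampoline itself, "ldx [%g1+24], %g5" fetches FNADDR from the data
   word stored at offset 24 just above, "jmp %g5" transfers control to
   it, and the delay-slot "ldx [%g1+16], %g5" reloads %g5 with CXT,
   the static chain value, before the target function starts
   executing.  */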
7932 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
7934 if (sparc_cpu != PROCESSOR_ULTRASPARC
7935 && sparc_cpu != PROCESSOR_ULTRASPARC3
7936 && sparc_cpu != PROCESSOR_NIAGARA
7937 && sparc_cpu != PROCESSOR_NIAGARA2)
7938 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
7940 /* Call __enable_execute_stack after writing onto the stack to make sure
7941 the stack address is accessible. */
7942 #ifdef ENABLE_EXECUTE_STACK
7943 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7944 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7945 #endif
7948 /* Worker for TARGET_TRAMPOLINE_INIT. */
7950 static void
7951 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
7953 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
7954 cxt = force_reg (Pmode, cxt);
7955 if (TARGET_ARCH64)
7956 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
7957 else
7958 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
7961 /* Adjust the cost of a scheduling dependency. Return the new cost of
7962 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7964 static int
7965 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7967 enum attr_type insn_type;
7969 if (! recog_memoized (insn))
7970 return 0;
7972 insn_type = get_attr_type (insn);
7974 if (REG_NOTE_KIND (link) == 0)
7976 /* Data dependency; DEP_INSN writes a register that INSN reads some
7977 cycles later. */
7979 /* If a load, then the dependence must be on the memory address;
7980 add an extra "cycle". Note that the cost could be two cycles
7981 if the reg was written late in an instruction group; we cannot tell
7982 here. */
7983 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7984 return cost + 3;
7986 /* Get the delay only if the address of the store is the dependence. */
7987 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7989 rtx pat = PATTERN (insn);
7990 rtx dep_pat = PATTERN (dep_insn);
7992 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7993 return cost; /* This should not happen! */
7995 /* The dependency between the two instructions was on the data that
7996 is being stored. Assume that this implies that the address of the
7997 store is not dependent. */
7998 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7999 return cost;
8001 return cost + 3; /* An approximation. */
8004 /* A shift instruction cannot receive its data from an instruction
8005 in the same cycle; add a one cycle penalty. */
8006 if (insn_type == TYPE_SHIFT)
8007 return cost + 3; /* Split before cascade into shift. */
8009 else
8011 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8012 INSN writes some cycles later. */
8014 /* These are only significant for the fpu unit; writing an fp reg before
8015 the fpu has finished with it stalls the processor. */
8017 /* Reusing an integer register causes no problems. */
8018 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8019 return 0;
8022 return cost;
8025 static int
8026 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8028 enum attr_type insn_type, dep_type;
8029 rtx pat = PATTERN (insn);
8030 rtx dep_pat = PATTERN (dep_insn);
8032 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8033 return cost;
8035 insn_type = get_attr_type (insn);
8036 dep_type = get_attr_type (dep_insn);
8038 switch (REG_NOTE_KIND (link))
8040 case 0:
8041 /* Data dependency; DEP_INSN writes a register that INSN reads some
8042 cycles later. */
8044 switch (insn_type)
8046 case TYPE_STORE:
8047 case TYPE_FPSTORE:
8048 /* Get the delay iff the address of the store is the dependence. */
8049 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8050 return cost;
8052 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8053 return cost;
8054 return cost + 3;
8056 case TYPE_LOAD:
8057 case TYPE_SLOAD:
8058 case TYPE_FPLOAD:
8059 /* If a load, then the dependence must be on the memory address. If
8060 the addresses aren't equal, then it might be a false dependency. */
8061 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8063 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8064 || GET_CODE (SET_DEST (dep_pat)) != MEM
8065 || GET_CODE (SET_SRC (pat)) != MEM
8066 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8067 XEXP (SET_SRC (pat), 0)))
8068 return cost + 2;
8070 return cost + 8;
8072 break;
8074 case TYPE_BRANCH:
8075 /* Compare to branch latency is 0. There is no benefit from
8076 separating compare and branch. */
8077 if (dep_type == TYPE_COMPARE)
8078 return 0;
8079 /* Floating point compare to branch latency is less than
8080 compare to conditional move. */
8081 if (dep_type == TYPE_FPCMP)
8082 return cost - 1;
8083 break;
8084 default:
8085 break;
8087 break;
8089 case REG_DEP_ANTI:
8090 /* Anti-dependencies only penalize the fpu unit. */
8091 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8092 return 0;
8093 break;
8095 default:
8096 break;
8099 return cost;
8102 static int
8103 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8105 switch (sparc_cpu)
8107 case PROCESSOR_SUPERSPARC:
8108 cost = supersparc_adjust_cost (insn, link, dep, cost);
8109 break;
8110 case PROCESSOR_HYPERSPARC:
8111 case PROCESSOR_SPARCLITE86X:
8112 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8113 break;
8114 default:
8115 break;
8117 return cost;
8120 static void
8121 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8122 int sched_verbose ATTRIBUTE_UNUSED,
8123 int max_ready ATTRIBUTE_UNUSED)
8126 static int
8127 sparc_use_sched_lookahead (void)
8129 if (sparc_cpu == PROCESSOR_NIAGARA
8130 || sparc_cpu == PROCESSOR_NIAGARA2)
8131 return 0;
8132 if (sparc_cpu == PROCESSOR_ULTRASPARC
8133 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8134 return 4;
8135 if ((1 << sparc_cpu) &
8136 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8137 (1 << PROCESSOR_SPARCLITE86X)))
8138 return 3;
8139 return 0;
8142 static int
8143 sparc_issue_rate (void)
8145 switch (sparc_cpu)
8147 case PROCESSOR_NIAGARA:
8148 case PROCESSOR_NIAGARA2:
8149 default:
8150 return 1;
8151 case PROCESSOR_V9:
8152 /* Assume V9 processors are capable of at least dual-issue. */
8153 return 2;
8154 case PROCESSOR_SUPERSPARC:
8155 return 3;
8156 case PROCESSOR_HYPERSPARC:
8157 case PROCESSOR_SPARCLITE86X:
8158 return 2;
8159 case PROCESSOR_ULTRASPARC:
8160 case PROCESSOR_ULTRASPARC3:
8161 return 4;
8165 static int
8166 set_extends (rtx insn)
8168 register rtx pat = PATTERN (insn);
8170 switch (GET_CODE (SET_SRC (pat)))
8172 /* Load and some shift instructions zero extend. */
8173 case MEM:
8174 case ZERO_EXTEND:
8175 /* sethi clears the high bits */
8176 case HIGH:
8177 /* LO_SUM is used with sethi; sethi clears the high
8178 bits and the values used with lo_sum are positive. */
8179 case LO_SUM:
8180 /* Store flag stores 0 or 1 */
8181 case LT: case LTU:
8182 case GT: case GTU:
8183 case LE: case LEU:
8184 case GE: case GEU:
8185 case EQ:
8186 case NE:
8187 return 1;
8188 case AND:
8190 rtx op0 = XEXP (SET_SRC (pat), 0);
8191 rtx op1 = XEXP (SET_SRC (pat), 1);
8192 if (GET_CODE (op1) == CONST_INT)
8193 return INTVAL (op1) >= 0;
8194 if (GET_CODE (op0) != REG)
8195 return 0;
8196 if (sparc_check_64 (op0, insn) == 1)
8197 return 1;
8198 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8200 case IOR:
8201 case XOR:
8203 rtx op0 = XEXP (SET_SRC (pat), 0);
8204 rtx op1 = XEXP (SET_SRC (pat), 1);
8205 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8206 return 0;
8207 if (GET_CODE (op1) == CONST_INT)
8208 return INTVAL (op1) >= 0;
8209 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8211 case LSHIFTRT:
8212 return GET_MODE (SET_SRC (pat)) == SImode;
8213 /* Positive integers leave the high bits zero. */
8214 case CONST_DOUBLE:
8215 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8216 case CONST_INT:
8217 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8218 case ASHIFTRT:
8219 case SIGN_EXTEND:
8220 return - (GET_MODE (SET_SRC (pat)) == SImode);
8221 case REG:
8222 return sparc_check_64 (SET_SRC (pat), insn);
8223 default:
8224 return 0;
8228 /* We _ought_ to have only one kind per function, but... */
8229 static GTY(()) rtx sparc_addr_diff_list;
8230 static GTY(()) rtx sparc_addr_list;
8232 void
8233 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8235 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8236 if (diff)
8237 sparc_addr_diff_list
8238 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8239 else
8240 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8243 static void
8244 sparc_output_addr_vec (rtx vec)
8246 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8247 int idx, vlen = XVECLEN (body, 0);
8249 #ifdef ASM_OUTPUT_ADDR_VEC_START
8250 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8251 #endif
8253 #ifdef ASM_OUTPUT_CASE_LABEL
8254 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8255 NEXT_INSN (lab));
8256 #else
8257 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8258 #endif
8260 for (idx = 0; idx < vlen; idx++)
8262 ASM_OUTPUT_ADDR_VEC_ELT
8263 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8266 #ifdef ASM_OUTPUT_ADDR_VEC_END
8267 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8268 #endif
8271 static void
8272 sparc_output_addr_diff_vec (rtx vec)
8274 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8275 rtx base = XEXP (XEXP (body, 0), 0);
8276 int idx, vlen = XVECLEN (body, 1);
8278 #ifdef ASM_OUTPUT_ADDR_VEC_START
8279 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8280 #endif
8282 #ifdef ASM_OUTPUT_CASE_LABEL
8283 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8284 NEXT_INSN (lab));
8285 #else
8286 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8287 #endif
8289 for (idx = 0; idx < vlen; idx++)
8291 ASM_OUTPUT_ADDR_DIFF_ELT
8292 (asm_out_file,
8293 body,
8294 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8295 CODE_LABEL_NUMBER (base));
8298 #ifdef ASM_OUTPUT_ADDR_VEC_END
8299 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8300 #endif
8303 static void
8304 sparc_output_deferred_case_vectors (void)
8306 rtx t;
8307 int align;
8309 if (sparc_addr_list == NULL_RTX
8310 && sparc_addr_diff_list == NULL_RTX)
8311 return;
8313 /* Align to cache line in the function's code section. */
8314 switch_to_section (current_function_section ());
8316 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8317 if (align > 0)
8318 ASM_OUTPUT_ALIGN (asm_out_file, align);
8320 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8321 sparc_output_addr_vec (XEXP (t, 0));
8322 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8323 sparc_output_addr_diff_vec (XEXP (t, 0));
8325 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8328 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8329 unknown. Return 1 if the high bits are zero, -1 if the register is
8330 sign extended. */
8331 int
8332 sparc_check_64 (rtx x, rtx insn)
8334 /* If a register is set only once it is safe to ignore insns this
8335 code does not know how to handle. The loop will either recognize
8336 the single set and return the correct value or fail to recognize
8337 it and return 0. */
8338 int set_once = 0;
8339 rtx y = x;
8341 gcc_assert (GET_CODE (x) == REG);
8343 if (GET_MODE (x) == DImode)
8344 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8346 if (flag_expensive_optimizations
8347 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8348 set_once = 1;
8350 if (insn == 0)
8352 if (set_once)
8353 insn = get_last_insn_anywhere ();
8354 else
8355 return 0;
8358 while ((insn = PREV_INSN (insn)))
8360 switch (GET_CODE (insn))
8362 case JUMP_INSN:
8363 case NOTE:
8364 break;
8365 case CODE_LABEL:
8366 case CALL_INSN:
8367 default:
8368 if (! set_once)
8369 return 0;
8370 break;
8371 case INSN:
8373 rtx pat = PATTERN (insn);
8374 if (GET_CODE (pat) != SET)
8375 return 0;
8376 if (rtx_equal_p (x, SET_DEST (pat)))
8377 return set_extends (insn);
8378 if (y && rtx_equal_p (y, SET_DEST (pat)))
8379 return set_extends (insn);
8380 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8381 return 0;
8385 return 0;
8388 /* Returns assembly code to perform a DImode shift using
8389 a 64-bit global or out register on SPARC-V8+. */
8390 const char *
8391 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8393 static char asm_code[60];
8395 /* The scratch register is only required when the destination
8396 register is not a 64-bit global or out register. */
8397 if (which_alternative != 2)
8398 operands[3] = operands[0];
8400 /* We can only shift by constants <= 63. */
8401 if (GET_CODE (operands[2]) == CONST_INT)
8402 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8404 if (GET_CODE (operands[1]) == CONST_INT)
8406 output_asm_insn ("mov\t%1, %3", operands);
8408 else
8410 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8411 if (sparc_check_64 (operands[1], insn) <= 0)
8412 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8413 output_asm_insn ("or\t%L1, %3, %3", operands);
8416 strcpy (asm_code, opcode);
8418 if (which_alternative != 2)
8419 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8420 else
8421 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
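/* Illustrative output (a sketch, assuming the first alternative and a
   64-bit left shift, i.e. OPCODE == "sllx"): the preparation insns
   above first glue the two 32-bit halves of operand 1 into a single
   64-bit global/out register, and the string returned below then
   appends

	sllx	%0, %2, %L0
	srlx	%L0, 32, %H0

   so the 64-bit value is shifted once and the result split back into
   a high/low register pair.  */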
8424 /* Output rtl to increment the profiler label LABELNO
8425 for profiling a function entry. */
8427 void
8428 sparc_profile_hook (int labelno)
8430 char buf[32];
8431 rtx lab, fun;
8433 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8434 if (NO_PROFILE_COUNTERS)
8436 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8438 else
8440 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8441 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8442 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8446 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
8448 static void
8449 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
8450 tree decl ATTRIBUTE_UNUSED)
8452 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8454 if (!(flags & SECTION_DEBUG))
8455 fputs (",#alloc", asm_out_file);
8456 if (flags & SECTION_WRITE)
8457 fputs (",#write", asm_out_file);
8458 if (flags & SECTION_TLS)
8459 fputs (",#tls", asm_out_file);
8460 if (flags & SECTION_CODE)
8461 fputs (",#execinstr", asm_out_file);
8463 /* ??? Handle SECTION_BSS. */
8465 fputc ('\n', asm_out_file);
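/* For example (illustrative only; the section name is hypothetical):
   a writable TLS data section named ".tdata" would come out as

	.section	".tdata",#alloc,#write,#tls

   using the Solaris assembler's #flag syntax instead of the GNU
   single-string "awT" form.  */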
8468 /* We do not allow indirect calls to be optimized into sibling calls.
8470 We cannot use sibling calls when delayed branches are disabled
8471 because they will likely require the call delay slot to be filled.
8473 Also, on SPARC 32-bit we cannot emit a sibling call when the
8474 current function returns a structure. This is because the "unimp
8475 after call" convention would cause the callee to return to the
8476 wrong place. The generic code already disallows cases where the
8477 function being called returns a structure.
8479 It may seem strange how this last case could occur. Usually there
8480 is code after the call which jumps to epilogue code which dumps the
8481 return value into the struct return area. That ought to invalidate
8482 the sibling call, right? Well, in the C++ case we can end up passing
8483 the pointer to the struct return area to a constructor (which returns
8484 void) and then nothing else happens. Such a sibling call would look
8485 valid without the added check here.
8487 VxWorks PIC PLT entries require the global pointer to be initialized
8488 on entry. We therefore can't emit sibling calls to them. */
8489 static bool
8490 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8492 return (decl
8493 && flag_delayed_branch
8494 && (TARGET_ARCH64 || ! cfun->returns_struct)
8495 && !(TARGET_VXWORKS_RTP
8496 && flag_pic
8497 && !targetm.binds_local_p (decl)));
8500 /* libfunc renaming. */
8502 static void
8503 sparc_init_libfuncs (void)
8505 if (TARGET_ARCH32)
8507 /* Use the subroutines that Sun's library provides for integer
8508 multiply and divide. The `*' prevents an underscore from
8509 being prepended by the compiler. .umul is a little faster
8510 than .mul. */
8511 set_optab_libfunc (smul_optab, SImode, "*.umul");
8512 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8513 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8514 set_optab_libfunc (smod_optab, SImode, "*.rem");
8515 set_optab_libfunc (umod_optab, SImode, "*.urem");
8517 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8518 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8519 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8520 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8521 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8522 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
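/* So, illustratively, a 32-bit source line like "long double z = x * y;"
   (assuming 128-bit long double, so that it is TFmode) is compiled
   into a call _Q_mul (x, y) rather than inline floating-point code,
   since TFmode arithmetic is done in software on this ABI.  */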
8524 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8525 is because with soft-float, the SFmode and DFmode sqrt
8526 instructions will be absent, and the compiler will notice and
8527 try to use the TFmode sqrt instruction for calls to the
8528 builtin function sqrt, but this fails. */
8529 if (TARGET_FPU)
8530 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8532 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8533 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8534 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8535 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8536 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8537 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8539 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8540 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8541 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8542 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8544 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8545 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8546 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8547 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8549 if (DITF_CONVERSION_LIBFUNCS)
8551 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8552 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8553 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8554 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8557 if (SUN_CONVERSION_LIBFUNCS)
8559 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8560 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8561 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8562 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8565 if (TARGET_ARCH64)
8567 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8568 do not exist in the library. Make sure the compiler does not
8569 emit calls to them by accident. (It should always use the
8570 hardware instructions.) */
8571 set_optab_libfunc (smul_optab, SImode, 0);
8572 set_optab_libfunc (sdiv_optab, SImode, 0);
8573 set_optab_libfunc (udiv_optab, SImode, 0);
8574 set_optab_libfunc (smod_optab, SImode, 0);
8575 set_optab_libfunc (umod_optab, SImode, 0);
8577 if (SUN_INTEGER_MULTIPLY_64)
8579 set_optab_libfunc (smul_optab, DImode, "__mul64");
8580 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8581 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8582 set_optab_libfunc (smod_optab, DImode, "__rem64");
8583 set_optab_libfunc (umod_optab, DImode, "__urem64");
8586 if (SUN_CONVERSION_LIBFUNCS)
8588 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8589 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8590 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8591 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8596 #define def_builtin(NAME, CODE, TYPE) \
8597 add_builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8598 NULL_TREE)
8600 /* Implement the TARGET_INIT_BUILTINS target hook.
8601 Create builtin functions for special SPARC instructions. */
8603 static void
8604 sparc_init_builtins (void)
8606 if (TARGET_VIS)
8607 sparc_vis_init_builtins ();
8610 /* Create builtin functions for VIS 1.0 instructions. */
8612 static void
8613 sparc_vis_init_builtins (void)
8615 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8616 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8617 tree v4hi = build_vector_type (intHI_type_node, 4);
8618 tree v2hi = build_vector_type (intHI_type_node, 2);
8619 tree v2si = build_vector_type (intSI_type_node, 2);
8621 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8622 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8623 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8624 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8625 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8626 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8627 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8628 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8629 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8630 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8631 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8632 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8633 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8634 v8qi, v8qi,
8635 intDI_type_node, 0);
8636 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8637 intDI_type_node,
8638 intDI_type_node, 0);
8639 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8640 ptr_type_node,
8641 intSI_type_node, 0);
8642 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8643 ptr_type_node,
8644 intDI_type_node, 0);
8646 /* Packing and expanding vectors. */
8647 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8648 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8649 v8qi_ftype_v2si_v8qi);
8650 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8651 v2hi_ftype_v2si);
8652 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8653 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8654 v8qi_ftype_v4qi_v4qi);
8656 /* Multiplications. */
8657 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8658 v4hi_ftype_v4qi_v4hi);
8659 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8660 v4hi_ftype_v4qi_v2hi);
8661 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8662 v4hi_ftype_v4qi_v2hi);
8663 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8664 v4hi_ftype_v8qi_v4hi);
8665 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8666 v4hi_ftype_v8qi_v4hi);
8667 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8668 v2si_ftype_v4qi_v2hi);
8669 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8670 v2si_ftype_v4qi_v2hi);
8672 /* Data aligning. */
8673 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8674 v4hi_ftype_v4hi_v4hi);
8675 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8676 v8qi_ftype_v8qi_v8qi);
8677 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8678 v2si_ftype_v2si_v2si);
8679 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8680 di_ftype_di_di);
8681 if (TARGET_ARCH64)
8682 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8683 ptr_ftype_ptr_di);
8684 else
8685 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8686 ptr_ftype_ptr_si);
8688 /* Pixel distance. */
8689 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8690 di_ftype_v8qi_v8qi_di);
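/* Usage sketch (user-level code, illustrative only; the typedefs are
   assumptions matching the builtin signatures above, not part of this
   file):

	typedef short v4hi __attribute__ ((vector_size (8)));
	typedef unsigned char v4qi __attribute__ ((vector_size (4)));

	v4qi pack (v4hi x)
	{
	  return __builtin_vis_fpack16 (x);
	}

   Compiled with -mvis, the call above maps onto the fpack16
   instruction.  */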
8693 /* Handle TARGET_EXPAND_BUILTIN target hook.
8694 Expand builtin functions for SPARC intrinsics. */
8696 static rtx
8697 sparc_expand_builtin (tree exp, rtx target,
8698 rtx subtarget ATTRIBUTE_UNUSED,
8699 enum machine_mode tmode ATTRIBUTE_UNUSED,
8700 int ignore ATTRIBUTE_UNUSED)
8702 tree arg;
8703 call_expr_arg_iterator iter;
8704 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8705 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8706 rtx pat, op[4];
8707 enum machine_mode mode[4];
8708 int arg_count = 0;
8710 mode[0] = insn_data[icode].operand[0].mode;
8711 if (!target
8712 || GET_MODE (target) != mode[0]
8713 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8714 op[0] = gen_reg_rtx (mode[0]);
8715 else
8716 op[0] = target;
8718 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8720 arg_count++;
8721 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8722 op[arg_count] = expand_normal (arg);
8724 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8725 mode[arg_count]))
8726 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8729 switch (arg_count)
8731 case 1:
8732 pat = GEN_FCN (icode) (op[0], op[1]);
8733 break;
8734 case 2:
8735 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8736 break;
8737 case 3:
8738 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8739 break;
8740 default:
8741 gcc_unreachable ();
8744 if (!pat)
8745 return NULL_RTX;
8747 emit_insn (pat);
8749 return op[0];
8752 static int
8753 sparc_vis_mul8x16 (int e8, int e16)
8755 return (e8 * e16 + 128) / 256;
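/* Worked example (illustrative): e8 = 100 and e16 = 300 give
   (100 * 300 + 128) / 256 = 30128 / 256 = 117, i.e. the product is
   scaled down by 256 with rounding, which is how the VIS multiplies
   in effect treat the 8-bit operand as a fixed-point fraction.  */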
8758 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8759 by FNCODE. All of the elements in the ELTS0 and ELTS1 lists must be integer
8760 constants. A tree list with the results of the multiplications is returned,
8761 and each element in the list is of INNER_TYPE. */
8763 static tree
8764 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8766 tree n_elts = NULL_TREE;
8767 int scale;
8769 switch (fncode)
8771 case CODE_FOR_fmul8x16_vis:
8772 for (; elts0 && elts1;
8773 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8775 int val
8776 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8777 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8778 n_elts = tree_cons (NULL_TREE,
8779 build_int_cst (inner_type, val),
8780 n_elts);
8782 break;
8784 case CODE_FOR_fmul8x16au_vis:
8785 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8787 for (; elts0; elts0 = TREE_CHAIN (elts0))
8789 int val
8790 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8791 scale);
8792 n_elts = tree_cons (NULL_TREE,
8793 build_int_cst (inner_type, val),
8794 n_elts);
8796 break;
8798 case CODE_FOR_fmul8x16al_vis:
8799 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8801 for (; elts0; elts0 = TREE_CHAIN (elts0))
8803 int val
8804 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8805 scale);
8806 n_elts = tree_cons (NULL_TREE,
8807 build_int_cst (inner_type, val),
8808 n_elts);
8810 break;
8812 default:
8813 gcc_unreachable ();
8816 return nreverse (n_elts);
8819 /* Handle TARGET_FOLD_BUILTIN target hook.
8820 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8821 result of the function call is ignored. NULL_TREE is returned if the
8822 function could not be folded. */
8824 static tree
8825 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
8826 tree *args, bool ignore)
8828 tree arg0, arg1, arg2;
8829 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8830 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8832 if (ignore
8833 && icode != CODE_FOR_alignaddrsi_vis
8834 && icode != CODE_FOR_alignaddrdi_vis)
8835 return build_zero_cst (rtype);
8837 switch (icode)
8839 case CODE_FOR_fexpand_vis:
8840 arg0 = args[0];
8841 STRIP_NOPS (arg0);
8843 if (TREE_CODE (arg0) == VECTOR_CST)
8845 tree inner_type = TREE_TYPE (rtype);
8846 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8847 tree n_elts = NULL_TREE;
8849 for (; elts; elts = TREE_CHAIN (elts))
8851 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8852 n_elts = tree_cons (NULL_TREE,
8853 build_int_cst (inner_type, val),
8854 n_elts);
8856 return build_vector (rtype, nreverse (n_elts));
8858 break;
8860 case CODE_FOR_fmul8x16_vis:
8861 case CODE_FOR_fmul8x16au_vis:
8862 case CODE_FOR_fmul8x16al_vis:
8863 arg0 = args[0];
8864 arg1 = args[1];
8865 STRIP_NOPS (arg0);
8866 STRIP_NOPS (arg1);
8868 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8870 tree inner_type = TREE_TYPE (rtype);
8871 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8872 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8873 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8874 elts1);
8876 return build_vector (rtype, n_elts);
8878 break;
8880 case CODE_FOR_fpmerge_vis:
8881 arg0 = args[0];
8882 arg1 = args[1];
8883 STRIP_NOPS (arg0);
8884 STRIP_NOPS (arg1);
8886 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8888 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8889 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8890 tree n_elts = NULL_TREE;
8892 for (; elts0 && elts1;
8893 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8895 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8896 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8899 return build_vector (rtype, nreverse (n_elts));
8901 break;
8903 case CODE_FOR_pdist_vis:
8904 arg0 = args[0];
8905 arg1 = args[1];
8906 arg2 = args[2];
8907 STRIP_NOPS (arg0);
8908 STRIP_NOPS (arg1);
8909 STRIP_NOPS (arg2);
8911 if (TREE_CODE (arg0) == VECTOR_CST
8912 && TREE_CODE (arg1) == VECTOR_CST
8913 && TREE_CODE (arg2) == INTEGER_CST)
8915 int overflow = 0;
8916 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8917 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8918 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8919 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8921 for (; elts0 && elts1;
8922 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8924 unsigned HOST_WIDE_INT
8925 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8926 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8927 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8928 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8930 unsigned HOST_WIDE_INT l;
8931 HOST_WIDE_INT h;
8933 overflow |= neg_double (low1, high1, &l, &h);
8934 overflow |= add_double (low0, high0, l, h, &l, &h);
8935 if (h < 0)
8936 overflow |= neg_double (l, h, &l, &h);
8938 overflow |= add_double (low, high, l, h, &low, &high);
8941 gcc_assert (overflow == 0);
8943 return build_int_cst_wide (rtype, low, high);
8946 default:
8947 break;
8950 return NULL_TREE;
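/* A small folding example (illustrative): __builtin_vis_fexpand
   applied to the constant vector {1, 2, 3, 4} is folded at compile
   time to {16, 32, 48, 64}, since the fexpand case above shifts each
   8-bit element left by 4 into its 16-bit result slot.  */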
8953 /* ??? This duplicates information provided to the compiler by the
8954 ??? scheduler description. Some day, teach genautomata to output
8955 ??? the latencies and then CSE will just use that. */
8957 static bool
8958 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8959 bool speed ATTRIBUTE_UNUSED)
8961 enum machine_mode mode = GET_MODE (x);
8962 bool float_mode_p = FLOAT_MODE_P (mode);
8964 switch (code)
8966 case CONST_INT:
8967 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8969 *total = 0;
8970 return true;
8972 /* FALLTHRU */
8974 case HIGH:
8975 *total = 2;
8976 return true;
8978 case CONST:
8979 case LABEL_REF:
8980 case SYMBOL_REF:
8981 *total = 4;
8982 return true;
8984 case CONST_DOUBLE:
8985 if (GET_MODE (x) == VOIDmode
8986 && ((CONST_DOUBLE_HIGH (x) == 0
8987 && CONST_DOUBLE_LOW (x) < 0x1000)
8988 || (CONST_DOUBLE_HIGH (x) == -1
8989 && CONST_DOUBLE_LOW (x) < 0
8990 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8991 *total = 0;
8992 else
8993 *total = 8;
8994 return true;
8996 case MEM:
8997 /* If outer-code was a sign or zero extension, a cost
8998 of COSTS_N_INSNS (1) was already added in. This is
8999 why we are subtracting it back out. */
9000 if (outer_code == ZERO_EXTEND)
9002 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
9004 else if (outer_code == SIGN_EXTEND)
9006 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
9008 else if (float_mode_p)
9010 *total = sparc_costs->float_load;
9012 else
9014 *total = sparc_costs->int_load;
9017 return true;
9019 case PLUS:
9020 case MINUS:
9021 if (float_mode_p)
9022 *total = sparc_costs->float_plusminus;
9023 else
9024 *total = COSTS_N_INSNS (1);
9025 return false;
9027 case MULT:
9028 if (float_mode_p)
9029 *total = sparc_costs->float_mul;
9030 else if (! TARGET_HARD_MUL)
9031 *total = COSTS_N_INSNS (25);
9032 else
9034 int bit_cost;
9036 bit_cost = 0;
9037 if (sparc_costs->int_mul_bit_factor)
9039 int nbits;
9041 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
9043 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
9044 for (nbits = 0; value != 0; value &= value - 1)
9045 nbits++;
9047 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
9048 && GET_MODE (XEXP (x, 1)) == VOIDmode)
9050 rtx x1 = XEXP (x, 1);
9051 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
9052 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
9054 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
9055 nbits++;
9056 for (; value2 != 0; value2 &= value2 - 1)
9057 nbits++;
9059 else
9060 nbits = 7;
9062 if (nbits < 3)
9063 nbits = 3;
9064 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
9065 bit_cost = COSTS_N_INSNS (bit_cost);
9068 if (mode == DImode)
9069 *total = sparc_costs->int_mulX + bit_cost;
9070 else
9071 *total = sparc_costs->int_mul + bit_cost;
9073 return false;
9075 case ASHIFT:
9076 case ASHIFTRT:
9077 case LSHIFTRT:
9078 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
9079 return false;
9081 case DIV:
9082 case UDIV:
9083 case MOD:
9084 case UMOD:
9085 if (float_mode_p)
9087 if (mode == DFmode)
9088 *total = sparc_costs->float_div_df;
9089 else
9090 *total = sparc_costs->float_div_sf;
9092 else
9094 if (mode == DImode)
9095 *total = sparc_costs->int_divX;
9096 else
9097 *total = sparc_costs->int_div;
9099 return false;
9101 case NEG:
9102 if (! float_mode_p)
9104 *total = COSTS_N_INSNS (1);
9105 return false;
9107 /* FALLTHRU */
9109 case ABS:
9110 case FLOAT:
9111 case UNSIGNED_FLOAT:
9112 case FIX:
9113 case UNSIGNED_FIX:
9114 case FLOAT_EXTEND:
9115 case FLOAT_TRUNCATE:
9116 *total = sparc_costs->float_move;
9117 return false;
9119 case SQRT:
9120 if (mode == DFmode)
9121 *total = sparc_costs->float_sqrt_df;
9122 else
9123 *total = sparc_costs->float_sqrt_sf;
9124 return false;
9126 case COMPARE:
9127 if (float_mode_p)
9128 *total = sparc_costs->float_cmp;
9129 else
9130 *total = COSTS_N_INSNS (1);
9131 return false;
9133 case IF_THEN_ELSE:
9134 if (float_mode_p)
9135 *total = sparc_costs->float_cmove;
9136 else
9137 *total = sparc_costs->int_cmove;
9138 return false;
9140 case IOR:
9141 /* Handle the NAND vector patterns. */
9142 if (sparc_vector_mode_supported_p (GET_MODE (x))
9143 && GET_CODE (XEXP (x, 0)) == NOT
9144 && GET_CODE (XEXP (x, 1)) == NOT)
9146 *total = COSTS_N_INSNS (1);
9147 return true;
9149 else
9150 return false;
9152 default:
9153 return false;
9157 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
9158 This is achieved by means of a manual dynamic stack space allocation in
9159 the current frame. We make the assumption that SEQ doesn't contain any
9160 function calls, with the possible exception of calls to the PIC helper. */
9162 static void
9163 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
9165 /* We must preserve the lowest 16 words for the register save area. */
9166 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
9167 /* We really need only 2 words of fresh stack space. */
9168 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
9170 rtx slot
9171 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
9172 SPARC_STACK_BIAS + offset));
9174 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
9175 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
9176 if (reg2)
9177 emit_insn (gen_rtx_SET (VOIDmode,
9178 adjust_address (slot, word_mode, UNITS_PER_WORD),
9179 reg2));
9180 emit_insn (seq);
9181 if (reg2)
9182 emit_insn (gen_rtx_SET (VOIDmode,
9183 reg2,
9184 adjust_address (slot, word_mode, UNITS_PER_WORD)));
9185 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
9186 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
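/* Resulting frame picture (an illustrative sketch; SPARC_STACK_BIAS
   omitted for clarity): after the stack pointer is dropped by SIZE,

	%sp + 0 .. %sp + 16*UNITS_PER_WORD - 1    register save area
	%sp + 16*UNITS_PER_WORD                   spill slot for REG
	%sp + 17*UNITS_PER_WORD                   spill slot for REG2

   so the two registers survive the execution of SEQ without touching
   any part of the frame the function already uses.  */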
9189 /* Output the assembler code for a thunk function. THUNK_DECL is the
9190 declaration for the thunk function itself, FUNCTION is the decl for
9191 the target function. DELTA is an immediate constant offset to be
9192 added to THIS. If VCALL_OFFSET is nonzero, the word at address
9193 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
9195 static void
9196 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
9197 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9198 tree function)
9200 rtx this_rtx, insn, funexp;
9201 unsigned int int_arg_first;
9203 reload_completed = 1;
9204 epilogue_completed = 1;
9206 emit_note (NOTE_INSN_PROLOGUE_END);
9208 if (flag_delayed_branch)
9210 /* We will emit a regular sibcall below, so we need to instruct
9211 output_sibcall that we are in a leaf function. */
9212 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
9214 /* This will cause final.c to invoke leaf_renumber_regs so we
9215 must behave as if we were in a not-yet-leafified function. */
9216 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
9218 else
9220 /* We will emit the sibcall manually below, so we will need to
9221 manually spill non-leaf registers. */
9222 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
9224 /* We really are in a leaf function. */
9225 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
9228 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
9229 returns a structure, the structure return pointer is there instead. */
9230 if (TARGET_ARCH64
9231 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9232 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
9233 else
9234 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
9236 /* Add DELTA. When possible use a plain add, otherwise load it into
9237 a register first. */
9238 if (delta)
9240 rtx delta_rtx = GEN_INT (delta);
9242 if (! SPARC_SIMM13_P (delta))
9244 rtx scratch = gen_rtx_REG (Pmode, 1);
9245 emit_move_insn (scratch, delta_rtx);
9246 delta_rtx = scratch;
9249 /* THIS_RTX += DELTA. */
9250 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
9253 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
9254 if (vcall_offset)
9256 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9257 rtx scratch = gen_rtx_REG (Pmode, 1);
9259 gcc_assert (vcall_offset < 0);
9261 /* SCRATCH = *THIS_RTX. */
9262 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
9264 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
9265 may not have any available scratch register at this point. */
9266 if (SPARC_SIMM13_P (vcall_offset))
9268 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
9269 else if (! fixed_regs[5]
9270 /* The below sequence is made up of at least 2 insns,
9271 while the default method may need only one. */
9272 && vcall_offset < -8192)
9274 rtx scratch2 = gen_rtx_REG (Pmode, 5);
9275 emit_move_insn (scratch2, vcall_offset_rtx);
9276 vcall_offset_rtx = scratch2;
9278 else
9280 rtx increment = GEN_INT (-4096);
9282 /* VCALL_OFFSET is a negative number whose typical range can be
9283 estimated as -32768..0 in 32-bit mode. In almost all cases
9284 it is therefore cheaper to emit multiple add insns than
9285 spilling and loading the constant into a register (at least
9286 6 insns). */
9287 while (! SPARC_SIMM13_P (vcall_offset))
9289 emit_insn (gen_add2_insn (scratch, increment));
9290 vcall_offset += 4096;
9292 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
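/* Worked example (illustrative): with VCALL_OFFSET = -32768, the loop
   above emits seven "add -4096" insns, leaving a residual offset of
   -4096, which satisfies SPARC_SIMM13_P and can be folded into the
   memory reference below.  */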
9295 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
9296 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
9297 gen_rtx_PLUS (Pmode,
9298 scratch,
9299 vcall_offset_rtx)));
9301 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
9302 emit_insn (gen_add2_insn (this_rtx, scratch));
9305 /* Generate a tail call to the target function. */
9306 if (! TREE_USED (function))
9308 assemble_external (function);
9309 TREE_USED (function) = 1;
9311 funexp = XEXP (DECL_RTL (function), 0);
9313 if (flag_delayed_branch)
9315 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9316 insn = emit_call_insn (gen_sibcall (funexp));
9317 SIBLING_CALL_P (insn) = 1;
9319 else
9321 /* The hoops we have to jump through in order to generate a sibcall
9322 without using delay slots... */
9323 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
9325 if (flag_pic)
9327 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
9328 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
9329 start_sequence ();
9330 /* Delay emitting the PIC helper function because it needs to
9331 change the section and we are emitting assembly code. */
9332 load_pic_register (); /* clobbers %o7 */
9333 scratch = sparc_legitimize_pic_address (funexp, scratch);
9334 seq = get_insns ();
9335 end_sequence ();
9336 emit_and_preserve (seq, spill_reg, spill_reg2);
9338 else if (TARGET_ARCH32)
9340 emit_insn (gen_rtx_SET (VOIDmode,
9341 scratch,
9342 gen_rtx_HIGH (SImode, funexp)));
9343 emit_insn (gen_rtx_SET (VOIDmode,
9344 scratch,
9345 gen_rtx_LO_SUM (SImode, scratch, funexp)));
9347 else /* TARGET_ARCH64 */
9349 switch (sparc_cmodel)
9351 case CM_MEDLOW:
9352 case CM_MEDMID:
9353 /* The destination can serve as a temporary. */
9354 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
9355 break;
9357 case CM_MEDANY:
9358 case CM_EMBMEDANY:
9359 /* The destination cannot serve as a temporary. */
9360 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
9361 start_sequence ();
9362 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
9363 seq = get_insns ();
9364 end_sequence ();
9365 emit_and_preserve (seq, spill_reg, 0);
9366 break;
9368 default:
9369 gcc_unreachable ();
9373 emit_jump_insn (gen_indirect_jump (scratch));
9376 emit_barrier ();
9378 /* Run just enough of rest_of_compilation to get the insns emitted.
9379 There's not really enough bulk here to make other passes such as
9380 instruction scheduling worthwhile. Note that use_thunk calls
9381 assemble_start_function and assemble_end_function. */
9382 insn = get_insns ();
9383 insn_locators_alloc ();
9384 shorten_branches (insn);
9385 final_start_function (insn, file, 1);
9386 final (insn, file, 1);
9387 final_end_function ();
9389 reload_completed = 0;
9390 epilogue_completed = 0;
9393 /* Return true if sparc_output_mi_thunk would be able to output the
9394 assembler code for the thunk function specified by the arguments
9395 it is passed, and false otherwise. */
9396 static bool
9397 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
9398 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
9399 HOST_WIDE_INT vcall_offset,
9400 const_tree function ATTRIBUTE_UNUSED)
9402 /* Bound the loop used in the default method above. */
9403 return (vcall_offset >= -32768 || ! fixed_regs[5]);
9406 /* How to allocate a 'struct machine_function'. */
9408 static struct machine_function *
9409 sparc_init_machine_status (void)
9411 return ggc_alloc_cleared_machine_function ();
9414 /* Locate some local-dynamic symbol still in use by this function
9415 so that we can print its name in local-dynamic base patterns. */
9417 static const char *
9418 get_some_local_dynamic_name (void)
9420 rtx insn;
9422 if (cfun->machine->some_ld_name)
9423 return cfun->machine->some_ld_name;
9425 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
9426 if (INSN_P (insn)
9427 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
9428 return cfun->machine->some_ld_name;
9430 gcc_unreachable ();
9433 static int
9434 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9436 rtx x = *px;
9438 if (x
9439 && GET_CODE (x) == SYMBOL_REF
9440 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9442 cfun->machine->some_ld_name = XSTR (x, 0);
9443 return 1;
9446 return 0;
9449 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
9450 This is called from dwarf2out.c to emit call frame instructions
9451 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
9452 static void
9453 sparc_dwarf_handle_frame_unspec (const char *label,
9454 rtx pattern ATTRIBUTE_UNUSED,
9455 int index ATTRIBUTE_UNUSED)
9457 gcc_assert (index == UNSPECV_SAVEW);
9458 dwarf2out_window_save (label);
9461 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9462 We need to emit DTP-relative relocations. */
9464 static void
9465 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
9467 switch (size)
9469 case 4:
9470 fputs ("\t.word\t%r_tls_dtpoff32(", file);
9471 break;
9472 case 8:
9473 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
9474 break;
9475 default:
9476 gcc_unreachable ();
9478 output_addr_const (file, x);
9479 fputs (")", file);
9482 /* Do whatever processing is required at the end of a file. */
9484 static void
9485 sparc_file_end (void)
9487 /* If we need to emit the special PIC helper function, do so now. */
9488 if (pic_helper_needed)
9490 unsigned int regno = REGNO (pic_offset_table_rtx);
9491 const char *pic_name = reg_names[regno];
9492 char name[32];
9493 #ifdef DWARF2_UNWIND_INFO
9494 bool do_cfi;
9495 #endif
9497 get_pc_thunk_name (name, regno);
9498 if (USE_HIDDEN_LINKONCE)
9500 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
9501 get_identifier (name),
9502 build_function_type (void_type_node,
9503 void_list_node));
9504 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
9505 NULL_TREE, void_type_node);
9506 TREE_STATIC (decl) = 1;
9507 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
9508 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
9509 DECL_VISIBILITY_SPECIFIED (decl) = 1;
9510 allocate_struct_function (decl, true);
9511 cfun->is_thunk = 1;
9512 current_function_decl = decl;
9513 init_varasm_status ();
9514 assemble_start_function (decl, name);
9516 else
9518 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9519 switch_to_section (text_section);
9520 if (align > 0)
9521 ASM_OUTPUT_ALIGN (asm_out_file, align);
9522 ASM_OUTPUT_LABEL (asm_out_file, name);
9525 #ifdef DWARF2_UNWIND_INFO
9526 do_cfi = dwarf2out_do_cfi_asm ();
9527 if (do_cfi)
9528 fprintf (asm_out_file, "\t.cfi_startproc\n");
9529 #endif
9530 if (flag_delayed_branch)
9531 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
9532 pic_name, pic_name);
9533 else
9534 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
9535 pic_name, pic_name);
9536 #ifdef DWARF2_UNWIND_INFO
9537 if (do_cfi)
9538 fprintf (asm_out_file, "\t.cfi_endproc\n");
9539 #endif
9542 if (NEED_INDICATE_EXEC_STACK)
9543 file_end_indicate_exec_stack ();
9546 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9547 /* Implement TARGET_MANGLE_TYPE. */
9549 static const char *
9550 sparc_mangle_type (const_tree type)
9552 if (!TARGET_64BIT
9553 && TYPE_MAIN_VARIANT (type) == long_double_type_node
9554 && TARGET_LONG_DOUBLE_128)
9555 return "g";
9557 /* For all other types, use normal C++ mangling. */
9558 return NULL;
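/* Illustrative effect (an assumption about the surrounding C++ ABI,
   not spelled out in this file): with 128-bit long double on 32-bit,
   "void f (long double)" mangles as _Z1fg instead of the usual
   _Z1fe, "g" being the mangling otherwise used for __float128.  */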
9560 #endif
9562 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing 32-bit
9563 compare and swap on the word containing the byte or half-word. */
9565 void
9566 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
9568 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
9569 rtx addr = gen_reg_rtx (Pmode);
9570 rtx off = gen_reg_rtx (SImode);
9571 rtx oldv = gen_reg_rtx (SImode);
9572 rtx newv = gen_reg_rtx (SImode);
9573 rtx oldvalue = gen_reg_rtx (SImode);
9574 rtx newvalue = gen_reg_rtx (SImode);
9575 rtx res = gen_reg_rtx (SImode);
9576 rtx resv = gen_reg_rtx (SImode);
9577 rtx memsi, val, mask, end_label, loop_label, cc;
9579 emit_insn (gen_rtx_SET (VOIDmode, addr,
9580 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
9582 if (Pmode != SImode)
9583 addr1 = gen_lowpart (SImode, addr1);
9584 emit_insn (gen_rtx_SET (VOIDmode, off,
9585 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
9587 memsi = gen_rtx_MEM (SImode, addr);
9588 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
9589 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
9591 val = force_reg (SImode, memsi);
9593 emit_insn (gen_rtx_SET (VOIDmode, off,
9594 gen_rtx_XOR (SImode, off,
9595 GEN_INT (GET_MODE (mem) == QImode
9596 ? 3 : 2))));
9598 emit_insn (gen_rtx_SET (VOIDmode, off,
9599 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
9601 if (GET_MODE (mem) == QImode)
9602 mask = force_reg (SImode, GEN_INT (0xff));
9603 else
9604 mask = force_reg (SImode, GEN_INT (0xffff));
9606 emit_insn (gen_rtx_SET (VOIDmode, mask,
9607 gen_rtx_ASHIFT (SImode, mask, off)));
9609 emit_insn (gen_rtx_SET (VOIDmode, val,
9610 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9611 val)));
9613 oldval = gen_lowpart (SImode, oldval);
9614 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9615 gen_rtx_ASHIFT (SImode, oldval, off)));
9617 newval = gen_lowpart_common (SImode, newval);
9618 emit_insn (gen_rtx_SET (VOIDmode, newv,
9619 gen_rtx_ASHIFT (SImode, newval, off)));
9621 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9622 gen_rtx_AND (SImode, oldv, mask)));
9624 emit_insn (gen_rtx_SET (VOIDmode, newv,
9625 gen_rtx_AND (SImode, newv, mask)));
9627 end_label = gen_label_rtx ();
9628 loop_label = gen_label_rtx ();
9629 emit_label (loop_label);
9631 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9632 gen_rtx_IOR (SImode, oldv, val)));
9634 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9635 gen_rtx_IOR (SImode, newv, val)));
9637 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9639 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
9641 emit_insn (gen_rtx_SET (VOIDmode, resv,
9642 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9643 res)));
9645 cc = gen_compare_reg_1 (NE, resv, val);
9646 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9648 /* Use cbranchcc4 to separate the compare and branch! */
9649 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
9650 cc, const0_rtx, loop_label));
9652 emit_label (end_label);
9654 emit_insn (gen_rtx_SET (VOIDmode, res,
9655 gen_rtx_AND (SImode, res, mask)));
9657 emit_insn (gen_rtx_SET (VOIDmode, res,
9658 gen_rtx_LSHIFTRT (SImode, res, off)));
9660 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
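/* In outline, the expansion above behaves like this pseudo-C (an
   illustrative restatement; "cas32" stands for the 32-bit
   compare-and-swap emitted through gen_sync_compare_and_swapsi, and
   mask/off are the shifted field mask and bit offset computed above):

	uint32_t *wp = (uint32_t *) (addr & -4);
	uint32_t o, n, r, bg = *wp & ~mask;   // bytes we must not disturb
	for (;;)
	  {
	    o = bg | (oldval << off);
	    n = bg | (newval << off);
	    r = cas32 (wp, o, n);
	    if (r == o)
	      break;			      // swap succeeded
	    if ((r & ~mask) == bg)
	      break;			      // our field differed: fail
	    bg = r & ~mask;		      // other bytes changed: retry
	  }
	result = (r & mask) >> off;

   i.e. the narrow operands are shifted into place and the untouched
   bytes of the containing word are carried along unchanged.  */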
9663 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
9665 bool
9666 sparc_frame_pointer_required (void)
9668 return !(leaf_function_p () && only_leaf_regs_used ());
9671 /* The way this is structured, we can't eliminate SFP in favor of SP
9672 if the frame pointer is required: we want to use the SFP->HFP elimination
9673 in that case. But the test in update_eliminables doesn't know we are
9674 assuming below that we only do the former elimination. */
9676 bool
9677 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
9679 return (to == HARD_FRAME_POINTER_REGNUM
9680 || !targetm.frame_pointer_required ());
9683 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
9684 they won't be allocated. */
9686 static void
9687 sparc_conditional_register_usage (void)
9689 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
9691 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9692 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9694 /* If the user has passed -f{fixed,call-{used,saved}}-g5, then honor it. */
9696 if (TARGET_ARCH32 && fixed_regs[5])
9697 fixed_regs[5] = 1;
9698 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
9699 fixed_regs[5] = 0;
9700 if (! TARGET_V9)
9702 int regno;
9703 for (regno = SPARC_FIRST_V9_FP_REG;
9704 regno <= SPARC_LAST_V9_FP_REG;
9705 regno++)
9706 fixed_regs[regno] = 1;
9707 /* %fcc0 is used by v8 and v9. */
9708 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
9709 regno <= SPARC_LAST_V9_FCC_REG;
9710 regno++)
9711 fixed_regs[regno] = 1;
9713 if (! TARGET_FPU)
9715 int regno;
9716 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
9717 fixed_regs[regno] = 1;
9719 /* If the user has passed -f{fixed,call-{used,saved}}-g2, then honor it.
9720 Likewise with g3 and g4. */
9721 if (fixed_regs[2] == 2)
9722 fixed_regs[2] = ! TARGET_APP_REGS;
9723 if (fixed_regs[3] == 2)
9724 fixed_regs[3] = ! TARGET_APP_REGS;
9725 if (TARGET_ARCH32 && fixed_regs[4] == 2)
9726 fixed_regs[4] = ! TARGET_APP_REGS;
9727 else if (TARGET_CM_EMBMEDANY)
9728 fixed_regs[4] = 1;
9729 else if (fixed_regs[4] == 2)
9730 fixed_regs[4] = 0;
9733 #include "gt-sparc.h"