Rebase.
[official-gcc.git] / gcc / config / c6x / c6x.c
blob7fa60b96e1d0a260e9c421908bce1a03236eec79
1 /* Target Code for TI C6X
2 Copyright (C) 2010-2014 Free Software Foundation, Inc.
3 Contributed by Andrew Jenner <andrew@codesourcery.com>
4 Contributed by Bernd Schmidt <bernds@codesourcery.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "varasm.h"
30 #include "calls.h"
31 #include "stringpool.h"
32 #include "insn-flags.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "insn-codes.h"
36 #include "expr.h"
37 #include "regs.h"
38 #include "optabs.h"
39 #include "recog.h"
40 #include "ggc.h"
41 #include "sched-int.h"
42 #include "timevar.h"
43 #include "tm_p.h"
44 #include "tm-preds.h"
45 #include "tm-constrs.h"
46 #include "df.h"
47 #include "function.h"
48 #include "diagnostic-core.h"
49 #include "cgraph.h"
50 #include "langhooks.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "sel-sched.h"
54 #include "debug.h"
55 #include "opts.h"
56 #include "hw-doloop.h"
57 #include "regrename.h"
58 #include "dumpfile.h"
59 #include "gimple-expr.h"
60 #include "builtins.h"
62 /* Table of supported architecture variants. */
63 typedef struct
65 const char *arch;
66 enum c6x_cpu_type type;
67 unsigned short features;
68 } c6x_arch_table;
70 /* A list of all ISAs, mapping each one to a representative device.
71 Used for -march selection. */
72 static const c6x_arch_table all_isas[] =
74 #define C6X_ISA(NAME,DEVICE,FLAGS) \
75 { NAME, DEVICE, FLAGS },
76 #include "c6x-isas.def"
77 #undef C6X_ISA
78 { NULL, C6X_CPU_C62X, 0 }
81 /* This is the parsed result of the "-march=" option, if given. */
82 enum c6x_cpu_type c6x_arch = C6X_DEFAULT_ARCH;
84 /* A mask of insn types that are allowed by the architecture selected by
85 the -march option. */
86 unsigned long c6x_insn_mask = C6X_DEFAULT_INSN_MASK;
88 /* The instruction that is being output (as obtained from FINAL_PRESCAN_INSN).
90 static rtx c6x_current_insn = NULL_RTX;
92 /* A decl we build to access __c6xabi_DSBT_base. */
93 static GTY(()) tree dsbt_decl;
95 /* Determines whether we run our final scheduling pass or not. We always
96 avoid the normal second scheduling pass. */
97 static int c6x_flag_schedule_insns2;
99 /* Determines whether we run variable tracking in machine dependent
100 reorganization. */
101 static int c6x_flag_var_tracking;
103 /* Determines whether we use modulo scheduling. */
104 static int c6x_flag_modulo_sched;
106 /* Record the state of flag_pic before we set it to 1 for DSBT. */
107 int c6x_initial_flag_pic;
109 typedef struct
111 /* We record the clock cycle for every insn during scheduling. */
112 int clock;
113 /* After scheduling, we run assign_reservations to choose unit
114 reservations for all insns. These are recorded here. */
115 int reservation;
116 /* Records the new condition for insns which must be made
117 conditional after scheduling. An entry of NULL_RTX means no such
118 change is necessary. */
119 rtx new_cond;
120 /* True for the first insn that was scheduled in an ebb. */
121 bool ebb_start;
122 /* The scheduler state after the insn, transformed into a mask of UNIT_QID
123 bits rather than storing the state. Meaningful only for the last
124 insn in a cycle. */
125 unsigned int unit_mask;
126 } c6x_sched_insn_info;
129 /* Record a c6x_sched_insn_info structure for every insn in the function. */
130 static vec<c6x_sched_insn_info> insn_info;
132 #define INSN_INFO_LENGTH (insn_info).length ()
133 #define INSN_INFO_ENTRY(N) (insn_info[(N)])
135 static bool done_cfi_sections;
137 #define RESERVATION_FLAG_D 1
138 #define RESERVATION_FLAG_L 2
139 #define RESERVATION_FLAG_S 4
140 #define RESERVATION_FLAG_M 8
141 #define RESERVATION_FLAG_DL (RESERVATION_FLAG_D | RESERVATION_FLAG_L)
142 #define RESERVATION_FLAG_DS (RESERVATION_FLAG_D | RESERVATION_FLAG_S)
143 #define RESERVATION_FLAG_LS (RESERVATION_FLAG_L | RESERVATION_FLAG_S)
144 #define RESERVATION_FLAG_DLS (RESERVATION_FLAG_D | RESERVATION_FLAG_LS)
146 /* The DFA names of the units. */
147 static const char *const c6x_unit_names[] =
149 "d1", "l1", "s1", "m1", "fps1", "fpl1", "adddps1", "adddpl1",
150 "d2", "l2", "s2", "m2", "fps2", "fpl2", "adddps2", "adddpl2"
153 /* The DFA unit number for each unit in c6x_unit_names[]. */
154 static int c6x_unit_codes[ARRAY_SIZE (c6x_unit_names)];
156 /* Unit query IDs. */
157 #define UNIT_QID_D1 0
158 #define UNIT_QID_L1 1
159 #define UNIT_QID_S1 2
160 #define UNIT_QID_M1 3
161 #define UNIT_QID_FPS1 4
162 #define UNIT_QID_FPL1 5
163 #define UNIT_QID_ADDDPS1 6
164 #define UNIT_QID_ADDDPL1 7
165 #define UNIT_QID_SIDE_OFFSET 8
167 #define RESERVATION_S1 2
168 #define RESERVATION_S2 10
170 /* An enum for the unit requirements we count in the UNIT_REQS table. */
171 enum unitreqs
173 UNIT_REQ_D,
174 UNIT_REQ_L,
175 UNIT_REQ_S,
176 UNIT_REQ_M,
177 UNIT_REQ_DL,
178 UNIT_REQ_DS,
179 UNIT_REQ_LS,
180 UNIT_REQ_DLS,
181 UNIT_REQ_T,
182 UNIT_REQ_X,
183 UNIT_REQ_MAX
186 /* A table used to count unit requirements. Used when computing minimum
187 iteration intervals. */
188 typedef int unit_req_table[2][UNIT_REQ_MAX];
189 static unit_req_table unit_reqs;
191 /* Register map for debugging. */
192 unsigned const dbx_register_map[FIRST_PSEUDO_REGISTER] =
194 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* A0 - A15. */
195 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, /* A16 - A32. */
196 50, 51, 52,
197 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, /* B0 - B15. */
198 29, 30, 31,
199 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, /* B16 - B32. */
200 66, 67, 68,
201 -1, -1, -1 /* FP, ARGP, ILC. */
204 /* Allocate a new, cleared machine_function structure. */
206 static struct machine_function *
207 c6x_init_machine_status (void)
209 return ggc_cleared_alloc<machine_function> ();
212 /* Implement TARGET_OPTION_OVERRIDE. */
214 static void
215 c6x_option_override (void)
217 unsigned i;
219 if (global_options_set.x_c6x_arch_option)
221 c6x_arch = all_isas[c6x_arch_option].type;
222 c6x_insn_mask &= ~C6X_INSNS_ALL_CPU_BITS;
223 c6x_insn_mask |= all_isas[c6x_arch_option].features;
226 c6x_flag_schedule_insns2 = flag_schedule_insns_after_reload;
227 flag_schedule_insns_after_reload = 0;
229 c6x_flag_modulo_sched = flag_modulo_sched;
230 flag_modulo_sched = 0;
232 init_machine_status = c6x_init_machine_status;
234 for (i = 0; i < ARRAY_SIZE (c6x_unit_names); i++)
235 c6x_unit_codes[i] = get_cpu_unit_code (c6x_unit_names[i]);
237 if (flag_pic && !TARGET_DSBT)
239 error ("-fpic and -fPIC not supported without -mdsbt on this target");
240 flag_pic = 0;
242 c6x_initial_flag_pic = flag_pic;
243 if (TARGET_DSBT && !flag_pic)
244 flag_pic = 1;
248 /* Implement the TARGET_CONDITIONAL_REGISTER_USAGE hook. */
250 static void
251 c6x_conditional_register_usage (void)
253 int i;
254 if (c6x_arch == C6X_CPU_C62X || c6x_arch == C6X_CPU_C67X)
255 for (i = 16; i < 32; i++)
257 fixed_regs[i] = 1;
258 fixed_regs[32 + i] = 1;
260 if (TARGET_INSNS_64)
262 SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_A_REGS],
263 REG_A0);
264 SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_REGS],
265 REG_A0);
266 CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_A_REGS],
267 REG_A0);
268 CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_REGS],
269 REG_A0);
273 static GTY(()) rtx eqdf_libfunc;
274 static GTY(()) rtx nedf_libfunc;
275 static GTY(()) rtx ledf_libfunc;
276 static GTY(()) rtx ltdf_libfunc;
277 static GTY(()) rtx gedf_libfunc;
278 static GTY(()) rtx gtdf_libfunc;
279 static GTY(()) rtx eqsf_libfunc;
280 static GTY(()) rtx nesf_libfunc;
281 static GTY(()) rtx lesf_libfunc;
282 static GTY(()) rtx ltsf_libfunc;
283 static GTY(()) rtx gesf_libfunc;
284 static GTY(()) rtx gtsf_libfunc;
285 static GTY(()) rtx strasgi_libfunc;
286 static GTY(()) rtx strasgi64p_libfunc;
288 /* Implement the TARGET_INIT_LIBFUNCS macro. We use this to rename library
289 functions to match the C6x ABI. */
291 static void
292 c6x_init_libfuncs (void)
294 /* Double-precision floating-point arithmetic. */
295 set_optab_libfunc (add_optab, DFmode, "__c6xabi_addd");
296 set_optab_libfunc (sdiv_optab, DFmode, "__c6xabi_divd");
297 set_optab_libfunc (smul_optab, DFmode, "__c6xabi_mpyd");
298 set_optab_libfunc (neg_optab, DFmode, "__c6xabi_negd");
299 set_optab_libfunc (sub_optab, DFmode, "__c6xabi_subd");
301 /* Single-precision floating-point arithmetic. */
302 set_optab_libfunc (add_optab, SFmode, "__c6xabi_addf");
303 set_optab_libfunc (sdiv_optab, SFmode, "__c6xabi_divf");
304 set_optab_libfunc (smul_optab, SFmode, "__c6xabi_mpyf");
305 set_optab_libfunc (neg_optab, SFmode, "__c6xabi_negf");
306 set_optab_libfunc (sub_optab, SFmode, "__c6xabi_subf");
308 /* Floating-point comparisons. */
309 eqsf_libfunc = init_one_libfunc ("__c6xabi_eqf");
310 nesf_libfunc = init_one_libfunc ("__c6xabi_neqf");
311 lesf_libfunc = init_one_libfunc ("__c6xabi_lef");
312 ltsf_libfunc = init_one_libfunc ("__c6xabi_ltf");
313 gesf_libfunc = init_one_libfunc ("__c6xabi_gef");
314 gtsf_libfunc = init_one_libfunc ("__c6xabi_gtf");
315 eqdf_libfunc = init_one_libfunc ("__c6xabi_eqd");
316 nedf_libfunc = init_one_libfunc ("__c6xabi_neqd");
317 ledf_libfunc = init_one_libfunc ("__c6xabi_led");
318 ltdf_libfunc = init_one_libfunc ("__c6xabi_ltd");
319 gedf_libfunc = init_one_libfunc ("__c6xabi_ged");
320 gtdf_libfunc = init_one_libfunc ("__c6xabi_gtd");
322 set_optab_libfunc (eq_optab, SFmode, NULL);
323 set_optab_libfunc (ne_optab, SFmode, "__c6xabi_neqf");
324 set_optab_libfunc (gt_optab, SFmode, NULL);
325 set_optab_libfunc (ge_optab, SFmode, NULL);
326 set_optab_libfunc (lt_optab, SFmode, NULL);
327 set_optab_libfunc (le_optab, SFmode, NULL);
328 set_optab_libfunc (unord_optab, SFmode, "__c6xabi_unordf");
329 set_optab_libfunc (eq_optab, DFmode, NULL);
330 set_optab_libfunc (ne_optab, DFmode, "__c6xabi_neqd");
331 set_optab_libfunc (gt_optab, DFmode, NULL);
332 set_optab_libfunc (ge_optab, DFmode, NULL);
333 set_optab_libfunc (lt_optab, DFmode, NULL);
334 set_optab_libfunc (le_optab, DFmode, NULL);
335 set_optab_libfunc (unord_optab, DFmode, "__c6xabi_unordd");
337 /* Floating-point to integer conversions. */
338 set_conv_libfunc (sfix_optab, SImode, DFmode, "__c6xabi_fixdi");
339 set_conv_libfunc (ufix_optab, SImode, DFmode, "__c6xabi_fixdu");
340 set_conv_libfunc (sfix_optab, DImode, DFmode, "__c6xabi_fixdlli");
341 set_conv_libfunc (ufix_optab, DImode, DFmode, "__c6xabi_fixdull");
342 set_conv_libfunc (sfix_optab, SImode, SFmode, "__c6xabi_fixfi");
343 set_conv_libfunc (ufix_optab, SImode, SFmode, "__c6xabi_fixfu");
344 set_conv_libfunc (sfix_optab, DImode, SFmode, "__c6xabi_fixflli");
345 set_conv_libfunc (ufix_optab, DImode, SFmode, "__c6xabi_fixfull");
347 /* Conversions between floating types. */
348 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__c6xabi_cvtdf");
349 set_conv_libfunc (sext_optab, DFmode, SFmode, "__c6xabi_cvtfd");
351 /* Integer to floating-point conversions. */
352 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__c6xabi_fltid");
353 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__c6xabi_fltud");
354 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__c6xabi_fltllid");
355 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__c6xabi_fltulld");
356 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__c6xabi_fltif");
357 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__c6xabi_fltuf");
358 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__c6xabi_fltllif");
359 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__c6xabi_fltullf");
361 /* Long long. */
362 set_optab_libfunc (smul_optab, DImode, "__c6xabi_mpyll");
363 set_optab_libfunc (ashl_optab, DImode, "__c6xabi_llshl");
364 set_optab_libfunc (lshr_optab, DImode, "__c6xabi_llshru");
365 set_optab_libfunc (ashr_optab, DImode, "__c6xabi_llshr");
367 set_optab_libfunc (sdiv_optab, SImode, "__c6xabi_divi");
368 set_optab_libfunc (udiv_optab, SImode, "__c6xabi_divu");
369 set_optab_libfunc (smod_optab, SImode, "__c6xabi_remi");
370 set_optab_libfunc (umod_optab, SImode, "__c6xabi_remu");
371 set_optab_libfunc (sdivmod_optab, SImode, "__c6xabi_divremi");
372 set_optab_libfunc (udivmod_optab, SImode, "__c6xabi_divremu");
373 set_optab_libfunc (sdiv_optab, DImode, "__c6xabi_divlli");
374 set_optab_libfunc (udiv_optab, DImode, "__c6xabi_divull");
375 set_optab_libfunc (smod_optab, DImode, "__c6xabi_remlli");
376 set_optab_libfunc (umod_optab, DImode, "__c6xabi_remull");
377 set_optab_libfunc (udivmod_optab, DImode, "__c6xabi_divremull");
379 /* Block move. */
380 strasgi_libfunc = init_one_libfunc ("__c6xabi_strasgi");
381 strasgi64p_libfunc = init_one_libfunc ("__c6xabi_strasgi_64plus");
384 /* Begin the assembly file. */
386 static void
387 c6x_file_start (void)
389 /* Variable tracking should be run after all optimizations which change order
390 of insns. It also needs a valid CFG. This can't be done in
391 c6x_override_options, because flag_var_tracking is finalized after
392 that. */
393 c6x_flag_var_tracking = flag_var_tracking;
394 flag_var_tracking = 0;
396 done_cfi_sections = false;
397 default_file_start ();
399 /* Arrays are aligned to 8-byte boundaries. */
400 asm_fprintf (asm_out_file,
401 "\t.c6xabi_attribute Tag_ABI_array_object_alignment, 0\n");
402 asm_fprintf (asm_out_file,
403 "\t.c6xabi_attribute Tag_ABI_array_object_align_expected, 0\n");
405 /* Stack alignment is 8 bytes. */
406 asm_fprintf (asm_out_file,
407 "\t.c6xabi_attribute Tag_ABI_stack_align_needed, 0\n");
408 asm_fprintf (asm_out_file,
409 "\t.c6xabi_attribute Tag_ABI_stack_align_preserved, 0\n");
411 #if 0 /* FIXME: Reenable when TI's tools are fixed. */
412 /* ??? Ideally we'd check flag_short_wchar somehow. */
413 asm_fprintf (asm_out_file, "\t.c6xabi_attribute Tag_ABI_wchar_t, %d\n", 2);
414 #endif
416 /* We conform to version 1.0 of the ABI. */
417 asm_fprintf (asm_out_file,
418 "\t.c6xabi_attribute Tag_ABI_conformance, \"1.0\"\n");
422 /* The LTO frontend only enables exceptions when it sees a function that
423 uses it. This changes the return value of dwarf2out_do_frame, so we
424 have to check before every function. */
426 void
427 c6x_output_file_unwind (FILE * f)
429 if (done_cfi_sections)
430 return;
432 /* Output a .cfi_sections directive. */
433 if (dwarf2out_do_frame ())
435 if (flag_unwind_tables || flag_exceptions)
437 if (write_symbols == DWARF2_DEBUG
438 || write_symbols == VMS_AND_DWARF2_DEBUG)
439 asm_fprintf (f, "\t.cfi_sections .debug_frame, .c6xabi.exidx\n");
440 else
441 asm_fprintf (f, "\t.cfi_sections .c6xabi.exidx\n");
443 else
444 asm_fprintf (f, "\t.cfi_sections .debug_frame\n");
445 done_cfi_sections = true;
449 /* Output unwind directives at the end of a function. */
451 static void
452 c6x_output_fn_unwind (FILE * f)
454 /* Return immediately if we are not generating unwinding tables. */
455 if (! (flag_unwind_tables || flag_exceptions))
456 return;
458 /* If this function will never be unwound, then mark it as such. */
459 if (!(flag_unwind_tables || crtl->uses_eh_lsda)
460 && (TREE_NOTHROW (current_function_decl)
461 || crtl->all_throwers_are_sibcalls))
462 fputs("\t.cantunwind\n", f);
464 fputs ("\t.endp\n", f);
468 /* Stack and Calling. */
470 int argument_registers[10] =
472 REG_A4, REG_B4,
473 REG_A6, REG_B6,
474 REG_A8, REG_B8,
475 REG_A10, REG_B10,
476 REG_A12, REG_B12
479 /* Implements the macro INIT_CUMULATIVE_ARGS defined in c6x.h. */
481 void
482 c6x_init_cumulative_args (CUMULATIVE_ARGS *cum, const_tree fntype, rtx libname,
483 int n_named_args ATTRIBUTE_UNUSED)
485 cum->count = 0;
486 cum->nregs = 10;
487 if (!libname && fntype)
489 /* We need to find out the number of named arguments. Unfortunately,
490 for incoming arguments, N_NAMED_ARGS is set to -1. */
491 if (stdarg_p (fntype))
492 cum->nregs = type_num_arguments (fntype) - 1;
493 if (cum->nregs > 10)
494 cum->nregs = 10;
498 /* Implements the macro FUNCTION_ARG defined in c6x.h. */
500 static rtx
501 c6x_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
502 const_tree type, bool named ATTRIBUTE_UNUSED)
504 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
505 if (cum->count >= cum->nregs)
506 return NULL_RTX;
507 if (type)
509 HOST_WIDE_INT size = int_size_in_bytes (type);
510 if (TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (type))
512 if (size > 4)
514 rtx reg1 = gen_rtx_REG (SImode, argument_registers[cum->count] + 1);
515 rtx reg2 = gen_rtx_REG (SImode, argument_registers[cum->count]);
516 rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
517 gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
518 return gen_rtx_PARALLEL (mode, vec);
522 return gen_rtx_REG (mode, argument_registers[cum->count]);
525 static void
526 c6x_function_arg_advance (cumulative_args_t cum_v,
527 enum machine_mode mode ATTRIBUTE_UNUSED,
528 const_tree type ATTRIBUTE_UNUSED,
529 bool named ATTRIBUTE_UNUSED)
531 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
532 cum->count++;
536 /* Return true if BLOCK_REG_PADDING (MODE, TYPE, FIRST) should return
537 upward rather than downward. */
539 bool
540 c6x_block_reg_pad_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
541 const_tree type, bool first)
543 HOST_WIDE_INT size;
545 if (!TARGET_BIG_ENDIAN)
546 return true;
547 if (!first)
548 return true;
549 if (!type)
550 return true;
551 size = int_size_in_bytes (type);
552 return size == 3;
555 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. */
557 static unsigned int
558 c6x_function_arg_boundary (enum machine_mode mode, const_tree type)
560 unsigned int boundary = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);
562 if (boundary > BITS_PER_WORD)
563 return 2 * BITS_PER_WORD;
565 if (mode == BLKmode)
567 HOST_WIDE_INT size = int_size_in_bytes (type);
568 if (size > 4)
569 return 2 * BITS_PER_WORD;
570 if (boundary < BITS_PER_WORD)
572 if (size >= 3)
573 return BITS_PER_WORD;
574 if (size >= 2)
575 return 2 * BITS_PER_UNIT;
578 return boundary;
581 /* Implement TARGET_FUNCTION_ARG_ROUND_BOUNDARY. */
582 static unsigned int
583 c6x_function_arg_round_boundary (enum machine_mode mode, const_tree type)
585 return c6x_function_arg_boundary (mode, type);
588 /* TARGET_FUNCTION_VALUE implementation. Returns an RTX representing the place
589 where function FUNC returns or receives a value of data type TYPE. */
591 static rtx
592 c6x_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED,
593 bool outgoing ATTRIBUTE_UNUSED)
595 /* Functions return values in register A4. When returning aggregates, we may
596 have to adjust for endianness. */
597 if (TARGET_BIG_ENDIAN && type && AGGREGATE_TYPE_P (type))
599 HOST_WIDE_INT size = int_size_in_bytes (type);
600 if (size > 4)
603 rtx reg1 = gen_rtx_REG (SImode, REG_A4 + 1);
604 rtx reg2 = gen_rtx_REG (SImode, REG_A4);
605 rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
606 gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
607 return gen_rtx_PARALLEL (TYPE_MODE (type), vec);
610 return gen_rtx_REG (TYPE_MODE (type), REG_A4);
613 /* Implement TARGET_LIBCALL_VALUE. */
615 static rtx
616 c6x_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
618 return gen_rtx_REG (mode, REG_A4);
621 /* TARGET_STRUCT_VALUE_RTX implementation. */
623 static rtx
624 c6x_struct_value_rtx (tree type ATTRIBUTE_UNUSED, int incoming ATTRIBUTE_UNUSED)
626 return gen_rtx_REG (Pmode, REG_A3);
629 /* Implement TARGET_FUNCTION_VALUE_REGNO_P. */
631 static bool
632 c6x_function_value_regno_p (const unsigned int regno)
634 return regno == REG_A4;
637 /* Types larger than 64 bit, and variable sized types, are passed by
638 reference. The callee must copy them; see c6x_callee_copies. */
640 static bool
641 c6x_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
642 enum machine_mode mode, const_tree type,
643 bool named ATTRIBUTE_UNUSED)
645 int size = -1;
646 if (type)
647 size = int_size_in_bytes (type);
648 else if (mode != VOIDmode)
649 size = GET_MODE_SIZE (mode);
650 return size > 2 * UNITS_PER_WORD || size == -1;
653 /* Decide whether a type should be returned in memory (true)
654 or in a register (false). This is called by the macro
655 TARGET_RETURN_IN_MEMORY. */
657 static bool
658 c6x_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
660 int size = int_size_in_bytes (type);
661 return size > 2 * UNITS_PER_WORD || size == -1;
664 /* Values which must be returned in the most-significant end of the return
665 register. */
667 static bool
668 c6x_return_in_msb (const_tree valtype)
670 HOST_WIDE_INT size = int_size_in_bytes (valtype);
671 return TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (valtype) && size == 3;
674 /* Implement TARGET_CALLEE_COPIES. */
676 static bool
677 c6x_callee_copies (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
678 enum machine_mode mode ATTRIBUTE_UNUSED,
679 const_tree type ATTRIBUTE_UNUSED,
680 bool named ATTRIBUTE_UNUSED)
682 return true;
685 /* Return the type to use as __builtin_va_list. */
686 static tree
687 c6x_build_builtin_va_list (void)
689 return build_pointer_type (char_type_node);
692 static void
693 c6x_asm_trampoline_template (FILE *f)
695 fprintf (f, "\t.long\t0x0000002b\n"); /* mvkl .s2 fnlow,B0 */
696 fprintf (f, "\t.long\t0x01000028\n"); /* || mvkl .s1 sclow,A2 */
697 fprintf (f, "\t.long\t0x0000006b\n"); /* mvkh .s2 fnhigh,B0 */
698 fprintf (f, "\t.long\t0x01000068\n"); /* || mvkh .s1 schigh,A2 */
699 fprintf (f, "\t.long\t0x00000362\n"); /* b .s2 B0 */
700 fprintf (f, "\t.long\t0x00008000\n"); /* nop 5 */
701 fprintf (f, "\t.long\t0x00000000\n"); /* nop */
702 fprintf (f, "\t.long\t0x00000000\n"); /* nop */
705 /* Emit RTL insns to initialize the variable parts of a trampoline at
706 TRAMP. FNADDR is an RTX for the address of the function's pure
707 code. CXT is an RTX for the static chain value for the function. */
709 static void
710 c6x_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
712 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
713 rtx t1 = copy_to_reg (fnaddr);
714 rtx t2 = copy_to_reg (cxt);
715 rtx mask = gen_reg_rtx (SImode);
716 int i;
718 emit_block_move (tramp, assemble_trampoline_template (),
719 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
721 emit_move_insn (mask, GEN_INT (0xffff << 7));
723 for (i = 0; i < 4; i++)
725 rtx mem = adjust_address (tramp, SImode, i * 4);
726 rtx t = (i & 1) ? t2 : t1;
727 rtx v1 = gen_reg_rtx (SImode);
728 rtx v2 = gen_reg_rtx (SImode);
729 emit_move_insn (v1, mem);
730 if (i < 2)
731 emit_insn (gen_ashlsi3 (v2, t, GEN_INT (7)));
732 else
733 emit_insn (gen_lshrsi3 (v2, t, GEN_INT (9)));
734 emit_insn (gen_andsi3 (v2, v2, mask));
735 emit_insn (gen_iorsi3 (v2, v2, v1));
736 emit_move_insn (mem, v2);
738 #ifdef CLEAR_INSN_CACHE
739 tramp = XEXP (tramp, 0);
740 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__gnu_clear_cache"),
741 LCT_NORMAL, VOIDmode, 2, tramp, Pmode,
742 plus_constant (Pmode, tramp, TRAMPOLINE_SIZE),
743 Pmode);
744 #endif
747 /* Determine whether c6x_output_mi_thunk can succeed. */
749 static bool
750 c6x_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
751 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
752 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
753 const_tree function ATTRIBUTE_UNUSED)
755 return !TARGET_LONG_CALLS;
758 /* Output the assembler code for a thunk function. THUNK is the
759 declaration for the thunk function itself, FUNCTION is the decl for
760 the target function. DELTA is an immediate constant offset to be
761 added to THIS. If VCALL_OFFSET is nonzero, the word at
762 *(*this + vcall_offset) should be added to THIS. */
764 static void
765 c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
766 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
767 HOST_WIDE_INT vcall_offset, tree function)
769 rtx xops[5];
770 /* The this parameter is passed as the first argument. */
771 rtx this_rtx = gen_rtx_REG (Pmode, REG_A4);
773 c6x_current_insn = NULL_RTX;
775 xops[4] = XEXP (DECL_RTL (function), 0);
776 if (!vcall_offset)
778 output_asm_insn ("b .s2 \t%4", xops);
779 if (!delta)
780 output_asm_insn ("nop 5", xops);
783 /* Adjust the this parameter by a fixed constant. */
784 if (delta)
786 xops[0] = GEN_INT (delta);
787 xops[1] = this_rtx;
788 if (delta >= -16 && delta <= 15)
790 output_asm_insn ("add .s1 %0, %1, %1", xops);
791 if (!vcall_offset)
792 output_asm_insn ("nop 4", xops);
794 else if (delta >= 16 && delta < 32)
796 output_asm_insn ("add .d1 %0, %1, %1", xops);
797 if (!vcall_offset)
798 output_asm_insn ("nop 4", xops);
800 else if (delta >= -32768 && delta < 32768)
802 output_asm_insn ("mvk .s1 %0, A0", xops);
803 output_asm_insn ("add .d1 %1, A0, %1", xops);
804 if (!vcall_offset)
805 output_asm_insn ("nop 3", xops);
807 else
809 output_asm_insn ("mvkl .s1 %0, A0", xops);
810 output_asm_insn ("mvkh .s1 %0, A0", xops);
811 output_asm_insn ("add .d1 %1, A0, %1", xops);
812 if (!vcall_offset)
813 output_asm_insn ("nop 3", xops);
817 /* Adjust the this parameter by a value stored in the vtable. */
818 if (vcall_offset)
820 rtx a0tmp = gen_rtx_REG (Pmode, REG_A0);
821 rtx a3tmp = gen_rtx_REG (Pmode, REG_A3);
823 xops[1] = a3tmp;
824 xops[2] = a0tmp;
825 xops[3] = gen_rtx_MEM (Pmode, a0tmp);
826 output_asm_insn ("mv .s1 a4, %2", xops);
827 output_asm_insn ("ldw .d1t1 %3, %2", xops);
829 /* Adjust the this parameter. */
830 xops[0] = gen_rtx_MEM (Pmode, plus_constant (Pmode, a0tmp,
831 vcall_offset));
832 if (!memory_operand (xops[0], Pmode))
834 rtx tmp2 = gen_rtx_REG (Pmode, REG_A1);
835 xops[0] = GEN_INT (vcall_offset);
836 xops[1] = tmp2;
837 output_asm_insn ("mvkl .s1 %0, %1", xops);
838 output_asm_insn ("mvkh .s1 %0, %1", xops);
839 output_asm_insn ("nop 2", xops);
840 output_asm_insn ("add .d1 %2, %1, %2", xops);
841 xops[0] = gen_rtx_MEM (Pmode, a0tmp);
843 else
844 output_asm_insn ("nop 4", xops);
845 xops[2] = this_rtx;
846 output_asm_insn ("ldw .d1t1 %0, %1", xops);
847 output_asm_insn ("|| b .s2 \t%4", xops);
848 output_asm_insn ("nop 4", xops);
849 output_asm_insn ("add .d1 %2, %1, %2", xops);
853 /* Return true if EXP goes in small data/bss. */
855 static bool
856 c6x_in_small_data_p (const_tree exp)
858 /* We want to merge strings, so we never consider them small data. */
859 if (TREE_CODE (exp) == STRING_CST)
860 return false;
862 /* Functions are never small data. */
863 if (TREE_CODE (exp) == FUNCTION_DECL)
864 return false;
866 if (TREE_CODE (exp) == VAR_DECL && DECL_WEAK (exp))
867 return false;
869 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
871 const char *section = DECL_SECTION_NAME (exp);
873 if (strcmp (section, ".neardata") == 0
874 || strncmp (section, ".neardata.", 10) == 0
875 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
876 || strcmp (section, ".bss") == 0
877 || strncmp (section, ".bss.", 5) == 0
878 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0
879 || strcmp (section, ".rodata") == 0
880 || strncmp (section, ".rodata.", 8) == 0
881 || strncmp (section, ".gnu.linkonce.s2.", 17) == 0)
882 return true;
884 else
885 return PLACE_IN_SDATA_P (exp);
887 return false;
890 /* Return a section for X. The only special thing we do here is to
891 honor small data. We don't have a tree type, so we can't use the
892 PLACE_IN_SDATA_P macro we use everywhere else; we choose to place
893 everything sized 8 bytes or smaller into small data. */
895 static section *
896 c6x_select_rtx_section (enum machine_mode mode, rtx x,
897 unsigned HOST_WIDE_INT align)
899 if (c6x_sdata_mode == C6X_SDATA_ALL
900 || (c6x_sdata_mode != C6X_SDATA_NONE && GET_MODE_SIZE (mode) <= 8))
901 /* ??? Consider using mergeable sdata sections. */
902 return sdata_section;
903 else
904 return default_elf_select_rtx_section (mode, x, align);
907 static section *
908 c6x_elf_select_section (tree decl, int reloc,
909 unsigned HOST_WIDE_INT align)
911 const char *sname = NULL;
912 unsigned int flags = SECTION_WRITE;
913 if (c6x_in_small_data_p (decl))
915 switch (categorize_decl_for_section (decl, reloc))
917 case SECCAT_SRODATA:
918 sname = ".rodata";
919 flags = 0;
920 break;
921 case SECCAT_SDATA:
922 sname = ".neardata";
923 break;
924 case SECCAT_SBSS:
925 sname = ".bss";
926 flags |= SECTION_BSS;
927 default:
928 break;
931 else
933 switch (categorize_decl_for_section (decl, reloc))
935 case SECCAT_DATA:
936 sname = ".fardata";
937 break;
938 case SECCAT_DATA_REL:
939 sname = ".fardata.rel";
940 break;
941 case SECCAT_DATA_REL_LOCAL:
942 sname = ".fardata.rel.local";
943 break;
944 case SECCAT_DATA_REL_RO:
945 sname = ".fardata.rel.ro";
946 break;
947 case SECCAT_DATA_REL_RO_LOCAL:
948 sname = ".fardata.rel.ro.local";
949 break;
950 case SECCAT_BSS:
951 sname = ".far";
952 flags |= SECTION_BSS;
953 break;
954 case SECCAT_RODATA:
955 sname = ".const";
956 flags = 0;
957 break;
958 case SECCAT_SRODATA:
959 case SECCAT_SDATA:
960 case SECCAT_SBSS:
961 gcc_unreachable ();
962 default:
963 break;
966 if (sname)
968 /* We might get called with string constants, but get_named_section
969 doesn't like them as they are not DECLs. Also, we need to set
970 flags in that case. */
971 if (!DECL_P (decl))
972 return get_section (sname, flags, NULL);
973 return get_named_section (decl, sname, reloc);
976 return default_elf_select_section (decl, reloc, align);
979 /* Build up a unique section name, expressed as a
980 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
981 RELOC indicates whether the initial value of EXP requires
982 link-time relocations. */
984 static void ATTRIBUTE_UNUSED
985 c6x_elf_unique_section (tree decl, int reloc)
987 const char *prefix = NULL;
988 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
989 bool one_only = DECL_COMDAT_GROUP (decl) && !HAVE_COMDAT_GROUP;
991 if (c6x_in_small_data_p (decl))
993 switch (categorize_decl_for_section (decl, reloc))
995 case SECCAT_SDATA:
996 prefix = one_only ? ".s" : ".neardata";
997 break;
998 case SECCAT_SBSS:
999 prefix = one_only ? ".sb" : ".bss";
1000 break;
1001 case SECCAT_SRODATA:
1002 prefix = one_only ? ".s2" : ".rodata";
1003 break;
1004 case SECCAT_RODATA_MERGE_STR:
1005 case SECCAT_RODATA_MERGE_STR_INIT:
1006 case SECCAT_RODATA_MERGE_CONST:
1007 case SECCAT_RODATA:
1008 case SECCAT_DATA:
1009 case SECCAT_DATA_REL:
1010 case SECCAT_DATA_REL_LOCAL:
1011 case SECCAT_DATA_REL_RO:
1012 case SECCAT_DATA_REL_RO_LOCAL:
1013 gcc_unreachable ();
1014 default:
1015 /* Everything else we place into default sections and hope for the
1016 best. */
1017 break;
1020 else
1022 switch (categorize_decl_for_section (decl, reloc))
1024 case SECCAT_DATA:
1025 case SECCAT_DATA_REL:
1026 case SECCAT_DATA_REL_LOCAL:
1027 case SECCAT_DATA_REL_RO:
1028 case SECCAT_DATA_REL_RO_LOCAL:
1029 prefix = one_only ? ".fd" : ".fardata";
1030 break;
1031 case SECCAT_BSS:
1032 prefix = one_only ? ".fb" : ".far";
1033 break;
1034 case SECCAT_RODATA:
1035 case SECCAT_RODATA_MERGE_STR:
1036 case SECCAT_RODATA_MERGE_STR_INIT:
1037 case SECCAT_RODATA_MERGE_CONST:
1038 prefix = one_only ? ".fr" : ".const";
1039 break;
1040 case SECCAT_SRODATA:
1041 case SECCAT_SDATA:
1042 case SECCAT_SBSS:
1043 gcc_unreachable ();
1044 default:
1045 break;
1049 if (prefix)
1051 const char *name, *linkonce;
1052 char *string;
1054 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1055 name = targetm.strip_name_encoding (name);
1057 /* If we're using one_only, then there needs to be a .gnu.linkonce
1058 prefix to the section name. */
1059 linkonce = one_only ? ".gnu.linkonce" : "";
1061 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
1063 set_decl_section_name (decl, string);
1064 return;
1066 default_unique_section (decl, reloc);
1069 static unsigned int
1070 c6x_section_type_flags (tree decl, const char *name, int reloc)
1072 unsigned int flags = 0;
1074 if (strcmp (name, ".far") == 0
1075 || strncmp (name, ".far.", 5) == 0)
1076 flags |= SECTION_BSS;
1078 flags |= default_section_type_flags (decl, name, reloc);
1080 return flags;
1083 /* Checks whether the given CALL_EXPR would use a caller saved
1084 register. This is used to decide whether sibling call optimization
1085 could be performed on the respective function call. */
1087 static bool
1088 c6x_call_saved_register_used (tree call_expr)
1090 CUMULATIVE_ARGS cum_v;
1091 cumulative_args_t cum;
1092 HARD_REG_SET call_saved_regset;
1093 tree parameter;
1094 enum machine_mode mode;
1095 tree type;
1096 rtx parm_rtx;
1097 int i;
1099 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
1100 cum = pack_cumulative_args (&cum_v);
1102 COMPL_HARD_REG_SET (call_saved_regset, call_used_reg_set);
1103 for (i = 0; i < call_expr_nargs (call_expr); i++)
1105 parameter = CALL_EXPR_ARG (call_expr, i);
1106 gcc_assert (parameter);
1108 /* For an undeclared variable passed as parameter we will get
1109 an ERROR_MARK node here. */
1110 if (TREE_CODE (parameter) == ERROR_MARK)
1111 return true;
1113 type = TREE_TYPE (parameter);
1114 gcc_assert (type);
1116 mode = TYPE_MODE (type);
1117 gcc_assert (mode);
1119 if (pass_by_reference (&cum_v, mode, type, true))
1121 mode = Pmode;
1122 type = build_pointer_type (type);
1125 parm_rtx = c6x_function_arg (cum, mode, type, 0);
1127 c6x_function_arg_advance (cum, mode, type, 0);
1129 if (!parm_rtx)
1130 continue;
1132 if (REG_P (parm_rtx)
1133 && overlaps_hard_reg_set_p (call_saved_regset, GET_MODE (parm_rtx),
1134 REGNO (parm_rtx)))
1135 return true;
1136 if (GET_CODE (parm_rtx) == PARALLEL)
1138 int n = XVECLEN (parm_rtx, 0);
1139 while (n-- > 0)
1141 rtx x = XEXP (XVECEXP (parm_rtx, 0, n), 0);
1142 if (REG_P (x)
1143 && overlaps_hard_reg_set_p (call_saved_regset,
1144 GET_MODE (x), REGNO (x)))
1145 return true;
1149 return false;
1152 /* Decide whether we can make a sibling call to a function. DECL is the
1153 declaration of the function being targeted by the call and EXP is the
1154 CALL_EXPR representing the call. */
1156 static bool
1157 c6x_function_ok_for_sibcall (tree decl, tree exp)
1159 /* Registers A10, A12, B10 and B12 are available as arguments
1160 register but unfortunately caller saved. This makes functions
1161 needing these registers for arguments not suitable for
1162 sibcalls. */
1163 if (c6x_call_saved_register_used (exp))
1164 return false;
1166 if (!flag_pic)
1167 return true;
1169 if (TARGET_DSBT)
1171 /* When compiling for DSBT, the calling function must be local,
1172 so that when we reload B14 in the sibcall epilogue, it will
1173 not change its value. */
1174 struct cgraph_local_info *this_func;
1176 if (!decl)
1177 /* Not enough information. */
1178 return false;
1180 this_func = cgraph_local_info (current_function_decl);
1181 return this_func->local;
1184 return true;
1187 /* Return true if DECL is known to be linked into section SECTION. */
1189 static bool
1190 c6x_function_in_section_p (tree decl, section *section)
1192 /* We can only be certain about functions defined in the same
1193 compilation unit. */
1194 if (!TREE_STATIC (decl))
1195 return false;
1197 /* Make sure that SYMBOL always binds to the definition in this
1198 compilation unit. */
1199 if (!targetm.binds_local_p (decl))
1200 return false;
1202 /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
1203 if (!DECL_SECTION_NAME (decl))
1205 /* Make sure that we will not create a unique section for DECL. */
1206 if (flag_function_sections || DECL_COMDAT_GROUP (decl))
1207 return false;
1210 return function_section (decl) == section;
1213 /* Return true if a call to OP, which is a SYMBOL_REF, must be expanded
1214 as a long call. */
1215 bool
1216 c6x_long_call_p (rtx op)
1218 tree decl;
1220 if (!TARGET_LONG_CALLS)
1221 return false;
1223 decl = SYMBOL_REF_DECL (op);
1225 /* Try to determine whether the symbol is in the same section as the current
1226 function. Be conservative, and only cater for cases in which the
1227 whole of the current function is placed in the same section. */
1228 if (decl != NULL_TREE
1229 && !flag_reorder_blocks_and_partition
1230 && TREE_CODE (decl) == FUNCTION_DECL
1231 && c6x_function_in_section_p (decl, current_function_section ()))
1232 return false;
1234 return true;
1237 /* Emit the sequence for a call. */
1238 void
1239 c6x_expand_call (rtx retval, rtx address, bool sibcall)
1241 rtx callee = XEXP (address, 0);
1242 rtx call_insn;
1244 if (!c6x_call_operand (callee, Pmode))
1246 callee = force_reg (Pmode, callee);
1247 address = change_address (address, Pmode, callee);
1249 call_insn = gen_rtx_CALL (VOIDmode, address, const0_rtx);
1250 if (sibcall)
1252 call_insn = emit_call_insn (call_insn);
1253 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
1254 gen_rtx_REG (Pmode, REG_B3));
1256 else
1258 if (retval == NULL_RTX)
1259 call_insn = emit_call_insn (call_insn);
1260 else
1261 call_insn = emit_call_insn (gen_rtx_SET (GET_MODE (retval), retval,
1262 call_insn));
1264 if (flag_pic)
1265 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
1268 /* Legitimize PIC addresses. If the address is already position-independent,
1269 we return ORIG. Newly generated position-independent addresses go into a
1270 reg. This is REG if nonzero, otherwise we allocate register(s) as
1271 necessary. PICREG is the register holding the pointer to the PIC offset
1272 table. */
1274 static rtx
1275 legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
1277 rtx addr = orig;
1278 rtx new_rtx = orig;
1280 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
1282 int unspec = UNSPEC_LOAD_GOT;
1283 rtx tmp;
1285 if (reg == 0)
1287 gcc_assert (can_create_pseudo_p ());
1288 reg = gen_reg_rtx (Pmode);
1290 if (flag_pic == 2)
1292 if (can_create_pseudo_p ())
1293 tmp = gen_reg_rtx (Pmode);
1294 else
1295 tmp = reg;
1296 emit_insn (gen_movsi_gotoff_high (tmp, addr));
1297 emit_insn (gen_movsi_gotoff_lo_sum (tmp, tmp, addr));
1298 emit_insn (gen_load_got_gotoff (reg, picreg, tmp));
1300 else
1302 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
1303 new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));
1305 emit_move_insn (reg, new_rtx);
1307 if (picreg == pic_offset_table_rtx)
1308 crtl->uses_pic_offset_table = 1;
1309 return reg;
1312 else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
1314 rtx base;
1316 if (GET_CODE (addr) == CONST)
1318 addr = XEXP (addr, 0);
1319 gcc_assert (GET_CODE (addr) == PLUS);
1322 if (XEXP (addr, 0) == picreg)
1323 return orig;
1325 if (reg == 0)
1327 gcc_assert (can_create_pseudo_p ());
1328 reg = gen_reg_rtx (Pmode);
1331 base = legitimize_pic_address (XEXP (addr, 0), reg, picreg);
1332 addr = legitimize_pic_address (XEXP (addr, 1),
1333 base == reg ? NULL_RTX : reg,
1334 picreg);
1336 if (GET_CODE (addr) == CONST_INT)
1338 gcc_assert (! reload_in_progress && ! reload_completed);
1339 addr = force_reg (Pmode, addr);
1342 if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
1344 base = gen_rtx_PLUS (Pmode, base, XEXP (addr, 0));
1345 addr = XEXP (addr, 1);
1348 return gen_rtx_PLUS (Pmode, base, addr);
1351 return new_rtx;
1354 /* Expand a move operation in mode MODE. The operands are in OPERANDS.
1355 Returns true if no further code must be generated, false if the caller
1356 should generate an insn to move OPERANDS[1] to OPERANDS[0]. */
1358 bool
1359 expand_move (rtx *operands, enum machine_mode mode)
1361 rtx dest = operands[0];
1362 rtx op = operands[1];
1364 if ((reload_in_progress | reload_completed) == 0
1365 && GET_CODE (dest) == MEM && GET_CODE (op) != REG)
1366 operands[1] = force_reg (mode, op);
1367 else if (mode == SImode && symbolic_operand (op, SImode))
1369 if (flag_pic)
1371 if (sdata_symbolic_operand (op, SImode))
1373 emit_insn (gen_load_sdata_pic (dest, pic_offset_table_rtx, op));
1374 crtl->uses_pic_offset_table = 1;
1375 return true;
1377 else
1379 rtx temp = (reload_completed || reload_in_progress
1380 ? dest : gen_reg_rtx (Pmode));
1382 operands[1] = legitimize_pic_address (op, temp,
1383 pic_offset_table_rtx);
1386 else if (reload_completed
1387 && !sdata_symbolic_operand (op, SImode))
1389 emit_insn (gen_movsi_high (dest, op));
1390 emit_insn (gen_movsi_lo_sum (dest, dest, op));
1391 return true;
1394 return false;
1397 /* This function is called when we're about to expand an integer compare
1398 operation which performs COMPARISON. It examines the second operand,
1399 and if it is an integer constant that cannot be used directly on the
1400 current machine in a comparison insn, it returns true. */
1401 bool
1402 c6x_force_op_for_comparison_p (enum rtx_code code, rtx op)
1404 if (!CONST_INT_P (op) || satisfies_constraint_Iu4 (op))
1405 return false;
1407 if ((code == EQ || code == LT || code == GT)
1408 && !satisfies_constraint_Is5 (op))
1409 return true;
1410 if ((code == GTU || code == LTU)
1411 && (!TARGET_INSNS_64 || !satisfies_constraint_Iu5 (op)))
1412 return true;
1414 return false;
1417 /* Emit comparison instruction if necessary, returning the expression
1418 that holds the compare result in the proper mode. Return the comparison
1419 that should be used in the jump insn. */
1422 c6x_expand_compare (rtx comparison, enum machine_mode mode)
1424 enum rtx_code code = GET_CODE (comparison);
1425 rtx op0 = XEXP (comparison, 0);
1426 rtx op1 = XEXP (comparison, 1);
1427 rtx cmp;
1428 enum rtx_code jump_code = code;
1429 enum machine_mode op_mode = GET_MODE (op0);
1431 if (op_mode == DImode && (code == NE || code == EQ) && op1 == const0_rtx)
1433 rtx t = gen_reg_rtx (SImode);
1434 emit_insn (gen_iorsi3 (t, gen_lowpart (SImode, op0),
1435 gen_highpart (SImode, op0)));
1436 op_mode = SImode;
1437 cmp = t;
1439 else if (op_mode == DImode)
1441 rtx lo[2], high[2];
1442 rtx cmp1, cmp2;
1444 if (code == NE || code == GEU || code == LEU || code == GE || code == LE)
1446 code = reverse_condition (code);
1447 jump_code = EQ;
1449 else
1450 jump_code = NE;
1452 split_di (&op0, 1, lo, high);
1453 split_di (&op1, 1, lo + 1, high + 1);
1455 if (c6x_force_op_for_comparison_p (code, high[1])
1456 || c6x_force_op_for_comparison_p (EQ, high[1]))
1457 high[1] = force_reg (SImode, high[1]);
1459 cmp1 = gen_reg_rtx (SImode);
1460 cmp2 = gen_reg_rtx (SImode);
1461 emit_insn (gen_rtx_SET (VOIDmode, cmp1,
1462 gen_rtx_fmt_ee (code, SImode, high[0], high[1])));
1463 if (code == EQ)
1465 if (c6x_force_op_for_comparison_p (code, lo[1]))
1466 lo[1] = force_reg (SImode, lo[1]);
1467 emit_insn (gen_rtx_SET (VOIDmode, cmp2,
1468 gen_rtx_fmt_ee (code, SImode, lo[0], lo[1])));
1469 emit_insn (gen_andsi3 (cmp1, cmp1, cmp2));
1471 else
1473 emit_insn (gen_rtx_SET (VOIDmode, cmp2,
1474 gen_rtx_EQ (SImode, high[0], high[1])));
1475 if (code == GT)
1476 code = GTU;
1477 else if (code == LT)
1478 code = LTU;
1479 if (c6x_force_op_for_comparison_p (code, lo[1]))
1480 lo[1] = force_reg (SImode, lo[1]);
1481 emit_insn (gen_cmpsi_and (cmp2, gen_rtx_fmt_ee (code, SImode,
1482 lo[0], lo[1]),
1483 lo[0], lo[1], cmp2));
1484 emit_insn (gen_iorsi3 (cmp1, cmp1, cmp2));
1486 cmp = cmp1;
1488 else if (TARGET_FP && !flag_finite_math_only
1489 && (op_mode == DFmode || op_mode == SFmode)
1490 && code != EQ && code != NE && code != LT && code != GT
1491 && code != UNLE && code != UNGE)
1493 enum rtx_code code1, code2, code3;
1494 rtx (*fn) (rtx, rtx, rtx, rtx, rtx);
1496 jump_code = NE;
1497 code3 = UNKNOWN;
1498 switch (code)
1500 case UNLT:
1501 case UNGT:
1502 jump_code = EQ;
1503 /* fall through */
1504 case LE:
1505 case GE:
1506 code1 = code == LE || code == UNGT ? LT : GT;
1507 code2 = EQ;
1508 break;
1510 case UNORDERED:
1511 jump_code = EQ;
1512 /* fall through */
1513 case ORDERED:
1514 code3 = EQ;
1515 /* fall through */
1516 case LTGT:
1517 code1 = LT;
1518 code2 = GT;
1519 break;
1521 case UNEQ:
1522 code1 = LT;
1523 code2 = GT;
1524 jump_code = EQ;
1525 break;
1527 default:
1528 gcc_unreachable ();
1531 cmp = gen_reg_rtx (SImode);
1532 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1533 gen_rtx_fmt_ee (code1, SImode, op0, op1)));
1534 fn = op_mode == DFmode ? gen_cmpdf_ior : gen_cmpsf_ior;
1535 emit_insn (fn (cmp, gen_rtx_fmt_ee (code2, SImode, op0, op1),
1536 op0, op1, cmp));
1537 if (code3 != UNKNOWN)
1538 emit_insn (fn (cmp, gen_rtx_fmt_ee (code3, SImode, op0, op1),
1539 op0, op1, cmp));
1541 else if (op_mode == SImode && (code == NE || code == EQ) && op1 == const0_rtx)
1542 cmp = op0;
1543 else
1545 bool is_fp_libfunc;
1546 is_fp_libfunc = !TARGET_FP && (op_mode == DFmode || op_mode == SFmode);
1548 if ((code == NE || code == GEU || code == LEU || code == GE || code == LE)
1549 && !is_fp_libfunc)
1551 code = reverse_condition (code);
1552 jump_code = EQ;
1554 else if (code == UNGE)
1556 code = LT;
1557 jump_code = EQ;
1559 else if (code == UNLE)
1561 code = GT;
1562 jump_code = EQ;
1564 else
1565 jump_code = NE;
1567 if (is_fp_libfunc)
1569 rtx insns;
1570 rtx libfunc;
1571 switch (code)
1573 case EQ:
1574 libfunc = op_mode == DFmode ? eqdf_libfunc : eqsf_libfunc;
1575 break;
1576 case NE:
1577 libfunc = op_mode == DFmode ? nedf_libfunc : nesf_libfunc;
1578 break;
1579 case GT:
1580 libfunc = op_mode == DFmode ? gtdf_libfunc : gtsf_libfunc;
1581 break;
1582 case GE:
1583 libfunc = op_mode == DFmode ? gedf_libfunc : gesf_libfunc;
1584 break;
1585 case LT:
1586 libfunc = op_mode == DFmode ? ltdf_libfunc : ltsf_libfunc;
1587 break;
1588 case LE:
1589 libfunc = op_mode == DFmode ? ledf_libfunc : lesf_libfunc;
1590 break;
1591 default:
1592 gcc_unreachable ();
1594 start_sequence ();
1596 cmp = emit_library_call_value (libfunc, 0, LCT_CONST, SImode, 2,
1597 op0, op_mode, op1, op_mode);
1598 insns = get_insns ();
1599 end_sequence ();
1601 emit_libcall_block (insns, cmp, cmp,
1602 gen_rtx_fmt_ee (code, SImode, op0, op1));
1604 else
1606 cmp = gen_reg_rtx (SImode);
1607 if (c6x_force_op_for_comparison_p (code, op1))
1608 op1 = force_reg (SImode, op1);
1609 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1610 gen_rtx_fmt_ee (code, SImode, op0, op1)));
1614 return gen_rtx_fmt_ee (jump_code, mode, cmp, const0_rtx);
1617 /* Return one word of double-word value OP. HIGH_P is true to select the
1618 high part, false to select the low part. When encountering auto-increment
1619 addressing, we make the assumption that the low part is going to be accessed
1620 first. */
1623 c6x_subword (rtx op, bool high_p)
1625 unsigned int byte;
1626 enum machine_mode mode;
1628 mode = GET_MODE (op);
1629 if (mode == VOIDmode)
1630 mode = DImode;
1632 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
1633 byte = UNITS_PER_WORD;
1634 else
1635 byte = 0;
1637 if (MEM_P (op))
1639 rtx addr = XEXP (op, 0);
1640 if (GET_CODE (addr) == PLUS || REG_P (addr))
1641 return adjust_address (op, word_mode, byte);
1642 /* FIXME: should really support autoincrement addressing for
1643 multi-word modes. */
1644 gcc_unreachable ();
1647 return simplify_gen_subreg (word_mode, op, mode, byte);
1650 /* Split one or more DImode RTL references into pairs of SImode
1651 references. The RTL can be REG, offsettable MEM, integer constant, or
1652 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
1653 split and "num" is its length. lo_half and hi_half are output arrays
1654 that parallel "operands". */
1656 void
1657 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
1659 while (num--)
1661 rtx op = operands[num];
1663 lo_half[num] = c6x_subword (op, false);
1664 hi_half[num] = c6x_subword (op, true);
1668 /* Return true if VAL is a mask valid for a clr instruction. */
1669 bool
1670 c6x_valid_mask_p (HOST_WIDE_INT val)
1672 int i;
1673 for (i = 0; i < 32; i++)
1674 if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
1675 break;
1676 for (; i < 32; i++)
1677 if (val & ((unsigned HOST_WIDE_INT)1 << i))
1678 break;
1679 for (; i < 32; i++)
1680 if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
1681 return false;
1682 return true;
1685 /* Expand a block move for a movmemM pattern. */
1687 bool
1688 c6x_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
1689 rtx expected_align_exp ATTRIBUTE_UNUSED,
1690 rtx expected_size_exp ATTRIBUTE_UNUSED)
1692 unsigned HOST_WIDE_INT align = 1;
1693 unsigned HOST_WIDE_INT src_mem_align, dst_mem_align, min_mem_align;
1694 unsigned HOST_WIDE_INT count = 0, offset = 0;
1695 unsigned int biggest_move = TARGET_STDW ? 8 : 4;
1697 if (CONST_INT_P (align_exp))
1698 align = INTVAL (align_exp);
1700 src_mem_align = MEM_ALIGN (src) / BITS_PER_UNIT;
1701 dst_mem_align = MEM_ALIGN (dst) / BITS_PER_UNIT;
1702 min_mem_align = MIN (src_mem_align, dst_mem_align);
1704 if (min_mem_align > align)
1705 align = min_mem_align / BITS_PER_UNIT;
1706 if (src_mem_align < align)
1707 src_mem_align = align;
1708 if (dst_mem_align < align)
1709 dst_mem_align = align;
1711 if (CONST_INT_P (count_exp))
1712 count = INTVAL (count_exp);
1713 else
1714 return false;
1716 /* Make sure we don't need to care about overflow later on. */
1717 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
1718 return false;
1720 if (count >= 28 && (count & 3) == 0 && align >= 4)
1722 tree dst_expr = MEM_EXPR (dst);
1723 tree src_expr = MEM_EXPR (src);
1724 rtx fn = TARGET_INSNS_64PLUS ? strasgi64p_libfunc : strasgi_libfunc;
1725 rtx srcreg = force_reg (Pmode, XEXP (src, 0));
1726 rtx dstreg = force_reg (Pmode, XEXP (dst, 0));
1728 if (src_expr)
1729 mark_addressable (src_expr);
1730 if (dst_expr)
1731 mark_addressable (dst_expr);
1732 emit_library_call (fn, LCT_NORMAL, VOIDmode, 3,
1733 dstreg, Pmode, srcreg, Pmode, count_exp, SImode);
1734 return true;
1737 if (biggest_move > align && !TARGET_INSNS_64)
1738 biggest_move = align;
1740 if (count / biggest_move > 7)
1741 return false;
1743 while (count > 0)
1745 rtx reg, reg_lowpart;
1746 enum machine_mode srcmode, dstmode;
1747 unsigned HOST_WIDE_INT src_size, dst_size, src_left;
1748 int shift;
1749 rtx srcmem, dstmem;
1751 while (biggest_move > count)
1752 biggest_move /= 2;
1754 src_size = dst_size = biggest_move;
1755 if (src_size > src_mem_align && src_size == 2)
1756 src_size = 1;
1757 if (dst_size > dst_mem_align && dst_size == 2)
1758 dst_size = 1;
1760 if (dst_size > src_size)
1761 dst_size = src_size;
1763 srcmode = mode_for_size (src_size * BITS_PER_UNIT, MODE_INT, 0);
1764 dstmode = mode_for_size (dst_size * BITS_PER_UNIT, MODE_INT, 0);
1765 if (src_size >= 4)
1766 reg_lowpart = reg = gen_reg_rtx (srcmode);
1767 else
1769 reg = gen_reg_rtx (SImode);
1770 reg_lowpart = gen_lowpart (srcmode, reg);
1773 srcmem = adjust_address (copy_rtx (src), srcmode, offset);
1775 if (src_size > src_mem_align)
1777 enum insn_code icode = (srcmode == SImode ? CODE_FOR_movmisalignsi
1778 : CODE_FOR_movmisaligndi);
1779 emit_insn (GEN_FCN (icode) (reg_lowpart, srcmem));
1781 else
1782 emit_move_insn (reg_lowpart, srcmem);
1784 src_left = src_size;
1785 shift = TARGET_BIG_ENDIAN ? (src_size - dst_size) * BITS_PER_UNIT : 0;
1786 while (src_left > 0)
1788 rtx dstreg = reg_lowpart;
1790 if (src_size > dst_size)
1792 rtx srcword = reg;
1793 int shift_amount = shift & (BITS_PER_WORD - 1);
1794 if (src_size > 4)
1795 srcword = operand_subword_force (srcword, src_left >= 4 ? 0 : 4,
1796 SImode);
1797 if (shift_amount > 0)
1799 dstreg = gen_reg_rtx (SImode);
1800 emit_insn (gen_lshrsi3 (dstreg, srcword,
1801 GEN_INT (shift_amount)));
1803 else
1804 dstreg = srcword;
1805 dstreg = gen_lowpart (dstmode, dstreg);
1808 dstmem = adjust_address (copy_rtx (dst), dstmode, offset);
1809 if (dst_size > dst_mem_align)
1811 enum insn_code icode = (dstmode == SImode ? CODE_FOR_movmisalignsi
1812 : CODE_FOR_movmisaligndi);
1813 emit_insn (GEN_FCN (icode) (dstmem, dstreg));
1815 else
1816 emit_move_insn (dstmem, dstreg);
1818 if (TARGET_BIG_ENDIAN)
1819 shift -= dst_size * BITS_PER_UNIT;
1820 else
1821 shift += dst_size * BITS_PER_UNIT;
1822 offset += dst_size;
1823 src_left -= dst_size;
1825 count -= src_size;
1827 return true;
1830 /* Subroutine of print_address_operand, print a single address offset OFF for
1831 a memory access of mode MEM_MODE, choosing between normal form and scaled
1832 form depending on the type of the insn. Misaligned memory references must
1833 use the scaled form. */
1835 static void
1836 print_address_offset (FILE *file, rtx off, enum machine_mode mem_mode)
1838 rtx pat;
1840 if (c6x_current_insn != NULL_RTX)
1842 pat = PATTERN (c6x_current_insn);
1843 if (GET_CODE (pat) == COND_EXEC)
1844 pat = COND_EXEC_CODE (pat);
1845 if (GET_CODE (pat) == PARALLEL)
1846 pat = XVECEXP (pat, 0, 0);
1848 if (GET_CODE (pat) == SET
1849 && GET_CODE (SET_SRC (pat)) == UNSPEC
1850 && XINT (SET_SRC (pat), 1) == UNSPEC_MISALIGNED_ACCESS)
1852 gcc_assert (CONST_INT_P (off)
1853 && (INTVAL (off) & (GET_MODE_SIZE (mem_mode) - 1)) == 0);
1854 fprintf (file, "[" HOST_WIDE_INT_PRINT_DEC "]",
1855 INTVAL (off) / GET_MODE_SIZE (mem_mode));
1856 return;
1859 fputs ("(", file);
1860 output_address (off);
1861 fputs (")", file);
1864 static bool
1865 c6x_print_operand_punct_valid_p (unsigned char c)
1867 return c == '$' || c == '.' || c == '|';
1870 static void c6x_print_operand (FILE *, rtx, int);
1872 /* Subroutine of c6x_print_operand; used to print a memory reference X to FILE. */
1874 static void
1875 c6x_print_address_operand (FILE *file, rtx x, enum machine_mode mem_mode)
1877 rtx off;
1878 switch (GET_CODE (x))
1880 case PRE_MODIFY:
1881 case POST_MODIFY:
1882 if (GET_CODE (x) == POST_MODIFY)
1883 output_address (XEXP (x, 0));
1884 off = XEXP (XEXP (x, 1), 1);
1885 if (XEXP (x, 0) == stack_pointer_rtx)
1887 if (GET_CODE (x) == PRE_MODIFY)
1888 gcc_assert (INTVAL (off) > 0);
1889 else
1890 gcc_assert (INTVAL (off) < 0);
1892 if (CONST_INT_P (off) && INTVAL (off) < 0)
1894 fprintf (file, "--");
1895 off = GEN_INT (-INTVAL (off));
1897 else
1898 fprintf (file, "++");
1899 if (GET_CODE (x) == PRE_MODIFY)
1900 output_address (XEXP (x, 0));
1901 print_address_offset (file, off, mem_mode);
1902 break;
1904 case PLUS:
1905 off = XEXP (x, 1);
1906 if (CONST_INT_P (off) && INTVAL (off) < 0)
1908 fprintf (file, "-");
1909 off = GEN_INT (-INTVAL (off));
1911 else
1912 fprintf (file, "+");
1913 output_address (XEXP (x, 0));
1914 print_address_offset (file, off, mem_mode);
1915 break;
1917 case PRE_DEC:
1918 gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
1919 fprintf (file, "--");
1920 output_address (XEXP (x, 0));
1921 fprintf (file, "[1]");
1922 break;
1923 case PRE_INC:
1924 fprintf (file, "++");
1925 output_address (XEXP (x, 0));
1926 fprintf (file, "[1]");
1927 break;
1928 case POST_INC:
1929 gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
1930 output_address (XEXP (x, 0));
1931 fprintf (file, "++[1]");
1932 break;
1933 case POST_DEC:
1934 output_address (XEXP (x, 0));
1935 fprintf (file, "--[1]");
1936 break;
1938 case SYMBOL_REF:
1939 case CONST:
1940 case LABEL_REF:
1941 gcc_assert (sdata_symbolic_operand (x, Pmode));
1942 fprintf (file, "+B14(");
1943 output_addr_const (file, x);
1944 fprintf (file, ")");
1945 break;
1947 case UNSPEC:
1948 switch (XINT (x, 1))
1950 case UNSPEC_LOAD_GOT:
1951 fputs ("$GOT(", file);
1952 output_addr_const (file, XVECEXP (x, 0, 0));
1953 fputs (")", file);
1954 break;
1955 case UNSPEC_LOAD_SDATA:
1956 output_addr_const (file, XVECEXP (x, 0, 0));
1957 break;
1958 default:
1959 gcc_unreachable ();
1961 break;
1963 default:
1964 gcc_assert (GET_CODE (x) != MEM);
1965 c6x_print_operand (file, x, 0);
1966 break;
1970 /* Return a single character, which is either 'l', 's', 'd' or 'm', which
1971 specifies the functional unit used by INSN. */
1973 char
1974 c6x_get_unit_specifier (rtx insn)
1976 enum attr_units units;
1978 if (insn_info.exists ())
1980 int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
1981 return c6x_unit_names[unit][0];
1984 units = get_attr_units (insn);
1985 switch (units)
1987 case UNITS_D:
1988 case UNITS_DL:
1989 case UNITS_DS:
1990 case UNITS_DLS:
1991 case UNITS_D_ADDR:
1992 return 'd';
1993 break;
1994 case UNITS_L:
1995 case UNITS_LS:
1996 return 'l';
1997 break;
1998 case UNITS_S:
1999 return 's';
2000 break;
2001 case UNITS_M:
2002 return 'm';
2003 break;
2004 default:
2005 gcc_unreachable ();
2009 /* Prints the unit specifier field. */
2010 static void
2011 c6x_print_unit_specifier_field (FILE *file, rtx insn)
2013 enum attr_units units = get_attr_units (insn);
2014 enum attr_cross cross = get_attr_cross (insn);
2015 enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
2016 int half;
2017 char unitspec;
2019 if (units == UNITS_D_ADDR)
2021 enum attr_addr_regfile arf = get_attr_addr_regfile (insn);
2022 int t_half;
2023 gcc_assert (arf != ADDR_REGFILE_UNKNOWN);
2024 half = arf == ADDR_REGFILE_A ? 1 : 2;
2025 t_half = rf == DEST_REGFILE_A ? 1 : 2;
2026 fprintf (file, ".d%dt%d", half, t_half);
2027 return;
2030 if (insn_info.exists ())
2032 int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
2033 fputs (".", file);
2034 fputs (c6x_unit_names[unit], file);
2035 if (cross == CROSS_Y)
2036 fputs ("x", file);
2037 return;
2040 gcc_assert (rf != DEST_REGFILE_UNKNOWN);
2041 unitspec = c6x_get_unit_specifier (insn);
2042 half = rf == DEST_REGFILE_A ? 1 : 2;
2043 fprintf (file, ".%c%d%s", unitspec, half, cross == CROSS_Y ? "x" : "");
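/* Worked examples of the formats above (illustrative only): a D-unit
   address computation on side A whose loaded data lands in a B register
   prints ".d1t2"; an S-unit insn on side B reading one operand over the
   cross path prints ".s2x".  */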
2046 /* Output assembly language output for the address ADDR to FILE. */
2047 static void
2048 c6x_print_operand_address (FILE *file, rtx addr)
2050 c6x_print_address_operand (file, addr, VOIDmode);
2053 /* Print an operand, X, to FILE, with an optional modifier in CODE.
2055 Meaning of CODE:
2056 $ -- print the unit specifier field for the instruction.
2057 . -- print the predicate for the instruction or an empty string for an
2058 unconditional one.
2059 | -- print "||" if the insn should be issued in parallel with the previous
2060 one.
2062 C -- print an opcode suffix for a reversed condition
2063 d -- H, W or D as a suffix for ADDA, based on the factor given by the
2064 operand
2065 D -- print either B, H, W or D as a suffix for ADDA, based on the size of
2066 the operand
2067 J -- print a predicate
2068 j -- like J, but use reverse predicate
2069 k -- treat a CONST_INT as a register number and print it as a register
2070 K -- like k, but print out a doubleword register pair
2071 n -- print an integer operand, negated
2072 p -- print the low part of a DImode register
2073 P -- print the high part of a DImode register
2074 r -- print the absolute value of an integer operand, shifted right by 1
2075 R -- print the absolute value of an integer operand, shifted right by 2
2076 f -- the first clear bit in an integer operand assumed to be a mask for
2077 a clr instruction
2078 F -- the last clear bit in such a mask
2079 s -- the first set bit in an integer operand assumed to be a mask for
2080 a set instruction
2081 S -- the last set bit in such a mask
2082 U -- print either 1 or 2, depending on the side of the machine used by
2083 the operand */
2085 static void
2086 c6x_print_operand (FILE *file, rtx x, int code)
2088 int i;
2089 HOST_WIDE_INT v;
2090 tree t;
2091 enum machine_mode mode;
2093 if (code == '|')
2095 if (GET_MODE (c6x_current_insn) != TImode)
2096 fputs ("||", file);
2097 return;
2099 if (code == '$')
2101 c6x_print_unit_specifier_field (file, c6x_current_insn);
2102 return;
2105 if (code == '.')
2107 x = current_insn_predicate;
2108 if (x)
2110 unsigned int regno = REGNO (XEXP (x, 0));
2111 fputs ("[", file);
2112 if (GET_CODE (x) == EQ)
2113 fputs ("!", file);
2114 fputs (reg_names [regno], file);
2115 fputs ("]", file);
2117 return;
2120 mode = GET_MODE (x);
2122 switch (code)
2124 case 'C':
2125 case 'c':
2127 enum rtx_code c = GET_CODE (x);
2128 if (code == 'C')
2129 c = swap_condition (c);
2130 fputs (GET_RTX_NAME (c), file);
2132 return;
2134 case 'J':
2135 case 'j':
2137 unsigned int regno = REGNO (XEXP (x, 0));
2138 if ((GET_CODE (x) == EQ) == (code == 'J'))
2139 fputs ("!", file);
2140 fputs (reg_names [regno], file);
2142 return;
2144 case 'k':
2145 gcc_assert (GET_CODE (x) == CONST_INT);
2146 v = INTVAL (x);
2147 fprintf (file, "%s", reg_names[v]);
2148 return;
2149 case 'K':
2150 gcc_assert (GET_CODE (x) == CONST_INT);
2151 v = INTVAL (x);
2152 gcc_assert ((v & 1) == 0);
2153 fprintf (file, "%s:%s", reg_names[v + 1], reg_names[v]);
2154 return;
2156 case 's':
2157 case 'S':
2158 case 'f':
2159 case 'F':
2160 gcc_assert (GET_CODE (x) == CONST_INT);
2161 v = INTVAL (x);
2162 for (i = 0; i < 32; i++)
2164 HOST_WIDE_INT tst = v & 1;
2165 if (((code == 'f' || code == 'F') && !tst)
2166 || ((code == 's' || code == 'S') && tst))
2167 break;
2168 v >>= 1;
2170 if (code == 'f' || code == 's')
2172 fprintf (file, "%d", i);
2173 return;
2175 for (;i < 32; i++)
2177 HOST_WIDE_INT tst = v & 1;
2178 if ((code == 'F' && tst) || (code == 'S' && !tst))
2179 break;
2180 v >>= 1;
2182 fprintf (file, "%d", i - 1);
2183 return;
2185 case 'n':
2186 gcc_assert (GET_CODE (x) == CONST_INT);
2187 output_addr_const (file, GEN_INT (-INTVAL (x)));
2188 return;
2190 case 'r':
2191 gcc_assert (GET_CODE (x) == CONST_INT);
2192 v = INTVAL (x);
2193 if (v < 0)
2194 v = -v;
2195 output_addr_const (file, GEN_INT (v >> 1));
2196 return;
2198 case 'R':
2199 gcc_assert (GET_CODE (x) == CONST_INT);
2200 v = INTVAL (x);
2201 if (v < 0)
2202 v = -v;
2203 output_addr_const (file, GEN_INT (v >> 2));
2204 return;
2206 case 'd':
2207 gcc_assert (GET_CODE (x) == CONST_INT);
2208 v = INTVAL (x);
2209 fputs (v == 2 ? "h" : v == 4 ? "w" : "d", file);
2210 return;
2212 case 'p':
2213 case 'P':
2214 gcc_assert (GET_CODE (x) == REG);
2215 v = REGNO (x);
2216 if (code == 'P')
2217 v++;
2218 fputs (reg_names[v], file);
2219 return;
2221 case 'D':
2222 v = 0;
2223 if (GET_CODE (x) == CONST)
2225 x = XEXP (x, 0);
2226 gcc_assert (GET_CODE (x) == PLUS);
2227 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
2228 v = INTVAL (XEXP (x, 1));
2229 x = XEXP (x, 0);
2232 gcc_assert (GET_CODE (x) == SYMBOL_REF);
2234 t = SYMBOL_REF_DECL (x);
2235 if (DECL_P (t))
2236 v |= DECL_ALIGN_UNIT (t);
2237 else
2238 v |= TYPE_ALIGN_UNIT (TREE_TYPE (t));
2239 if (v & 1)
2240 fputs ("b", file);
2241 else if (v & 2)
2242 fputs ("h", file);
2243 else
2244 fputs ("w", file);
2245 return;
2247 case 'U':
2248 if (MEM_P (x))
2250 x = XEXP (x, 0);
2251 if (GET_CODE (x) == PLUS
2252 || GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
2253 x = XEXP (x, 0);
2254 if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
2256 gcc_assert (sdata_symbolic_operand (x, Pmode));
2257 fputs ("2", file);
2258 return;
2261 gcc_assert (REG_P (x));
2262 if (A_REGNO_P (REGNO (x)))
2263 fputs ("1", file);
2264 if (B_REGNO_P (REGNO (x)))
2265 fputs ("2", file);
2266 return;
2268 default:
2269 switch (GET_CODE (x))
2271 case REG:
2272 if (GET_MODE_SIZE (mode) == 8)
2273 fprintf (file, "%s:%s", reg_names[REGNO (x) + 1],
2274 reg_names[REGNO (x)]);
2275 else
2276 fprintf (file, "%s", reg_names[REGNO (x)]);
2277 break;
2279 case MEM:
2280 fputc ('*', file);
2281 gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
2282 c6x_print_address_operand (file, XEXP (x, 0), GET_MODE (x));
2283 break;
2285 case SYMBOL_REF:
2286 fputc ('(', file);
2287 output_addr_const (file, x);
2288 fputc (')', file);
2289 break;
2291 case CONST_INT:
2292 output_addr_const (file, x);
2293 break;
2295 case CONST_DOUBLE:
2296 output_operand_lossage ("invalid const_double operand");
2297 break;
2299 default:
2300 output_addr_const (file, x);
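/* Illustrative, self-contained sketch (hypothetical helper, not used by
   the backend) of the bit scan performed for the 's'/'S' modifiers above:
   it finds the bounds of the first contiguous run of set bits in a mask,
   which become the operands of a SET instruction.  For example,
   0x00000ff0 yields *lo == 4 and *hi == 11.  The 'f'/'F' modifiers do the
   same for the first run of clear bits.  */
static void ATTRIBUTE_UNUSED
sketch_mask_bounds (unsigned int mask, int *lo, int *hi)
{
  int i = 0;
  while (i < 32 && !(mask & 1))
    {
      mask >>= 1;
      i++;
    }
  *lo = i;
  while (i < 32 && (mask & 1))
    {
      mask >>= 1;
      i++;
    }
  *hi = i - 1;
}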
2305 /* Return TRUE if OP is a valid memory address with a base register of
2306 class C. If SMALL_OFFSET is true, we disallow memory references which would
2307 require a long offset with B14/B15. */
2309 bool
2310 c6x_mem_operand (rtx op, enum reg_class c, bool small_offset)
2312 enum machine_mode mode = GET_MODE (op);
2313 rtx base = XEXP (op, 0);
2314 switch (GET_CODE (base))
2316 case REG:
2317 break;
2318 case PLUS:
2319 if (small_offset
2320 && (XEXP (base, 0) == stack_pointer_rtx
2321 || XEXP (base, 0) == pic_offset_table_rtx))
2323 if (!c6x_legitimate_address_p_1 (mode, base, true, true))
2324 return false;
2327 /* fall through */
2328 case PRE_INC:
2329 case PRE_DEC:
2330 case PRE_MODIFY:
2331 case POST_INC:
2332 case POST_DEC:
2333 case POST_MODIFY:
2334 base = XEXP (base, 0);
2335 break;
2337 case CONST:
2338 case LABEL_REF:
2339 case SYMBOL_REF:
2340 gcc_assert (sdata_symbolic_operand (base, Pmode));
2341 return !small_offset && c == B_REGS;
2343 default:
2344 return false;
2346 return TEST_HARD_REG_BIT (reg_class_contents[ (int) (c)], REGNO (base));
2349 /* Returns true if X is a valid address for use in a memory reference
2350 of mode MODE. If STRICT is true, we do not allow pseudo registers
2351 in the address. NO_LARGE_OFFSET is true if we are examining an
2352 address for use in a load or store misaligned instruction, or
2353 recursively examining an operand inside a PRE/POST_MODIFY. */
2355 bool
2356 c6x_legitimate_address_p_1 (enum machine_mode mode, rtx x, bool strict,
2357 bool no_large_offset)
2359 int size, size1;
2360 HOST_WIDE_INT off;
2361 enum rtx_code code = GET_CODE (x);
2363 switch (code)
2365 case PRE_MODIFY:
2366 case POST_MODIFY:
2367 /* We can't split these into word-sized pieces yet. */
2368 if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
2369 return false;
2370 if (GET_CODE (XEXP (x, 1)) != PLUS)
2371 return false;
2372 if (!c6x_legitimate_address_p_1 (mode, XEXP (x, 1), strict, true))
2373 return false;
2374 if (!rtx_equal_p (XEXP (x, 0), XEXP (XEXP (x, 1), 0)))
2375 return false;
2377 /* fall through */
2378 case PRE_INC:
2379 case PRE_DEC:
2380 case POST_INC:
2381 case POST_DEC:
2382 /* We can't split these into word-sized pieces yet. */
2383 if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
2384 return false;
2385 x = XEXP (x, 0);
2386 if (!REG_P (x))
2387 return false;
2389 /* fall through */
2390 case REG:
2391 if (strict)
2392 return REGNO_OK_FOR_BASE_STRICT_P (REGNO (x));
2393 else
2394 return REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x));
2396 case PLUS:
2397 if (!REG_P (XEXP (x, 0))
2398 || !c6x_legitimate_address_p_1 (mode, XEXP (x, 0), strict, false))
2399 return false;
2400 /* We cannot ensure currently that both registers end up in the
2401 same register file. */
2402 if (REG_P (XEXP (x, 1)))
2403 return false;
2405 if (mode == BLKmode)
2406 size = 4;
2407 else if (mode == VOIDmode)
2408 /* ??? This can happen during ivopts. */
2409 size = 1;
2410 else
2411 size = GET_MODE_SIZE (mode);
2413 if (flag_pic
2414 && GET_CODE (XEXP (x, 1)) == UNSPEC
2415 && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_SDATA
2416 && XEXP (x, 0) == pic_offset_table_rtx
2417 && sdata_symbolic_operand (XVECEXP (XEXP (x, 1), 0, 0), SImode))
2418 return !no_large_offset && size <= 4;
2419 if (flag_pic == 1
2420 && mode == Pmode
2421 && GET_CODE (XEXP (x, 1)) == UNSPEC
2422 && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_GOT
2423 && XEXP (x, 0) == pic_offset_table_rtx
2424 && (GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == SYMBOL_REF
2425 || GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == LABEL_REF))
2426 return !no_large_offset;
2427 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2428 return false;
2430 off = INTVAL (XEXP (x, 1));
2432 /* If the machine does not have doubleword load/stores, we'll use
2433 word size accesses. */
2434 size1 = size;
2435 if (size == 2 * UNITS_PER_WORD && !TARGET_STDW)
2436 size = UNITS_PER_WORD;
2438 if (((HOST_WIDE_INT)size1 - 1) & off)
2439 return false;
2440 off /= size;
2441 if (off > -32 && off < (size1 == size ? 32 : 28))
2442 return true;
2443 if (no_large_offset || code != PLUS || XEXP (x, 0) != stack_pointer_rtx
2444 || size1 > UNITS_PER_WORD)
2445 return false;
2446 return off >= 0 && off < 32768;
2448 case CONST:
2449 case SYMBOL_REF:
2450 case LABEL_REF:
2451 return (!no_large_offset
2452 /* With -fpic, we must wrap it in an unspec to show the B14
2453 dependency. */
2454 && !flag_pic
2455 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2456 && sdata_symbolic_operand (x, Pmode));
2458 default:
2459 return false;
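/* Illustrative sketch (hypothetical helper, not used) of the scaled
   constant-offset test above for the common case where the access is not
   split, i.e. size1 == size: the byte offset must be a multiple of the
   access size and, once divided by it, must fit the 5-bit constant of the
   scaled-offset addressing forms checked above.  For a 4-byte access,
   byte offsets 0, 4, ..., 124 pass; 126 fails the alignment test and 128
   fails the range test.  */
static bool ATTRIBUTE_UNUSED
sketch_scaled_offset_ok (HOST_WIDE_INT byte_off, int size)
{
  if (byte_off & (size - 1))
    return false;               /* Not naturally aligned.  */
  byte_off /= size;             /* Scale to element units.  */
  return byte_off > -32 && byte_off < 32;
}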
2463 static bool
2464 c6x_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
2466 return c6x_legitimate_address_p_1 (mode, x, strict, false);
2469 static bool
2470 c6x_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
2471 rtx x ATTRIBUTE_UNUSED)
2473 return true;
2476 /* Implements TARGET_PREFERRED_RENAME_CLASS. */
2477 static reg_class_t
2478 c6x_preferred_rename_class (reg_class_t cl)
2480 if (cl == A_REGS)
2481 return NONPREDICATE_A_REGS;
2482 if (cl == B_REGS)
2483 return NONPREDICATE_B_REGS;
2484 if (cl == ALL_REGS || cl == GENERAL_REGS)
2485 return NONPREDICATE_REGS;
2486 return NO_REGS;
2489 /* Implements FINAL_PRESCAN_INSN. */
2490 void
2491 c6x_final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
2492 int noperands ATTRIBUTE_UNUSED)
2494 c6x_current_insn = insn;
2497 /* A structure to describe the stack layout of a function. The layout is
2498 as follows:
2500 [saved frame pointer (or possibly padding0)]
2501 --> incoming stack pointer, new hard frame pointer
2502 [saved call-used regs]
2503 [optional padding1]
2504 --> soft frame pointer
2505 [frame]
2506 [outgoing arguments]
2507 [optional padding2]
2509 The structure members are laid out in this order. */
2511 struct c6x_frame
2513 int padding0;
2514 /* Number of registers to save. */
2515 int nregs;
2516 int padding1;
2517 HOST_WIDE_INT frame;
2518 int outgoing_arguments_size;
2519 int padding2;
2521 HOST_WIDE_INT to_allocate;
2522 /* The offsets relative to the incoming stack pointer (which
2523 becomes HARD_FRAME_POINTER). */
2524 HOST_WIDE_INT frame_pointer_offset;
2525 HOST_WIDE_INT b3_offset;
2527 /* True if we should call push_rts/pop_rts to save and restore
2528 registers. */
2529 bool push_rts;
2532 /* Return true if we need to save and modify the PIC register in the
2533 prologue. */
2535 static bool
2536 must_reload_pic_reg_p (void)
2538 struct cgraph_local_info *i = NULL;
2540 if (!TARGET_DSBT)
2541 return false;
2543 i = cgraph_local_info (current_function_decl);
2545 if ((crtl->uses_pic_offset_table || !crtl->is_leaf) && !i->local)
2546 return true;
2547 return false;
2550 /* Return 1 if we need to save REGNO. */
2551 static int
2552 c6x_save_reg (unsigned int regno)
2554 return ((df_regs_ever_live_p (regno)
2555 && !call_used_regs[regno]
2556 && !fixed_regs[regno])
2557 || (regno == RETURN_ADDR_REGNO
2558 && (df_regs_ever_live_p (regno)
2559 || !crtl->is_leaf))
2560 || (regno == PIC_OFFSET_TABLE_REGNUM && must_reload_pic_reg_p ()));
2563 /* Examine the number of regs NREGS we've determined we must save.
2564 Return true if we should use __c6xabi_push_rts/__c6xabi_pop_rts for
2565 prologue and epilogue. */
2567 static bool
2568 use_push_rts_p (int nregs)
2570 if (TARGET_INSNS_64PLUS && optimize_function_for_size_p (cfun)
2571 && !cfun->machine->contains_sibcall
2572 && !cfun->returns_struct
2573 && !TARGET_LONG_CALLS
2574 && nregs >= 6 && !frame_pointer_needed)
2575 return true;
2576 return false;
2579 /* Return the number of saved general-purpose registers. */
2582 c6x_nsaved_regs (void)
2584 int nregs = 0;
2585 int regno;
2587 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2588 if (c6x_save_reg (regno))
2589 nregs++;
2590 return nregs;
2593 /* The safe debug order mandated by the ABI. */
2594 static unsigned reg_save_order[] =
2596 REG_A10, REG_A11, REG_A12, REG_A13,
2597 REG_A14, REG_B3,
2598 REG_B10, REG_B11, REG_B12, REG_B13,
2599 REG_B14, REG_A15
2602 #define N_SAVE_ORDER (sizeof reg_save_order / sizeof *reg_save_order)
2604 /* Compute the layout of the stack frame and store it in FRAME. */
2606 static void
2607 c6x_compute_frame_layout (struct c6x_frame *frame)
2609 HOST_WIDE_INT size = get_frame_size ();
2610 HOST_WIDE_INT offset;
2611 int nregs;
2613 /* We use the four bytes which are technically inside the caller's frame,
2614 usually to save the frame pointer. */
2615 offset = -4;
2616 frame->padding0 = 0;
2617 nregs = c6x_nsaved_regs ();
2618 frame->push_rts = false;
2619 frame->b3_offset = 0;
2620 if (use_push_rts_p (nregs))
2622 frame->push_rts = true;
2623 frame->b3_offset = (TARGET_BIG_ENDIAN ? -12 : -13) * 4;
2624 nregs = 14;
2626 else if (c6x_save_reg (REG_B3))
2628 int idx;
2629 for (idx = N_SAVE_ORDER - 1; reg_save_order[idx] != REG_B3; idx--)
2631 if (c6x_save_reg (reg_save_order[idx]))
2632 frame->b3_offset -= 4;
2635 frame->nregs = nregs;
2637 if (size == 0 && nregs == 0)
2639 frame->padding0 = 4;
2640 frame->padding1 = frame->padding2 = 0;
2641 frame->frame_pointer_offset = frame->to_allocate = 0;
2642 frame->outgoing_arguments_size = 0;
2643 return;
2646 if (!frame->push_rts)
2647 offset += frame->nregs * 4;
2649 if (offset == 0 && size == 0 && crtl->outgoing_args_size == 0
2650 && !crtl->is_leaf)
2651 /* Don't use the bottom of the caller's frame if we have no
2652 allocation of our own and call other functions. */
2653 frame->padding0 = frame->padding1 = 4;
2654 else if (offset & 4)
2655 frame->padding1 = 4;
2656 else
2657 frame->padding1 = 0;
2659 offset += frame->padding0 + frame->padding1;
2660 frame->frame_pointer_offset = offset;
2661 offset += size;
2663 frame->outgoing_arguments_size = crtl->outgoing_args_size;
2664 offset += frame->outgoing_arguments_size;
2666 if ((offset & 4) == 0)
2667 frame->padding2 = 8;
2668 else
2669 frame->padding2 = 4;
2670 frame->to_allocate = offset + frame->padding2;
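/* Illustrative, self-contained sketch (hypothetical helper, not used) of
   the arithmetic above for the common case: no push_rts, padding0 == 0
   and none of the special cases.  For example, nregs == 2, size == 12 and
   out_args == 8 gives padding1 == 4 and a total allocation of 32 bytes;
   the result is always a multiple of 8, keeping the stack pointer
   properly aligned.  */
static HOST_WIDE_INT ATTRIBUTE_UNUSED
sketch_to_allocate (int nregs, HOST_WIDE_INT size, HOST_WIDE_INT out_args)
{
  HOST_WIDE_INT offset = -4;            /* Word borrowed from the caller.  */
  HOST_WIDE_INT padding1;

  offset += nregs * 4;                  /* Register save area.  */
  padding1 = (offset & 4) ? 4 : 0;      /* Align the soft frame pointer.  */
  offset += padding1 + size + out_args;
  offset += (offset & 4) == 0 ? 8 : 4;  /* padding2, as computed above.  */
  return offset;
}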
2673 /* Return the offset between two registers, one to be eliminated, and the other
2674 its replacement, at the start of a routine. */
2676 HOST_WIDE_INT
2677 c6x_initial_elimination_offset (int from, int to)
2679 struct c6x_frame frame;
2680 c6x_compute_frame_layout (&frame);
2682 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
2683 return 0;
2684 else if (from == FRAME_POINTER_REGNUM
2685 && to == HARD_FRAME_POINTER_REGNUM)
2686 return -frame.frame_pointer_offset;
2687 else
2689 gcc_assert (to == STACK_POINTER_REGNUM);
2691 if (from == ARG_POINTER_REGNUM)
2692 return frame.to_allocate + (frame.push_rts ? 56 : 0);
2694 gcc_assert (from == FRAME_POINTER_REGNUM);
2695 return frame.to_allocate - frame.frame_pointer_offset;
2699 /* Given FROM and TO register numbers, say whether this elimination is
2700 allowed. Frame pointer elimination is automatically handled. */
2702 static bool
2703 c6x_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2705 if (to == STACK_POINTER_REGNUM)
2706 return !frame_pointer_needed;
2707 return true;
2710 /* Emit insns to increment the stack pointer by OFFSET. If
2711 FRAME_RELATED_P, set the RTX_FRAME_RELATED_P flag on the insns.
2712 Does nothing if the offset is zero. */
2714 static void
2715 emit_add_sp_const (HOST_WIDE_INT offset, bool frame_related_p)
2717 rtx to_add = GEN_INT (offset);
2718 rtx orig_to_add = to_add;
2719 rtx insn;
2721 if (offset == 0)
2722 return;
2724 if (offset < -32768 || offset > 32767)
2726 rtx reg = gen_rtx_REG (SImode, REG_A0);
2727 rtx low = GEN_INT (trunc_int_for_mode (offset, HImode));
2729 insn = emit_insn (gen_movsi_high (reg, low));
2730 if (frame_related_p)
2731 RTX_FRAME_RELATED_P (insn) = 1;
2732 insn = emit_insn (gen_movsi_lo_sum (reg, reg, to_add));
2733 if (frame_related_p)
2734 RTX_FRAME_RELATED_P (insn) = 1;
2735 to_add = reg;
2737 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
2738 to_add));
2739 if (frame_related_p)
2741 if (REG_P (to_add))
2742 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
2743 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2744 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
2745 orig_to_add)));
2747 RTX_FRAME_RELATED_P (insn) = 1;
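/* Illustrative sketch (hypothetical helper, not part of the backend) of the
   high/lo_sum split used above when the adjustment does not fit a signed
   16-bit constant: the low half is the value sign-extended from its low 16
   bits (what trunc_int_for_mode (offset, HImode) yields), and the high
   half is whatever must be added back to reconstruct the original value.
   For 0x12348000, low == -0x8000 and high == 0x12350000.  */
static void ATTRIBUTE_UNUSED
sketch_split_const (HOST_WIDE_INT val, HOST_WIDE_INT *high, HOST_WIDE_INT *low)
{
  *low = (val & 0xffff) - ((val & 0x8000) << 1);   /* Sign-extend 16 bits.  */
  *high = val - *low;                              /* Multiple of 0x10000.  */
}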
2751 /* Prologue and epilogue. */
2752 void
2753 c6x_expand_prologue (void)
2755 struct c6x_frame frame;
2756 rtx insn, mem;
2757 int nsaved = 0;
2758 HOST_WIDE_INT initial_offset, off, added_already;
2760 c6x_compute_frame_layout (&frame);
2762 if (flag_stack_usage_info)
2763 current_function_static_stack_size = frame.to_allocate;
2765 initial_offset = -frame.to_allocate;
2766 if (frame.push_rts)
2768 emit_insn (gen_push_rts ());
2769 nsaved = frame.nregs;
2772 /* If the offsets would be too large for the memory references we will
2773 create to save registers, do the stack allocation in two parts.
2774 Ensure by subtracting 8 that we don't store to the word pointed to
2775 by the stack pointer. */
2776 if (initial_offset < -32768)
2777 initial_offset = -frame.frame_pointer_offset - 8;
2779 if (frame.to_allocate > 0)
2780 gcc_assert (initial_offset != 0);
2782 off = -initial_offset + 4 - frame.padding0;
2784 mem = gen_frame_mem (Pmode, stack_pointer_rtx);
2786 added_already = 0;
2787 if (frame_pointer_needed)
2789 rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
2790 /* We go through some contortions here to both follow the ABI's
2791 recommendation that FP == incoming SP, and to avoid writing or
2792 reading the word pointed to by the stack pointer. */
2793 rtx addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
2794 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
2795 GEN_INT (-8)));
2796 insn = emit_move_insn (gen_frame_mem (Pmode, addr), fp_reg);
2797 RTX_FRAME_RELATED_P (insn) = 1;
2798 nsaved++;
2799 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
2800 GEN_INT (8)));
2801 RTX_FRAME_RELATED_P (insn) = 1;
2802 off -= 4;
2803 added_already = -8;
2806 emit_add_sp_const (initial_offset - added_already, true);
2808 if (nsaved < frame.nregs)
2810 unsigned i;
2812 for (i = 0; i < N_SAVE_ORDER; i++)
2814 int idx = N_SAVE_ORDER - i - 1;
2815 unsigned regno = reg_save_order[idx];
2816 rtx reg;
2817 enum machine_mode save_mode = SImode;
2819 if (regno == REG_A15 && frame_pointer_needed)
2820 /* Already saved. */
2821 continue;
2822 if (!c6x_save_reg (regno))
2823 continue;
2825 if (TARGET_STDW && (off & 4) == 0 && off <= 256
2826 && (regno & 1) == 1
2827 && i + 1 < N_SAVE_ORDER
2828 && reg_save_order[idx - 1] == regno - 1
2829 && c6x_save_reg (regno - 1))
2831 save_mode = DImode;
2832 regno--;
2833 i++;
2835 reg = gen_rtx_REG (save_mode, regno);
2836 off -= GET_MODE_SIZE (save_mode);
2838 insn = emit_move_insn (adjust_address (mem, save_mode, off),
2839 reg);
2840 RTX_FRAME_RELATED_P (insn) = 1;
2842 nsaved += HARD_REGNO_NREGS (regno, save_mode);
2845 gcc_assert (nsaved == frame.nregs);
2846 emit_add_sp_const (-frame.to_allocate - initial_offset, true);
2847 if (must_reload_pic_reg_p ())
2849 if (dsbt_decl == NULL)
2851 tree t;
2853 t = build_index_type (integer_one_node);
2854 t = build_array_type (integer_type_node, t);
2855 t = build_decl (BUILTINS_LOCATION, VAR_DECL,
2856 get_identifier ("__c6xabi_DSBT_BASE"), t);
2857 DECL_ARTIFICIAL (t) = 1;
2858 DECL_IGNORED_P (t) = 1;
2859 DECL_EXTERNAL (t) = 1;
2860 TREE_STATIC (t) = 1;
2861 TREE_PUBLIC (t) = 1;
2862 TREE_USED (t) = 1;
2864 dsbt_decl = t;
2866 emit_insn (gen_setup_dsbt (pic_offset_table_rtx,
2867 XEXP (DECL_RTL (dsbt_decl), 0)));
2871 void
2872 c6x_expand_epilogue (bool sibcall)
2874 unsigned i;
2875 struct c6x_frame frame;
2876 rtx mem;
2877 HOST_WIDE_INT off;
2878 int nsaved = 0;
2880 c6x_compute_frame_layout (&frame);
2882 mem = gen_frame_mem (Pmode, stack_pointer_rtx);
2884 /* Insert a dummy set/use of the stack pointer. This creates a
2885 scheduler barrier between the prologue saves and epilogue restores. */
2886 emit_insn (gen_epilogue_barrier (stack_pointer_rtx, stack_pointer_rtx));
2888 /* If the offsets would be too large for the memory references we will
2889 create to restore registers, do a preliminary stack adjustment here. */
2890 off = frame.to_allocate - frame.frame_pointer_offset + frame.padding1;
2891 if (frame.push_rts)
2893 nsaved = frame.nregs;
2895 else
2897 if (frame.to_allocate > 32768)
2899 /* Don't add the entire offset so that we leave an unused word
2900 above the stack pointer. */
2901 emit_add_sp_const ((off - 16) & ~7, false);
2902 off &= 7;
2903 off += 16;
2905 for (i = 0; i < N_SAVE_ORDER; i++)
2907 unsigned regno = reg_save_order[i];
2908 rtx reg;
2909 enum machine_mode save_mode = SImode;
2911 if (!c6x_save_reg (regno))
2912 continue;
2913 if (regno == REG_A15 && frame_pointer_needed)
2914 continue;
2916 if (TARGET_STDW && (off & 4) == 0 && off < 256
2917 && (regno & 1) == 0
2918 && i + 1 < N_SAVE_ORDER
2919 && reg_save_order[i + 1] == regno + 1
2920 && c6x_save_reg (regno + 1))
2922 save_mode = DImode;
2923 i++;
2925 reg = gen_rtx_REG (save_mode, regno);
2927 emit_move_insn (reg, adjust_address (mem, save_mode, off));
2929 off += GET_MODE_SIZE (save_mode);
2930 nsaved += HARD_REGNO_NREGS (regno, save_mode);
2933 if (!frame_pointer_needed)
2934 emit_add_sp_const (off + frame.padding0 - 4, false);
2935 else
2937 rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
2938 rtx addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
2939 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
2940 GEN_INT (8)));
2941 emit_insn (gen_addsi3 (stack_pointer_rtx, hard_frame_pointer_rtx,
2942 GEN_INT (-8)));
2943 emit_move_insn (fp_reg, gen_frame_mem (Pmode, addr));
2944 nsaved++;
2946 gcc_assert (nsaved == frame.nregs);
2947 if (!sibcall)
2949 if (frame.push_rts)
2950 emit_jump_insn (gen_pop_rts ());
2951 else
2952 emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode,
2953 RETURN_ADDR_REGNO)));
2957 /* Return the value of the return address for the frame COUNT steps up
2958 from the current frame, after the prologue.
2959 We punt for everything but the current frame by returning const0_rtx. */
2962 c6x_return_addr_rtx (int count)
2964 if (count != 0)
2965 return const0_rtx;
2967 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNO);
2970 /* Return true iff TYPE is one of the shadow types. */
2971 static bool
2972 shadow_type_p (enum attr_type type)
2974 return (type == TYPE_SHADOW || type == TYPE_LOAD_SHADOW
2975 || type == TYPE_MULT_SHADOW);
2978 /* Return true iff INSN is a shadow pattern. */
2979 static bool
2980 shadow_p (rtx insn)
2982 if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
2983 return false;
2984 return shadow_type_p (get_attr_type (insn));
2987 /* Return true iff INSN is a shadow or blockage pattern. */
2988 static bool
2989 shadow_or_blockage_p (rtx insn)
2991 enum attr_type type;
2992 if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
2993 return false;
2994 type = get_attr_type (insn);
2995 return shadow_type_p (type) || type == TYPE_BLOCKAGE;
2998 /* Translate UNITS into a bitmask of units we can reserve for this
2999 insn. */
3000 static int
3001 get_reservation_flags (enum attr_units units)
3003 switch (units)
3005 case UNITS_D:
3006 case UNITS_D_ADDR:
3007 return RESERVATION_FLAG_D;
3008 case UNITS_L:
3009 return RESERVATION_FLAG_L;
3010 case UNITS_S:
3011 return RESERVATION_FLAG_S;
3012 case UNITS_M:
3013 return RESERVATION_FLAG_M;
3014 case UNITS_LS:
3015 return RESERVATION_FLAG_LS;
3016 case UNITS_DL:
3017 return RESERVATION_FLAG_DL;
3018 case UNITS_DS:
3019 return RESERVATION_FLAG_DS;
3020 case UNITS_DLS:
3021 return RESERVATION_FLAG_DLS;
3022 default:
3023 return 0;
3027 /* Compute the side of the machine used by INSN, which reserves UNITS.
3028 This must match the reservations in the scheduling description. */
3029 static int
3030 get_insn_side (rtx insn, enum attr_units units)
3032 if (units == UNITS_D_ADDR)
3033 return (get_attr_addr_regfile (insn) == ADDR_REGFILE_A ? 0 : 1);
3034 else
3036 enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
3037 if (rf == DEST_REGFILE_ANY)
3038 return get_attr_type (insn) == TYPE_BRANCH ? 0 : 1;
3039 else
3040 return rf == DEST_REGFILE_A ? 0 : 1;
3044 /* After scheduling, walk the insns between HEAD and END and assign unit
3045 reservations. */
3046 static void
3047 assign_reservations (rtx head, rtx end)
3049 rtx insn;
3050 for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn))
3052 unsigned int sched_mask, reserved;
3053 rtx within, last;
3054 int pass;
3055 int rsrv[2];
3056 int rsrv_count[2][4];
3057 int i;
3059 if (GET_MODE (insn) != TImode)
3060 continue;
3062 reserved = 0;
3063 last = NULL_RTX;
3064 /* Find the last insn in the packet. It has a state recorded for it,
3065 which we can use to determine the units we should be using. */
3066 for (within = insn;
3067 (within != NEXT_INSN (end)
3068 && (within == insn || GET_MODE (within) != TImode));
3069 within = NEXT_INSN (within))
3071 int icode;
3072 if (!NONDEBUG_INSN_P (within))
3073 continue;
3074 icode = recog_memoized (within);
3075 if (icode < 0)
3076 continue;
3077 if (shadow_p (within))
3078 continue;
3079 if (INSN_INFO_ENTRY (INSN_UID (within)).reservation != 0)
3080 reserved |= 1 << INSN_INFO_ENTRY (INSN_UID (within)).reservation;
3081 last = within;
3083 if (last == NULL_RTX)
3084 continue;
3086 sched_mask = INSN_INFO_ENTRY (INSN_UID (last)).unit_mask;
3087 sched_mask &= ~reserved;
3089 memset (rsrv_count, 0, sizeof rsrv_count);
3090 rsrv[0] = rsrv[1] = ~0;
3091 for (i = 0; i < 8; i++)
3093 int side = i / 4;
3094 int unit = i & 3;
3095 unsigned unit_bit = 1 << (unit + side * UNIT_QID_SIDE_OFFSET);
3096 /* Clear the bits which we expect to reserve in the following loop,
3097 leaving the ones set which aren't present in the scheduler's
3098 state and shouldn't be reserved. */
3099 if (sched_mask & unit_bit)
3100 rsrv[i / 4] &= ~(1 << unit);
3103 /* Walk through the insns that occur in the same cycle. We use multiple
3104 passes to assign units, assigning for insns with the most specific
3105 requirements first. */
3106 for (pass = 0; pass < 4; pass++)
3107 for (within = insn;
3108 (within != NEXT_INSN (end)
3109 && (within == insn || GET_MODE (within) != TImode));
3110 within = NEXT_INSN (within))
3112 int uid = INSN_UID (within);
3113 int this_rsrv, side;
3114 int icode;
3115 enum attr_units units;
3116 enum attr_type type;
3117 int j;
3119 if (!NONDEBUG_INSN_P (within))
3120 continue;
3121 icode = recog_memoized (within);
3122 if (icode < 0)
3123 continue;
3124 if (INSN_INFO_ENTRY (uid).reservation != 0)
3125 continue;
3126 units = get_attr_units (within);
3127 type = get_attr_type (within);
3128 this_rsrv = get_reservation_flags (units);
3129 if (this_rsrv == 0)
3130 continue;
3131 side = get_insn_side (within, units);
3133 /* Certain floating point instructions are treated specially. If
3134 an insn can choose between units it can reserve, and its
3135 reservation spans more than one cycle, the reservation contains
3136 special markers in the first cycle to help us reconstruct what
3137 the automaton chose. */
3138 if ((type == TYPE_ADDDP || type == TYPE_FP4)
3139 && units == UNITS_LS)
3141 int test1_code = ((type == TYPE_FP4 ? UNIT_QID_FPL1 : UNIT_QID_ADDDPL1)
3142 + side * UNIT_QID_SIDE_OFFSET);
3143 int test2_code = ((type == TYPE_FP4 ? UNIT_QID_FPS1 : UNIT_QID_ADDDPS1)
3144 + side * UNIT_QID_SIDE_OFFSET);
3145 if ((sched_mask & (1 << test1_code)) != 0)
3147 this_rsrv = RESERVATION_FLAG_L;
3148 sched_mask &= ~(1 << test1_code);
3150 else if ((sched_mask & (1 << test2_code)) != 0)
3152 this_rsrv = RESERVATION_FLAG_S;
3153 sched_mask &= ~(1 << test2_code);
3157 if ((this_rsrv & (this_rsrv - 1)) == 0)
3159 int t = exact_log2 (this_rsrv) + side * UNIT_QID_SIDE_OFFSET;
3160 rsrv[side] |= this_rsrv;
3161 INSN_INFO_ENTRY (uid).reservation = t;
3162 continue;
3165 if (pass == 1)
3167 for (j = 0; j < 4; j++)
3168 if (this_rsrv & (1 << j))
3169 rsrv_count[side][j]++;
3170 continue;
3172 if ((pass == 2 && this_rsrv != RESERVATION_FLAG_DLS)
3173 || (pass == 3 && this_rsrv == RESERVATION_FLAG_DLS))
3175 int best = -1, best_cost = INT_MAX;
3176 for (j = 0; j < 4; j++)
3177 if ((this_rsrv & (1 << j))
3178 && !(rsrv[side] & (1 << j))
3179 && rsrv_count[side][j] < best_cost)
3181 best_cost = rsrv_count[side][j];
3182 best = j;
3184 gcc_assert (best != -1);
3185 rsrv[side] |= 1 << best;
3186 for (j = 0; j < 4; j++)
3187 if ((this_rsrv & (1 << j)) && j != best)
3188 rsrv_count[side][j]--;
3190 INSN_INFO_ENTRY (uid).reservation
3191 = best + side * UNIT_QID_SIDE_OFFSET;
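/* Illustrative sketch (hypothetical names, not used) of the choice made in
   passes 2 and 3 above: among the units an insn may use (ALLOWED, bits
   0..3) that are still free in TAKEN, pick the one with the fewest other
   insns competing for it, as counted during pass 1.  Returns -1 if nothing
   is free; the real loop asserts that cannot happen.  */
static int ATTRIBUTE_UNUSED
sketch_pick_unit (int allowed, int taken, const int demand[4])
{
  int j, best = -1, best_cost = INT_MAX;
  for (j = 0; j < 4; j++)
    if ((allowed & (1 << j))
        && !(taken & (1 << j))
        && demand[j] < best_cost)
      {
        best_cost = demand[j];
        best = j;
      }
  return best;
}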
3197 /* Return a factor by which to weight unit imbalances for a reservation
3198 R. */
3199 static int
3200 unit_req_factor (enum unitreqs r)
3202 switch (r)
3204 case UNIT_REQ_D:
3205 case UNIT_REQ_L:
3206 case UNIT_REQ_S:
3207 case UNIT_REQ_M:
3208 case UNIT_REQ_X:
3209 case UNIT_REQ_T:
3210 return 1;
3211 case UNIT_REQ_DL:
3212 case UNIT_REQ_LS:
3213 case UNIT_REQ_DS:
3214 return 2;
3215 case UNIT_REQ_DLS:
3216 return 3;
3217 default:
3218 gcc_unreachable ();
3222 /* Examine INSN, and store in REQ1/SIDE1 and REQ2/SIDE2 the unit
3223 requirements. Returns zero if INSN can't be handled, otherwise
3224 either one or two to show how many of the two pairs are in use.
3225 REQ1 is always used; it holds what is normally thought of as the
3226 instruction's reservation, e.g. UNIT_REQ_DL. REQ2 is used to describe
3227 either a cross path or, for loads/stores, the T unit. */
3228 static int
3229 get_unit_reqs (rtx insn, int *req1, int *side1, int *req2, int *side2)
3231 enum attr_units units;
3232 enum attr_cross cross;
3233 int side, req;
3235 if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
3236 return 0;
3237 units = get_attr_units (insn);
3238 if (units == UNITS_UNKNOWN)
3239 return 0;
3240 side = get_insn_side (insn, units);
3241 cross = get_attr_cross (insn);
3243 req = (units == UNITS_D ? UNIT_REQ_D
3244 : units == UNITS_D_ADDR ? UNIT_REQ_D
3245 : units == UNITS_DL ? UNIT_REQ_DL
3246 : units == UNITS_DS ? UNIT_REQ_DS
3247 : units == UNITS_L ? UNIT_REQ_L
3248 : units == UNITS_LS ? UNIT_REQ_LS
3249 : units == UNITS_S ? UNIT_REQ_S
3250 : units == UNITS_M ? UNIT_REQ_M
3251 : units == UNITS_DLS ? UNIT_REQ_DLS
3252 : -1);
3253 gcc_assert (req != -1);
3254 *req1 = req;
3255 *side1 = side;
3256 if (units == UNITS_D_ADDR)
3258 *req2 = UNIT_REQ_T;
3259 *side2 = side ^ (cross == CROSS_Y ? 1 : 0);
3260 return 2;
3262 else if (cross == CROSS_Y)
3264 *req2 = UNIT_REQ_X;
3265 *side2 = side;
3266 return 2;
3268 return 1;
3271 /* Walk the insns between and including HEAD and TAIL, and mark the
3272 resource requirements in the unit_reqs table. */
3273 static void
3274 count_unit_reqs (unit_req_table reqs, rtx head, rtx tail)
3276 rtx insn;
3278 memset (reqs, 0, sizeof (unit_req_table));
3280 for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
3282 int side1, side2, req1, req2;
3284 switch (get_unit_reqs (insn, &req1, &side1, &req2, &side2))
3286 case 2:
3287 reqs[side2][req2]++;
3288 /* fall through */
3289 case 1:
3290 reqs[side1][req1]++;
3291 break;
3296 /* Update the table REQS by merging more specific unit reservations into
3297 more general ones, i.e. counting (for example) UNIT_REQ_D also in
3298 UNIT_REQ_DL, DS, and DLS. */
3299 static void
3300 merge_unit_reqs (unit_req_table reqs)
3302 int side;
3303 for (side = 0; side < 2; side++)
3305 int d = reqs[side][UNIT_REQ_D];
3306 int l = reqs[side][UNIT_REQ_L];
3307 int s = reqs[side][UNIT_REQ_S];
3308 int dl = reqs[side][UNIT_REQ_DL];
3309 int ls = reqs[side][UNIT_REQ_LS];
3310 int ds = reqs[side][UNIT_REQ_DS];
3312 reqs[side][UNIT_REQ_DL] += d;
3313 reqs[side][UNIT_REQ_DL] += l;
3314 reqs[side][UNIT_REQ_DS] += d;
3315 reqs[side][UNIT_REQ_DS] += s;
3316 reqs[side][UNIT_REQ_LS] += l;
3317 reqs[side][UNIT_REQ_LS] += s;
3318 reqs[side][UNIT_REQ_DLS] += ds + dl + ls + d + l + s;
3322 /* Examine the table REQS and return a measure of unit imbalance by comparing
3323 the two sides of the machine. If, for example, D1 is used twice and D2
3324 used not at all, the return value should be 1 in the absence of other
3325 imbalances. */
3326 static int
3327 unit_req_imbalance (unit_req_table reqs)
3329 int val = 0;
3330 int i;
3332 for (i = 0; i < UNIT_REQ_MAX; i++)
3334 int factor = unit_req_factor ((enum unitreqs) i);
3335 int diff = abs (reqs[0][i] - reqs[1][i]);
3336 val += (diff + factor - 1) / factor / 2;
3338 return val;
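/* Worked example of the formula above (hypothetical helper, not used): a
   unit class where one side is used twice and the other not at all, with
   factor 1, contributes ceil (2 / 1) / 2 == 1 to the imbalance, matching
   the comment; a DLS difference of 3 with factor 3 contributes
   ceil (3 / 3) / 2 == 0.  */
static int ATTRIBUTE_UNUSED
sketch_imbalance_term (int count_side0, int count_side1, int factor)
{
  int diff = abs (count_side0 - count_side1);
  return (diff + factor - 1) / factor / 2;
}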
3341 /* Return the resource-constrained minimum iteration interval given the
3342 data in the REQS table. This must have been processed with
3343 merge_unit_reqs already. */
3344 static int
3345 res_mii (unit_req_table reqs)
3347 int side, req;
3348 int worst = 1;
3349 for (side = 0; side < 2; side++)
3350 for (req = 0; req < UNIT_REQ_MAX; req++)
3352 int factor = unit_req_factor ((enum unitreqs) req);
3353 worst = MAX ((reqs[side][UNIT_REQ_D] + factor - 1) / factor, worst);
3356 return worst;
3359 /* Examine INSN, and store in PMASK1 and PMASK2 bitmasks that represent
3360 the operands that are involved in the (up to) two reservations, as
3361 found by get_unit_reqs. Return true if we did this successfully, false
3362 if we couldn't identify what to do with INSN. */
3363 static bool
3364 get_unit_operand_masks (rtx insn, unsigned int *pmask1, unsigned int *pmask2)
3366 enum attr_op_pattern op_pat;
3368 if (recog_memoized (insn) < 0)
3369 return false;
3370 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
3371 return false;
3372 extract_insn (insn);
3373 op_pat = get_attr_op_pattern (insn);
3374 if (op_pat == OP_PATTERN_DT)
3376 gcc_assert (recog_data.n_operands == 2);
3377 *pmask1 = 1 << 0;
3378 *pmask2 = 1 << 1;
3379 return true;
3381 else if (op_pat == OP_PATTERN_TD)
3383 gcc_assert (recog_data.n_operands == 2);
3384 *pmask1 = 1 << 1;
3385 *pmask2 = 1 << 0;
3386 return true;
3388 else if (op_pat == OP_PATTERN_SXS)
3390 gcc_assert (recog_data.n_operands == 3);
3391 *pmask1 = (1 << 0) | (1 << 2);
3392 *pmask2 = 1 << 1;
3393 return true;
3395 else if (op_pat == OP_PATTERN_SX)
3397 gcc_assert (recog_data.n_operands == 2);
3398 *pmask1 = 1 << 0;
3399 *pmask2 = 1 << 1;
3400 return true;
3402 else if (op_pat == OP_PATTERN_SSX)
3404 gcc_assert (recog_data.n_operands == 3);
3405 *pmask1 = (1 << 0) | (1 << 1);
3406 *pmask2 = 1 << 2;
3407 return true;
3409 return false;
3412 /* Try to replace a register in INSN, which has corresponding rename info
3413 from regrename_analyze in INFO. OP_MASK and ORIG_SIDE provide information
3414 about the operands that must be renamed and the side they are on.
3415 REQS is the table of unit reservations in the loop between HEAD and TAIL.
3416 We recompute this information locally after our transformation, and keep
3417 it only if we managed to improve the balance. */
3418 static void
3419 try_rename_operands (rtx head, rtx tail, unit_req_table reqs, rtx insn,
3420 insn_rr_info *info, unsigned int op_mask, int orig_side)
3422 enum reg_class super_class = orig_side == 0 ? B_REGS : A_REGS;
3423 HARD_REG_SET unavailable;
3424 du_head_p this_head;
3425 struct du_chain *chain;
3426 int i;
3427 unsigned tmp_mask;
3428 int best_reg, old_reg;
3429 vec<du_head_p> involved_chains = vNULL;
3430 unit_req_table new_reqs;
3432 for (i = 0, tmp_mask = op_mask; tmp_mask; i++)
3434 du_head_p op_chain;
3435 if ((tmp_mask & (1 << i)) == 0)
3436 continue;
3437 if (info->op_info[i].n_chains != 1)
3438 goto out_fail;
3439 op_chain = regrename_chain_from_id (info->op_info[i].heads[0]->id);
3440 involved_chains.safe_push (op_chain);
3441 tmp_mask &= ~(1 << i);
3444 if (involved_chains.length () > 1)
3445 goto out_fail;
3447 this_head = involved_chains[0];
3448 if (this_head->cannot_rename)
3449 goto out_fail;
3451 for (chain = this_head->first; chain; chain = chain->next_use)
3453 unsigned int mask1, mask2, mask_changed;
3454 int count, side1, side2, req1, req2;
3455 insn_rr_info *this_rr = &insn_rr[INSN_UID (chain->insn)];
3457 count = get_unit_reqs (chain->insn, &req1, &side1, &req2, &side2);
3459 if (count == 0)
3460 goto out_fail;
3462 if (!get_unit_operand_masks (chain->insn, &mask1, &mask2))
3463 goto out_fail;
3465 extract_insn (chain->insn);
3467 mask_changed = 0;
3468 for (i = 0; i < recog_data.n_operands; i++)
3470 int j;
3471 int n_this_op = this_rr->op_info[i].n_chains;
3472 for (j = 0; j < n_this_op; j++)
3474 du_head_p other = this_rr->op_info[i].heads[j];
3475 if (regrename_chain_from_id (other->id) == this_head)
3476 break;
3478 if (j == n_this_op)
3479 continue;
3481 if (n_this_op != 1)
3482 goto out_fail;
3483 mask_changed |= 1 << i;
3485 gcc_assert (mask_changed != 0);
3486 if (mask_changed != mask1 && mask_changed != mask2)
3487 goto out_fail;
3490 /* If we get here, we can do the renaming. */
3491 COMPL_HARD_REG_SET (unavailable, reg_class_contents[(int) super_class]);
3493 old_reg = this_head->regno;
3494 best_reg = find_best_rename_reg (this_head, super_class, &unavailable, old_reg);
3496 regrename_do_replace (this_head, best_reg);
3498 count_unit_reqs (new_reqs, head, PREV_INSN (tail));
3499 merge_unit_reqs (new_reqs);
3500 if (dump_file)
3502 fprintf (dump_file, "reshuffle for insn %d, op_mask %x, "
3503 "original side %d, new reg %d\n",
3504 INSN_UID (insn), op_mask, orig_side, best_reg);
3505 fprintf (dump_file, " imbalance %d -> %d\n",
3506 unit_req_imbalance (reqs), unit_req_imbalance (new_reqs));
3508 if (unit_req_imbalance (new_reqs) > unit_req_imbalance (reqs))
3509 regrename_do_replace (this_head, old_reg);
3510 else
3511 memcpy (reqs, new_reqs, sizeof (unit_req_table));
3513 out_fail:
3514 involved_chains.release ();
3517 /* Find insns in LOOP which would, if shifted to the other side
3518 of the machine, reduce an imbalance in the unit reservations. */
3519 static void
3520 reshuffle_units (basic_block loop)
3522 rtx head = BB_HEAD (loop);
3523 rtx tail = BB_END (loop);
3524 rtx insn;
3525 unit_req_table reqs;
3526 edge e;
3527 edge_iterator ei;
3528 bitmap_head bbs;
3530 count_unit_reqs (reqs, head, PREV_INSN (tail));
3531 merge_unit_reqs (reqs);
3533 regrename_init (true);
3535 bitmap_initialize (&bbs, &bitmap_default_obstack);
3537 FOR_EACH_EDGE (e, ei, loop->preds)
3538 bitmap_set_bit (&bbs, e->src->index);
3540 bitmap_set_bit (&bbs, loop->index);
3541 regrename_analyze (&bbs);
3543 for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
3545 enum attr_units units;
3546 int count, side1, side2, req1, req2;
3547 unsigned int mask1, mask2;
3548 insn_rr_info *info;
3550 if (!NONDEBUG_INSN_P (insn))
3551 continue;
3553 count = get_unit_reqs (insn, &req1, &side1, &req2, &side2);
3555 if (count == 0)
3556 continue;
3558 if (!get_unit_operand_masks (insn, &mask1, &mask2))
3559 continue;
3561 info = &insn_rr[INSN_UID (insn)];
3562 if (info->op_info == NULL)
3563 continue;
3565 if (reqs[side1][req1] > 1
3566 && reqs[side1][req1] > 2 * reqs[side1 ^ 1][req1])
3568 try_rename_operands (head, tail, reqs, insn, info, mask1, side1);
3571 units = get_attr_units (insn);
3572 if (units == UNITS_D_ADDR)
3574 gcc_assert (count == 2);
3575 if (reqs[side2][req2] > 1
3576 && reqs[side2][req2] > 2 * reqs[side2 ^ 1][req2])
3578 try_rename_operands (head, tail, reqs, insn, info, mask2, side2);
3582 regrename_finish ();
3585 /* Backend scheduling state. */
3586 typedef struct c6x_sched_context
3588 /* The current scheduler clock, saved in the sched_reorder hook. */
3589 int curr_sched_clock;
3591 /* Number of insns issued so far in this cycle. */
3592 int issued_this_cycle;
3594 /* We record the time at which each jump occurs in JUMP_CYCLES. The
3595 theoretical maximum for number of jumps in flight is 12: 2 every
3596 cycle, with a latency of 6 cycles each. This is a circular
3597 buffer; JUMP_CYCLE_INDEX is the pointer to the start. Earlier
3598 jumps have a higher index. This array should be accessed through
3599 the jump_cycle function. */
3600 int jump_cycles[12];
3601 int jump_cycle_index;
3603 /* In parallel with jump_cycles, this array records the opposite of
3604 the condition used in each pending jump. This is used to
3605 predicate insns that are scheduled in the jump's delay slots. If
3606 this is NULL_RTX no such predication happens. */
3607 rtx jump_cond[12];
3609 /* Similar to the jump_cycles mechanism, but here we take into
3610 account all insns with delay slots, to avoid scheduling asms into
3611 the delay slots. */
3612 int delays_finished_at;
3614 /* The following variable value is the last issued insn. */
3615 rtx last_scheduled_insn;
3616 /* The last issued insn that isn't a shadow of another. */
3617 rtx last_scheduled_iter0;
3619 /* The following variable value is DFA state before issuing the
3620 first insn in the current clock cycle. We do not use this member
3621 of the structure directly; we copy the data in and out of
3622 prev_cycle_state. */
3623 state_t prev_cycle_state_ctx;
3625 int reg_n_accesses[FIRST_PSEUDO_REGISTER];
3626 int reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
3627 int reg_set_in_cycle[FIRST_PSEUDO_REGISTER];
3629 int tmp_reg_n_accesses[FIRST_PSEUDO_REGISTER];
3630 int tmp_reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
3631 } *c6x_sched_context_t;
3633 /* The current scheduling state. */
3634 static struct c6x_sched_context ss;
3636 /* The following variable value is DFA state before issuing the first insn
3637 in the current clock cycle. This is used in c6x_variable_issue for
3638 comparison with the state after issuing the last insn in a cycle. */
3639 static state_t prev_cycle_state;
3641 /* Set when we discover while processing an insn that it would lead to too
3642 many accesses of the same register. */
3643 static bool reg_access_stall;
3645 /* The highest insn uid after delayed insns were split, but before loop bodies
3646 were copied by the modulo scheduling code. */
3647 static int sploop_max_uid_iter0;
3649 /* Look up the jump cycle with index N. For an out-of-bounds N, we return 0,
3650 so the caller does not specifically have to test for it. */
3651 static int
3652 get_jump_cycle (int n)
3654 if (n >= 12)
3655 return 0;
3656 n += ss.jump_cycle_index;
3657 if (n >= 12)
3658 n -= 12;
3659 return ss.jump_cycles[n];
3662 /* Look up the jump condition with index N. */
3663 static rtx
3664 get_jump_cond (int n)
3666 if (n >= 12)
3667 return NULL_RTX;
3668 n += ss.jump_cycle_index;
3669 if (n >= 12)
3670 n -= 12;
3671 return ss.jump_cond[n];
3674 /* Return the index of the first jump that occurs after CLOCK_VAR. If no jump
3675 has delay slots beyond CLOCK_VAR, return -1. */
3676 static int
3677 first_jump_index (int clock_var)
3679 int retval = -1;
3680 int n = 0;
3681 for (;;)
3683 int t = get_jump_cycle (n);
3684 if (t <= clock_var)
3685 break;
3686 retval = n;
3687 n++;
3689 return retval;
3692 /* Add a new entry in our scheduling state for a jump that occurs in CYCLE
3693 and has the opposite condition of COND. */
3694 static void
3695 record_jump (int cycle, rtx cond)
3697 if (ss.jump_cycle_index == 0)
3698 ss.jump_cycle_index = 11;
3699 else
3700 ss.jump_cycle_index--;
3701 ss.jump_cycles[ss.jump_cycle_index] = cycle;
3702 ss.jump_cond[ss.jump_cycle_index] = cond;
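/* Illustrative sketch (hypothetical names, not used) of the 12-entry
   circular buffer maintained above: recording a jump steps the start index
   backwards, so index 0 always refers to the most recently recorded jump
   and larger indices reach further back, as get_jump_cycle/get_jump_cond
   expect.  Only the cycle is modelled here; the real code records the
   condition in parallel.  */
static int sketch_jump_buf[12];
static int sketch_jump_start;

static void ATTRIBUTE_UNUSED
sketch_record_jump (int cycle)
{
  sketch_jump_start = (sketch_jump_start + 11) % 12;
  sketch_jump_buf[sketch_jump_start] = cycle;
}

static int ATTRIBUTE_UNUSED
sketch_get_jump (int n)
{
  return n >= 12 ? 0 : sketch_jump_buf[(sketch_jump_start + n) % 12];
}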
3705 /* Set the clock cycle of INSN to CYCLE. Also clears the insn's entry in
3706 new_conditions. */
3707 static void
3708 insn_set_clock (rtx insn, int cycle)
3710 unsigned uid = INSN_UID (insn);
3712 if (uid >= INSN_INFO_LENGTH)
3713 insn_info.safe_grow (uid * 5 / 4 + 10);
3715 INSN_INFO_ENTRY (uid).clock = cycle;
3716 INSN_INFO_ENTRY (uid).new_cond = NULL;
3717 INSN_INFO_ENTRY (uid).reservation = 0;
3718 INSN_INFO_ENTRY (uid).ebb_start = false;
3721 /* Return the clock cycle we set for the insn with uid UID. */
3722 static int
3723 insn_uid_get_clock (int uid)
3725 return INSN_INFO_ENTRY (uid).clock;
3728 /* Return the clock cycle we set for INSN. */
3729 static int
3730 insn_get_clock (rtx insn)
3732 return insn_uid_get_clock (INSN_UID (insn));
3735 /* Examine INSN, and if it is a conditional jump of any kind, return
3736 the opposite of the condition in which it branches. Otherwise,
3737 return NULL_RTX. */
3738 static rtx
3739 condjump_opposite_condition (rtx insn)
3741 rtx pat = PATTERN (insn);
3742 int icode = INSN_CODE (insn);
3743 rtx x = NULL;
3745 if (icode == CODE_FOR_br_true || icode == CODE_FOR_br_false)
3747 x = XEXP (SET_SRC (pat), 0);
3748 if (icode == CODE_FOR_br_false)
3749 return x;
3751 if (GET_CODE (pat) == COND_EXEC)
3753 rtx t = COND_EXEC_CODE (pat);
3754 if ((GET_CODE (t) == PARALLEL
3755 && GET_CODE (XVECEXP (t, 0, 0)) == RETURN)
3756 || (GET_CODE (t) == UNSPEC && XINT (t, 1) == UNSPEC_REAL_JUMP)
3757 || (GET_CODE (t) == SET && SET_DEST (t) == pc_rtx))
3758 x = COND_EXEC_TEST (pat);
3761 if (x != NULL_RTX)
3763 enum rtx_code code = GET_CODE (x);
3764 x = gen_rtx_fmt_ee (code == EQ ? NE : EQ,
3765 GET_MODE (x), XEXP (x, 0),
3766 XEXP (x, 1));
3768 return x;
3771 /* Return true iff COND1 and COND2 are exactly opposite conditions,
3772 one of them NE and the other EQ. */
3773 static bool
3774 conditions_opposite_p (rtx cond1, rtx cond2)
3776 return (rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
3777 && rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))
3778 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2)));
3781 /* Return true if we can add a predicate COND to INSN, or if INSN
3782 already has that predicate. If DOIT is true, also perform the
3783 modification. */
3784 static bool
3785 predicate_insn (rtx insn, rtx cond, bool doit)
3787 int icode;
3788 if (cond == NULL_RTX)
3790 gcc_assert (!doit);
3791 return false;
3794 if (get_attr_predicable (insn) == PREDICABLE_YES
3795 && GET_CODE (PATTERN (insn)) != COND_EXEC)
3797 if (doit)
3799 rtx newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
3800 PATTERN (insn) = newpat;
3801 INSN_CODE (insn) = -1;
3803 return true;
3805 if (GET_CODE (PATTERN (insn)) == COND_EXEC
3806 && rtx_equal_p (COND_EXEC_TEST (PATTERN (insn)), cond))
3807 return true;
3808 icode = INSN_CODE (insn);
3809 if (icode == CODE_FOR_real_jump
3810 || icode == CODE_FOR_jump
3811 || icode == CODE_FOR_indirect_jump)
3813 rtx pat = PATTERN (insn);
3814 rtx dest = (icode == CODE_FOR_real_jump ? XVECEXP (pat, 0, 0)
3815 : icode == CODE_FOR_jump ? XEXP (SET_SRC (pat), 0)
3816 : SET_SRC (pat));
3817 if (doit)
3819 rtx newpat;
3820 if (REG_P (dest))
3821 newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
3822 else
3823 newpat = gen_br_true (cond, XEXP (cond, 0), dest);
3824 PATTERN (insn) = newpat;
3825 INSN_CODE (insn) = -1;
3827 return true;
3829 if (INSN_CODE (insn) == CODE_FOR_br_true)
3831 rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
3832 return rtx_equal_p (br_cond, cond);
3834 if (INSN_CODE (insn) == CODE_FOR_br_false)
3836 rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
3837 return conditions_opposite_p (br_cond, cond);
3839 return false;
3842 /* Initialize SC. Used by c6x_init_sched_context and c6x_sched_init. */
3843 static void
3844 init_sched_state (c6x_sched_context_t sc)
3846 sc->last_scheduled_insn = NULL_RTX;
3847 sc->last_scheduled_iter0 = NULL_RTX;
3848 sc->issued_this_cycle = 0;
3849 memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
3850 memset (sc->jump_cond, 0, sizeof sc->jump_cond);
3851 sc->jump_cycle_index = 0;
3852 sc->delays_finished_at = 0;
3853 sc->curr_sched_clock = 0;
3855 sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);
3857 memset (sc->reg_n_accesses, 0, sizeof sc->reg_n_accesses);
3858 memset (sc->reg_n_xaccesses, 0, sizeof sc->reg_n_xaccesses);
3859 memset (sc->reg_set_in_cycle, 0, sizeof sc->reg_set_in_cycle);
3861 state_reset (sc->prev_cycle_state_ctx);
3864 /* Allocate store for new scheduling context. */
3865 static void *
3866 c6x_alloc_sched_context (void)
3868 return xmalloc (sizeof (struct c6x_sched_context));
3871 /* If CLEAN_P is true then initializes _SC with clean data,
3872 and from the global context otherwise. */
3873 static void
3874 c6x_init_sched_context (void *_sc, bool clean_p)
3876 c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
3878 if (clean_p)
3880 init_sched_state (sc);
3882 else
3884 *sc = ss;
3885 sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);
3886 memcpy (sc->prev_cycle_state_ctx, prev_cycle_state, dfa_state_size);
3890 /* Sets the global scheduling context to the one pointed to by _SC. */
3891 static void
3892 c6x_set_sched_context (void *_sc)
3894 c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
3896 gcc_assert (sc != NULL);
3897 ss = *sc;
3898 memcpy (prev_cycle_state, sc->prev_cycle_state_ctx, dfa_state_size);
3901 /* Clear data in _SC. */
3902 static void
3903 c6x_clear_sched_context (void *_sc)
3905 c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
3906 gcc_assert (_sc != NULL);
3908 free (sc->prev_cycle_state_ctx);
3911 /* Free _SC. */
3912 static void
3913 c6x_free_sched_context (void *_sc)
3915 free (_sc);
3918 /* True if we are currently performing a preliminary scheduling
3919 pass before modulo scheduling; we can't allow the scheduler to
3920 modify instruction patterns using packetization assumptions,
3921 since there will be another scheduling pass later if modulo
3922 scheduling fails. */
3923 static bool in_hwloop;
3925 /* Provide information about speculation capabilities, and set the
3926 DO_BACKTRACKING flag. */
3927 static void
3928 c6x_set_sched_flags (spec_info_t spec_info)
3930 unsigned int *flags = &(current_sched_info->flags);
3932 if (*flags & SCHED_EBB)
3934 *flags |= DO_BACKTRACKING | DO_PREDICATION;
3936 if (in_hwloop)
3937 *flags |= DONT_BREAK_DEPENDENCIES;
3939 spec_info->mask = 0;
3942 /* Implement the TARGET_SCHED_ISSUE_RATE hook. */
3944 static int
3945 c6x_issue_rate (void)
3947 return 8;
3950 /* Used together with the collapse_ndfa option, this ensures that we reach a
3951 deterministic automaton state before trying to advance a cycle.
3952 With collapse_ndfa, genautomata creates advance cycle arcs only for
3953 such deterministic states. */
3955 static rtx
3956 c6x_sched_dfa_pre_cycle_insn (void)
3958 return const0_rtx;
3961 /* We're beginning a new block. Initialize data structures as necessary. */
3963 static void
3964 c6x_sched_init (FILE *dump ATTRIBUTE_UNUSED,
3965 int sched_verbose ATTRIBUTE_UNUSED,
3966 int max_ready ATTRIBUTE_UNUSED)
3968 if (prev_cycle_state == NULL)
3970 prev_cycle_state = xmalloc (dfa_state_size);
3972 init_sched_state (&ss);
3973 state_reset (prev_cycle_state);
3976 /* We are about to begin issuing INSN. Return nonzero if we cannot
3977 issue it on given cycle CLOCK and return zero if we should not sort
3978 the ready queue on the next clock start.
3979 For C6X, we use this function just to copy the previous DFA state
3980 for comparison purposes. */
3982 static int
3983 c6x_dfa_new_cycle (FILE *dump ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
3984 rtx insn ATTRIBUTE_UNUSED, int last_clock ATTRIBUTE_UNUSED,
3985 int clock ATTRIBUTE_UNUSED, int *sort_p ATTRIBUTE_UNUSED)
3987 if (clock != last_clock)
3988 memcpy (prev_cycle_state, curr_state, dfa_state_size);
3989 return 0;
3992 static void
3993 c6x_mark_regno_read (int regno, bool cross)
3995 int t = ++ss.tmp_reg_n_accesses[regno];
3997 if (t > 4)
3998 reg_access_stall = true;
4000 if (cross)
4002 int set_cycle = ss.reg_set_in_cycle[regno];
4003 /* This must be done in this way rather than by tweaking things in
4004 adjust_cost, since the stall occurs even for insns with opposite
4005 predicates, and the scheduler may not even see a dependency. */
4006 if (set_cycle > 0 && set_cycle == ss.curr_sched_clock)
4007 reg_access_stall = true;
4008 /* This doesn't quite do anything yet as we're only modeling one
4009 x unit. */
4010 ++ss.tmp_reg_n_xaccesses[regno];
4014 /* Note that REG is read in the insn being examined. If CROSS, it
4015 means the access is through a cross path. Update the temporary reg
4016 access arrays, and set REG_ACCESS_STALL if the insn can't be issued
4017 in the current cycle. */
4019 static void
4020 c6x_mark_reg_read (rtx reg, bool cross)
4022 unsigned regno = REGNO (reg);
4023 unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];
4025 while (nregs-- > 0)
4026 c6x_mark_regno_read (regno + nregs, cross);
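/* Illustrative sketch (hypothetical helper, not used) of the two stall
   conditions modelled above: a single register may supply at most four
   reads in one cycle, and a cross-path read stalls if the register was
   written earlier in the same cycle.  */
static bool ATTRIBUTE_UNUSED
sketch_read_causes_stall (int reads_so_far, bool cross,
                          int set_cycle, int current_cycle)
{
  if (reads_so_far + 1 > 4)
    return true;
  return cross && set_cycle > 0 && set_cycle == current_cycle;
}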
4029 /* Note that register REG is written in cycle CYCLES. */
4031 static void
4032 c6x_mark_reg_written (rtx reg, int cycles)
4034 unsigned regno = REGNO (reg);
4035 unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];
4037 while (nregs-- > 0)
4038 ss.reg_set_in_cycle[regno + nregs] = cycles;
4041 /* Update the register state information for the instruction INSN.
4042 Return true if the instruction has to be delayed until the
4043 next cycle. */
4045 static bool
4046 c6x_registers_update (rtx insn)
4048 enum attr_cross cross;
4049 enum attr_dest_regfile destrf;
4050 int i, nops;
4051 rtx x;
4053 if (!reload_completed || recog_memoized (insn) < 0)
4054 return false;
4056 reg_access_stall = false;
4057 memcpy (ss.tmp_reg_n_accesses, ss.reg_n_accesses,
4058 sizeof ss.tmp_reg_n_accesses);
4059 memcpy (ss.tmp_reg_n_xaccesses, ss.reg_n_xaccesses,
4060 sizeof ss.tmp_reg_n_xaccesses);
4062 extract_insn (insn);
4064 cross = get_attr_cross (insn);
4065 destrf = get_attr_dest_regfile (insn);
4067 nops = recog_data.n_operands;
4068 x = PATTERN (insn);
4069 if (GET_CODE (x) == COND_EXEC)
4071 c6x_mark_reg_read (XEXP (XEXP (x, 0), 0), false);
4072 nops -= 2;
4075 for (i = 0; i < nops; i++)
4077 rtx op = recog_data.operand[i];
4078 if (recog_data.operand_type[i] == OP_OUT)
4079 continue;
4080 if (REG_P (op))
4082 bool this_cross = cross;
4083 if (destrf == DEST_REGFILE_A && A_REGNO_P (REGNO (op)))
4084 this_cross = false;
4085 if (destrf == DEST_REGFILE_B && B_REGNO_P (REGNO (op)))
4086 this_cross = false;
4087 c6x_mark_reg_read (op, this_cross);
4089 else if (MEM_P (op))
4091 op = XEXP (op, 0);
4092 switch (GET_CODE (op))
4094 case POST_INC:
4095 case PRE_INC:
4096 case POST_DEC:
4097 case PRE_DEC:
4098 op = XEXP (op, 0);
4099 /* fall through */
4100 case REG:
4101 c6x_mark_reg_read (op, false);
4102 break;
4103 case POST_MODIFY:
4104 case PRE_MODIFY:
4105 op = XEXP (op, 1);
4106 gcc_assert (GET_CODE (op) == PLUS);
4107 /* fall through */
4108 case PLUS:
4109 c6x_mark_reg_read (XEXP (op, 0), false);
4110 if (REG_P (XEXP (op, 1)))
4111 c6x_mark_reg_read (XEXP (op, 1), false);
4112 break;
4113 case SYMBOL_REF:
4114 case LABEL_REF:
4115 case CONST:
4116 c6x_mark_regno_read (REG_B14, false);
4117 break;
4118 default:
4119 gcc_unreachable ();
4122 else if (!CONSTANT_P (op) && strlen (recog_data.constraints[i]) > 0)
4123 gcc_unreachable ();
4125 return reg_access_stall;
4128 /* Helper function for the TARGET_SCHED_REORDER and
4129 TARGET_SCHED_REORDER2 hooks. If scheduling an insn would be unsafe
4130 in the current cycle, move it down in the ready list and return the
4131 number of non-unsafe insns. */
4133 static int
4134 c6x_sched_reorder_1 (rtx *ready, int *pn_ready, int clock_var)
4136 int n_ready = *pn_ready;
4137 rtx *e_ready = ready + n_ready;
4138 rtx *insnp;
4139 int first_jump;
4141 /* Keep track of conflicts due to a limited number of register accesses,

4142 and due to stalls incurred by too early accesses of registers using
4143 cross paths. */
4145 for (insnp = ready; insnp < e_ready; insnp++)
4147 rtx insn = *insnp;
4148 int icode = recog_memoized (insn);
4149 bool is_asm = (icode < 0
4150 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
4151 || asm_noperands (PATTERN (insn)) >= 0));
4152 bool no_parallel = (is_asm || icode == CODE_FOR_sploop
4153 || (icode >= 0
4154 && get_attr_type (insn) == TYPE_ATOMIC));
4156 /* We delay asm insns until all delay slots are exhausted. We can't
4157 accurately tell how many cycles an asm takes, and the main scheduling
4158 code always assumes at least 1 cycle, which may be wrong. */
4159 if ((no_parallel
4160 && (ss.issued_this_cycle > 0 || clock_var < ss.delays_finished_at))
4161 || c6x_registers_update (insn)
4162 || (ss.issued_this_cycle > 0 && icode == CODE_FOR_sploop))
4164 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4165 *ready = insn;
4166 n_ready--;
4167 ready++;
4169 else if (shadow_p (insn))
4171 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4172 *ready = insn;
4176 /* Ensure that no other jump is scheduled in jump delay slots, since
4177 it would put the machine into the wrong state. Also, we must
4178 avoid scheduling insns that have a latency longer than the
4179 remaining jump delay slots, as the code at the jump destination
4180 won't be prepared for it.
4182 However, we can relax this condition somewhat. The rest of the
4183 scheduler will automatically avoid scheduling an insn on which
4184 the jump shadow depends so late that its side effect happens
4185 after the jump. This means that if we see an insn with a longer
4186 latency here, it can safely be scheduled if we can ensure that it
4187 has a predicate opposite of the previous jump: the side effect
4188 will happen in what we think of as the same basic block. In
4189 c6x_variable_issue, we will record the necessary predicate in
4190 new_conditions, and after scheduling is finished, we will modify
4191 the insn.
4193 Special care must be taken whenever there is more than one jump
4194 in flight. */
4196 first_jump = first_jump_index (clock_var);
4197 if (first_jump != -1)
4199 int first_cycle = get_jump_cycle (first_jump);
4200 rtx first_cond = get_jump_cond (first_jump);
4201 int second_cycle = 0;
4203 if (first_jump > 0)
4204 second_cycle = get_jump_cycle (first_jump - 1);
4206 for (insnp = ready; insnp < e_ready; insnp++)
4208 rtx insn = *insnp;
4209 int icode = recog_memoized (insn);
4210 bool is_asm = (icode < 0
4211 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
4212 || asm_noperands (PATTERN (insn)) >= 0));
4213 int this_cycles, rsrv_cycles;
4214 enum attr_type type;
4216 gcc_assert (!is_asm);
4217 if (icode < 0)
4218 continue;
4219 this_cycles = get_attr_cycles (insn);
4220 rsrv_cycles = get_attr_reserve_cycles (insn);
4221 type = get_attr_type (insn);
4222 /* Treat branches specially; there is also a hazard if two jumps
4223 end at the same cycle. */
4224 if (type == TYPE_BRANCH || type == TYPE_CALL)
4225 this_cycles++;
4226 if (clock_var + this_cycles <= first_cycle)
4227 continue;
4228 if ((first_jump > 0 && clock_var + this_cycles > second_cycle)
4229 || clock_var + rsrv_cycles > first_cycle
4230 || !predicate_insn (insn, first_cond, false))
4232 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4233 *ready = insn;
4234 n_ready--;
4235 ready++;
4240 return n_ready;
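/* Worked example (illustrative): C6X branches have five delay slots.
   Suppose the earliest jump in flight takes effect at cycle 15
   (first_cycle == 15) and we are filling cycle 14 (clock_var == 14).
   A single-cycle insn passes the check (14 + 1 <= 15) and stays put.
   An insn with four remaining cycles (14 + 4 > 15) is kept only if its
   reservation still ends by cycle 15 and it can be given the recorded
   condition (the opposite of the jump's); otherwise it is moved out of
   the candidates for this cycle and excluded from the returned count.  */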
4243 /* Implement the TARGET_SCHED_REORDER hook. We save the current clock
4244 for later and clear the register access information for the new
4245 cycle. We also move asm statements out of the way if they would be
4246 scheduled in a delay slot. */
4248 static int
4249 c6x_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
4250 int sched_verbose ATTRIBUTE_UNUSED,
4251 rtx *ready ATTRIBUTE_UNUSED,
4252 int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
4254 ss.curr_sched_clock = clock_var;
4255 ss.issued_this_cycle = 0;
4256 memset (ss.reg_n_accesses, 0, sizeof ss.reg_n_accesses);
4257 memset (ss.reg_n_xaccesses, 0, sizeof ss.reg_n_xaccesses);
4259 if (ready == NULL)
4260 return 0;
4262 return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
4265 /* Implement the TARGET_SCHED_REORDER2 hook. We use this to record the clock
4266 cycle for every insn. */
4268 static int
4269 c6x_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
4270 int sched_verbose ATTRIBUTE_UNUSED,
4271 rtx *ready ATTRIBUTE_UNUSED,
4272 int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
4274 /* FIXME: the assembler rejects labels inside an execute packet.
4275 This can occur if prologue insns are scheduled in parallel with
4276 others, so we avoid this here. Also make sure that nothing is
4277 scheduled in parallel with a TYPE_ATOMIC insn or after a jump. */
4278 if (RTX_FRAME_RELATED_P (ss.last_scheduled_insn)
4279 || JUMP_P (ss.last_scheduled_insn)
4280 || (recog_memoized (ss.last_scheduled_insn) >= 0
4281 && get_attr_type (ss.last_scheduled_insn) == TYPE_ATOMIC))
4283 int n_ready = *pn_ready;
4284 rtx *e_ready = ready + n_ready;
4285 rtx *insnp;
4287 for (insnp = ready; insnp < e_ready; insnp++)
4289 rtx insn = *insnp;
4290 if (!shadow_p (insn))
4292 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4293 *ready = insn;
4294 n_ready--;
4295 ready++;
4298 return n_ready;
4301 return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
4304 /* Subroutine of maybe_clobber_cond, called through note_stores. */
4306 static void
4307 clobber_cond_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data1)
4309 rtx *cond = (rtx *)data1;
4310 if (*cond != NULL_RTX && reg_overlap_mentioned_p (x, *cond))
4311 *cond = NULL_RTX;
4314 /* Examine INSN, and if it destroys the conditions we have recorded for
4315 any of the jumps in flight, clear that condition so that we don't
4316 predicate any more insns. CLOCK_VAR helps us limit the search to
4317 only those jumps which are still in flight. */
4319 static void
4320 maybe_clobber_cond (rtx insn, int clock_var)
4322 int n, idx;
4323 idx = ss.jump_cycle_index;
4324 for (n = 0; n < 12; n++, idx++)
4326 rtx cond, link;
4327 int cycle;
4329 if (idx >= 12)
4330 idx -= 12;
4331 cycle = ss.jump_cycles[idx];
4332 if (cycle <= clock_var)
4333 return;
4335 cond = ss.jump_cond[idx];
4336 if (cond == NULL_RTX)
4337 continue;
4339 if (CALL_P (insn))
4341 ss.jump_cond[idx] = NULL_RTX;
4342 continue;
4345 note_stores (PATTERN (insn), clobber_cond_1, ss.jump_cond + idx);
4346 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
4347 if (REG_NOTE_KIND (link) == REG_INC)
4348 clobber_cond_1 (XEXP (link, 0), NULL_RTX, ss.jump_cond + idx);
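/* Example (illustrative): if a branch whose shadow insns are being
   predicated on, say, [A1] is still in flight and a later insn writes A1
   (or a CALL is issued, which may clobber any predicate register), the
   recorded condition is cleared above so that no further insns in the
   delay slots are predicated on a value that is no longer valid.  */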
4352 /* Implement the TARGET_SCHED_VARIABLE_ISSUE hook. We are about to
4353 issue INSN. Return the number of insns left on the ready queue
4354 that can be issued this cycle.
4355 We use this hook to record clock cycles and reservations for every insn. */
4357 static int
4358 c6x_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
4359 int sched_verbose ATTRIBUTE_UNUSED,
4360 rtx insn, int can_issue_more ATTRIBUTE_UNUSED)
4362 ss.last_scheduled_insn = insn;
4363 if (INSN_UID (insn) < sploop_max_uid_iter0 && !JUMP_P (insn))
4364 ss.last_scheduled_iter0 = insn;
4365 if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
4366 ss.issued_this_cycle++;
4367 if (insn_info.exists ())
4369 state_t st_after = alloca (dfa_state_size);
4370 int curr_clock = ss.curr_sched_clock;
4371 int uid = INSN_UID (insn);
4372 int icode = recog_memoized (insn);
4373 rtx first_cond;
4374 int first, first_cycle;
4375 unsigned int mask;
4376 int i;
4378 insn_set_clock (insn, curr_clock);
4379 INSN_INFO_ENTRY (uid).ebb_start
4380 = curr_clock == 0 && ss.issued_this_cycle == 1;
4382 first = first_jump_index (ss.curr_sched_clock);
4383 if (first == -1)
4385 first_cycle = 0;
4386 first_cond = NULL_RTX;
4388 else
4390 first_cycle = get_jump_cycle (first);
4391 first_cond = get_jump_cond (first);
4393 if (icode >= 0
4394 && first_cycle > curr_clock
4395 && first_cond != NULL_RTX
4396 && (curr_clock + get_attr_cycles (insn) > first_cycle
4397 || get_attr_type (insn) == TYPE_BRANCH
4398 || get_attr_type (insn) == TYPE_CALL))
4399 INSN_INFO_ENTRY (uid).new_cond = first_cond;
4401 memcpy (st_after, curr_state, dfa_state_size);
4402 state_transition (st_after, const0_rtx);
4404 mask = 0;
4405 for (i = 0; i < 2 * UNIT_QID_SIDE_OFFSET; i++)
4406 if (cpu_unit_reservation_p (st_after, c6x_unit_codes[i])
4407 && !cpu_unit_reservation_p (prev_cycle_state, c6x_unit_codes[i]))
4408 mask |= 1 << i;
4409 INSN_INFO_ENTRY (uid).unit_mask = mask;
4411 maybe_clobber_cond (insn, curr_clock);
4413 if (icode >= 0)
4415 int i, cycles;
4417 c6x_registers_update (insn);
4418 memcpy (ss.reg_n_accesses, ss.tmp_reg_n_accesses,
4419 sizeof ss.reg_n_accesses);
4420 memcpy (ss.reg_n_xaccesses, ss.tmp_reg_n_xaccesses,
4421 sizeof ss.reg_n_xaccesses);
4423 cycles = get_attr_cycles (insn);
4424 if (ss.delays_finished_at < ss.curr_sched_clock + cycles)
4425 ss.delays_finished_at = ss.curr_sched_clock + cycles;
4426 if (get_attr_type (insn) == TYPE_BRANCH
4427 || get_attr_type (insn) == TYPE_CALL)
4429 rtx opposite = condjump_opposite_condition (insn);
4430 record_jump (ss.curr_sched_clock + cycles, opposite);
4433 /* Mark the cycles in which the destination registers are written.
4434 This is used for calculating stalls when using cross units. */
4435 extract_insn (insn);
4436 /* Cross-path stalls don't apply to results of load insns. */
4437 if (get_attr_type (insn) == TYPE_LOAD
4438 || get_attr_type (insn) == TYPE_LOADN
4439 || get_attr_type (insn) == TYPE_LOAD_SHADOW)
4440 cycles--;
4441 for (i = 0; i < recog_data.n_operands; i++)
4443 rtx op = recog_data.operand[i];
4444 if (MEM_P (op))
4446 rtx addr = XEXP (op, 0);
4447 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4448 c6x_mark_reg_written (XEXP (addr, 0),
4449 insn_uid_get_clock (uid) + 1);
4451 if (recog_data.operand_type[i] != OP_IN
4452 && REG_P (op))
4454 c6x_mark_reg_written (op,
4455 insn_uid_get_clock (uid) + cycles);
4460 return can_issue_more;
4463 /* Implement the TARGET_SCHED_ADJUST_COST hook. We need special handling for
4464 anti- and output dependencies. */
4466 static int
4467 c6x_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4469 enum attr_type insn_type = TYPE_UNKNOWN, dep_insn_type = TYPE_UNKNOWN;
4470 int dep_insn_code_number, insn_code_number;
4471 int shadow_bonus = 0;
4472 enum reg_note kind;
4473 dep_insn_code_number = recog_memoized (dep_insn);
4474 insn_code_number = recog_memoized (insn);
4476 if (dep_insn_code_number >= 0)
4477 dep_insn_type = get_attr_type (dep_insn);
4479 if (insn_code_number >= 0)
4480 insn_type = get_attr_type (insn);
4482 kind = REG_NOTE_KIND (link);
4483 if (kind == 0)
4485 /* If we have a dependency on a load, and it's not for the result of
4486 the load, it must be for an autoincrement. Reduce the cost in that
4487 case. */
4488 if (dep_insn_type == TYPE_LOAD)
4490 rtx set = PATTERN (dep_insn);
4491 if (GET_CODE (set) == COND_EXEC)
4492 set = COND_EXEC_CODE (set);
4493 if (GET_CODE (set) == UNSPEC)
4494 cost = 1;
4495 else
4497 gcc_assert (GET_CODE (set) == SET);
4498 if (!reg_overlap_mentioned_p (SET_DEST (set), PATTERN (insn)))
4499 cost = 1;
4504 /* A jump shadow needs to have its latency decreased by one. Conceptually,
4505 it occurs in between two cycles, but we schedule it at the end of the
4506 first cycle. */
4507 if (shadow_type_p (insn_type))
4508 shadow_bonus = 1;
4510 /* Anti and output dependencies usually have zero cost, but we want
4511 to insert a stall after a jump, and after certain floating point
4512 insns that take more than one cycle to read their inputs. In the
4513 future, we should try to find a better algorithm for scheduling
4514 jumps. */
4515 if (kind != 0)
4517 /* We can get anti-dependencies against shadow insns. Treat these
4518 like output dependencies, so that the insn is entirely finished
4519 before the branch takes place. */
4520 if (kind == REG_DEP_ANTI && insn_type == TYPE_SHADOW)
4521 kind = REG_DEP_OUTPUT;
4522 switch (dep_insn_type)
4524 case TYPE_CALLP:
4525 return 1;
4526 case TYPE_BRANCH:
4527 case TYPE_CALL:
4528 if (get_attr_has_shadow (dep_insn) == HAS_SHADOW_Y)
4529 /* This is a real_jump/real_call insn. These don't have
4530 outputs, and ensuring the validity of scheduling things
4531 in the delay slot is the job of
4532 c6x_sched_reorder_1. */
4533 return 0;
4534 /* Unsplit calls can happen - e.g. for divide insns. */
4535 return 6;
4536 case TYPE_LOAD:
4537 case TYPE_LOADN:
4538 case TYPE_INTDP:
4539 if (kind == REG_DEP_OUTPUT)
4540 return 5 - shadow_bonus;
4541 return 0;
4542 case TYPE_MPY4:
4543 case TYPE_FP4:
4544 if (kind == REG_DEP_OUTPUT)
4545 return 4 - shadow_bonus;
4546 return 0;
4547 case TYPE_MPY2:
4548 if (kind == REG_DEP_OUTPUT)
4549 return 2 - shadow_bonus;
4550 return 0;
4551 case TYPE_CMPDP:
4552 if (kind == REG_DEP_OUTPUT)
4553 return 2 - shadow_bonus;
4554 return 2;
4555 case TYPE_ADDDP:
4556 case TYPE_MPYSPDP:
4557 if (kind == REG_DEP_OUTPUT)
4558 return 7 - shadow_bonus;
4559 return 2;
4560 case TYPE_MPYSP2DP:
4561 if (kind == REG_DEP_OUTPUT)
4562 return 5 - shadow_bonus;
4563 return 2;
4564 case TYPE_MPYI:
4565 if (kind == REG_DEP_OUTPUT)
4566 return 9 - shadow_bonus;
4567 return 4;
4568 case TYPE_MPYID:
4569 case TYPE_MPYDP:
4570 if (kind == REG_DEP_OUTPUT)
4571 return 10 - shadow_bonus;
4572 return 4;
4574 default:
4575 if (insn_type == TYPE_SPKERNEL)
4576 return 0;
4577 if (kind == REG_DEP_OUTPUT)
4578 return 1 - shadow_bonus;
4580 return 0;
4584 return cost - shadow_bonus;
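/* Illustrative reading of the table above: for an output dependency
   (REG_DEP_OUTPUT) against a four-cycle multiply such as TYPE_MPY4, the
   returned cost is 4 (minus one when the dependent insn is a shadow), so
   the later write lands after the multiply's result; a plain
   anti-dependency against most insn types costs 0 and allows
   back-to-back issue.  */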
4587 /* Create a SEQUENCE rtx to replace the instructions in SLOT, of which there
4588 are N_FILLED. REAL_FIRST identifies the slot of the insn that appears
4589 first in the original stream. */
4591 static void
4592 gen_one_bundle (rtx *slot, int n_filled, int real_first)
4594 rtx bundle;
4595 rtx t;
4596 int i;
4598 bundle = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec_v (n_filled, slot));
4599 bundle = make_insn_raw (bundle);
4600 BLOCK_FOR_INSN (bundle) = BLOCK_FOR_INSN (slot[0]);
4601 INSN_LOCATION (bundle) = INSN_LOCATION (slot[0]);
4602 PREV_INSN (bundle) = PREV_INSN (slot[real_first]);
4604 t = NULL_RTX;
4606 for (i = 0; i < n_filled; i++)
4608 rtx insn = slot[i];
4609 remove_insn (insn);
4610 PREV_INSN (insn) = t ? t : PREV_INSN (bundle);
4611 if (t != NULL_RTX)
4612 NEXT_INSN (t) = insn;
4613 t = insn;
4614 if (i > 0)
4615 INSN_LOCATION (slot[i]) = INSN_LOCATION (bundle);
4618 NEXT_INSN (bundle) = NEXT_INSN (PREV_INSN (bundle));
4619 NEXT_INSN (t) = NEXT_INSN (bundle);
4620 NEXT_INSN (PREV_INSN (bundle)) = bundle;
4621 PREV_INSN (NEXT_INSN (bundle)) = bundle;
4624 /* Move all parallel instructions into SEQUENCEs, so that no subsequent passes
4625 try to insert labels in the middle. */
4627 static void
4628 c6x_gen_bundles (void)
4630 basic_block bb;
4631 rtx insn, next, last_call;
4633 FOR_EACH_BB_FN (bb, cfun)
4635 rtx insn, next;
4636 /* The machine is eight insns wide. We can have up to six shadow
4637 insns, plus an extra slot for merging the jump shadow. */
4638 rtx slot[15];
4639 int n_filled = 0;
4640 int first_slot = 0;
4642 for (insn = BB_HEAD (bb);; insn = next)
4644 int at_end;
4645 rtx delete_this = NULL_RTX;
4647 if (NONDEBUG_INSN_P (insn))
4649 /* Put calls at the start of the sequence. */
4650 if (CALL_P (insn))
4652 first_slot++;
4653 if (n_filled)
4655 memmove (&slot[1], &slot[0],
4656 n_filled * sizeof (slot[0]));
4658 if (!shadow_p (insn))
4660 PUT_MODE (insn, TImode);
4661 if (n_filled)
4662 PUT_MODE (slot[1], VOIDmode);
4664 n_filled++;
4665 slot[0] = insn;
4667 else
4669 slot[n_filled++] = insn;
4673 next = NEXT_INSN (insn);
4674 while (next && insn != BB_END (bb)
4675 && !(NONDEBUG_INSN_P (next)
4676 && GET_CODE (PATTERN (next)) != USE
4677 && GET_CODE (PATTERN (next)) != CLOBBER))
4679 insn = next;
4680 next = NEXT_INSN (insn);
4683 at_end = insn == BB_END (bb);
4684 if (delete_this == NULL_RTX
4685 && (at_end || (GET_MODE (next) == TImode
4686 && !(shadow_p (next) && CALL_P (next)))))
4688 if (n_filled >= 2)
4689 gen_one_bundle (slot, n_filled, first_slot);
4691 n_filled = 0;
4692 first_slot = 0;
4694 if (at_end)
4695 break;
4698 /* Bundling, and emitting nops, can separate
4699 NOTE_INSN_CALL_ARG_LOCATION from the corresponding calls. Fix
4700 that up here. */
4701 last_call = NULL_RTX;
4702 for (insn = get_insns (); insn; insn = next)
4704 next = NEXT_INSN (insn);
4705 if (CALL_P (insn)
4706 || (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE
4707 && CALL_P (XVECEXP (PATTERN (insn), 0, 0))))
4708 last_call = insn;
4709 if (!NOTE_P (insn) || NOTE_KIND (insn) != NOTE_INSN_CALL_ARG_LOCATION)
4710 continue;
4711 if (NEXT_INSN (last_call) == insn)
4712 continue;
4713 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4714 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4715 PREV_INSN (insn) = last_call;
4716 NEXT_INSN (insn) = NEXT_INSN (last_call);
4717 PREV_INSN (NEXT_INSN (insn)) = insn;
4718 NEXT_INSN (PREV_INSN (insn)) = insn;
4719 last_call = insn;
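/* For reference (illustrative assembly, standard TI syntax): after
   bundling, an execute packet of parallel insns such as

         ADD  .L1   A0, A1, A2
     ||  MPY  .M2   B4, B5, B6
     ||  LDW  .D1T1 *A10, A3

   is kept together inside a single SEQUENCE rtx, so later passes cannot
   insert labels or notes between insns that must issue in one cycle.  */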
4723 /* Emit a NOP instruction for CYCLES cycles after insn AFTER. Return it. */
4725 static rtx
4726 emit_nop_after (int cycles, rtx after)
4728 rtx insn;
4730 /* mpydp has 9 delay slots, and we may schedule a stall for a cross-path
4731 operation. We don't need the extra NOP since in this case, the hardware
4732 will automatically insert the required stall. */
4733 if (cycles == 10)
4734 cycles--;
4736 gcc_assert (cycles < 10);
4738 insn = emit_insn_after (gen_nop_count (GEN_INT (cycles)), after);
4739 PUT_MODE (insn, TImode);
4741 return insn;
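/* A multi-cycle NOP can cover up to nine idle cycles in one instruction
   (e.g. "NOP 5"), which is why the assert above requires CYCLES < 10;
   a longer gap would need to be split into several NOPs, a case that
   does not arise here.  */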
4744 /* Determine whether INSN is a call that needs to have a return label
4745 placed. */
4747 static bool
4748 returning_call_p (rtx insn)
4750 if (CALL_P (insn))
4751 return (!SIBLING_CALL_P (insn)
4752 && get_attr_type (insn) != TYPE_CALLP
4753 && get_attr_type (insn) != TYPE_SHADOW);
4754 if (recog_memoized (insn) < 0)
4755 return false;
4756 if (get_attr_type (insn) == TYPE_CALL)
4757 return true;
4758 return false;
4761 /* Determine whether INSN's pattern can be converted to use callp. */
4762 static bool
4763 can_use_callp (rtx insn)
4765 int icode = recog_memoized (insn);
4766 if (!TARGET_INSNS_64PLUS
4767 || icode < 0
4768 || GET_CODE (PATTERN (insn)) == COND_EXEC)
4769 return false;
4771 return ((icode == CODE_FOR_real_call
4772 || icode == CODE_FOR_call_internal
4773 || icode == CODE_FOR_call_value_internal)
4774 && get_attr_dest_regfile (insn) == DEST_REGFILE_ANY);
4777 /* Convert the pattern of INSN, which must be a CALL_INSN, into a callp. */
4778 static void
4779 convert_to_callp (rtx insn)
4781 rtx lab;
4782 extract_insn (insn);
4783 if (GET_CODE (PATTERN (insn)) == SET)
4785 rtx dest = recog_data.operand[0];
4786 lab = recog_data.operand[1];
4787 PATTERN (insn) = gen_callp_value (dest, lab);
4788 INSN_CODE (insn) = CODE_FOR_callp_value;
4790 else
4792 lab = recog_data.operand[0];
4793 PATTERN (insn) = gen_callp (lab);
4794 INSN_CODE (insn) = CODE_FOR_callp;
4798 /* Scan forwards from INSN until we find the next insn that has mode TImode
4799 (indicating it starts a new cycle), and occurs in cycle CLOCK.
4800 Return it if we find such an insn, NULL_RTX otherwise. */
4801 static rtx
4802 find_next_cycle_insn (rtx insn, int clock)
4804 rtx t = insn;
4805 if (GET_MODE (t) == TImode)
4806 t = next_real_insn (t);
4807 while (t && GET_MODE (t) != TImode)
4808 t = next_real_insn (t);
4810 if (t && insn_get_clock (t) == clock)
4811 return t;
4812 return NULL_RTX;
4815 /* If COND_INSN has a COND_EXEC condition, wrap the same condition
4816 around PAT. Return PAT either unchanged or modified in this
4817 way. */
4818 static rtx
4819 duplicate_cond (rtx pat, rtx cond_insn)
4821 rtx cond_pat = PATTERN (cond_insn);
4822 if (GET_CODE (cond_pat) == COND_EXEC)
4823 pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (cond_pat)),
4824 pat);
4825 return pat;
4828 /* Walk forward from INSN to find the last insn that issues in the same clock
4829 cycle. */
4830 static rtx
4831 find_last_same_clock (rtx insn)
4833 rtx retval = insn;
4834 rtx t = next_real_insn (insn);
4836 while (t && GET_MODE (t) != TImode)
4838 if (!DEBUG_INSN_P (t) && recog_memoized (t) >= 0)
4839 retval = t;
4840 t = next_real_insn (t);
4842 return retval;
4845 /* For every call insn in the function, emit code to load the return
4846 address. For each call we create a return label and store it in
4847 CALL_LABELS. If we are not scheduling, we emit the labels here,
4848 otherwise the caller will do it later.
4849 This function is called after final insn scheduling, but before creating
4850 the SEQUENCEs that represent execute packets. */
4852 static void
4853 reorg_split_calls (rtx *call_labels)
4855 unsigned int reservation_mask = 0;
4856 rtx insn = get_insns ();
4857 gcc_assert (NOTE_P (insn));
4858 insn = next_real_insn (insn);
4859 while (insn)
4861 int uid;
4862 rtx next = next_real_insn (insn);
4864 if (DEBUG_INSN_P (insn))
4865 goto done;
4867 if (GET_MODE (insn) == TImode)
4868 reservation_mask = 0;
4869 uid = INSN_UID (insn);
4870 if (c6x_flag_schedule_insns2 && recog_memoized (insn) >= 0)
4871 reservation_mask |= 1 << INSN_INFO_ENTRY (uid).reservation;
4873 if (returning_call_p (insn))
4875 rtx label = gen_label_rtx ();
4876 rtx labelref = gen_rtx_LABEL_REF (Pmode, label);
4877 rtx reg = gen_rtx_REG (SImode, RETURN_ADDR_REGNO);
4879 LABEL_NUSES (label) = 2;
4880 if (!c6x_flag_schedule_insns2)
4882 if (can_use_callp (insn))
4883 convert_to_callp (insn);
4884 else
4886 rtx t;
4887 rtx slot[4];
4888 emit_label_after (label, insn);
4890 /* Bundle the call and its delay slots into a single
4891 SEQUENCE. While these do not issue in parallel
4892 we need to group them into a single EH region. */
4893 slot[0] = insn;
4894 PUT_MODE (insn, TImode);
4895 if (TARGET_INSNS_64)
4897 t = gen_addkpc (reg, labelref, GEN_INT (4));
4898 slot[1] = emit_insn_after (duplicate_cond (t, insn),
4899 insn);
4900 PUT_MODE (slot[1], TImode);
4901 gen_one_bundle (slot, 2, 0);
4903 else
4905 slot[3] = emit_insn_after (gen_nop_count (GEN_INT (3)),
4906 insn);
4907 PUT_MODE (slot[3], TImode);
4908 t = gen_movsi_lo_sum (reg, reg, labelref);
4909 slot[2] = emit_insn_after (duplicate_cond (t, insn),
4910 insn);
4911 PUT_MODE (slot[2], TImode);
4912 t = gen_movsi_high (reg, labelref);
4913 slot[1] = emit_insn_after (duplicate_cond (t, insn),
4914 insn);
4915 PUT_MODE (slot[1], TImode);
4916 gen_one_bundle (slot, 4, 0);
4920 else
4922 /* If we scheduled, we reserved the .S2 unit for one or two
4923 cycles after the call. Emit the insns in these slots,
4924 unless it's possible to create a CALLP insn.
4925 Note that this works because the dependencies ensure that
4926 no insn setting/using B3 is scheduled in the delay slots of
4927 a call. */
4928 int this_clock = insn_get_clock (insn);
4929 rtx last_same_clock;
4930 rtx after1;
4932 call_labels[INSN_UID (insn)] = label;
4934 last_same_clock = find_last_same_clock (insn);
4936 if (can_use_callp (insn))
4938 /* Find the first insn of the next execute packet. If it
4939 is the shadow insn corresponding to this call, we may
4940 use a CALLP insn. */
4941 rtx shadow = next_nonnote_nondebug_insn (last_same_clock);
4943 if (CALL_P (shadow)
4944 && insn_get_clock (shadow) == this_clock + 5)
4946 convert_to_callp (shadow);
4947 insn_set_clock (shadow, this_clock);
4948 INSN_INFO_ENTRY (INSN_UID (shadow)).reservation
4949 = RESERVATION_S2;
4950 INSN_INFO_ENTRY (INSN_UID (shadow)).unit_mask
4951 = INSN_INFO_ENTRY (INSN_UID (last_same_clock)).unit_mask;
4952 if (GET_MODE (insn) == TImode)
4954 rtx new_cycle_first = NEXT_INSN (insn);
4955 while (!NONDEBUG_INSN_P (new_cycle_first)
4956 || GET_CODE (PATTERN (new_cycle_first)) == USE
4957 || GET_CODE (PATTERN (new_cycle_first)) == CLOBBER)
4958 new_cycle_first = NEXT_INSN (new_cycle_first);
4959 PUT_MODE (new_cycle_first, TImode);
4960 if (new_cycle_first != shadow)
4961 PUT_MODE (shadow, VOIDmode);
4962 INSN_INFO_ENTRY (INSN_UID (new_cycle_first)).ebb_start
4963 = INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start;
4965 else
4966 PUT_MODE (shadow, VOIDmode);
4967 delete_insn (insn);
4968 goto done;
4971 after1 = find_next_cycle_insn (last_same_clock, this_clock + 1);
4972 if (after1 == NULL_RTX)
4973 after1 = last_same_clock;
4974 else
4975 after1 = find_last_same_clock (after1);
4976 if (TARGET_INSNS_64)
4978 rtx x1 = gen_addkpc (reg, labelref, const0_rtx);
4979 x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
4980 insn_set_clock (x1, this_clock + 1);
4981 INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
4982 if (after1 == last_same_clock)
4983 PUT_MODE (x1, TImode);
4984 else
4985 INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
4986 = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
4988 else
4990 rtx x1, x2;
4991 rtx after2 = find_next_cycle_insn (after1, this_clock + 2);
4992 if (after2 == NULL_RTX)
4993 after2 = after1;
4994 x2 = gen_movsi_lo_sum (reg, reg, labelref);
4995 x2 = emit_insn_after (duplicate_cond (x2, insn), after2);
4996 x1 = gen_movsi_high (reg, labelref);
4997 x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
4998 insn_set_clock (x1, this_clock + 1);
4999 insn_set_clock (x2, this_clock + 2);
5000 INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
5001 INSN_INFO_ENTRY (INSN_UID (x2)).reservation = RESERVATION_S2;
5002 if (after1 == last_same_clock)
5003 PUT_MODE (x1, TImode);
5004 else
5005 INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
5006 = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
5007 if (after1 == after2)
5008 PUT_MODE (x2, TImode);
5009 else
5010 INSN_INFO_ENTRY (INSN_UID (x2)).unit_mask
5011 = INSN_INFO_ENTRY (INSN_UID (after2)).unit_mask;
5015 done:
5016 insn = next;
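/* Sketch of the code generated here (illustrative only; the exact insns
   and their placement depend on -march and on whether scheduling ran):
   a call that needs a return address becomes either a single CALLP insn
   (C64x+), or a branch whose delay slots load the return label into the
   return register B3, roughly

         B      .S1  _func
         ADDKPC .S2  .retlab, B3, 4   ; C64x: label plus NOPs in one insn
         ...                          ; or a MVKL/MVKH pair plus NOP 3
     .retlab:

   The label is stored in CALL_LABELS so that reorg_emit_nops can place
   it after the delay slots when scheduling was used.  */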
5020 /* Called as part of c6x_reorg. This function emits multi-cycle NOP
5021 insns as required for correctness. CALL_LABELS is the array that
5022 holds the return labels for call insns; we emit these here if
5023 scheduling was run earlier. */
5025 static void
5026 reorg_emit_nops (rtx *call_labels)
5028 bool first;
5029 rtx prev, last_call;
5030 int prev_clock, earliest_bb_end;
5031 int prev_implicit_nops;
5032 rtx insn = get_insns ();
5034 /* We look at one insn (or bundle inside a sequence) in each iteration, storing
5035 its issue time in PREV_CLOCK for the next iteration. If there is a gap in
5036 clocks, we must insert a NOP.
5037 EARLIEST_BB_END tracks in which cycle all insns that have been issued in the
5038 current basic block will finish. We must not allow the next basic block to
5039 begin before this cycle.
5040 PREV_IMPLICIT_NOPS tells us whether we've seen an insn that implicitly contains
5041 a multi-cycle nop. The code is scheduled such that subsequent insns will
5042 show the cycle gap, but we needn't insert a real NOP instruction. */
5043 insn = next_real_insn (insn);
5044 last_call = prev = NULL_RTX;
5045 prev_clock = -1;
5046 earliest_bb_end = 0;
5047 prev_implicit_nops = 0;
5048 first = true;
5049 while (insn)
5051 int this_clock = -1;
5052 rtx next;
5053 int max_cycles = 0;
5055 next = next_real_insn (insn);
5057 if (DEBUG_INSN_P (insn)
5058 || GET_CODE (PATTERN (insn)) == USE
5059 || GET_CODE (PATTERN (insn)) == CLOBBER
5060 || shadow_or_blockage_p (insn)
5061 || JUMP_TABLE_DATA_P (insn))
5062 goto next_insn;
5064 if (!c6x_flag_schedule_insns2)
5065 /* No scheduling; ensure that no parallel issue happens. */
5066 PUT_MODE (insn, TImode);
5067 else
5069 int cycles;
5071 this_clock = insn_get_clock (insn);
5072 if (this_clock != prev_clock)
5074 PUT_MODE (insn, TImode);
5076 if (!first)
5078 cycles = this_clock - prev_clock;
5080 cycles -= prev_implicit_nops;
5081 if (cycles > 1)
5083 rtx nop = emit_nop_after (cycles - 1, prev);
5084 insn_set_clock (nop, prev_clock + prev_implicit_nops + 1);
5087 prev_clock = this_clock;
5089 if (last_call
5090 && insn_get_clock (last_call) + 6 <= this_clock)
5092 emit_label_before (call_labels[INSN_UID (last_call)], insn);
5093 last_call = NULL_RTX;
5095 prev_implicit_nops = 0;
5099 /* Examine how many cycles the current insn takes, and adjust
5100 LAST_CALL, EARLIEST_BB_END and PREV_IMPLICIT_NOPS. */
5101 if (recog_memoized (insn) >= 0
5102 /* If not scheduling, we've emitted NOPs after calls already. */
5103 && (c6x_flag_schedule_insns2 || !returning_call_p (insn)))
5105 max_cycles = get_attr_cycles (insn);
5106 if (get_attr_type (insn) == TYPE_CALLP)
5107 prev_implicit_nops = 5;
5109 else
5110 max_cycles = 1;
5111 if (returning_call_p (insn))
5112 last_call = insn;
5114 if (c6x_flag_schedule_insns2)
5116 gcc_assert (this_clock >= 0);
5117 if (earliest_bb_end < this_clock + max_cycles)
5118 earliest_bb_end = this_clock + max_cycles;
5120 else if (max_cycles > 1)
5121 emit_nop_after (max_cycles - 1, insn);
5123 prev = insn;
5124 first = false;
5126 next_insn:
5127 if (c6x_flag_schedule_insns2
5128 && (next == NULL_RTX
5129 || (GET_MODE (next) == TImode
5130 && INSN_INFO_ENTRY (INSN_UID (next)).ebb_start))
5131 && earliest_bb_end > 0)
5133 int cycles = earliest_bb_end - prev_clock;
5134 if (cycles > 1)
5136 prev = emit_nop_after (cycles - 1, prev);
5137 insn_set_clock (prev, prev_clock + prev_implicit_nops + 1);
5139 earliest_bb_end = 0;
5140 prev_clock = -1;
5141 first = true;
5143 if (last_call)
5144 emit_label_after (call_labels[INSN_UID (last_call)], prev);
5145 last_call = NULL_RTX;
5147 insn = next;
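/* Example (illustrative): if the schedule placed one insn at cycle 3 and
   the next at cycle 7, and the earlier insn did not implicitly cover the
   gap (PREV_IMPLICIT_NOPS == 0), a "NOP 3" is emitted between them so
   that the emitted stream matches the cycle numbering the scheduler
   assumed.  */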
5151 /* If possible, split INSN, which we know is either a jump or a call, into a real
5152 insn and its shadow. */
5153 static void
5154 split_delayed_branch (rtx insn)
5156 int code = recog_memoized (insn);
5157 rtx i1, newpat;
5158 rtx pat = PATTERN (insn);
5160 if (GET_CODE (pat) == COND_EXEC)
5161 pat = COND_EXEC_CODE (pat);
5163 if (CALL_P (insn))
5165 rtx src = pat, dest = NULL_RTX;
5166 rtx callee;
5167 if (GET_CODE (pat) == SET)
5169 dest = SET_DEST (pat);
5170 src = SET_SRC (pat);
5172 callee = XEXP (XEXP (src, 0), 0);
5173 if (SIBLING_CALL_P (insn))
5175 if (REG_P (callee))
5176 newpat = gen_indirect_sibcall_shadow ();
5177 else
5178 newpat = gen_sibcall_shadow (callee);
5179 pat = gen_real_jump (callee);
5181 else if (dest != NULL_RTX)
5183 if (REG_P (callee))
5184 newpat = gen_indirect_call_value_shadow (dest);
5185 else
5186 newpat = gen_call_value_shadow (dest, callee);
5187 pat = gen_real_call (callee);
5189 else
5191 if (REG_P (callee))
5192 newpat = gen_indirect_call_shadow ();
5193 else
5194 newpat = gen_call_shadow (callee);
5195 pat = gen_real_call (callee);
5197 pat = duplicate_cond (pat, insn);
5198 newpat = duplicate_cond (newpat, insn);
5200 else
5202 rtx src, op;
5203 if (GET_CODE (pat) == PARALLEL
5204 && GET_CODE (XVECEXP (pat, 0, 0)) == RETURN)
5206 newpat = gen_return_shadow ();
5207 pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
5208 newpat = duplicate_cond (newpat, insn);
5210 else
5211 switch (code)
5213 case CODE_FOR_br_true:
5214 case CODE_FOR_br_false:
5215 src = SET_SRC (pat);
5216 op = XEXP (src, code == CODE_FOR_br_true ? 1 : 2);
5217 newpat = gen_condjump_shadow (op);
5218 pat = gen_real_jump (op);
5219 if (code == CODE_FOR_br_true)
5220 pat = gen_rtx_COND_EXEC (VOIDmode, XEXP (src, 0), pat);
5221 else
5222 pat = gen_rtx_COND_EXEC (VOIDmode,
5223 reversed_comparison (XEXP (src, 0),
5224 VOIDmode),
5225 pat);
5226 break;
5228 case CODE_FOR_jump:
5229 op = SET_SRC (pat);
5230 newpat = gen_jump_shadow (op);
5231 break;
5233 case CODE_FOR_indirect_jump:
5234 newpat = gen_indirect_jump_shadow ();
5235 break;
5237 case CODE_FOR_return_internal:
5238 newpat = gen_return_shadow ();
5239 pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
5240 break;
5242 default:
5243 return;
5246 i1 = emit_insn_before (pat, insn);
5247 PATTERN (insn) = newpat;
5248 INSN_CODE (insn) = -1;
5249 record_delay_slot_pair (i1, insn, 5, 0);
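/* Sketch of the transformation (illustrative): a conditional branch such
   as

       (cond_exec (ne (reg A1) (const_int 0)) (set (pc) (label_ref L)))

   becomes a real_jump emitted at the branch's issue point plus a shadow
   insn tied to it five cycles later (record_delay_slot_pair above), so
   the scheduler can fill the five delay slots while the shadow marks
   where the transfer of control actually takes effect.  */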
5252 /* If INSN is a multi-cycle insn that should be handled properly in
5253 modulo-scheduling, split it into a real insn and a shadow.
5254 Return true if we made a change.
5256 It is valid for us to fail to split an insn; the caller has to deal
5257 with the possibility. Currently we handle loads and most mpy2 and
5258 mpy4 insns. */
5259 static bool
5260 split_delayed_nonbranch (rtx insn)
5262 int code = recog_memoized (insn);
5263 enum attr_type type;
5264 rtx i1, newpat, src, dest;
5265 rtx pat = PATTERN (insn);
5266 rtvec rtv;
5267 int delay;
5269 if (GET_CODE (pat) == COND_EXEC)
5270 pat = COND_EXEC_CODE (pat);
5272 if (code < 0 || GET_CODE (pat) != SET)
5273 return false;
5274 src = SET_SRC (pat);
5275 dest = SET_DEST (pat);
5276 if (!REG_P (dest))
5277 return false;
5279 type = get_attr_type (insn);
5280 if (code >= 0
5281 && (type == TYPE_LOAD
5282 || type == TYPE_LOADN))
5284 if (!MEM_P (src)
5285 && (GET_CODE (src) != ZERO_EXTEND
5286 || !MEM_P (XEXP (src, 0))))
5287 return false;
5289 if (GET_MODE_SIZE (GET_MODE (dest)) > 4
5290 && (GET_MODE_SIZE (GET_MODE (dest)) != 8 || !TARGET_LDDW))
5291 return false;
5293 rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
5294 SET_SRC (pat));
5295 newpat = gen_load_shadow (SET_DEST (pat));
5296 pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_LOAD);
5297 delay = 4;
5299 else if (code >= 0
5300 && (type == TYPE_MPY2
5301 || type == TYPE_MPY4))
5303 /* We don't handle floating point multiplies yet. */
5304 if (GET_MODE (dest) == SFmode)
5305 return false;
5307 rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
5308 SET_SRC (pat));
5309 newpat = gen_mult_shadow (SET_DEST (pat));
5310 pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_MULT);
5311 delay = type == TYPE_MPY2 ? 1 : 3;
5313 else
5314 return false;
5316 pat = duplicate_cond (pat, insn);
5317 newpat = duplicate_cond (newpat, insn);
5318 i1 = emit_insn_before (pat, insn);
5319 PATTERN (insn) = newpat;
5320 INSN_CODE (insn) = -1;
5321 recog_memoized (insn);
5322 recog_memoized (i1);
5323 record_delay_slot_pair (i1, insn, delay, 0);
5324 return true;
5327 /* Examine whether INSN is the result of splitting a load or multiply into
5328 a real insn and a shadow, and if so, undo the transformation. */
5329 static void
5330 undo_split_delayed_nonbranch (rtx insn)
5332 int icode = recog_memoized (insn);
5333 enum attr_type type;
5334 rtx prev_pat, insn_pat, prev;
5336 if (icode < 0)
5337 return;
5338 type = get_attr_type (insn);
5339 if (type != TYPE_LOAD_SHADOW && type != TYPE_MULT_SHADOW)
5340 return;
5341 prev = PREV_INSN (insn);
5342 prev_pat = PATTERN (prev);
5343 insn_pat = PATTERN (insn);
5344 if (GET_CODE (prev_pat) == COND_EXEC)
5346 prev_pat = COND_EXEC_CODE (prev_pat);
5347 insn_pat = COND_EXEC_CODE (insn_pat);
5350 gcc_assert (GET_CODE (prev_pat) == UNSPEC
5351 && ((XINT (prev_pat, 1) == UNSPEC_REAL_LOAD
5352 && type == TYPE_LOAD_SHADOW)
5353 || (XINT (prev_pat, 1) == UNSPEC_REAL_MULT
5354 && type == TYPE_MULT_SHADOW)));
5355 insn_pat = gen_rtx_SET (VOIDmode, SET_DEST (insn_pat),
5356 XVECEXP (prev_pat, 0, 1));
5357 insn_pat = duplicate_cond (insn_pat, prev);
5358 PATTERN (insn) = insn_pat;
5359 INSN_CODE (insn) = -1;
5360 delete_insn (prev);
5363 /* Split every insn (i.e. jumps and calls) which can have delay slots into
5364 two parts: the first one is scheduled normally and emits the instruction,
5365 while the second one is a shadow insn which shows the side effect taking
5366 place. The second one is placed in the right cycle by the scheduler, but
5367 not emitted as an assembly instruction. */
5369 static void
5370 split_delayed_insns (void)
5372 rtx insn;
5373 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5375 if (JUMP_P (insn) || CALL_P (insn))
5376 split_delayed_branch (insn);
5380 /* For every insn that has an entry in the new_conditions vector, give it
5381 the appropriate predicate. */
5382 static void
5383 conditionalize_after_sched (void)
5385 basic_block bb;
5386 rtx insn;
5387 FOR_EACH_BB_FN (bb, cfun)
5388 FOR_BB_INSNS (bb, insn)
5390 unsigned uid = INSN_UID (insn);
5391 rtx cond;
5392 if (!NONDEBUG_INSN_P (insn) || uid >= INSN_INFO_LENGTH)
5393 continue;
5394 cond = INSN_INFO_ENTRY (uid).new_cond;
5395 if (cond == NULL_RTX)
5396 continue;
5397 if (dump_file)
5398 fprintf (dump_file, "Conditionalizing insn %d\n", uid);
5399 predicate_insn (insn, cond, true);
5403 /* A callback for the hw-doloop pass. This function examines INSN; if
5404 it is a loop_end pattern we recognize, return the reg rtx for the
5405 loop counter. Otherwise, return NULL_RTX. */
5407 static rtx
5408 hwloop_pattern_reg (rtx insn)
5410 rtx pat, reg;
5412 if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
5413 return NULL_RTX;
5415 pat = PATTERN (insn);
5416 reg = SET_DEST (XVECEXP (pat, 0, 1));
5417 if (!REG_P (reg))
5418 return NULL_RTX;
5419 return reg;
5422 /* Return the number of cycles taken by BB, as computed by scheduling,
5423 including the latencies of all insns with delay slots. IGNORE is
5424 an insn we should ignore in the calculation, usually the final
5425 branch. */
5426 static int
5427 bb_earliest_end_cycle (basic_block bb, rtx ignore)
5429 int earliest = 0;
5430 rtx insn;
5432 FOR_BB_INSNS (bb, insn)
5434 int cycles, this_clock;
5436 if (LABEL_P (insn) || NOTE_P (insn) || DEBUG_INSN_P (insn)
5437 || GET_CODE (PATTERN (insn)) == USE
5438 || GET_CODE (PATTERN (insn)) == CLOBBER
5439 || insn == ignore)
5440 continue;
5442 this_clock = insn_get_clock (insn);
5443 cycles = get_attr_cycles (insn);
5445 if (earliest < this_clock + cycles)
5446 earliest = this_clock + cycles;
5448 return earliest;
5451 /* Examine the insns in BB and remove all those with a uid greater than or
5452 equal to MAX_UID. */
5453 static void
5454 filter_insns_above (basic_block bb, int max_uid)
5456 rtx insn, next;
5457 bool prev_ti = false;
5458 int prev_cycle = -1;
5460 FOR_BB_INSNS_SAFE (bb, insn, next)
5462 int this_cycle;
5463 if (!NONDEBUG_INSN_P (insn))
5464 continue;
5465 if (insn == BB_END (bb))
5466 return;
5467 this_cycle = insn_get_clock (insn);
5468 if (prev_ti && this_cycle == prev_cycle)
5470 gcc_assert (GET_MODE (insn) != TImode);
5471 PUT_MODE (insn, TImode);
5473 prev_ti = false;
5474 if (INSN_UID (insn) >= max_uid)
5476 if (GET_MODE (insn) == TImode)
5478 prev_ti = true;
5479 prev_cycle = this_cycle;
5481 delete_insn (insn);
5486 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
5488 static void
5489 c6x_asm_emit_except_personality (rtx personality)
5491 fputs ("\t.personality\t", asm_out_file);
5492 output_addr_const (asm_out_file, personality);
5493 fputc ('\n', asm_out_file);
5496 /* Use a special assembly directive rather than a regular section for
5497 unwind table data. */
5499 static void
5500 c6x_asm_init_sections (void)
5502 exception_section = get_unnamed_section (0, output_section_asm_op,
5503 "\t.handlerdata");
5506 /* A callback for the hw-doloop pass. Called to optimize LOOP in a
5507 machine-specific fashion; returns true if successful and false if
5508 the hwloop_fail function should be called. */
5510 static bool
5511 hwloop_optimize (hwloop_info loop)
5513 basic_block entry_bb, bb;
5514 rtx seq, insn, prev, entry_after, end_packet;
5515 rtx head_insn, tail_insn, new_insns, last_insn;
5516 int loop_earliest;
5517 int n_execute_packets;
5518 edge entry_edge;
5519 unsigned ix;
5520 int max_uid_before, delayed_splits;
5521 int i, sp_ii, min_ii, max_ii, max_parallel, n_insns, n_real_insns, stages;
5522 rtx *orig_vec;
5523 rtx *copies;
5524 rtx **insn_copies;
5526 if (!c6x_flag_modulo_sched || !c6x_flag_schedule_insns2
5527 || !TARGET_INSNS_64PLUS)
5528 return false;
5530 if (loop->iter_reg_used || loop->depth > 1)
5531 return false;
5532 if (loop->has_call || loop->has_asm)
5533 return false;
5535 if (loop->head != loop->tail)
5536 return false;
5538 gcc_assert (loop->incoming_dest == loop->head);
5540 entry_edge = NULL;
5541 FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
5542 if (entry_edge->flags & EDGE_FALLTHRU)
5543 break;
5544 if (entry_edge == NULL)
5545 return false;
5547 reshuffle_units (loop->head);
5549 in_hwloop = true;
5550 schedule_ebbs_init ();
5551 schedule_ebb (BB_HEAD (loop->tail), loop->loop_end, true);
5552 schedule_ebbs_finish ();
5553 in_hwloop = false;
5555 bb = loop->head;
5556 loop_earliest = bb_earliest_end_cycle (bb, loop->loop_end) + 1;
5558 max_uid_before = get_max_uid ();
5560 /* Split all multi-cycle operations, such as loads. For normal
5561 scheduling, we only do this for branches, as the generated code
5562 would otherwise not be interrupt-safe. When using sploop, it is
5563 safe and beneficial to split them. If any multi-cycle operations
5564 remain after splitting (because we don't handle them yet), we
5565 cannot pipeline the loop. */
5566 delayed_splits = 0;
5567 FOR_BB_INSNS (bb, insn)
5569 if (NONDEBUG_INSN_P (insn))
5571 recog_memoized (insn);
5572 if (split_delayed_nonbranch (insn))
5573 delayed_splits++;
5574 else if (INSN_CODE (insn) >= 0
5575 && get_attr_cycles (insn) > 1)
5576 goto undo_splits;
5580 /* Count the number of insns as well as the number of real insns, and save
5581 the original sequence of insns in case we must restore it later. */
5582 n_insns = n_real_insns = 0;
5583 FOR_BB_INSNS (bb, insn)
5585 n_insns++;
5586 if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
5587 n_real_insns++;
5589 orig_vec = XNEWVEC (rtx, n_insns);
5590 n_insns = 0;
5591 FOR_BB_INSNS (bb, insn)
5592 orig_vec[n_insns++] = insn;
5594 /* Count the unit reservations, and compute a minimum II from that
5595 table. */
5596 count_unit_reqs (unit_reqs, loop->start_label,
5597 PREV_INSN (loop->loop_end));
5598 merge_unit_reqs (unit_reqs);
5600 min_ii = res_mii (unit_reqs);
5601 max_ii = loop_earliest < 15 ? loop_earliest : 14;
5603 /* Make copies of the loop body, up to a maximum number of stages we want
5604 to handle. */
5605 max_parallel = loop_earliest / min_ii + 1;
5607 copies = XCNEWVEC (rtx, (max_parallel + 1) * n_real_insns);
5608 insn_copies = XNEWVEC (rtx *, max_parallel + 1);
5609 for (i = 0; i < max_parallel + 1; i++)
5610 insn_copies[i] = copies + i * n_real_insns;
5612 head_insn = next_nonnote_nondebug_insn (loop->start_label);
5613 tail_insn = prev_real_insn (BB_END (bb));
5615 i = 0;
5616 FOR_BB_INSNS (bb, insn)
5617 if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
5618 insn_copies[0][i++] = insn;
5620 sploop_max_uid_iter0 = get_max_uid ();
5622 /* Generate the copies of the loop body, and save them in the
5623 INSN_COPIES array. */
5624 start_sequence ();
5625 for (i = 0; i < max_parallel; i++)
5627 int j;
5628 rtx this_iter;
5630 this_iter = duplicate_insn_chain (head_insn, tail_insn);
5631 j = 0;
5632 while (this_iter)
5634 rtx prev_stage_insn = insn_copies[i][j];
5635 gcc_assert (INSN_CODE (this_iter) == INSN_CODE (prev_stage_insn));
5637 if (INSN_CODE (this_iter) >= 0
5638 && (get_attr_type (this_iter) == TYPE_LOAD_SHADOW
5639 || get_attr_type (this_iter) == TYPE_MULT_SHADOW))
5641 rtx prev = PREV_INSN (this_iter);
5642 record_delay_slot_pair (prev, this_iter,
5643 get_attr_cycles (prev) - 1, 0);
5645 else
5646 record_delay_slot_pair (prev_stage_insn, this_iter, i, 1);
5648 insn_copies[i + 1][j] = this_iter;
5649 j++;
5650 this_iter = next_nonnote_nondebug_insn (this_iter);
5653 new_insns = get_insns ();
5654 last_insn = insn_copies[max_parallel][n_real_insns - 1];
5655 end_sequence ();
5656 emit_insn_before (new_insns, BB_END (bb));
5658 /* Try to schedule the loop using varying initiation intervals,
5659 starting with the smallest possible and incrementing it
5660 on failure. */
5661 for (sp_ii = min_ii; sp_ii <= max_ii; sp_ii++)
5663 basic_block tmp_bb;
5664 if (dump_file)
5665 fprintf (dump_file, "Trying to schedule for II %d\n", sp_ii);
5667 df_clear_flags (DF_LR_RUN_DCE);
5669 schedule_ebbs_init ();
5670 set_modulo_params (sp_ii, max_parallel, n_real_insns,
5671 sploop_max_uid_iter0);
5672 tmp_bb = schedule_ebb (BB_HEAD (bb), last_insn, true);
5673 schedule_ebbs_finish ();
5675 if (tmp_bb)
5677 if (dump_file)
5678 fprintf (dump_file, "Found schedule with II %d\n", sp_ii);
5679 break;
5683 discard_delay_pairs_above (max_uid_before);
5685 if (sp_ii > max_ii)
5686 goto restore_loop;
5688 stages = insn_get_clock (ss.last_scheduled_iter0) / sp_ii + 1;
5690 if (stages == 1 && sp_ii > 5)
5691 goto restore_loop;
5693 /* At this point, we know we've been successful, unless we find later that
5694 there are too many execute packets for the loop buffer to hold. */
5696 /* Assign reservations to the instructions in the loop. We must find
5697 the stage that contains the full loop kernel, and transfer the
5698 reservations of the instructions contained in it to the corresponding
5699 instructions from iteration 0, which are the only ones we'll keep. */
5700 assign_reservations (BB_HEAD (bb), ss.last_scheduled_insn);
5701 PREV_INSN (BB_END (bb)) = ss.last_scheduled_iter0;
5702 NEXT_INSN (ss.last_scheduled_iter0) = BB_END (bb);
5703 filter_insns_above (bb, sploop_max_uid_iter0);
5705 for (i = 0; i < n_real_insns; i++)
5707 rtx insn = insn_copies[0][i];
5708 int uid = INSN_UID (insn);
5709 int stage = insn_uid_get_clock (uid) / sp_ii;
5711 if (stage + 1 < stages)
5713 int copy_uid;
5714 stage = stages - stage - 1;
5715 copy_uid = INSN_UID (insn_copies[stage][i]);
5716 INSN_INFO_ENTRY (uid).reservation
5717 = INSN_INFO_ENTRY (copy_uid).reservation;
5720 if (stages == 1)
5721 stages++;
5723 /* Compute the number of execute packets the pipelined form of the loop will
5724 require. */
5725 prev = NULL_RTX;
5726 n_execute_packets = 0;
5727 for (insn = loop->start_label; insn != loop->loop_end; insn = NEXT_INSN (insn))
5729 if (NONDEBUG_INSN_P (insn) && GET_MODE (insn) == TImode
5730 && !shadow_p (insn))
5732 n_execute_packets++;
5733 if (prev && insn_get_clock (prev) + 1 != insn_get_clock (insn))
5734 /* We need an extra NOP instruction. */
5735 n_execute_packets++;
5737 prev = insn;
5741 end_packet = ss.last_scheduled_iter0;
5742 while (!NONDEBUG_INSN_P (end_packet) || GET_MODE (end_packet) != TImode)
5743 end_packet = PREV_INSN (end_packet);
5745 /* The earliest cycle in which we can emit the SPKERNEL instruction. */
5746 loop_earliest = (stages - 1) * sp_ii;
5747 if (loop_earliest > insn_get_clock (end_packet))
5749 n_execute_packets++;
5750 end_packet = loop->loop_end;
5752 else
5753 loop_earliest = insn_get_clock (end_packet);
5755 if (n_execute_packets > 14)
5756 goto restore_loop;
5758 /* Generate the spkernel instruction, and place it at the appropriate
5759 spot. */
5760 PUT_MODE (end_packet, VOIDmode);
5762 insn = gen_spkernel (GEN_INT (stages - 1),
5763 const0_rtx, JUMP_LABEL (loop->loop_end));
5764 insn = emit_jump_insn_before (insn, end_packet);
5765 JUMP_LABEL (insn) = JUMP_LABEL (loop->loop_end);
5766 insn_set_clock (insn, loop_earliest);
5767 PUT_MODE (insn, TImode);
5768 INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start = false;
5769 delete_insn (loop->loop_end);
5771 /* Place the mvc and sploop instructions before the loop. */
5772 entry_bb = entry_edge->src;
5774 start_sequence ();
5776 insn = emit_insn (gen_mvilc (loop->iter_reg));
5777 insn = emit_insn (gen_sploop (GEN_INT (sp_ii)));
5779 seq = get_insns ();
5781 if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
5783 basic_block new_bb;
5784 edge e;
5785 edge_iterator ei;
5787 emit_insn_before (seq, BB_HEAD (loop->head));
5788 seq = emit_label_before (gen_label_rtx (), seq);
5790 new_bb = create_basic_block (seq, insn, entry_bb);
5791 FOR_EACH_EDGE (e, ei, loop->incoming)
5793 if (!(e->flags & EDGE_FALLTHRU))
5794 redirect_edge_and_branch_force (e, new_bb);
5795 else
5796 redirect_edge_succ (e, new_bb);
5798 make_edge (new_bb, loop->head, 0);
5800 else
5802 entry_after = BB_END (entry_bb);
5803 while (DEBUG_INSN_P (entry_after)
5804 || (NOTE_P (entry_after)
5805 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK))
5806 entry_after = PREV_INSN (entry_after);
5807 emit_insn_after (seq, entry_after);
5810 end_sequence ();
5812 /* Make sure we don't try to schedule this loop again. */
5813 for (ix = 0; loop->blocks.iterate (ix, &bb); ix++)
5814 bb->flags |= BB_DISABLE_SCHEDULE;
5816 return true;
5818 restore_loop:
5819 if (dump_file)
5820 fprintf (dump_file, "Unable to pipeline loop.\n");
5822 for (i = 1; i < n_insns; i++)
5824 NEXT_INSN (orig_vec[i - 1]) = orig_vec[i];
5825 PREV_INSN (orig_vec[i]) = orig_vec[i - 1];
5827 PREV_INSN (orig_vec[0]) = PREV_INSN (BB_HEAD (bb));
5828 NEXT_INSN (PREV_INSN (BB_HEAD (bb))) = orig_vec[0];
5829 NEXT_INSN (orig_vec[n_insns - 1]) = NEXT_INSN (BB_END (bb));
5830 PREV_INSN (NEXT_INSN (BB_END (bb))) = orig_vec[n_insns - 1];
5831 BB_HEAD (bb) = orig_vec[0];
5832 BB_END (bb) = orig_vec[n_insns - 1];
5833 undo_splits:
5834 free_delay_pairs ();
5835 FOR_BB_INSNS (bb, insn)
5836 if (NONDEBUG_INSN_P (insn))
5837 undo_split_delayed_nonbranch (insn);
5838 return false;
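/* Rough shape of the result (illustrative, assuming TI assembly syntax):
   a successfully pipelined loop is rewritten to use the hardware loop
   buffer, approximately

         MVC    .S2  reg, ILC     ; iteration count
         SPLOOP      ii           ; start filling the loop buffer
     kernel:
         ...                      ; at most 14 execute packets
         SPKERNEL                 ; marks the end of the kernel

   with II the initiation interval found by the search above.  If no II
   up to max_ii yields a schedule, or the kernel would not fit in the
   loop buffer, the original loop body is restored instead.  */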
5841 /* A callback for the hw-doloop pass. Called when a loop we have discovered
5842 turns out not to be optimizable; we have to split the doloop_end pattern
5843 into a subtract and a test. */
5844 static void
5845 hwloop_fail (hwloop_info loop)
5847 rtx insn, test, testreg;
5849 if (dump_file)
5850 fprintf (dump_file, "splitting doloop insn %d\n",
5851 INSN_UID (loop->loop_end));
5852 insn = gen_addsi3 (loop->iter_reg, loop->iter_reg, constm1_rtx);
5853 /* See if we can emit the add at the head of the loop rather than at the
5854 end. */
5855 if (loop->head == NULL
5856 || loop->iter_reg_used_outside
5857 || loop->iter_reg_used
5858 || TEST_HARD_REG_BIT (loop->regs_set_in_loop, REGNO (loop->iter_reg))
5859 || loop->incoming_dest != loop->head
5860 || EDGE_COUNT (loop->head->preds) != 2)
5861 emit_insn_before (insn, loop->loop_end);
5862 else
5864 rtx t = loop->start_label;
5865 while (!NOTE_P (t) || NOTE_KIND (t) != NOTE_INSN_BASIC_BLOCK)
5866 t = NEXT_INSN (t);
5867 emit_insn_after (insn, t);
5870 testreg = SET_DEST (XVECEXP (PATTERN (loop->loop_end), 0, 2));
5871 if (GET_CODE (testreg) == SCRATCH)
5872 testreg = loop->iter_reg;
5873 else
5874 emit_insn_before (gen_movsi (testreg, loop->iter_reg), loop->loop_end);
5876 test = gen_rtx_NE (VOIDmode, testreg, const0_rtx);
5877 insn = emit_jump_insn_before (gen_cbranchsi4 (test, testreg, const0_rtx,
5878 loop->start_label),
5879 loop->loop_end);
5881 JUMP_LABEL (insn) = loop->start_label;
5882 LABEL_NUSES (loop->start_label)++;
5883 delete_insn (loop->loop_end);
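/* Illustrative fallback result: the doloop_end pattern is replaced by an
   explicit decrement of the iteration register, a copy into a predicate
   register when the pattern's scratch requires one, and a conditional
   branch back to the start label while the counter is nonzero.  The
   decrement is placed at the loop head when that is safe, otherwise just
   before the old loop_end insn.  */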
5886 static struct hw_doloop_hooks c6x_doloop_hooks =
5888 hwloop_pattern_reg,
5889 hwloop_optimize,
5890 hwloop_fail
5893 /* Run the hw-doloop pass to modulo-schedule hardware loops, or split the
5894 doloop_end patterns where such optimizations are impossible. */
5895 static void
5896 c6x_hwloops (void)
5898 if (optimize)
5899 reorg_loops (true, &c6x_doloop_hooks);
5902 /* Implement the TARGET_MACHINE_DEPENDENT_REORG pass. We split call insns here
5903 into a sequence that loads the return register and performs the call,
5904 and emit the return label.
5905 If scheduling after reload is requested, it happens here. */
5907 static void
5908 c6x_reorg (void)
5910 basic_block bb;
5911 rtx *call_labels;
5912 bool do_selsched = (c6x_flag_schedule_insns2 && flag_selective_scheduling2
5913 && !maybe_skip_selective_scheduling ());
5915 /* We are freeing block_for_insn in the toplev to keep compatibility
5916 with old MDEP_REORGS that are not CFG based. Recompute it now. */
5917 compute_bb_for_insn ();
5919 df_clear_flags (DF_LR_RUN_DCE);
5920 df_note_add_problem ();
5922 /* If optimizing, we'll have split before scheduling. */
5923 if (optimize == 0)
5924 split_all_insns ();
5926 df_analyze ();
5928 if (c6x_flag_schedule_insns2)
5930 int sz = get_max_uid () * 3 / 2 + 1;
5932 insn_info.create (sz);
5935 /* Make sure the real-jump insns we create are not deleted. When modulo-
5936 scheduling, situations where a reg is only stored in a loop can also
5937 cause dead code when doing the initial unrolling. */
5938 sched_no_dce = true;
5940 c6x_hwloops ();
5942 if (c6x_flag_schedule_insns2)
5944 split_delayed_insns ();
5945 timevar_push (TV_SCHED2);
5946 if (do_selsched)
5947 run_selective_scheduling ();
5948 else
5949 schedule_ebbs ();
5950 conditionalize_after_sched ();
5951 timevar_pop (TV_SCHED2);
5953 free_delay_pairs ();
5955 sched_no_dce = false;
5957 call_labels = XCNEWVEC (rtx, get_max_uid () + 1);
5959 reorg_split_calls (call_labels);
5961 if (c6x_flag_schedule_insns2)
5963 FOR_EACH_BB_FN (bb, cfun)
5964 if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
5965 assign_reservations (BB_HEAD (bb), BB_END (bb));
5968 if (c6x_flag_var_tracking)
5970 timevar_push (TV_VAR_TRACKING);
5971 variable_tracking_main ();
5972 timevar_pop (TV_VAR_TRACKING);
5975 reorg_emit_nops (call_labels);
5977 /* Post-process the schedule to move parallel insns into SEQUENCEs. */
5978 if (c6x_flag_schedule_insns2)
5980 free_delay_pairs ();
5981 c6x_gen_bundles ();
5984 df_finish_pass (false);
5987 /* Called when a function has been assembled. It should perform all the
5988 tasks of ASM_DECLARE_FUNCTION_SIZE in elfos.h, plus target-specific
5989 tasks.
5990 We free the reservation (and other scheduling) information here now that
5991 all insns have been output. */
5992 void
5993 c6x_function_end (FILE *file, const char *fname)
5995 c6x_output_fn_unwind (file);
5997 insn_info.release ();
5999 if (!flag_inhibit_size_directive)
6000 ASM_OUTPUT_MEASURED_SIZE (file, fname);
6003 /* Determine whether X is a shift with code CODE and an integer amount
6004 AMOUNT. */
6005 static bool
6006 shift_p (rtx x, enum rtx_code code, int amount)
6008 return (GET_CODE (x) == code && GET_CODE (XEXP (x, 1)) == CONST_INT
6009 && INTVAL (XEXP (x, 1)) == amount);
6012 /* Compute a (partial) cost for rtx X. Return true if the complete
6013 cost has been computed, and false if subexpressions should be
6014 scanned. In either case, *TOTAL contains the cost result. */
6016 static bool
6017 c6x_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
6018 bool speed)
6020 int cost2 = COSTS_N_INSNS (1);
6021 rtx op0, op1;
6023 switch (code)
6025 case CONST_INT:
6026 if (outer_code == SET || outer_code == PLUS)
6027 *total = satisfies_constraint_IsB (x) ? 0 : cost2;
6028 else if (outer_code == AND || outer_code == IOR || outer_code == XOR
6029 || outer_code == MINUS)
6030 *total = satisfies_constraint_Is5 (x) ? 0 : cost2;
6031 else if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
6032 || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
6033 *total = satisfies_constraint_Iu4 (x) ? 0 : cost2;
6034 else if (outer_code == ASHIFT || outer_code == ASHIFTRT
6035 || outer_code == LSHIFTRT)
6036 *total = satisfies_constraint_Iu5 (x) ? 0 : cost2;
6037 else
6038 *total = cost2;
6039 return true;
6041 case CONST:
6042 case LABEL_REF:
6043 case SYMBOL_REF:
6044 case CONST_DOUBLE:
6045 *total = COSTS_N_INSNS (2);
6046 return true;
6048 case TRUNCATE:
6049 /* Recognize a mult_highpart operation. */
6050 if ((GET_MODE (x) == HImode || GET_MODE (x) == SImode)
6051 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
6052 && GET_MODE (XEXP (x, 0)) == GET_MODE_2XWIDER_MODE (GET_MODE (x))
6053 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6054 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6055 && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (GET_MODE (x)))
6057 rtx mul = XEXP (XEXP (x, 0), 0);
6058 rtx op0 = XEXP (mul, 0);
6059 rtx op1 = XEXP (mul, 1);
6060 enum rtx_code code0 = GET_CODE (op0);
6061 enum rtx_code code1 = GET_CODE (op1);
6063 if ((code0 == code1
6064 && (code0 == SIGN_EXTEND || code0 == ZERO_EXTEND))
6065 || (GET_MODE (x) == HImode
6066 && code0 == ZERO_EXTEND && code1 == SIGN_EXTEND))
6068 if (GET_MODE (x) == HImode)
6069 *total = COSTS_N_INSNS (2);
6070 else
6071 *total = COSTS_N_INSNS (12);
6072 *total += rtx_cost (XEXP (op0, 0), code0, 0, speed);
6073 *total += rtx_cost (XEXP (op1, 0), code1, 0, speed);
6074 return true;
6077 return false;
6079 case ASHIFT:
6080 case ASHIFTRT:
6081 case LSHIFTRT:
6082 if (GET_MODE (x) == DImode)
6083 *total = COSTS_N_INSNS (CONSTANT_P (XEXP (x, 1)) ? 4 : 15);
6084 else
6085 *total = COSTS_N_INSNS (1);
6086 return false;
6088 case PLUS:
6089 case MINUS:
6090 *total = COSTS_N_INSNS (1);
6091 op0 = code == PLUS ? XEXP (x, 0) : XEXP (x, 1);
6092 op1 = code == PLUS ? XEXP (x, 1) : XEXP (x, 0);
6093 if (GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD
6094 && INTEGRAL_MODE_P (GET_MODE (x))
6095 && GET_CODE (op0) == MULT
6096 && GET_CODE (XEXP (op0, 1)) == CONST_INT
6097 && (INTVAL (XEXP (op0, 1)) == 2
6098 || INTVAL (XEXP (op0, 1)) == 4
6099 || (code == PLUS && INTVAL (XEXP (op0, 1)) == 8)))
6101 *total += rtx_cost (XEXP (op0, 0), ASHIFT, 0, speed);
6102 *total += rtx_cost (op1, (enum rtx_code) code, 1, speed);
6103 return true;
6105 return false;
6107 case MULT:
6108 op0 = XEXP (x, 0);
6109 op1 = XEXP (x, 1);
6110 if (GET_MODE (x) == DFmode)
6112 if (TARGET_FP)
6113 *total = COSTS_N_INSNS (speed ? 10 : 1);
6114 else
6115 *total = COSTS_N_INSNS (speed ? 200 : 4);
6117 else if (GET_MODE (x) == SFmode)
6119 if (TARGET_FP)
6120 *total = COSTS_N_INSNS (speed ? 4 : 1);
6121 else
6122 *total = COSTS_N_INSNS (speed ? 100 : 4);
6124 else if (GET_MODE (x) == DImode)
6126 if (TARGET_MPY32
6127 && GET_CODE (op0) == GET_CODE (op1)
6128 && (GET_CODE (op0) == ZERO_EXTEND
6129 || GET_CODE (op0) == SIGN_EXTEND))
6131 *total = COSTS_N_INSNS (speed ? 2 : 1);
6132 op0 = XEXP (op0, 0);
6133 op1 = XEXP (op1, 0);
6135 else
6136 /* Maybe improve this later. */
6137 *total = COSTS_N_INSNS (20);
6139 else if (GET_MODE (x) == SImode)
6141 if (((GET_CODE (op0) == ZERO_EXTEND
6142 || GET_CODE (op0) == SIGN_EXTEND
6143 || shift_p (op0, LSHIFTRT, 16))
6144 && (GET_CODE (op1) == SIGN_EXTEND
6145 || GET_CODE (op1) == ZERO_EXTEND
6146 || scst5_operand (op1, SImode)
6147 || shift_p (op1, ASHIFTRT, 16)
6148 || shift_p (op1, LSHIFTRT, 16)))
6149 || (shift_p (op0, ASHIFTRT, 16)
6150 && (GET_CODE (op1) == SIGN_EXTEND
6151 || shift_p (op1, ASHIFTRT, 16))))
6153 *total = COSTS_N_INSNS (speed ? 2 : 1);
6154 op0 = XEXP (op0, 0);
6155 if (scst5_operand (op1, SImode))
6156 op1 = NULL_RTX;
6157 else
6158 op1 = XEXP (op1, 0);
6160 else if (!speed)
6161 *total = COSTS_N_INSNS (1);
6162 else if (TARGET_MPY32)
6163 *total = COSTS_N_INSNS (4);
6164 else
6165 *total = COSTS_N_INSNS (6);
6167 else if (GET_MODE (x) == HImode)
6168 *total = COSTS_N_INSNS (speed ? 2 : 1);
6170 if (GET_CODE (op0) != REG
6171 && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
6172 *total += rtx_cost (op0, MULT, 0, speed);
6173 if (op1 && GET_CODE (op1) != REG
6174 && (GET_CODE (op1) != SUBREG || GET_CODE (SUBREG_REG (op1)) != REG))
6175 *total += rtx_cost (op1, MULT, 1, speed);
6176 return true;
6178 case UDIV:
6179 case DIV:
6180 /* This is a bit random; assuming on average there'll be 16 leading
6181 zeros. FIXME: estimate better for constant dividends. */
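/* (C6X has no integer divide instruction; the figure below presumably models
an iterative software sequence of roughly three insns per quotient bit for
the assumed 16 significant bits, plus setup.) */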
6182 *total = COSTS_N_INSNS (6 + 3 * 16);
6183 return false;
6185 case IF_THEN_ELSE:
6186 /* Recognize the cmp_and/ior patterns. */
6187 op0 = XEXP (x, 0);
6188 if ((GET_CODE (op0) == EQ || GET_CODE (op0) == NE)
6189 && REG_P (XEXP (op0, 0))
6190 && XEXP (op0, 1) == const0_rtx
6191 && rtx_equal_p (XEXP (x, 1), XEXP (op0, 0)))
6193 *total = rtx_cost (XEXP (x, 1), (enum rtx_code) outer_code,
6194 opno, speed);
6195 return false;
6197 return false;
6199 default:
6200 return false;
6204 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
6206 static bool
6207 c6x_vector_mode_supported_p (enum machine_mode mode)
6209 switch (mode)
6211 case V2HImode:
6212 case V4QImode:
6213 case V2SImode:
6214 case V4HImode:
6215 case V8QImode:
6216 return true;
6217 default:
6218 return false;
6222 /* Implements TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
6223 static enum machine_mode
6224 c6x_preferred_simd_mode (enum machine_mode mode)
6226 switch (mode)
6228 case HImode:
6229 return V2HImode;
6230 case QImode:
6231 return V4QImode;
6233 default:
6234 return word_mode;
6238 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
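/* Besides the default scalar modes, accept every fixed-point mode (these
back the _Fract and _Accum types) whose precision fits in two words,
i.e. up to 64 bits. */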
6240 static bool
6241 c6x_scalar_mode_supported_p (enum machine_mode mode)
6243 if (ALL_FIXED_POINT_MODE_P (mode)
6244 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
6245 return true;
6247 return default_scalar_mode_supported_p (mode);
6250 /* Output a reference from a function exception table to the type_info
6251 object X. Output these via a special assembly directive. */
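/* For instance, a SYMBOL_REF typeinfo reference is emitted as
.ehtype <symbol>
so that the C6X-specific relocation is used, while a plain CONST_INT
entry is emitted with an ordinary .word directive. */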
6253 static bool
6254 c6x_output_ttype (rtx x)
6256 /* Use special relocations for symbol references. */
6257 if (GET_CODE (x) != CONST_INT)
6258 fputs ("\t.ehtype\t", asm_out_file);
6259 else
6260 fputs ("\t.word\t", asm_out_file);
6261 output_addr_const (asm_out_file, x);
6262 fputc ('\n', asm_out_file);
6264 return TRUE;
6267 /* Modify the return address of the current function. */
6269 void
6270 c6x_set_return_address (rtx source, rtx scratch)
6272 struct c6x_frame frame;
6273 rtx addr;
6274 HOST_WIDE_INT offset;
6276 c6x_compute_frame_layout (&frame);
6277 if (! c6x_save_reg (RETURN_ADDR_REGNO))
6278 emit_move_insn (gen_rtx_REG (Pmode, RETURN_ADDR_REGNO), source);
6279 else
6282 if (frame_pointer_needed)
6284 addr = hard_frame_pointer_rtx;
6285 offset = frame.b3_offset;
6287 else
6289 addr = stack_pointer_rtx;
6290 offset = frame.to_allocate - frame.b3_offset;
6293 /* TODO: Use base+offset loads where possible. */
6294 if (offset)
6296 HOST_WIDE_INT low = trunc_int_for_mode (offset, HImode);
6298 emit_insn (gen_movsi_high (scratch, GEN_INT (low)));
6299 if (low != offset)
6300 emit_insn (gen_movsi_lo_sum (scratch, scratch, GEN_INT (offset)));
6301 emit_insn (gen_addsi3 (scratch, addr, scratch));
6302 addr = scratch;
6305 emit_move_insn (gen_frame_mem (Pmode, addr), source);
6309 /* We save pairs of registers using a DImode store. Describe the component
6310 registers for DWARF generation code. */
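/* For example, a DImode value starting in hard register REGNO is described
as (parallel [(reg:SI REGNO) (reg:SI REGNO+1)]) on little-endian targets;
the component order is reversed for big-endian. */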
6312 static rtx
6313 c6x_dwarf_register_span (rtx rtl)
6315 unsigned regno;
6316 unsigned real_regno;
6317 int nregs;
6318 int i;
6319 rtx p;
6321 regno = REGNO (rtl);
6322 nregs = HARD_REGNO_NREGS (regno, GET_MODE (rtl));
6323 if (nregs == 1)
6324 return NULL_RTX;
6326 p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
6327 for (i = 0; i < nregs; i++)
6329 if (TARGET_BIG_ENDIAN)
6330 real_regno = regno + nregs - (i + 1);
6331 else
6332 real_regno = regno + i;
6334 XVECEXP (p, 0, i) = gen_rtx_REG (SImode, real_regno);
6337 return p;
6340 /* Codes for all the C6X builtins. */
6341 enum c6x_builtins
6343 C6X_BUILTIN_SADD,
6344 C6X_BUILTIN_SSUB,
6345 C6X_BUILTIN_ADD2,
6346 C6X_BUILTIN_SUB2,
6347 C6X_BUILTIN_ADD4,
6348 C6X_BUILTIN_SUB4,
6349 C6X_BUILTIN_SADD2,
6350 C6X_BUILTIN_SSUB2,
6351 C6X_BUILTIN_SADDU4,
6353 C6X_BUILTIN_SMPY,
6354 C6X_BUILTIN_SMPYH,
6355 C6X_BUILTIN_SMPYHL,
6356 C6X_BUILTIN_SMPYLH,
6357 C6X_BUILTIN_MPY2,
6358 C6X_BUILTIN_SMPY2,
6360 C6X_BUILTIN_CLRR,
6361 C6X_BUILTIN_EXTR,
6362 C6X_BUILTIN_EXTRU,
6364 C6X_BUILTIN_SSHL,
6365 C6X_BUILTIN_SUBC,
6366 C6X_BUILTIN_ABS,
6367 C6X_BUILTIN_ABS2,
6368 C6X_BUILTIN_AVG2,
6369 C6X_BUILTIN_AVGU4,
6371 C6X_BUILTIN_MAX
6375 static GTY(()) tree c6x_builtin_decls[C6X_BUILTIN_MAX];
6377 /* Return the C6X builtin for CODE. */
6378 static tree
6379 c6x_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6381 if (code >= C6X_BUILTIN_MAX)
6382 return error_mark_node;
6384 return c6x_builtin_decls[code];
6387 #define def_builtin(NAME, TYPE, CODE) \
6388 do { \
6389 tree bdecl; \
6390 bdecl = add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
6391 NULL, NULL_TREE); \
6392 c6x_builtin_decls[CODE] = bdecl; \
6393 } while (0)
6395 /* Set up all builtin functions for this target. */
6396 static void
6397 c6x_init_builtins (void)
6399 tree V4QI_type_node = build_vector_type (unsigned_intQI_type_node, 4);
6400 tree V2HI_type_node = build_vector_type (intHI_type_node, 2);
6401 tree V2SI_type_node = build_vector_type (intSI_type_node, 2);
6402 tree int_ftype_int
6403 = build_function_type_list (integer_type_node, integer_type_node,
6404 NULL_TREE);
6405 tree int_ftype_int_int
6406 = build_function_type_list (integer_type_node, integer_type_node,
6407 integer_type_node, NULL_TREE);
6408 tree v2hi_ftype_v2hi
6409 = build_function_type_list (V2HI_type_node, V2HI_type_node, NULL_TREE);
6410 tree v4qi_ftype_v4qi_v4qi
6411 = build_function_type_list (V4QI_type_node, V4QI_type_node,
6412 V4QI_type_node, NULL_TREE);
6413 tree v2hi_ftype_v2hi_v2hi
6414 = build_function_type_list (V2HI_type_node, V2HI_type_node,
6415 V2HI_type_node, NULL_TREE);
6416 tree v2si_ftype_v2hi_v2hi
6417 = build_function_type_list (V2SI_type_node, V2HI_type_node,
6418 V2HI_type_node, NULL_TREE);
6420 def_builtin ("__builtin_c6x_sadd", int_ftype_int_int,
6421 C6X_BUILTIN_SADD);
6422 def_builtin ("__builtin_c6x_ssub", int_ftype_int_int,
6423 C6X_BUILTIN_SSUB);
6424 def_builtin ("__builtin_c6x_add2", v2hi_ftype_v2hi_v2hi,
6425 C6X_BUILTIN_ADD2);
6426 def_builtin ("__builtin_c6x_sub2", v2hi_ftype_v2hi_v2hi,
6427 C6X_BUILTIN_SUB2);
6428 def_builtin ("__builtin_c6x_add4", v4qi_ftype_v4qi_v4qi,
6429 C6X_BUILTIN_ADD4);
6430 def_builtin ("__builtin_c6x_sub4", v4qi_ftype_v4qi_v4qi,
6431 C6X_BUILTIN_SUB4);
6432 def_builtin ("__builtin_c6x_mpy2", v2si_ftype_v2hi_v2hi,
6433 C6X_BUILTIN_MPY2);
6434 def_builtin ("__builtin_c6x_sadd2", v2hi_ftype_v2hi_v2hi,
6435 C6X_BUILTIN_SADD2);
6436 def_builtin ("__builtin_c6x_ssub2", v2hi_ftype_v2hi_v2hi,
6437 C6X_BUILTIN_SSUB2);
6438 def_builtin ("__builtin_c6x_saddu4", v4qi_ftype_v4qi_v4qi,
6439 C6X_BUILTIN_SADDU4);
6440 def_builtin ("__builtin_c6x_smpy2", v2si_ftype_v2hi_v2hi,
6441 C6X_BUILTIN_SMPY2);
6443 def_builtin ("__builtin_c6x_smpy", int_ftype_int_int,
6444 C6X_BUILTIN_SMPY);
6445 def_builtin ("__builtin_c6x_smpyh", int_ftype_int_int,
6446 C6X_BUILTIN_SMPYH);
6447 def_builtin ("__builtin_c6x_smpyhl", int_ftype_int_int,
6448 C6X_BUILTIN_SMPYHL);
6449 def_builtin ("__builtin_c6x_smpylh", int_ftype_int_int,
6450 C6X_BUILTIN_SMPYLH);
6452 def_builtin ("__builtin_c6x_sshl", int_ftype_int_int,
6453 C6X_BUILTIN_SSHL);
6454 def_builtin ("__builtin_c6x_subc", int_ftype_int_int,
6455 C6X_BUILTIN_SUBC);
6457 def_builtin ("__builtin_c6x_avg2", v2hi_ftype_v2hi_v2hi,
6458 C6X_BUILTIN_AVG2);
6459 def_builtin ("__builtin_c6x_avgu4", v4qi_ftype_v4qi_v4qi,
6460 C6X_BUILTIN_AVGU4);
6462 def_builtin ("__builtin_c6x_clrr", int_ftype_int_int,
6463 C6X_BUILTIN_CLRR);
6464 def_builtin ("__builtin_c6x_extr", int_ftype_int_int,
6465 C6X_BUILTIN_EXTR);
6466 def_builtin ("__builtin_c6x_extru", int_ftype_int_int,
6467 C6X_BUILTIN_EXTRU);
6469 def_builtin ("__builtin_c6x_abs", int_ftype_int, C6X_BUILTIN_ABS);
6470 def_builtin ("__builtin_c6x_abs2", v2hi_ftype_v2hi, C6X_BUILTIN_ABS2);
6474 struct builtin_description
6476 const enum insn_code icode;
6477 const char *const name;
6478 const enum c6x_builtins code;
6481 static const struct builtin_description bdesc_2arg[] =
6483 { CODE_FOR_saddsi3, "__builtin_c6x_sadd", C6X_BUILTIN_SADD },
6484 { CODE_FOR_ssubsi3, "__builtin_c6x_ssub", C6X_BUILTIN_SSUB },
6485 { CODE_FOR_addv2hi3, "__builtin_c6x_add2", C6X_BUILTIN_ADD2 },
6486 { CODE_FOR_subv2hi3, "__builtin_c6x_sub2", C6X_BUILTIN_SUB2 },
6487 { CODE_FOR_addv4qi3, "__builtin_c6x_add4", C6X_BUILTIN_ADD4 },
6488 { CODE_FOR_subv4qi3, "__builtin_c6x_sub4", C6X_BUILTIN_SUB4 },
6489 { CODE_FOR_ss_addv2hi3, "__builtin_c6x_sadd2", C6X_BUILTIN_SADD2 },
6490 { CODE_FOR_ss_subv2hi3, "__builtin_c6x_ssub2", C6X_BUILTIN_SSUB2 },
6491 { CODE_FOR_us_addv4qi3, "__builtin_c6x_saddu4", C6X_BUILTIN_SADDU4 },
6493 { CODE_FOR_subcsi3, "__builtin_c6x_subc", C6X_BUILTIN_SUBC },
6494 { CODE_FOR_ss_ashlsi3, "__builtin_c6x_sshl", C6X_BUILTIN_SSHL },
6496 { CODE_FOR_avgv2hi3, "__builtin_c6x_avg2", C6X_BUILTIN_AVG2 },
6497 { CODE_FOR_uavgv4qi3, "__builtin_c6x_avgu4", C6X_BUILTIN_AVGU4 },
6499 { CODE_FOR_mulhqsq3, "__builtin_c6x_smpy", C6X_BUILTIN_SMPY },
6500 { CODE_FOR_mulhqsq3_hh, "__builtin_c6x_smpyh", C6X_BUILTIN_SMPYH },
6501 { CODE_FOR_mulhqsq3_lh, "__builtin_c6x_smpylh", C6X_BUILTIN_SMPYLH },
6502 { CODE_FOR_mulhqsq3_hl, "__builtin_c6x_smpyhl", C6X_BUILTIN_SMPYHL },
6504 { CODE_FOR_mulv2hqv2sq3, "__builtin_c6x_smpy2", C6X_BUILTIN_SMPY2 },
6506 { CODE_FOR_clrr, "__builtin_c6x_clrr", C6X_BUILTIN_CLRR },
6507 { CODE_FOR_extr, "__builtin_c6x_extr", C6X_BUILTIN_EXTR },
6508 { CODE_FOR_extru, "__builtin_c6x_extru", C6X_BUILTIN_EXTRU }
6511 static const struct builtin_description bdesc_1arg[] =
6513 { CODE_FOR_ssabssi2, "__builtin_c6x_abs", C6X_BUILTIN_ABS },
6514 { CODE_FOR_ssabsv2hi2, "__builtin_c6x_abs2", C6X_BUILTIN_ABS2 }
6517 /* Errors in the source file can cause expand_expr to return const0_rtx
6518 where we expect a vector. To avoid crashing, use one of the vector
6519 clear instructions. */
6520 static rtx
6521 safe_vector_operand (rtx x, enum machine_mode mode)
6523 if (x != const0_rtx)
6524 return x;
6525 x = gen_reg_rtx (SImode);
6527 emit_insn (gen_movsi (x, CONST0_RTX (SImode)));
6528 return gen_lowpart (mode, x);
6531 /* Subroutine of c6x_expand_builtin to take care of binop insns. MATCH_OP is
6532 true if the insn's first input operand must match the output (as for clrr). */
6534 static rtx
6535 c6x_expand_binop_builtin (enum insn_code icode, tree exp, rtx target,
6536 bool match_op)
6538 int offs = match_op ? 1 : 0;
6539 rtx pat;
6540 tree arg0 = CALL_EXPR_ARG (exp, 0);
6541 tree arg1 = CALL_EXPR_ARG (exp, 1);
6542 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6543 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6544 enum machine_mode op0mode = GET_MODE (op0);
6545 enum machine_mode op1mode = GET_MODE (op1);
6546 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6547 enum machine_mode mode0 = insn_data[icode].operand[1 + offs].mode;
6548 enum machine_mode mode1 = insn_data[icode].operand[2 + offs].mode;
6549 rtx ret = target;
6551 if (VECTOR_MODE_P (mode0))
6552 op0 = safe_vector_operand (op0, mode0);
6553 if (VECTOR_MODE_P (mode1))
6554 op1 = safe_vector_operand (op1, mode1);
6556 if (! target
6557 || GET_MODE (target) != tmode
6558 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
6560 if (tmode == SQmode || tmode == V2SQmode)
6562 ret = gen_reg_rtx (tmode == SQmode ? SImode : V2SImode);
6563 target = gen_lowpart (tmode, ret);
6565 else
6566 target = gen_reg_rtx (tmode);
6569 if ((op0mode == V2HImode || op0mode == SImode || op0mode == VOIDmode)
6570 && (mode0 == V2HQmode || mode0 == HQmode || mode0 == SQmode))
6572 op0mode = mode0;
6573 op0 = gen_lowpart (mode0, op0);
6575 if ((op1mode == V2HImode || op1mode == SImode || op1mode == VOIDmode)
6576 && (mode1 == V2HQmode || mode1 == HQmode || mode1 == SQmode))
6578 op1mode = mode1;
6579 op1 = gen_lowpart (mode1, op1);
6581 /* In case the insn wants input operands in modes different from
6582 the result, abort. */
6583 gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
6584 && (op1mode == mode1 || op1mode == VOIDmode));
6586 if (! (*insn_data[icode].operand[1 + offs].predicate) (op0, mode0))
6587 op0 = copy_to_mode_reg (mode0, op0);
6588 if (! (*insn_data[icode].operand[2 + offs].predicate) (op1, mode1))
6589 op1 = copy_to_mode_reg (mode1, op1);
6591 if (match_op)
6592 pat = GEN_FCN (icode) (target, target, op0, op1);
6593 else
6594 pat = GEN_FCN (icode) (target, op0, op1);
6596 if (! pat)
6597 return 0;
6599 emit_insn (pat);
6601 return ret;
6604 /* Subroutine of c6x_expand_builtin to take care of unop insns. */
6606 static rtx
6607 c6x_expand_unop_builtin (enum insn_code icode, tree exp,
6608 rtx target)
6610 rtx pat;
6611 tree arg0 = CALL_EXPR_ARG (exp, 0);
6612 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6613 enum machine_mode op0mode = GET_MODE (op0);
6614 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6615 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
6617 if (! target
6618 || GET_MODE (target) != tmode
6619 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
6620 target = gen_reg_rtx (tmode);
6622 if (VECTOR_MODE_P (mode0))
6623 op0 = safe_vector_operand (op0, mode0);
6625 if (op0mode == SImode && mode0 == HImode)
6627 op0mode = HImode;
6628 op0 = gen_lowpart (HImode, op0);
6630 gcc_assert (op0mode == mode0 || op0mode == VOIDmode);
6632 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
6633 op0 = copy_to_mode_reg (mode0, op0);
6635 pat = GEN_FCN (icode) (target, op0);
6636 if (! pat)
6637 return 0;
6638 emit_insn (pat);
6639 return target;
6642 /* Expand an expression EXP that calls a built-in function,
6643 with result going to TARGET if that's convenient
6644 (and in mode MODE if that's convenient).
6645 SUBTARGET may be used as the target for computing one of EXP's operands.
6646 IGNORE is nonzero if the value is to be ignored. */
6648 static rtx
6649 c6x_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
6650 rtx subtarget ATTRIBUTE_UNUSED,
6651 enum machine_mode mode ATTRIBUTE_UNUSED,
6652 int ignore ATTRIBUTE_UNUSED)
6654 size_t i;
6655 const struct builtin_description *d;
6656 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6657 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6659 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
6660 if (d->code == fcode)
6661 return c6x_expand_binop_builtin (d->icode, exp, target,
6662 fcode == C6X_BUILTIN_CLRR);
6664 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
6665 if (d->code == fcode)
6666 return c6x_expand_unop_builtin (d->icode, exp, target);
6668 gcc_unreachable ();
6671 /* Target unwind frame info is generated from dwarf CFI directives, so
6672 always output dwarf2 unwind info. */
6674 static enum unwind_info_type
6675 c6x_debug_unwind_info (void)
6677 if (flag_unwind_tables || flag_exceptions)
6678 return UI_DWARF2;
6680 return default_debug_unwind_info ();
6683 /* Target Structure. */
6685 /* Initialize the GCC target structure. */
6686 #undef TARGET_FUNCTION_ARG
6687 #define TARGET_FUNCTION_ARG c6x_function_arg
6688 #undef TARGET_FUNCTION_ARG_ADVANCE
6689 #define TARGET_FUNCTION_ARG_ADVANCE c6x_function_arg_advance
6690 #undef TARGET_FUNCTION_ARG_BOUNDARY
6691 #define TARGET_FUNCTION_ARG_BOUNDARY c6x_function_arg_boundary
6692 #undef TARGET_FUNCTION_ARG_ROUND_BOUNDARY
6693 #define TARGET_FUNCTION_ARG_ROUND_BOUNDARY \
6694 c6x_function_arg_round_boundary
6695 #undef TARGET_FUNCTION_VALUE_REGNO_P
6696 #define TARGET_FUNCTION_VALUE_REGNO_P c6x_function_value_regno_p
6697 #undef TARGET_FUNCTION_VALUE
6698 #define TARGET_FUNCTION_VALUE c6x_function_value
6699 #undef TARGET_LIBCALL_VALUE
6700 #define TARGET_LIBCALL_VALUE c6x_libcall_value
6701 #undef TARGET_RETURN_IN_MEMORY
6702 #define TARGET_RETURN_IN_MEMORY c6x_return_in_memory
6703 #undef TARGET_RETURN_IN_MSB
6704 #define TARGET_RETURN_IN_MSB c6x_return_in_msb
6705 #undef TARGET_PASS_BY_REFERENCE
6706 #define TARGET_PASS_BY_REFERENCE c6x_pass_by_reference
6707 #undef TARGET_CALLEE_COPIES
6708 #define TARGET_CALLEE_COPIES c6x_callee_copies
6709 #undef TARGET_STRUCT_VALUE_RTX
6710 #define TARGET_STRUCT_VALUE_RTX c6x_struct_value_rtx
6711 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
6712 #define TARGET_FUNCTION_OK_FOR_SIBCALL c6x_function_ok_for_sibcall
6714 #undef TARGET_ASM_OUTPUT_MI_THUNK
6715 #define TARGET_ASM_OUTPUT_MI_THUNK c6x_output_mi_thunk
6716 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
6717 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK c6x_can_output_mi_thunk
6719 #undef TARGET_BUILD_BUILTIN_VA_LIST
6720 #define TARGET_BUILD_BUILTIN_VA_LIST c6x_build_builtin_va_list
6722 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
6723 #define TARGET_ASM_TRAMPOLINE_TEMPLATE c6x_asm_trampoline_template
6724 #undef TARGET_TRAMPOLINE_INIT
6725 #define TARGET_TRAMPOLINE_INIT c6x_initialize_trampoline
6727 #undef TARGET_LEGITIMATE_CONSTANT_P
6728 #define TARGET_LEGITIMATE_CONSTANT_P c6x_legitimate_constant_p
6729 #undef TARGET_LEGITIMATE_ADDRESS_P
6730 #define TARGET_LEGITIMATE_ADDRESS_P c6x_legitimate_address_p
6732 #undef TARGET_IN_SMALL_DATA_P
6733 #define TARGET_IN_SMALL_DATA_P c6x_in_small_data_p
6734 #undef TARGET_ASM_SELECT_RTX_SECTION
6735 #define TARGET_ASM_SELECT_RTX_SECTION c6x_select_rtx_section
6736 #undef TARGET_ASM_SELECT_SECTION
6737 #define TARGET_ASM_SELECT_SECTION c6x_elf_select_section
6738 #undef TARGET_ASM_UNIQUE_SECTION
6739 #define TARGET_ASM_UNIQUE_SECTION c6x_elf_unique_section
6740 #undef TARGET_SECTION_TYPE_FLAGS
6741 #define TARGET_SECTION_TYPE_FLAGS c6x_section_type_flags
6742 #undef TARGET_HAVE_SRODATA_SECTION
6743 #define TARGET_HAVE_SRODATA_SECTION true
6744 #undef TARGET_ASM_MERGEABLE_RODATA_PREFIX
6745 #define TARGET_ASM_MERGEABLE_RODATA_PREFIX ".const"
6747 #undef TARGET_OPTION_OVERRIDE
6748 #define TARGET_OPTION_OVERRIDE c6x_option_override
6749 #undef TARGET_CONDITIONAL_REGISTER_USAGE
6750 #define TARGET_CONDITIONAL_REGISTER_USAGE c6x_conditional_register_usage
6752 #undef TARGET_INIT_LIBFUNCS
6753 #define TARGET_INIT_LIBFUNCS c6x_init_libfuncs
6754 #undef TARGET_LIBFUNC_GNU_PREFIX
6755 #define TARGET_LIBFUNC_GNU_PREFIX true
6757 #undef TARGET_SCALAR_MODE_SUPPORTED_P
6758 #define TARGET_SCALAR_MODE_SUPPORTED_P c6x_scalar_mode_supported_p
6759 #undef TARGET_VECTOR_MODE_SUPPORTED_P
6760 #define TARGET_VECTOR_MODE_SUPPORTED_P c6x_vector_mode_supported_p
6761 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
6762 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE c6x_preferred_simd_mode
6764 #undef TARGET_RTX_COSTS
6765 #define TARGET_RTX_COSTS c6x_rtx_costs
6767 #undef TARGET_SCHED_INIT
6768 #define TARGET_SCHED_INIT c6x_sched_init
6769 #undef TARGET_SCHED_SET_SCHED_FLAGS
6770 #define TARGET_SCHED_SET_SCHED_FLAGS c6x_set_sched_flags
6771 #undef TARGET_SCHED_ADJUST_COST
6772 #define TARGET_SCHED_ADJUST_COST c6x_adjust_cost
6773 #undef TARGET_SCHED_ISSUE_RATE
6774 #define TARGET_SCHED_ISSUE_RATE c6x_issue_rate
6775 #undef TARGET_SCHED_VARIABLE_ISSUE
6776 #define TARGET_SCHED_VARIABLE_ISSUE c6x_variable_issue
6777 #undef TARGET_SCHED_REORDER
6778 #define TARGET_SCHED_REORDER c6x_sched_reorder
6779 #undef TARGET_SCHED_REORDER2
6780 #define TARGET_SCHED_REORDER2 c6x_sched_reorder2
6781 #undef TARGET_SCHED_DFA_NEW_CYCLE
6782 #define TARGET_SCHED_DFA_NEW_CYCLE c6x_dfa_new_cycle
6783 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
6784 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN c6x_sched_dfa_pre_cycle_insn
6785 #undef TARGET_SCHED_EXPOSED_PIPELINE
6786 #define TARGET_SCHED_EXPOSED_PIPELINE true
6788 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
6789 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT c6x_alloc_sched_context
6790 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
6791 #define TARGET_SCHED_INIT_SCHED_CONTEXT c6x_init_sched_context
6792 #undef TARGET_SCHED_SET_SCHED_CONTEXT
6793 #define TARGET_SCHED_SET_SCHED_CONTEXT c6x_set_sched_context
6794 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
6795 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT c6x_clear_sched_context
6796 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
6797 #define TARGET_SCHED_FREE_SCHED_CONTEXT c6x_free_sched_context
6799 #undef TARGET_CAN_ELIMINATE
6800 #define TARGET_CAN_ELIMINATE c6x_can_eliminate
6802 #undef TARGET_PREFERRED_RENAME_CLASS
6803 #define TARGET_PREFERRED_RENAME_CLASS c6x_preferred_rename_class
6805 #undef TARGET_MACHINE_DEPENDENT_REORG
6806 #define TARGET_MACHINE_DEPENDENT_REORG c6x_reorg
6808 #undef TARGET_ASM_FILE_START
6809 #define TARGET_ASM_FILE_START c6x_file_start
6811 #undef TARGET_PRINT_OPERAND
6812 #define TARGET_PRINT_OPERAND c6x_print_operand
6813 #undef TARGET_PRINT_OPERAND_ADDRESS
6814 #define TARGET_PRINT_OPERAND_ADDRESS c6x_print_operand_address
6815 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
6816 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P c6x_print_operand_punct_valid_p
6818 /* C6x unwinding tables use a different format for the typeinfo tables. */
6819 #undef TARGET_ASM_TTYPE
6820 #define TARGET_ASM_TTYPE c6x_output_ttype
6822 /* The C6x ABI follows the ARM EABI exception handling rules. */
6823 #undef TARGET_ARM_EABI_UNWINDER
6824 #define TARGET_ARM_EABI_UNWINDER true
6826 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
6827 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY c6x_asm_emit_except_personality
6829 #undef TARGET_ASM_INIT_SECTIONS
6830 #define TARGET_ASM_INIT_SECTIONS c6x_asm_init_sections
6832 #undef TARGET_DEBUG_UNWIND_INFO
6833 #define TARGET_DEBUG_UNWIND_INFO c6x_debug_unwind_info
6835 #undef TARGET_DWARF_REGISTER_SPAN
6836 #define TARGET_DWARF_REGISTER_SPAN c6x_dwarf_register_span
6838 #undef TARGET_INIT_BUILTINS
6839 #define TARGET_INIT_BUILTINS c6x_init_builtins
6840 #undef TARGET_EXPAND_BUILTIN
6841 #define TARGET_EXPAND_BUILTIN c6x_expand_builtin
6842 #undef TARGET_BUILTIN_DECL
6843 #define TARGET_BUILTIN_DECL c6x_builtin_decl
6845 struct gcc_target targetm = TARGET_INITIALIZER;
6847 #include "gt-c6x.h"