gcc/config/c6x/c6x.c
1 /* Target Code for TI C6X
2 Copyright (C) 2010-2017 Free Software Foundation, Inc.
3 Contributed by Andrew Jenner <andrew@codesourcery.com>
4 Contributed by Bernd Schmidt <bernds@codesourcery.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple-expr.h"
30 #include "cfghooks.h"
31 #include "df.h"
32 #include "memmodel.h"
33 #include "tm_p.h"
34 #include "stringpool.h"
35 #include "attribs.h"
36 #include "optabs.h"
37 #include "regs.h"
38 #include "emit-rtl.h"
39 #include "recog.h"
40 #include "cgraph.h"
41 #include "diagnostic-core.h"
42 #include "stor-layout.h"
43 #include "varasm.h"
44 #include "calls.h"
45 #include "output.h"
46 #include "insn-attr.h"
47 #include "explow.h"
48 #include "expr.h"
49 #include "cfgrtl.h"
50 #include "sched-int.h"
51 #include "tm-constrs.h"
52 #include "langhooks.h"
53 #include "sel-sched.h"
54 #include "debug.h"
55 #include "hw-doloop.h"
56 #include "regrename.h"
57 #include "dumpfile.h"
58 #include "builtins.h"
60 /* This file should be included last. */
61 #include "target-def.h"
63 /* Table of supported architecture variants. */
64 typedef struct
66 const char *arch;
67 enum c6x_cpu_type type;
68 unsigned short features;
69 } c6x_arch_table;
71 /* A list of all ISAs, mapping each one to a representative device.
72 Used for -march selection. */
73 static const c6x_arch_table all_isas[] =
75 #define C6X_ISA(NAME,DEVICE,FLAGS) \
76 { NAME, DEVICE, FLAGS },
77 #include "c6x-isas.def"
78 #undef C6X_ISA
79 { NULL, C6X_CPU_C62X, 0 }
82 /* This is the parsed result of the "-march=" option, if given. */
83 enum c6x_cpu_type c6x_arch = C6X_DEFAULT_ARCH;
85 /* A mask of insn types that are allowed by the architecture selected by
86 the -march option. */
87 unsigned long c6x_insn_mask = C6X_DEFAULT_INSN_MASK;
89 /* The instruction that is being output (as obtained from FINAL_PRESCAN_INSN).  */
91 static rtx_insn *c6x_current_insn = NULL;
93 /* A decl we build to access __c6xabi_DSBT_base. */
94 static GTY(()) tree dsbt_decl;
96 /* Determines whether we run our final scheduling pass or not. We always
97 avoid the normal second scheduling pass. */
98 static int c6x_flag_schedule_insns2;
100 /* Determines whether we run variable tracking in machine dependent
101 reorganization. */
102 static int c6x_flag_var_tracking;
104 /* Determines whether we use modulo scheduling. */
105 static int c6x_flag_modulo_sched;
107 /* Record the state of flag_pic before we set it to 1 for DSBT. */
108 int c6x_initial_flag_pic;
110 typedef struct
112 /* We record the clock cycle for every insn during scheduling. */
113 int clock;
114 /* After scheduling, we run assign_reservations to choose unit
115 reservations for all insns. These are recorded here. */
116 int reservation;
117 /* Records the new condition for insns which must be made
118 conditional after scheduling. An entry of NULL_RTX means no such
119 change is necessary. */
120 rtx new_cond;
121 /* True for the first insn that was scheduled in an ebb. */
122 bool ebb_start;
123 /* The scheduler state after the insn, transformed into a mask of UNIT_QID
124 bits rather than storing the state. Meaningful only for the last
125 insn in a cycle. */
126 unsigned int unit_mask;
127 } c6x_sched_insn_info;
130 /* Record a c6x_sched_insn_info structure for every insn in the function. */
131 static vec<c6x_sched_insn_info> insn_info;
133 #define INSN_INFO_LENGTH (insn_info).length ()
134 #define INSN_INFO_ENTRY(N) (insn_info[(N)])
136 static bool done_cfi_sections;
138 #define RESERVATION_FLAG_D 1
139 #define RESERVATION_FLAG_L 2
140 #define RESERVATION_FLAG_S 4
141 #define RESERVATION_FLAG_M 8
142 #define RESERVATION_FLAG_DL (RESERVATION_FLAG_D | RESERVATION_FLAG_L)
143 #define RESERVATION_FLAG_DS (RESERVATION_FLAG_D | RESERVATION_FLAG_S)
144 #define RESERVATION_FLAG_LS (RESERVATION_FLAG_L | RESERVATION_FLAG_S)
145 #define RESERVATION_FLAG_DLS (RESERVATION_FLAG_D | RESERVATION_FLAG_LS)
147 /* The DFA names of the units. */
148 static const char *const c6x_unit_names[] =
150 "d1", "l1", "s1", "m1", "fps1", "fpl1", "adddps1", "adddpl1",
151 "d2", "l2", "s2", "m2", "fps2", "fpl2", "adddps2", "adddpl2"
154 /* The DFA unit number for each unit in c6x_unit_names[]. */
155 static int c6x_unit_codes[ARRAY_SIZE (c6x_unit_names)];
157 /* Unit query IDs. */
158 #define UNIT_QID_D1 0
159 #define UNIT_QID_L1 1
160 #define UNIT_QID_S1 2
161 #define UNIT_QID_M1 3
162 #define UNIT_QID_FPS1 4
163 #define UNIT_QID_FPL1 5
164 #define UNIT_QID_ADDDPS1 6
165 #define UNIT_QID_ADDDPL1 7
166 #define UNIT_QID_SIDE_OFFSET 8
168 #define RESERVATION_S1 2
169 #define RESERVATION_S2 10
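/* RESERVATION_S1 and RESERVATION_S2 are the indexes of the "s1" and "s2"
   entries in c6x_unit_names[] above.  */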
171 /* An enum for the unit requirements we count in the UNIT_REQS table. */
172 enum unitreqs
174 UNIT_REQ_D,
175 UNIT_REQ_L,
176 UNIT_REQ_S,
177 UNIT_REQ_M,
178 UNIT_REQ_DL,
179 UNIT_REQ_DS,
180 UNIT_REQ_LS,
181 UNIT_REQ_DLS,
182 UNIT_REQ_T,
183 UNIT_REQ_X,
184 UNIT_REQ_MAX
187 /* A table used to count unit requirements. Used when computing minimum
188 iteration intervals. */
189 typedef int unit_req_table[2][UNIT_REQ_MAX];
190 static unit_req_table unit_reqs;
192 /* Register map for debugging. */
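/* Each entry gives the debug register number for the corresponding hard
   register; an entry of -1 means no debug register number is assigned.  */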
193 unsigned const dbx_register_map[FIRST_PSEUDO_REGISTER] =
195 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* A0 - A15. */
196 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, /* A16 - A31. */
197 50, 51, 52,
198 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, /* B0 - B15. */
199 29, 30, 31,
200 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, /* B16 - B31. */
201 66, 67, 68,
202 -1, -1, -1 /* FP, ARGP, ILC. */
205 /* Allocate a new, cleared machine_function structure. */
207 static struct machine_function *
208 c6x_init_machine_status (void)
210 return ggc_cleared_alloc<machine_function> ();
213 /* Implement TARGET_OPTION_OVERRIDE. */
215 static void
216 c6x_option_override (void)
218 unsigned i;
220 if (global_options_set.x_c6x_arch_option)
222 c6x_arch = all_isas[c6x_arch_option].type;
223 c6x_insn_mask &= ~C6X_INSNS_ALL_CPU_BITS;
224 c6x_insn_mask |= all_isas[c6x_arch_option].features;
227 c6x_flag_schedule_insns2 = flag_schedule_insns_after_reload;
228 flag_schedule_insns_after_reload = 0;
230 c6x_flag_modulo_sched = flag_modulo_sched;
231 flag_modulo_sched = 0;
233 init_machine_status = c6x_init_machine_status;
235 for (i = 0; i < ARRAY_SIZE (c6x_unit_names); i++)
236 c6x_unit_codes[i] = get_cpu_unit_code (c6x_unit_names[i]);
238 if (flag_pic && !TARGET_DSBT)
240 error ("-fpic and -fPIC not supported without -mdsbt on this target");
241 flag_pic = 0;
243 c6x_initial_flag_pic = flag_pic;
244 if (TARGET_DSBT && !flag_pic)
245 flag_pic = 1;
249 /* Implement the TARGET_CONDITIONAL_REGISTER_USAGE hook. */
251 static void
252 c6x_conditional_register_usage (void)
254 int i;
255 if (c6x_arch == C6X_CPU_C62X || c6x_arch == C6X_CPU_C67X)
256 for (i = 16; i < 32; i++)
258 fixed_regs[i] = 1;
259 fixed_regs[32 + i] = 1;
261 if (TARGET_INSNS_64)
263 SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_A_REGS],
264 REG_A0);
265 SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_REGS],
266 REG_A0);
267 CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_A_REGS],
268 REG_A0);
269 CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_REGS],
270 REG_A0);
274 static GTY(()) rtx eqdf_libfunc;
275 static GTY(()) rtx nedf_libfunc;
276 static GTY(()) rtx ledf_libfunc;
277 static GTY(()) rtx ltdf_libfunc;
278 static GTY(()) rtx gedf_libfunc;
279 static GTY(()) rtx gtdf_libfunc;
280 static GTY(()) rtx eqsf_libfunc;
281 static GTY(()) rtx nesf_libfunc;
282 static GTY(()) rtx lesf_libfunc;
283 static GTY(()) rtx ltsf_libfunc;
284 static GTY(()) rtx gesf_libfunc;
285 static GTY(()) rtx gtsf_libfunc;
286 static GTY(()) rtx strasgi_libfunc;
287 static GTY(()) rtx strasgi64p_libfunc;
289 /* Implement the TARGET_INIT_LIBFUNCS hook.  We use this to rename library
290 functions to match the C6x ABI. */
292 static void
293 c6x_init_libfuncs (void)
295 /* Double-precision floating-point arithmetic. */
296 set_optab_libfunc (add_optab, DFmode, "__c6xabi_addd");
297 set_optab_libfunc (sdiv_optab, DFmode, "__c6xabi_divd");
298 set_optab_libfunc (smul_optab, DFmode, "__c6xabi_mpyd");
299 set_optab_libfunc (neg_optab, DFmode, "__c6xabi_negd");
300 set_optab_libfunc (sub_optab, DFmode, "__c6xabi_subd");
302 /* Single-precision floating-point arithmetic. */
303 set_optab_libfunc (add_optab, SFmode, "__c6xabi_addf");
304 set_optab_libfunc (sdiv_optab, SFmode, "__c6xabi_divf");
305 set_optab_libfunc (smul_optab, SFmode, "__c6xabi_mpyf");
306 set_optab_libfunc (neg_optab, SFmode, "__c6xabi_negf");
307 set_optab_libfunc (sub_optab, SFmode, "__c6xabi_subf");
309 /* Floating-point comparisons. */
310 eqsf_libfunc = init_one_libfunc ("__c6xabi_eqf");
311 nesf_libfunc = init_one_libfunc ("__c6xabi_neqf");
312 lesf_libfunc = init_one_libfunc ("__c6xabi_lef");
313 ltsf_libfunc = init_one_libfunc ("__c6xabi_ltf");
314 gesf_libfunc = init_one_libfunc ("__c6xabi_gef");
315 gtsf_libfunc = init_one_libfunc ("__c6xabi_gtf");
316 eqdf_libfunc = init_one_libfunc ("__c6xabi_eqd");
317 nedf_libfunc = init_one_libfunc ("__c6xabi_neqd");
318 ledf_libfunc = init_one_libfunc ("__c6xabi_led");
319 ltdf_libfunc = init_one_libfunc ("__c6xabi_ltd");
320 gedf_libfunc = init_one_libfunc ("__c6xabi_ged");
321 gtdf_libfunc = init_one_libfunc ("__c6xabi_gtd");
323 set_optab_libfunc (eq_optab, SFmode, NULL);
324 set_optab_libfunc (ne_optab, SFmode, "__c6xabi_neqf");
325 set_optab_libfunc (gt_optab, SFmode, NULL);
326 set_optab_libfunc (ge_optab, SFmode, NULL);
327 set_optab_libfunc (lt_optab, SFmode, NULL);
328 set_optab_libfunc (le_optab, SFmode, NULL);
329 set_optab_libfunc (unord_optab, SFmode, "__c6xabi_unordf");
330 set_optab_libfunc (eq_optab, DFmode, NULL);
331 set_optab_libfunc (ne_optab, DFmode, "__c6xabi_neqd");
332 set_optab_libfunc (gt_optab, DFmode, NULL);
333 set_optab_libfunc (ge_optab, DFmode, NULL);
334 set_optab_libfunc (lt_optab, DFmode, NULL);
335 set_optab_libfunc (le_optab, DFmode, NULL);
336 set_optab_libfunc (unord_optab, DFmode, "__c6xabi_unordd");
338 /* Floating-point to integer conversions. */
339 set_conv_libfunc (sfix_optab, SImode, DFmode, "__c6xabi_fixdi");
340 set_conv_libfunc (ufix_optab, SImode, DFmode, "__c6xabi_fixdu");
341 set_conv_libfunc (sfix_optab, DImode, DFmode, "__c6xabi_fixdlli");
342 set_conv_libfunc (ufix_optab, DImode, DFmode, "__c6xabi_fixdull");
343 set_conv_libfunc (sfix_optab, SImode, SFmode, "__c6xabi_fixfi");
344 set_conv_libfunc (ufix_optab, SImode, SFmode, "__c6xabi_fixfu");
345 set_conv_libfunc (sfix_optab, DImode, SFmode, "__c6xabi_fixflli");
346 set_conv_libfunc (ufix_optab, DImode, SFmode, "__c6xabi_fixfull");
348 /* Conversions between floating types. */
349 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__c6xabi_cvtdf");
350 set_conv_libfunc (sext_optab, DFmode, SFmode, "__c6xabi_cvtfd");
352 /* Integer to floating-point conversions. */
353 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__c6xabi_fltid");
354 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__c6xabi_fltud");
355 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__c6xabi_fltllid");
356 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__c6xabi_fltulld");
357 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__c6xabi_fltif");
358 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__c6xabi_fltuf");
359 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__c6xabi_fltllif");
360 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__c6xabi_fltullf");
362 /* Long long. */
363 set_optab_libfunc (smul_optab, DImode, "__c6xabi_mpyll");
364 set_optab_libfunc (ashl_optab, DImode, "__c6xabi_llshl");
365 set_optab_libfunc (lshr_optab, DImode, "__c6xabi_llshru");
366 set_optab_libfunc (ashr_optab, DImode, "__c6xabi_llshr");
368 set_optab_libfunc (sdiv_optab, SImode, "__c6xabi_divi");
369 set_optab_libfunc (udiv_optab, SImode, "__c6xabi_divu");
370 set_optab_libfunc (smod_optab, SImode, "__c6xabi_remi");
371 set_optab_libfunc (umod_optab, SImode, "__c6xabi_remu");
372 set_optab_libfunc (sdivmod_optab, SImode, "__c6xabi_divremi");
373 set_optab_libfunc (udivmod_optab, SImode, "__c6xabi_divremu");
374 set_optab_libfunc (sdiv_optab, DImode, "__c6xabi_divlli");
375 set_optab_libfunc (udiv_optab, DImode, "__c6xabi_divull");
376 set_optab_libfunc (smod_optab, DImode, "__c6xabi_remlli");
377 set_optab_libfunc (umod_optab, DImode, "__c6xabi_remull");
378 set_optab_libfunc (udivmod_optab, DImode, "__c6xabi_divremull");
380 /* Block move. */
381 strasgi_libfunc = init_one_libfunc ("__c6xabi_strasgi");
382 strasgi64p_libfunc = init_one_libfunc ("__c6xabi_strasgi_64plus");
385 /* Begin the assembly file. */
387 static void
388 c6x_file_start (void)
390 /* Variable tracking should be run after all optimizations which change order
391 of insns. It also needs a valid CFG. This can't be done in
392 c6x_option_override, because flag_var_tracking is finalized after
393 that. */
394 c6x_flag_var_tracking = flag_var_tracking;
395 flag_var_tracking = 0;
397 done_cfi_sections = false;
398 default_file_start ();
400 /* Arrays are aligned to 8-byte boundaries. */
401 asm_fprintf (asm_out_file,
402 "\t.c6xabi_attribute Tag_ABI_array_object_alignment, 0\n");
403 asm_fprintf (asm_out_file,
404 "\t.c6xabi_attribute Tag_ABI_array_object_align_expected, 0\n");
406 /* Stack alignment is 8 bytes. */
407 asm_fprintf (asm_out_file,
408 "\t.c6xabi_attribute Tag_ABI_stack_align_needed, 0\n");
409 asm_fprintf (asm_out_file,
410 "\t.c6xabi_attribute Tag_ABI_stack_align_preserved, 0\n");
412 #if 0 /* FIXME: Reenable when TI's tools are fixed. */
413 /* ??? Ideally we'd check flag_short_wchar somehow. */
414 asm_fprintf (asm_out_file, "\t.c6xabi_attribute Tag_ABI_wchar_t, %d\n", 2);
415 #endif
417 /* We conform to version 1.0 of the ABI. */
418 asm_fprintf (asm_out_file,
419 "\t.c6xabi_attribute Tag_ABI_conformance, \"1.0\"\n");
423 /* The LTO frontend only enables exceptions when it sees a function that
424 uses it. This changes the return value of dwarf2out_do_frame, so we
425 have to check before every function. */
427 void
428 c6x_output_file_unwind (FILE * f)
430 if (done_cfi_sections)
431 return;
433 /* Output a .cfi_sections directive. */
434 if (dwarf2out_do_frame ())
436 if (flag_unwind_tables || flag_exceptions)
438 if (write_symbols == DWARF2_DEBUG
439 || write_symbols == VMS_AND_DWARF2_DEBUG)
440 asm_fprintf (f, "\t.cfi_sections .debug_frame, .c6xabi.exidx\n");
441 else
442 asm_fprintf (f, "\t.cfi_sections .c6xabi.exidx\n");
444 else
445 asm_fprintf (f, "\t.cfi_sections .debug_frame\n");
446 done_cfi_sections = true;
450 /* Output unwind directives at the end of a function. */
452 static void
453 c6x_output_fn_unwind (FILE * f)
455 /* Return immediately if we are not generating unwinding tables. */
456 if (! (flag_unwind_tables || flag_exceptions))
457 return;
459 /* If this function will never be unwound, then mark it as such. */
460 if (!(flag_unwind_tables || crtl->uses_eh_lsda)
461 && (TREE_NOTHROW (current_function_decl)
462 || crtl->all_throwers_are_sibcalls))
463 fputs("\t.cantunwind\n", f);
465 fputs ("\t.endp\n", f);
469 /* Stack and Calling. */
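/* The registers used to pass the first ten scalar arguments, alternating
   between the A and B register files as specified by the C6000 ABI.  */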
471 int argument_registers[10] =
473 REG_A4, REG_B4,
474 REG_A6, REG_B6,
475 REG_A8, REG_B8,
476 REG_A10, REG_B10,
477 REG_A12, REG_B12
480 /* Implements the macro INIT_CUMULATIVE_ARGS defined in c6x.h. */
482 void
483 c6x_init_cumulative_args (CUMULATIVE_ARGS *cum, const_tree fntype, rtx libname,
484 int n_named_args ATTRIBUTE_UNUSED)
486 cum->count = 0;
487 cum->nregs = 10;
488 if (!libname && fntype)
490 /* We need to find out the number of named arguments. Unfortunately,
491 for incoming arguments, N_NAMED_ARGS is set to -1. */
492 if (stdarg_p (fntype))
493 cum->nregs = type_num_arguments (fntype) - 1;
494 if (cum->nregs > 10)
495 cum->nregs = 10;
499 /* Implement the TARGET_FUNCTION_ARG hook.  */
501 static rtx
502 c6x_function_arg (cumulative_args_t cum_v, machine_mode mode,
503 const_tree type, bool named ATTRIBUTE_UNUSED)
505 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
506 if (cum->count >= cum->nregs)
507 return NULL_RTX;
508 if (type)
510 HOST_WIDE_INT size = int_size_in_bytes (type);
511 if (TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (type))
513 if (size > 4)
515 rtx reg1 = gen_rtx_REG (SImode, argument_registers[cum->count] + 1);
516 rtx reg2 = gen_rtx_REG (SImode, argument_registers[cum->count]);
517 rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
518 gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
519 return gen_rtx_PARALLEL (mode, vec);
523 return gen_rtx_REG (mode, argument_registers[cum->count]);
526 static void
527 c6x_function_arg_advance (cumulative_args_t cum_v,
528 machine_mode mode ATTRIBUTE_UNUSED,
529 const_tree type ATTRIBUTE_UNUSED,
530 bool named ATTRIBUTE_UNUSED)
532 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
533 cum->count++;
537 /* Return true if BLOCK_REG_PADDING (MODE, TYPE, FIRST) should return
538 upward rather than downward. */
540 bool
541 c6x_block_reg_pad_upward (machine_mode mode ATTRIBUTE_UNUSED,
542 const_tree type, bool first)
544 HOST_WIDE_INT size;
546 if (!TARGET_BIG_ENDIAN)
547 return true;
548 if (!first)
549 return true;
550 if (!type)
551 return true;
552 size = int_size_in_bytes (type);
553 return size == 3;
556 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. */
558 static unsigned int
559 c6x_function_arg_boundary (machine_mode mode, const_tree type)
561 unsigned int boundary = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);
563 if (boundary > BITS_PER_WORD)
564 return 2 * BITS_PER_WORD;
566 if (mode == BLKmode)
568 HOST_WIDE_INT size = int_size_in_bytes (type);
569 if (size > 4)
570 return 2 * BITS_PER_WORD;
571 if (boundary < BITS_PER_WORD)
573 if (size >= 3)
574 return BITS_PER_WORD;
575 if (size >= 2)
576 return 2 * BITS_PER_UNIT;
579 return boundary;
582 /* Implement TARGET_FUNCTION_ARG_ROUND_BOUNDARY. */
583 static unsigned int
584 c6x_function_arg_round_boundary (machine_mode mode, const_tree type)
586 return c6x_function_arg_boundary (mode, type);
589 /* TARGET_FUNCTION_VALUE implementation. Returns an RTX representing the place
590 where function FUNC returns or receives a value of data type TYPE. */
592 static rtx
593 c6x_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED,
594 bool outgoing ATTRIBUTE_UNUSED)
596 /* Functions return values in register A4. When returning aggregates, we may
597 have to adjust for endianness. */
598 if (TARGET_BIG_ENDIAN && type && AGGREGATE_TYPE_P (type))
600 HOST_WIDE_INT size = int_size_in_bytes (type);
601 if (size > 4)
604 rtx reg1 = gen_rtx_REG (SImode, REG_A4 + 1);
605 rtx reg2 = gen_rtx_REG (SImode, REG_A4);
606 rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
607 gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
608 return gen_rtx_PARALLEL (TYPE_MODE (type), vec);
611 return gen_rtx_REG (TYPE_MODE (type), REG_A4);
614 /* Implement TARGET_LIBCALL_VALUE. */
616 static rtx
617 c6x_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
619 return gen_rtx_REG (mode, REG_A4);
622 /* TARGET_STRUCT_VALUE_RTX implementation. */
624 static rtx
625 c6x_struct_value_rtx (tree type ATTRIBUTE_UNUSED, int incoming ATTRIBUTE_UNUSED)
627 return gen_rtx_REG (Pmode, REG_A3);
630 /* Implement TARGET_FUNCTION_VALUE_REGNO_P. */
632 static bool
633 c6x_function_value_regno_p (const unsigned int regno)
635 return regno == REG_A4;
638 /* Types larger than 64 bit, and variable sized types, are passed by
639 reference. The callee must copy them; see c6x_callee_copies. */
641 static bool
642 c6x_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
643 machine_mode mode, const_tree type,
644 bool named ATTRIBUTE_UNUSED)
646 int size = -1;
647 if (type)
648 size = int_size_in_bytes (type);
649 else if (mode != VOIDmode)
650 size = GET_MODE_SIZE (mode);
651 return size > 2 * UNITS_PER_WORD || size == -1;
654 /* Decide whether a type should be returned in memory (true)
655 or in a register (false).  This implements the
656 TARGET_RETURN_IN_MEMORY hook.  */
658 static bool
659 c6x_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
661 int size = int_size_in_bytes (type);
662 return size > 2 * UNITS_PER_WORD || size == -1;
665 /* Values which must be returned in the most-significant end of the return
666 register. */
668 static bool
669 c6x_return_in_msb (const_tree valtype)
671 HOST_WIDE_INT size = int_size_in_bytes (valtype);
672 return TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (valtype) && size == 3;
675 /* Implement TARGET_CALLEE_COPIES. */
677 static bool
678 c6x_callee_copies (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
679 machine_mode mode ATTRIBUTE_UNUSED,
680 const_tree type ATTRIBUTE_UNUSED,
681 bool named ATTRIBUTE_UNUSED)
683 return true;
686 /* Return the type to use as __builtin_va_list. */
687 static tree
688 c6x_build_builtin_va_list (void)
690 return build_pointer_type (char_type_node);
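/* Output the fixed part of a trampoline: a parallel mvkl/mvkh pair that
   builds the target function address in B0 and the static chain value in
   A2, followed by a branch through B0.  The 16-bit immediate fields are
   left as zero here and are patched at run time by
   c6x_initialize_trampoline below.  */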
693 static void
694 c6x_asm_trampoline_template (FILE *f)
696 fprintf (f, "\t.long\t0x0000002b\n"); /* mvkl .s2 fnlow,B0 */
697 fprintf (f, "\t.long\t0x01000028\n"); /* || mvkl .s1 sclow,A2 */
698 fprintf (f, "\t.long\t0x0000006b\n"); /* mvkh .s2 fnhigh,B0 */
699 fprintf (f, "\t.long\t0x01000068\n"); /* || mvkh .s1 schigh,A2 */
700 fprintf (f, "\t.long\t0x00000362\n"); /* b .s2 B0 */
701 fprintf (f, "\t.long\t0x00008000\n"); /* nop 5 */
702 fprintf (f, "\t.long\t0x00000000\n"); /* nop */
703 fprintf (f, "\t.long\t0x00000000\n"); /* nop */
706 /* Emit RTL insns to initialize the variable parts of a trampoline at
707 TRAMP. FNADDR is an RTX for the address of the function's pure
708 code. CXT is an RTX for the static chain value for the function. */
710 static void
711 c6x_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
713 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
714 rtx t1 = copy_to_reg (fnaddr);
715 rtx t2 = copy_to_reg (cxt);
716 rtx mask = gen_reg_rtx (SImode);
717 int i;
719 emit_block_move (tramp, assemble_trampoline_template (),
720 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
722 emit_move_insn (mask, GEN_INT (0xffff << 7));
724 for (i = 0; i < 4; i++)
726 rtx mem = adjust_address (tramp, SImode, i * 4);
727 rtx t = (i & 1) ? t2 : t1;
728 rtx v1 = gen_reg_rtx (SImode);
729 rtx v2 = gen_reg_rtx (SImode);
730 emit_move_insn (v1, mem);
731 if (i < 2)
732 emit_insn (gen_ashlsi3 (v2, t, GEN_INT (7)));
733 else
734 emit_insn (gen_lshrsi3 (v2, t, GEN_INT (9)));
735 emit_insn (gen_andsi3 (v2, v2, mask));
736 emit_insn (gen_iorsi3 (v2, v2, v1));
737 emit_move_insn (mem, v2);
739 #ifdef CLEAR_INSN_CACHE
740 tramp = XEXP (tramp, 0);
741 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__gnu_clear_cache"),
742 LCT_NORMAL, VOIDmode, tramp, Pmode,
743 plus_constant (Pmode, tramp, TRAMPOLINE_SIZE), Pmode);
744 #endif
747 /* Determine whether c6x_output_mi_thunk can succeed. */
749 static bool
750 c6x_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
751 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
752 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
753 const_tree function ATTRIBUTE_UNUSED)
755 return !TARGET_LONG_CALLS;
758 /* Output the assembler code for a thunk function. THUNK is the
759 declaration for the thunk function itself, FUNCTION is the decl for
760 the target function. DELTA is an immediate constant offset to be
761 added to THIS. If VCALL_OFFSET is nonzero, the word at
762 *(*this + vcall_offset) should be added to THIS. */
764 static void
765 c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
766 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
767 HOST_WIDE_INT vcall_offset, tree function)
769 rtx xops[5];
770 /* The this parameter is passed as the first argument. */
771 rtx this_rtx = gen_rtx_REG (Pmode, REG_A4);
773 c6x_current_insn = NULL;
775 xops[4] = XEXP (DECL_RTL (function), 0);
776 if (!vcall_offset)
778 output_asm_insn ("b .s2 \t%4", xops);
779 if (!delta)
780 output_asm_insn ("nop 5", xops);
783 /* Adjust the this parameter by a fixed constant. */
784 if (delta)
786 xops[0] = GEN_INT (delta);
787 xops[1] = this_rtx;
788 if (delta >= -16 && delta <= 15)
790 output_asm_insn ("add .s1 %0, %1, %1", xops);
791 if (!vcall_offset)
792 output_asm_insn ("nop 4", xops);
794 else if (delta >= 16 && delta < 32)
796 output_asm_insn ("add .d1 %0, %1, %1", xops);
797 if (!vcall_offset)
798 output_asm_insn ("nop 4", xops);
800 else if (delta >= -32768 && delta < 32768)
802 output_asm_insn ("mvk .s1 %0, A0", xops);
803 output_asm_insn ("add .d1 %1, A0, %1", xops);
804 if (!vcall_offset)
805 output_asm_insn ("nop 3", xops);
807 else
809 output_asm_insn ("mvkl .s1 %0, A0", xops);
810 output_asm_insn ("mvkh .s1 %0, A0", xops);
811 output_asm_insn ("add .d1 %1, A0, %1", xops);
812 if (!vcall_offset)
813 output_asm_insn ("nop 3", xops);
817 /* Adjust the this parameter by a value stored in the vtable. */
818 if (vcall_offset)
820 rtx a0tmp = gen_rtx_REG (Pmode, REG_A0);
821 rtx a3tmp = gen_rtx_REG (Pmode, REG_A3);
823 xops[1] = a3tmp;
824 xops[2] = a0tmp;
825 xops[3] = gen_rtx_MEM (Pmode, a0tmp);
826 output_asm_insn ("mv .s1 a4, %2", xops);
827 output_asm_insn ("ldw .d1t1 %3, %2", xops);
829 /* Adjust the this parameter. */
830 xops[0] = gen_rtx_MEM (Pmode, plus_constant (Pmode, a0tmp,
831 vcall_offset));
832 if (!memory_operand (xops[0], Pmode))
834 rtx tmp2 = gen_rtx_REG (Pmode, REG_A1);
835 xops[0] = GEN_INT (vcall_offset);
836 xops[1] = tmp2;
837 output_asm_insn ("mvkl .s1 %0, %1", xops);
838 output_asm_insn ("mvkh .s1 %0, %1", xops);
839 output_asm_insn ("nop 2", xops);
840 output_asm_insn ("add .d1 %2, %1, %2", xops);
841 xops[0] = gen_rtx_MEM (Pmode, a0tmp);
843 else
844 output_asm_insn ("nop 4", xops);
845 xops[2] = this_rtx;
846 output_asm_insn ("ldw .d1t1 %0, %1", xops);
847 output_asm_insn ("|| b .s2 \t%4", xops);
848 output_asm_insn ("nop 4", xops);
849 output_asm_insn ("add .d1 %2, %1, %2", xops);
853 /* Return true if EXP goes in small data/bss. */
855 static bool
856 c6x_in_small_data_p (const_tree exp)
858 /* We want to merge strings, so we never consider them small data. */
859 if (TREE_CODE (exp) == STRING_CST)
860 return false;
862 /* Functions are never small data. */
863 if (TREE_CODE (exp) == FUNCTION_DECL)
864 return false;
866 if (TREE_CODE (exp) == VAR_DECL && DECL_WEAK (exp))
867 return false;
869 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
871 const char *section = DECL_SECTION_NAME (exp);
873 if (strcmp (section, ".neardata") == 0
874 || strncmp (section, ".neardata.", 10) == 0
875 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
876 || strcmp (section, ".bss") == 0
877 || strncmp (section, ".bss.", 5) == 0
878 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0
879 || strcmp (section, ".rodata") == 0
880 || strncmp (section, ".rodata.", 8) == 0
881 || strncmp (section, ".gnu.linkonce.s2.", 17) == 0)
882 return true;
884 else
885 return PLACE_IN_SDATA_P (exp);
887 return false;
890 /* Return a section for X. The only special thing we do here is to
891 honor small data. We don't have a tree type, so we can't use the
892 PLACE_IN_SDATA_P macro we use everywhere else; we choose to place
893 everything sized 8 bytes or smaller into small data. */
895 static section *
896 c6x_select_rtx_section (machine_mode mode, rtx x,
897 unsigned HOST_WIDE_INT align)
899 if (c6x_sdata_mode == C6X_SDATA_ALL
900 || (c6x_sdata_mode != C6X_SDATA_NONE && GET_MODE_SIZE (mode) <= 8))
901 /* ??? Consider using mergeable sdata sections. */
902 return sdata_section;
903 else
904 return default_elf_select_rtx_section (mode, x, align);
907 static section *
908 c6x_elf_select_section (tree decl, int reloc,
909 unsigned HOST_WIDE_INT align)
911 const char *sname = NULL;
912 unsigned int flags = SECTION_WRITE;
913 if (c6x_in_small_data_p (decl))
915 switch (categorize_decl_for_section (decl, reloc))
917 case SECCAT_SRODATA:
918 sname = ".rodata";
919 flags = 0;
920 break;
921 case SECCAT_SDATA:
922 sname = ".neardata";
923 break;
924 case SECCAT_SBSS:
925 sname = ".bss";
926 flags |= SECTION_BSS;
927 default:
928 break;
931 else
933 switch (categorize_decl_for_section (decl, reloc))
935 case SECCAT_DATA:
936 sname = ".fardata";
937 break;
938 case SECCAT_DATA_REL:
939 sname = ".fardata.rel";
940 break;
941 case SECCAT_DATA_REL_LOCAL:
942 sname = ".fardata.rel.local";
943 break;
944 case SECCAT_DATA_REL_RO:
945 sname = ".fardata.rel.ro";
946 break;
947 case SECCAT_DATA_REL_RO_LOCAL:
948 sname = ".fardata.rel.ro.local";
949 break;
950 case SECCAT_BSS:
951 sname = ".far";
952 flags |= SECTION_BSS;
953 break;
954 case SECCAT_RODATA:
955 sname = ".const";
956 flags = 0;
957 break;
958 case SECCAT_SRODATA:
959 case SECCAT_SDATA:
960 case SECCAT_SBSS:
961 gcc_unreachable ();
962 default:
963 break;
966 if (sname)
968 /* We might get called with string constants, but get_named_section
969 doesn't like them as they are not DECLs. Also, we need to set
970 flags in that case. */
971 if (!DECL_P (decl))
972 return get_section (sname, flags, NULL);
973 return get_named_section (decl, sname, reloc);
976 return default_elf_select_section (decl, reloc, align);
979 /* Build up a unique section name, expressed as a
980 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
981 RELOC indicates whether the initial value of EXP requires
982 link-time relocations. */
984 static void ATTRIBUTE_UNUSED
985 c6x_elf_unique_section (tree decl, int reloc)
987 const char *prefix = NULL;
988 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
989 bool one_only = DECL_COMDAT_GROUP (decl) && !HAVE_COMDAT_GROUP;
991 if (c6x_in_small_data_p (decl))
993 switch (categorize_decl_for_section (decl, reloc))
995 case SECCAT_SDATA:
996 prefix = one_only ? ".s" : ".neardata";
997 break;
998 case SECCAT_SBSS:
999 prefix = one_only ? ".sb" : ".bss";
1000 break;
1001 case SECCAT_SRODATA:
1002 prefix = one_only ? ".s2" : ".rodata";
1003 break;
1004 case SECCAT_RODATA_MERGE_STR:
1005 case SECCAT_RODATA_MERGE_STR_INIT:
1006 case SECCAT_RODATA_MERGE_CONST:
1007 case SECCAT_RODATA:
1008 case SECCAT_DATA:
1009 case SECCAT_DATA_REL:
1010 case SECCAT_DATA_REL_LOCAL:
1011 case SECCAT_DATA_REL_RO:
1012 case SECCAT_DATA_REL_RO_LOCAL:
1013 gcc_unreachable ();
1014 default:
1015 /* Everything else we place into default sections and hope for the
1016 best. */
1017 break;
1020 else
1022 switch (categorize_decl_for_section (decl, reloc))
1024 case SECCAT_DATA:
1025 case SECCAT_DATA_REL:
1026 case SECCAT_DATA_REL_LOCAL:
1027 case SECCAT_DATA_REL_RO:
1028 case SECCAT_DATA_REL_RO_LOCAL:
1029 prefix = one_only ? ".fd" : ".fardata";
1030 break;
1031 case SECCAT_BSS:
1032 prefix = one_only ? ".fb" : ".far";
1033 break;
1034 case SECCAT_RODATA:
1035 case SECCAT_RODATA_MERGE_STR:
1036 case SECCAT_RODATA_MERGE_STR_INIT:
1037 case SECCAT_RODATA_MERGE_CONST:
1038 prefix = one_only ? ".fr" : ".const";
1039 break;
1040 case SECCAT_SRODATA:
1041 case SECCAT_SDATA:
1042 case SECCAT_SBSS:
1043 gcc_unreachable ();
1044 default:
1045 break;
1049 if (prefix)
1051 const char *name, *linkonce;
1052 char *string;
1054 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1055 name = targetm.strip_name_encoding (name);
1057 /* If we're using one_only, then there needs to be a .gnu.linkonce
1058 prefix to the section name. */
1059 linkonce = one_only ? ".gnu.linkonce" : "";
1061 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
1063 set_decl_section_name (decl, string);
1064 return;
1066 default_unique_section (decl, reloc);
1069 static unsigned int
1070 c6x_section_type_flags (tree decl, const char *name, int reloc)
1072 unsigned int flags = 0;
1074 if (strcmp (name, ".far") == 0
1075 || strncmp (name, ".far.", 5) == 0)
1076 flags |= SECTION_BSS;
1078 flags |= default_section_type_flags (decl, name, reloc);
1080 return flags;
1083 /* Checks whether the given CALL_EXPR would use a caller saved
1084 register. This is used to decide whether sibling call optimization
1085 could be performed on the respective function call. */
1087 static bool
1088 c6x_call_saved_register_used (tree call_expr)
1090 CUMULATIVE_ARGS cum_v;
1091 cumulative_args_t cum;
1092 HARD_REG_SET call_saved_regset;
1093 tree parameter;
1094 machine_mode mode;
1095 tree type;
1096 rtx parm_rtx;
1097 int i;
1099 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
1100 cum = pack_cumulative_args (&cum_v);
1102 COMPL_HARD_REG_SET (call_saved_regset, call_used_reg_set);
1103 for (i = 0; i < call_expr_nargs (call_expr); i++)
1105 parameter = CALL_EXPR_ARG (call_expr, i);
1106 gcc_assert (parameter);
1108 /* For an undeclared variable passed as parameter we will get
1109 an ERROR_MARK node here. */
1110 if (TREE_CODE (parameter) == ERROR_MARK)
1111 return true;
1113 type = TREE_TYPE (parameter);
1114 gcc_assert (type);
1116 mode = TYPE_MODE (type);
1117 gcc_assert (mode);
1119 if (pass_by_reference (&cum_v, mode, type, true))
1121 mode = Pmode;
1122 type = build_pointer_type (type);
1125 parm_rtx = c6x_function_arg (cum, mode, type, 0);
1127 c6x_function_arg_advance (cum, mode, type, 0);
1129 if (!parm_rtx)
1130 continue;
1132 if (REG_P (parm_rtx)
1133 && overlaps_hard_reg_set_p (call_saved_regset, GET_MODE (parm_rtx),
1134 REGNO (parm_rtx)))
1135 return true;
1136 if (GET_CODE (parm_rtx) == PARALLEL)
1138 int n = XVECLEN (parm_rtx, 0);
1139 while (n-- > 0)
1141 rtx x = XEXP (XVECEXP (parm_rtx, 0, n), 0);
1142 if (REG_P (x)
1143 && overlaps_hard_reg_set_p (call_saved_regset,
1144 GET_MODE (x), REGNO (x)))
1145 return true;
1149 return false;
1152 /* Decide whether we can make a sibling call to a function. DECL is the
1153 declaration of the function being targeted by the call and EXP is the
1154 CALL_EXPR representing the call. */
1156 static bool
1157 c6x_function_ok_for_sibcall (tree decl, tree exp)
1159 /* Registers A10, A12, B10 and B12 are available as argument
1160 registers but are unfortunately call-saved.  This makes functions
1161 needing these registers for arguments not suitable for
1162 sibcalls. */
1163 if (c6x_call_saved_register_used (exp))
1164 return false;
1166 if (!flag_pic)
1167 return true;
1169 if (TARGET_DSBT)
1171 /* When compiling for DSBT, the calling function must be local,
1172 so that when we reload B14 in the sibcall epilogue, it will
1173 not change its value. */
1174 struct cgraph_local_info *this_func;
1176 if (!decl)
1177 /* Not enough information. */
1178 return false;
1180 this_func = cgraph_node::local_info (current_function_decl);
1181 return this_func->local;
1184 return true;
1187 /* Return true if DECL is known to be linked into section SECTION. */
1189 static bool
1190 c6x_function_in_section_p (tree decl, section *section)
1192 /* We can only be certain about functions defined in the same
1193 compilation unit. */
1194 if (!TREE_STATIC (decl))
1195 return false;
1197 /* Make sure that SYMBOL always binds to the definition in this
1198 compilation unit. */
1199 if (!targetm.binds_local_p (decl))
1200 return false;
1202 /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
1203 if (!DECL_SECTION_NAME (decl))
1205 /* Make sure that we will not create a unique section for DECL. */
1206 if (flag_function_sections || DECL_COMDAT_GROUP (decl))
1207 return false;
1210 return function_section (decl) == section;
1213 /* Return true if a call to OP, which is a SYMBOL_REF, must be expanded
1214 as a long call. */
1215 bool
1216 c6x_long_call_p (rtx op)
1218 tree decl;
1220 if (!TARGET_LONG_CALLS)
1221 return false;
1223 decl = SYMBOL_REF_DECL (op);
1225 /* Try to determine whether the symbol is in the same section as the current
1226 function. Be conservative, and only cater for cases in which the
1227 whole of the current function is placed in the same section. */
1228 if (decl != NULL_TREE
1229 && !flag_reorder_blocks_and_partition
1230 && TREE_CODE (decl) == FUNCTION_DECL
1231 && c6x_function_in_section_p (decl, current_function_section ()))
1232 return false;
1234 return true;
1237 /* Emit the sequence for a call. */
1238 void
1239 c6x_expand_call (rtx retval, rtx address, bool sibcall)
1241 rtx callee = XEXP (address, 0);
1242 rtx call_insn;
1244 if (!c6x_call_operand (callee, Pmode))
1246 callee = force_reg (Pmode, callee);
1247 address = change_address (address, Pmode, callee);
1249 call_insn = gen_rtx_CALL (VOIDmode, address, const0_rtx);
1250 if (sibcall)
1252 call_insn = emit_call_insn (call_insn);
1253 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
1254 gen_rtx_REG (Pmode, REG_B3));
1256 else
1258 if (retval == NULL_RTX)
1259 call_insn = emit_call_insn (call_insn);
1260 else
1261 call_insn = emit_call_insn (gen_rtx_SET (retval, call_insn));
1263 if (flag_pic)
1264 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
1267 /* Legitimize PIC addresses. If the address is already position-independent,
1268 we return ORIG. Newly generated position-independent addresses go into a
1269 reg. This is REG if nonzero, otherwise we allocate register(s) as
1270 necessary. PICREG is the register holding the pointer to the PIC offset
1271 table. */
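/* For flag_pic == 2 (-fPIC) a full 32-bit GOT offset is materialized with
   an mvkl/mvkh pair before the GOT entry is loaded; for flag_pic == 1
   (-fpic) the GOT entry is loaded directly with a PICREG-relative
   access.  */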
1273 static rtx
1274 legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
1276 rtx addr = orig;
1277 rtx new_rtx = orig;
1279 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
1281 int unspec = UNSPEC_LOAD_GOT;
1282 rtx tmp;
1284 if (reg == 0)
1286 gcc_assert (can_create_pseudo_p ());
1287 reg = gen_reg_rtx (Pmode);
1289 if (flag_pic == 2)
1291 if (can_create_pseudo_p ())
1292 tmp = gen_reg_rtx (Pmode);
1293 else
1294 tmp = reg;
1295 emit_insn (gen_movsi_gotoff_high (tmp, addr));
1296 emit_insn (gen_movsi_gotoff_lo_sum (tmp, tmp, addr));
1297 emit_insn (gen_load_got_gotoff (reg, picreg, tmp));
1299 else
1301 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
1302 new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));
1304 emit_move_insn (reg, new_rtx);
1306 if (picreg == pic_offset_table_rtx)
1307 crtl->uses_pic_offset_table = 1;
1308 return reg;
1311 else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
1313 rtx base;
1315 if (GET_CODE (addr) == CONST)
1317 addr = XEXP (addr, 0);
1318 gcc_assert (GET_CODE (addr) == PLUS);
1321 if (XEXP (addr, 0) == picreg)
1322 return orig;
1324 if (reg == 0)
1326 gcc_assert (can_create_pseudo_p ());
1327 reg = gen_reg_rtx (Pmode);
1330 base = legitimize_pic_address (XEXP (addr, 0), reg, picreg);
1331 addr = legitimize_pic_address (XEXP (addr, 1),
1332 base == reg ? NULL_RTX : reg,
1333 picreg);
1335 if (GET_CODE (addr) == CONST_INT)
1337 gcc_assert (! reload_in_progress && ! reload_completed);
1338 addr = force_reg (Pmode, addr);
1341 if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
1343 base = gen_rtx_PLUS (Pmode, base, XEXP (addr, 0));
1344 addr = XEXP (addr, 1);
1347 return gen_rtx_PLUS (Pmode, base, addr);
1350 return new_rtx;
1353 /* Expand a move operation in mode MODE. The operands are in OPERANDS.
1354 Returns true if no further code must be generated, false if the caller
1355 should generate an insn to move OPERANDS[1] to OPERANDS[0]. */
1357 bool
1358 expand_move (rtx *operands, machine_mode mode)
1360 rtx dest = operands[0];
1361 rtx op = operands[1];
1363 if ((reload_in_progress | reload_completed) == 0
1364 && GET_CODE (dest) == MEM && GET_CODE (op) != REG)
1365 operands[1] = force_reg (mode, op);
1366 else if (mode == SImode && symbolic_operand (op, SImode))
1368 if (flag_pic)
1370 if (sdata_symbolic_operand (op, SImode))
1372 emit_insn (gen_load_sdata_pic (dest, pic_offset_table_rtx, op));
1373 crtl->uses_pic_offset_table = 1;
1374 return true;
1376 else
1378 rtx temp = (reload_completed || reload_in_progress
1379 ? dest : gen_reg_rtx (Pmode));
1381 operands[1] = legitimize_pic_address (op, temp,
1382 pic_offset_table_rtx);
1385 else if (reload_completed
1386 && !sdata_symbolic_operand (op, SImode))
1388 emit_insn (gen_movsi_high (dest, op));
1389 emit_insn (gen_movsi_lo_sum (dest, dest, op));
1390 return true;
1393 return false;
1396 /* This function is called when we're about to expand an integer compare
1397 operation which performs COMPARISON. It examines the second operand,
1398 and if it is an integer constant that cannot be used directly on the
1399 current machine in a comparison insn, it returns true. */
1400 bool
1401 c6x_force_op_for_comparison_p (enum rtx_code code, rtx op)
1403 if (!CONST_INT_P (op) || satisfies_constraint_Iu4 (op))
1404 return false;
1406 if ((code == EQ || code == LT || code == GT)
1407 && !satisfies_constraint_Is5 (op))
1408 return true;
1409 if ((code == GTU || code == LTU)
1410 && (!TARGET_INSNS_64 || !satisfies_constraint_Iu5 (op)))
1411 return true;
1413 return false;
1416 /* Emit comparison instruction if necessary, returning the expression
1417 that holds the compare result in the proper mode. Return the comparison
1418 that should be used in the jump insn. */
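/* DImode comparisons are split into SImode comparisons of the two word
   halves, and floating-point comparisons without hardware FP support go
   through the __c6xabi_* comparison libcalls registered in
   c6x_init_libfuncs.  */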
1421 c6x_expand_compare (rtx comparison, machine_mode mode)
1423 enum rtx_code code = GET_CODE (comparison);
1424 rtx op0 = XEXP (comparison, 0);
1425 rtx op1 = XEXP (comparison, 1);
1426 rtx cmp;
1427 enum rtx_code jump_code = code;
1428 machine_mode op_mode = GET_MODE (op0);
1430 if (op_mode == DImode && (code == NE || code == EQ) && op1 == const0_rtx)
1432 rtx t = gen_reg_rtx (SImode);
1433 emit_insn (gen_iorsi3 (t, gen_lowpart (SImode, op0),
1434 gen_highpart (SImode, op0)));
1435 op_mode = SImode;
1436 cmp = t;
1438 else if (op_mode == DImode)
1440 rtx lo[2], high[2];
1441 rtx cmp1, cmp2;
1443 if (code == NE || code == GEU || code == LEU || code == GE || code == LE)
1445 code = reverse_condition (code);
1446 jump_code = EQ;
1448 else
1449 jump_code = NE;
1451 split_di (&op0, 1, lo, high);
1452 split_di (&op1, 1, lo + 1, high + 1);
1454 if (c6x_force_op_for_comparison_p (code, high[1])
1455 || c6x_force_op_for_comparison_p (EQ, high[1]))
1456 high[1] = force_reg (SImode, high[1]);
1458 cmp1 = gen_reg_rtx (SImode);
1459 cmp2 = gen_reg_rtx (SImode);
1460 emit_insn (gen_rtx_SET (cmp1, gen_rtx_fmt_ee (code, SImode,
1461 high[0], high[1])));
1462 if (code == EQ)
1464 if (c6x_force_op_for_comparison_p (code, lo[1]))
1465 lo[1] = force_reg (SImode, lo[1]);
1466 emit_insn (gen_rtx_SET (cmp2, gen_rtx_fmt_ee (code, SImode,
1467 lo[0], lo[1])));
1468 emit_insn (gen_andsi3 (cmp1, cmp1, cmp2));
1470 else
1472 emit_insn (gen_rtx_SET (cmp2, gen_rtx_EQ (SImode, high[0],
1473 high[1])));
1474 if (code == GT)
1475 code = GTU;
1476 else if (code == LT)
1477 code = LTU;
1478 if (c6x_force_op_for_comparison_p (code, lo[1]))
1479 lo[1] = force_reg (SImode, lo[1]);
1480 emit_insn (gen_cmpsi_and (cmp2, gen_rtx_fmt_ee (code, SImode,
1481 lo[0], lo[1]),
1482 lo[0], lo[1], cmp2));
1483 emit_insn (gen_iorsi3 (cmp1, cmp1, cmp2));
1485 cmp = cmp1;
1487 else if (TARGET_FP && !flag_finite_math_only
1488 && (op_mode == DFmode || op_mode == SFmode)
1489 && code != EQ && code != NE && code != LT && code != GT
1490 && code != UNLE && code != UNGE)
1492 enum rtx_code code1, code2, code3;
1493 rtx (*fn) (rtx, rtx, rtx, rtx, rtx);
1495 jump_code = NE;
1496 code3 = UNKNOWN;
1497 switch (code)
1499 case UNLT:
1500 case UNGT:
1501 jump_code = EQ;
1502 /* fall through */
1503 case LE:
1504 case GE:
1505 code1 = code == LE || code == UNGT ? LT : GT;
1506 code2 = EQ;
1507 break;
1509 case UNORDERED:
1510 jump_code = EQ;
1511 /* fall through */
1512 case ORDERED:
1513 code3 = EQ;
1514 /* fall through */
1515 case LTGT:
1516 code1 = LT;
1517 code2 = GT;
1518 break;
1520 case UNEQ:
1521 code1 = LT;
1522 code2 = GT;
1523 jump_code = EQ;
1524 break;
1526 default:
1527 gcc_unreachable ();
1530 cmp = gen_reg_rtx (SImode);
1531 emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (code1, SImode, op0, op1)));
1532 fn = op_mode == DFmode ? gen_cmpdf_ior : gen_cmpsf_ior;
1533 emit_insn (fn (cmp, gen_rtx_fmt_ee (code2, SImode, op0, op1),
1534 op0, op1, cmp));
1535 if (code3 != UNKNOWN)
1536 emit_insn (fn (cmp, gen_rtx_fmt_ee (code3, SImode, op0, op1),
1537 op0, op1, cmp));
1539 else if (op_mode == SImode && (code == NE || code == EQ) && op1 == const0_rtx)
1540 cmp = op0;
1541 else
1543 bool is_fp_libfunc;
1544 is_fp_libfunc = !TARGET_FP && (op_mode == DFmode || op_mode == SFmode);
1546 if ((code == NE || code == GEU || code == LEU || code == GE || code == LE)
1547 && !is_fp_libfunc)
1549 code = reverse_condition (code);
1550 jump_code = EQ;
1552 else if (code == UNGE)
1554 code = LT;
1555 jump_code = EQ;
1557 else if (code == UNLE)
1559 code = GT;
1560 jump_code = EQ;
1562 else
1563 jump_code = NE;
1565 if (is_fp_libfunc)
1567 rtx_insn *insns;
1568 rtx libfunc;
1569 switch (code)
1571 case EQ:
1572 libfunc = op_mode == DFmode ? eqdf_libfunc : eqsf_libfunc;
1573 break;
1574 case NE:
1575 libfunc = op_mode == DFmode ? nedf_libfunc : nesf_libfunc;
1576 break;
1577 case GT:
1578 libfunc = op_mode == DFmode ? gtdf_libfunc : gtsf_libfunc;
1579 break;
1580 case GE:
1581 libfunc = op_mode == DFmode ? gedf_libfunc : gesf_libfunc;
1582 break;
1583 case LT:
1584 libfunc = op_mode == DFmode ? ltdf_libfunc : ltsf_libfunc;
1585 break;
1586 case LE:
1587 libfunc = op_mode == DFmode ? ledf_libfunc : lesf_libfunc;
1588 break;
1589 default:
1590 gcc_unreachable ();
1592 start_sequence ();
1594 cmp = emit_library_call_value (libfunc, 0, LCT_CONST, SImode,
1595 op0, op_mode, op1, op_mode);
1596 insns = get_insns ();
1597 end_sequence ();
1599 emit_libcall_block (insns, cmp, cmp,
1600 gen_rtx_fmt_ee (code, SImode, op0, op1));
1602 else
1604 cmp = gen_reg_rtx (SImode);
1605 if (c6x_force_op_for_comparison_p (code, op1))
1606 op1 = force_reg (SImode, op1);
1607 emit_insn (gen_rtx_SET (cmp, gen_rtx_fmt_ee (code, SImode,
1608 op0, op1)));
1612 return gen_rtx_fmt_ee (jump_code, mode, cmp, const0_rtx);
1615 /* Return one word of double-word value OP. HIGH_P is true to select the
1616 high part, false to select the low part. When encountering auto-increment
1617 addressing, we make the assumption that the low part is going to be accessed
1618 first. */
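/* For example, for a DImode register on a little-endian target the high
   part is the subword at byte offset UNITS_PER_WORD (4).  */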
1621 c6x_subword (rtx op, bool high_p)
1623 unsigned int byte;
1624 machine_mode mode;
1626 mode = GET_MODE (op);
1627 if (mode == VOIDmode)
1628 mode = DImode;
1630 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
1631 byte = UNITS_PER_WORD;
1632 else
1633 byte = 0;
1635 if (MEM_P (op))
1637 rtx addr = XEXP (op, 0);
1638 if (GET_CODE (addr) == PLUS || REG_P (addr))
1639 return adjust_address (op, word_mode, byte);
1640 /* FIXME: should really support autoincrement addressing for
1641 multi-word modes. */
1642 gcc_unreachable ();
1645 return simplify_gen_subreg (word_mode, op, mode, byte);
1648 /* Split one or more DImode RTL references into pairs of SImode
1649 references. The RTL can be REG, offsettable MEM, integer constant, or
1650 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
1651 split and "num" is its length. lo_half and hi_half are output arrays
1652 that parallel "operands". */
1654 void
1655 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
1657 while (num--)
1659 rtx op = operands[num];
1661 lo_half[num] = c6x_subword (op, false);
1662 hi_half[num] = c6x_subword (op, true);
1666 /* Return true if VAL is a mask valid for a clr instruction. */
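/* The zero bits must form a single contiguous field; for example,
   0xf00000ff is a valid clr mask, while 0x00ff00ff is not.  */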
1667 bool
1668 c6x_valid_mask_p (HOST_WIDE_INT val)
1670 int i;
1671 for (i = 0; i < 32; i++)
1672 if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
1673 break;
1674 for (; i < 32; i++)
1675 if (val & ((unsigned HOST_WIDE_INT)1 << i))
1676 break;
1677 for (; i < 32; i++)
1678 if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
1679 return false;
1680 return true;
1683 /* Expand a block move for a movmemM pattern. */
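/* Copies of at least 28 bytes that are a multiple of four bytes long and
   word-aligned are handed to the __c6xabi_strasgi library routines;
   anything that would need more than seven inline moves is rejected so
   that the generic expander can take over.  */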
1685 bool
1686 c6x_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
1687 rtx expected_align_exp ATTRIBUTE_UNUSED,
1688 rtx expected_size_exp ATTRIBUTE_UNUSED)
1690 unsigned HOST_WIDE_INT align = 1;
1691 unsigned HOST_WIDE_INT src_mem_align, dst_mem_align, min_mem_align;
1692 unsigned HOST_WIDE_INT count = 0, offset = 0;
1693 unsigned int biggest_move = TARGET_STDW ? 8 : 4;
1695 if (CONST_INT_P (align_exp))
1696 align = INTVAL (align_exp);
1698 src_mem_align = MEM_ALIGN (src) / BITS_PER_UNIT;
1699 dst_mem_align = MEM_ALIGN (dst) / BITS_PER_UNIT;
1700 min_mem_align = MIN (src_mem_align, dst_mem_align);
1702 if (min_mem_align > align)
1703 align = min_mem_align / BITS_PER_UNIT;
1704 if (src_mem_align < align)
1705 src_mem_align = align;
1706 if (dst_mem_align < align)
1707 dst_mem_align = align;
1709 if (CONST_INT_P (count_exp))
1710 count = INTVAL (count_exp);
1711 else
1712 return false;
1714 /* Make sure we don't need to care about overflow later on. */
1715 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
1716 return false;
1718 if (count >= 28 && (count & 3) == 0 && align >= 4)
1720 tree dst_expr = MEM_EXPR (dst);
1721 tree src_expr = MEM_EXPR (src);
1722 rtx fn = TARGET_INSNS_64PLUS ? strasgi64p_libfunc : strasgi_libfunc;
1723 rtx srcreg = force_reg (Pmode, XEXP (src, 0));
1724 rtx dstreg = force_reg (Pmode, XEXP (dst, 0));
1726 if (src_expr)
1727 mark_addressable (src_expr);
1728 if (dst_expr)
1729 mark_addressable (dst_expr);
1730 emit_library_call (fn, LCT_NORMAL, VOIDmode,
1731 dstreg, Pmode, srcreg, Pmode, count_exp, SImode);
1732 return true;
1735 if (biggest_move > align && !TARGET_INSNS_64)
1736 biggest_move = align;
1738 if (count / biggest_move > 7)
1739 return false;
1741 while (count > 0)
1743 rtx reg, reg_lowpart;
1744 machine_mode srcmode, dstmode;
1745 unsigned HOST_WIDE_INT src_size, dst_size, src_left;
1746 int shift;
1747 rtx srcmem, dstmem;
1749 while (biggest_move > count)
1750 biggest_move /= 2;
1752 src_size = dst_size = biggest_move;
1753 if (src_size > src_mem_align && src_size == 2)
1754 src_size = 1;
1755 if (dst_size > dst_mem_align && dst_size == 2)
1756 dst_size = 1;
1758 if (dst_size > src_size)
1759 dst_size = src_size;
1761 srcmode = mode_for_size (src_size * BITS_PER_UNIT, MODE_INT, 0);
1762 dstmode = mode_for_size (dst_size * BITS_PER_UNIT, MODE_INT, 0);
1763 if (src_size >= 4)
1764 reg_lowpart = reg = gen_reg_rtx (srcmode);
1765 else
1767 reg = gen_reg_rtx (SImode);
1768 reg_lowpart = gen_lowpart (srcmode, reg);
1771 srcmem = adjust_address (copy_rtx (src), srcmode, offset);
1773 if (src_size > src_mem_align)
1775 enum insn_code icode = (srcmode == SImode ? CODE_FOR_movmisalignsi
1776 : CODE_FOR_movmisaligndi);
1777 emit_insn (GEN_FCN (icode) (reg_lowpart, srcmem));
1779 else
1780 emit_move_insn (reg_lowpart, srcmem);
1782 src_left = src_size;
1783 shift = TARGET_BIG_ENDIAN ? (src_size - dst_size) * BITS_PER_UNIT : 0;
1784 while (src_left > 0)
1786 rtx dstreg = reg_lowpart;
1788 if (src_size > dst_size)
1790 rtx srcword = reg;
1791 int shift_amount = shift & (BITS_PER_WORD - 1);
1792 if (src_size > 4)
1793 srcword = operand_subword_force (srcword, src_left >= 4 ? 0 : 4,
1794 SImode);
1795 if (shift_amount > 0)
1797 dstreg = gen_reg_rtx (SImode);
1798 emit_insn (gen_lshrsi3 (dstreg, srcword,
1799 GEN_INT (shift_amount)));
1801 else
1802 dstreg = srcword;
1803 dstreg = gen_lowpart (dstmode, dstreg);
1806 dstmem = adjust_address (copy_rtx (dst), dstmode, offset);
1807 if (dst_size > dst_mem_align)
1809 enum insn_code icode = (dstmode == SImode ? CODE_FOR_movmisalignsi
1810 : CODE_FOR_movmisaligndi);
1811 emit_insn (GEN_FCN (icode) (dstmem, dstreg));
1813 else
1814 emit_move_insn (dstmem, dstreg);
1816 if (TARGET_BIG_ENDIAN)
1817 shift -= dst_size * BITS_PER_UNIT;
1818 else
1819 shift += dst_size * BITS_PER_UNIT;
1820 offset += dst_size;
1821 src_left -= dst_size;
1823 count -= src_size;
1825 return true;
1828 /* Subroutine of print_address_operand, print a single address offset OFF for
1829 a memory access of mode MEM_MODE, choosing between normal form and scaled
1830 form depending on the type of the insn. Misaligned memory references must
1831 use the scaled form. */
1833 static void
1834 print_address_offset (FILE *file, rtx off, machine_mode mem_mode)
1836 rtx pat;
1838 if (c6x_current_insn != NULL_RTX)
1840 pat = PATTERN (c6x_current_insn);
1841 if (GET_CODE (pat) == COND_EXEC)
1842 pat = COND_EXEC_CODE (pat);
1843 if (GET_CODE (pat) == PARALLEL)
1844 pat = XVECEXP (pat, 0, 0);
1846 if (GET_CODE (pat) == SET
1847 && GET_CODE (SET_SRC (pat)) == UNSPEC
1848 && XINT (SET_SRC (pat), 1) == UNSPEC_MISALIGNED_ACCESS)
1850 gcc_assert (CONST_INT_P (off)
1851 && (INTVAL (off) & (GET_MODE_SIZE (mem_mode) - 1)) == 0);
1852 fprintf (file, "[" HOST_WIDE_INT_PRINT_DEC "]",
1853 INTVAL (off) / GET_MODE_SIZE (mem_mode));
1854 return;
1857 fputs ("(", file);
1858 output_address (mem_mode, off);
1859 fputs (")", file);
1862 static bool
1863 c6x_print_operand_punct_valid_p (unsigned char c)
1865 return c == '$' || c == '.' || c == '|';
1868 static void c6x_print_operand (FILE *, rtx, int);
1870 /* Subroutine of c6x_print_operand; used to print a memory reference X to FILE. */
1872 static void
1873 c6x_print_address_operand (FILE *file, rtx x, machine_mode mem_mode)
1875 rtx off;
1876 switch (GET_CODE (x))
1878 case PRE_MODIFY:
1879 case POST_MODIFY:
1880 if (GET_CODE (x) == POST_MODIFY)
1881 output_address (mem_mode, XEXP (x, 0));
1882 off = XEXP (XEXP (x, 1), 1);
1883 if (XEXP (x, 0) == stack_pointer_rtx)
1885 if (GET_CODE (x) == PRE_MODIFY)
1886 gcc_assert (INTVAL (off) > 0);
1887 else
1888 gcc_assert (INTVAL (off) < 0);
1890 if (CONST_INT_P (off) && INTVAL (off) < 0)
1892 fprintf (file, "--");
1893 off = GEN_INT (-INTVAL (off));
1895 else
1896 fprintf (file, "++");
1897 if (GET_CODE (x) == PRE_MODIFY)
1898 output_address (mem_mode, XEXP (x, 0));
1899 print_address_offset (file, off, mem_mode);
1900 break;
1902 case PLUS:
1903 off = XEXP (x, 1);
1904 if (CONST_INT_P (off) && INTVAL (off) < 0)
1906 fprintf (file, "-");
1907 off = GEN_INT (-INTVAL (off));
1909 else
1910 fprintf (file, "+");
1911 output_address (mem_mode, XEXP (x, 0));
1912 print_address_offset (file, off, mem_mode);
1913 break;
1915 case PRE_DEC:
1916 gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
1917 fprintf (file, "--");
1918 output_address (mem_mode, XEXP (x, 0));
1919 fprintf (file, "[1]");
1920 break;
1921 case PRE_INC:
1922 fprintf (file, "++");
1923 output_address (mem_mode, XEXP (x, 0));
1924 fprintf (file, "[1]");
1925 break;
1926 case POST_INC:
1927 gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
1928 output_address (mem_mode, XEXP (x, 0));
1929 fprintf (file, "++[1]");
1930 break;
1931 case POST_DEC:
1932 output_address (mem_mode, XEXP (x, 0));
1933 fprintf (file, "--[1]");
1934 break;
1936 case SYMBOL_REF:
1937 case CONST:
1938 case LABEL_REF:
1939 gcc_assert (sdata_symbolic_operand (x, Pmode));
1940 fprintf (file, "+B14(");
1941 output_addr_const (file, x);
1942 fprintf (file, ")");
1943 break;
1945 case UNSPEC:
1946 switch (XINT (x, 1))
1948 case UNSPEC_LOAD_GOT:
1949 fputs ("$GOT(", file);
1950 output_addr_const (file, XVECEXP (x, 0, 0));
1951 fputs (")", file);
1952 break;
1953 case UNSPEC_LOAD_SDATA:
1954 output_addr_const (file, XVECEXP (x, 0, 0));
1955 break;
1956 default:
1957 gcc_unreachable ();
1959 break;
1961 default:
1962 gcc_assert (GET_CODE (x) != MEM);
1963 c6x_print_operand (file, x, 0);
1964 break;
1968 /* Return a single character, which is either 'l', 's', 'd' or 'm', which
1969 specifies the functional unit used by INSN. */
1971 char
1972 c6x_get_unit_specifier (rtx_insn *insn)
1974 enum attr_units units;
1976 if (insn_info.exists ())
1978 int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
1979 return c6x_unit_names[unit][0];
1982 units = get_attr_units (insn);
1983 switch (units)
1985 case UNITS_D:
1986 case UNITS_DL:
1987 case UNITS_DS:
1988 case UNITS_DLS:
1989 case UNITS_D_ADDR:
1990 return 'd';
1991 case UNITS_L:
1992 case UNITS_LS:
1993 return 'l';
1994 case UNITS_S:
1995 return 's';
1996 case UNITS_M:
1997 return 'm';
1998 default:
1999 gcc_unreachable ();
2003 /* Prints the unit specifier field. */
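/* For example, an S-unit instruction on the B side that uses the cross
   path is printed as ".s2x".  */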
2004 static void
2005 c6x_print_unit_specifier_field (FILE *file, rtx_insn *insn)
2007 enum attr_units units = get_attr_units (insn);
2008 enum attr_cross cross = get_attr_cross (insn);
2009 enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
2010 int half;
2011 char unitspec;
2013 if (units == UNITS_D_ADDR)
2015 enum attr_addr_regfile arf = get_attr_addr_regfile (insn);
2016 int t_half;
2017 gcc_assert (arf != ADDR_REGFILE_UNKNOWN);
2018 half = arf == ADDR_REGFILE_A ? 1 : 2;
2019 t_half = rf == DEST_REGFILE_A ? 1 : 2;
2020 fprintf (file, ".d%dt%d", half, t_half);
2021 return;
2024 if (insn_info.exists ())
2026 int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
2027 fputs (".", file);
2028 fputs (c6x_unit_names[unit], file);
2029 if (cross == CROSS_Y)
2030 fputs ("x", file);
2031 return;
2034 gcc_assert (rf != DEST_REGFILE_UNKNOWN);
2035 unitspec = c6x_get_unit_specifier (insn);
2036 half = rf == DEST_REGFILE_A ? 1 : 2;
2037 fprintf (file, ".%c%d%s", unitspec, half, cross == CROSS_Y ? "x" : "");
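/* Editorial sketch (not part of the original source): how a unit
   specifier such as ".s2x" is composed from a unit letter, the side
   of the machine (1 for the A register file, 2 for the B file) and
   the cross-path flag, as in c6x_print_unit_specifier_field above.
   Names and types here are invented for the example.  */

#include <stdio.h>

static void
print_unit_suffix (FILE *f, char unit, int b_side, int cross)
{
  /* e.g. unit 's', B side, cross path used  ->  ".s2x"  */
  fprintf (f, ".%c%d%s", unit, b_side ? 2 : 1, cross ? "x" : "");
}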
2040 /* Output assembly language output for the address ADDR to FILE. */
2041 static void
2042 c6x_print_operand_address (FILE *file, machine_mode mode, rtx addr)
2044 c6x_print_address_operand (file, addr, mode);
2047 /* Print an operand, X, to FILE, with an optional modifier in CODE.
2049 Meaning of CODE:
2050 $ -- print the unit specifier field for the instruction.
2051 . -- print the predicate for the instruction or an empty string for an
2052 unconditional one.
2053 | -- print "||" if the insn should be issued in parallel with the previous
2054 one.
2056 C -- print an opcode suffix for a reversed condition
2057 d -- H, W or D as a suffix for ADDA, based on the factor given by the
2058 operand
2059 D -- print either B, H, W or D as a suffix for ADDA, based on the size of
2060 the operand
2061 J -- print a predicate
2062 j -- like J, but use reverse predicate
2063 k -- treat a CONST_INT as a register number and print it as a register
2064 K -- like k, but print out a doubleword register
2065 n -- print an integer operand, negated
2066 p -- print the low part of a DImode register
2067 P -- print the high part of a DImode register
2068 r -- print the absolute value of an integer operand, shifted right by 1
2069 R -- print the absolute value of an integer operand, shifted right by 2
2070 f -- the first clear bit in an integer operand assumed to be a mask for
2071 a clr instruction
2072 F -- the last clear bit in such a mask
2073 s -- the first set bit in an integer operand assumed to be a mask for
2074 a set instruction
2075 S -- the last set bit in such a mask
2076 U -- print either 1 or 2, depending on the side of the machine used by
2077 the operand */
2079 static void
2080 c6x_print_operand (FILE *file, rtx x, int code)
2082 int i;
2083 HOST_WIDE_INT v;
2084 tree t;
2085 machine_mode mode;
2087 if (code == '|')
2089 if (GET_MODE (c6x_current_insn) != TImode)
2090 fputs ("||", file);
2091 return;
2093 if (code == '$')
2095 c6x_print_unit_specifier_field (file, c6x_current_insn);
2096 return;
2099 if (code == '.')
2101 x = current_insn_predicate;
2102 if (x)
2104 unsigned int regno = REGNO (XEXP (x, 0));
2105 fputs ("[", file);
2106 if (GET_CODE (x) == EQ)
2107 fputs ("!", file);
2108 fputs (reg_names [regno], file);
2109 fputs ("]", file);
2111 return;
2114 mode = GET_MODE (x);
2116 switch (code)
2118 case 'C':
2119 case 'c':
2121 enum rtx_code c = GET_CODE (x);
2122 if (code == 'C')
2123 c = swap_condition (c);
2124 fputs (GET_RTX_NAME (c), file);
2126 return;
2128 case 'J':
2129 case 'j':
2131 unsigned int regno = REGNO (XEXP (x, 0));
2132 if ((GET_CODE (x) == EQ) == (code == 'J'))
2133 fputs ("!", file);
2134 fputs (reg_names [regno], file);
2136 return;
2138 case 'k':
2139 gcc_assert (GET_CODE (x) == CONST_INT);
2140 v = INTVAL (x);
2141 fprintf (file, "%s", reg_names[v]);
2142 return;
2143 case 'K':
2144 gcc_assert (GET_CODE (x) == CONST_INT);
2145 v = INTVAL (x);
2146 gcc_assert ((v & 1) == 0);
2147 fprintf (file, "%s:%s", reg_names[v + 1], reg_names[v]);
2148 return;
2150 case 's':
2151 case 'S':
2152 case 'f':
2153 case 'F':
2154 gcc_assert (GET_CODE (x) == CONST_INT);
2155 v = INTVAL (x);
2156 for (i = 0; i < 32; i++)
2158 HOST_WIDE_INT tst = v & 1;
2159 if (((code == 'f' || code == 'F') && !tst)
2160 || ((code == 's' || code == 'S') && tst))
2161 break;
2162 v >>= 1;
2164 if (code == 'f' || code == 's')
2166 fprintf (file, "%d", i);
2167 return;
2169 for (; i < 32; i++)
2171 HOST_WIDE_INT tst = v & 1;
2172 if ((code == 'F' && tst) || (code == 'S' && !tst))
2173 break;
2174 v >>= 1;
2176 fprintf (file, "%d", i - 1);
2177 return;
2179 case 'n':
2180 gcc_assert (GET_CODE (x) == CONST_INT);
2181 output_addr_const (file, GEN_INT (-INTVAL (x)));
2182 return;
2184 case 'r':
2185 gcc_assert (GET_CODE (x) == CONST_INT);
2186 v = INTVAL (x);
2187 if (v < 0)
2188 v = -v;
2189 output_addr_const (file, GEN_INT (v >> 1));
2190 return;
2192 case 'R':
2193 gcc_assert (GET_CODE (x) == CONST_INT);
2194 v = INTVAL (x);
2195 if (v < 0)
2196 v = -v;
2197 output_addr_const (file, GEN_INT (v >> 2));
2198 return;
2200 case 'd':
2201 gcc_assert (GET_CODE (x) == CONST_INT);
2202 v = INTVAL (x);
2203 fputs (v == 2 ? "h" : v == 4 ? "w" : "d", file);
2204 return;
2206 case 'p':
2207 case 'P':
2208 gcc_assert (GET_CODE (x) == REG);
2209 v = REGNO (x);
2210 if (code == 'P')
2211 v++;
2212 fputs (reg_names[v], file);
2213 return;
2215 case 'D':
2216 v = 0;
2217 if (GET_CODE (x) == CONST)
2219 x = XEXP (x, 0);
2220 gcc_assert (GET_CODE (x) == PLUS);
2221 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
2222 v = INTVAL (XEXP (x, 1));
2223 x = XEXP (x, 0);
2226 gcc_assert (GET_CODE (x) == SYMBOL_REF);
2228 t = SYMBOL_REF_DECL (x);
2229 if (DECL_P (t))
2230 v |= DECL_ALIGN_UNIT (t);
2231 else
2232 v |= TYPE_ALIGN_UNIT (TREE_TYPE (t));
2233 if (v & 1)
2234 fputs ("b", file);
2235 else if (v & 2)
2236 fputs ("h", file);
2237 else
2238 fputs ("w", file);
2239 return;
2241 case 'U':
2242 if (MEM_P (x))
2244 x = XEXP (x, 0);
2245 if (GET_CODE (x) == PLUS
2246 || GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
2247 x = XEXP (x, 0);
2248 if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
2250 gcc_assert (sdata_symbolic_operand (x, Pmode));
2251 fputs ("2", file);
2252 return;
2255 gcc_assert (REG_P (x));
2256 if (A_REGNO_P (REGNO (x)))
2257 fputs ("1", file);
2258 if (B_REGNO_P (REGNO (x)))
2259 fputs ("2", file);
2260 return;
2262 default:
2263 switch (GET_CODE (x))
2265 case REG:
2266 if (GET_MODE_SIZE (mode) == 8)
2267 fprintf (file, "%s:%s", reg_names[REGNO (x) + 1],
2268 reg_names[REGNO (x)]);
2269 else
2270 fprintf (file, "%s", reg_names[REGNO (x)]);
2271 break;
2273 case MEM:
2274 fputc ('*', file);
2275 gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
2276 c6x_print_address_operand (file, XEXP (x, 0), GET_MODE (x));
2277 break;
2279 case SYMBOL_REF:
2280 fputc ('(', file);
2281 output_addr_const (file, x);
2282 fputc (')', file);
2283 break;
2285 case CONST_INT:
2286 output_addr_const (file, x);
2287 break;
2289 case CONST_DOUBLE:
2290 output_operand_lossage ("invalid const_double operand");
2291 break;
2293 default:
2294 output_addr_const (file, x);
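/* Editorial sketch (not part of the original source): the 's'/'S'
   (and the analogous 'f'/'F') modifiers above convert a contiguous
   bit mask into the bit-range operands of the C6X SET/CLR
   instructions.  A stand-alone version of that scan, with assumed
   names:  */

static void
mask_to_bit_range (unsigned int mask, int *first, int *last)
{
  int i = 0;

  /* Find the first set bit (the 's' case).  */
  while (i < 32 && !(mask & 1))
    {
      mask >>= 1;
      i++;
    }
  *first = i;

  /* Keep going until the run of set bits ends; the last set bit is
     one before the first clear bit (the 'S' case).  */
  while (i < 32 && (mask & 1))
    {
      mask >>= 1;
      i++;
    }
  *last = i - 1;
}

/* For mask 0x0000ff00 this yields *first == 8 and *last == 15.  */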
2299 /* Return TRUE if OP is a valid memory address with a base register of
2300 class C. If SMALL_OFFSET is true, we disallow memory references which would
2301 require a long offset with B14/B15. */
2303 bool
2304 c6x_mem_operand (rtx op, enum reg_class c, bool small_offset)
2306 machine_mode mode = GET_MODE (op);
2307 rtx base = XEXP (op, 0);
2308 switch (GET_CODE (base))
2310 case REG:
2311 break;
2312 case PLUS:
2313 if (small_offset
2314 && (XEXP (base, 0) == stack_pointer_rtx
2315 || XEXP (base, 0) == pic_offset_table_rtx))
2317 if (!c6x_legitimate_address_p_1 (mode, base, true, true))
2318 return false;
2321 /* fall through */
2322 case PRE_INC:
2323 case PRE_DEC:
2324 case PRE_MODIFY:
2325 case POST_INC:
2326 case POST_DEC:
2327 case POST_MODIFY:
2328 base = XEXP (base, 0);
2329 break;
2331 case CONST:
2332 case LABEL_REF:
2333 case SYMBOL_REF:
2334 gcc_assert (sdata_symbolic_operand (base, Pmode));
2335 return !small_offset && c == B_REGS;
2337 default:
2338 return false;
2340 return TEST_HARD_REG_BIT (reg_class_contents[(int) (c)], REGNO (base));
2343 /* Returns true if X is a valid address for use in a memory reference
2344 of mode MODE. If STRICT is true, we do not allow pseudo registers
2345 in the address. NO_LARGE_OFFSET is true if we are examining an
2346 address for use in a load or store misaligned instruction, or
2347 recursively examining an operand inside a PRE/POST_MODIFY. */
2349 bool
2350 c6x_legitimate_address_p_1 (machine_mode mode, rtx x, bool strict,
2351 bool no_large_offset)
2353 int size, size1;
2354 HOST_WIDE_INT off;
2355 enum rtx_code code = GET_CODE (x);
2357 switch (code)
2359 case PRE_MODIFY:
2360 case POST_MODIFY:
2361 /* We can't split these into word-sized pieces yet. */
2362 if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
2363 return false;
2364 if (GET_CODE (XEXP (x, 1)) != PLUS)
2365 return false;
2366 if (!c6x_legitimate_address_p_1 (mode, XEXP (x, 1), strict, true))
2367 return false;
2368 if (!rtx_equal_p (XEXP (x, 0), XEXP (XEXP (x, 1), 0)))
2369 return false;
2371 /* fall through */
2372 case PRE_INC:
2373 case PRE_DEC:
2374 case POST_INC:
2375 case POST_DEC:
2376 /* We can't split these into word-sized pieces yet. */
2377 if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
2378 return false;
2379 x = XEXP (x, 0);
2380 if (!REG_P (x))
2381 return false;
2383 /* fall through */
2384 case REG:
2385 if (strict)
2386 return REGNO_OK_FOR_BASE_STRICT_P (REGNO (x));
2387 else
2388 return REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x));
2390 case PLUS:
2391 if (!REG_P (XEXP (x, 0))
2392 || !c6x_legitimate_address_p_1 (mode, XEXP (x, 0), strict, false))
2393 return false;
2394 /* We cannot ensure currently that both registers end up in the
2395 same register file. */
2396 if (REG_P (XEXP (x, 1)))
2397 return false;
2399 if (mode == BLKmode)
2400 size = 4;
2401 else if (mode == VOIDmode)
2402 /* ??? This can happen during ivopts. */
2403 size = 1;
2404 else
2405 size = GET_MODE_SIZE (mode);
2407 if (flag_pic
2408 && GET_CODE (XEXP (x, 1)) == UNSPEC
2409 && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_SDATA
2410 && XEXP (x, 0) == pic_offset_table_rtx
2411 && sdata_symbolic_operand (XVECEXP (XEXP (x, 1), 0, 0), SImode))
2412 return !no_large_offset && size <= 4;
2413 if (flag_pic == 1
2414 && mode == Pmode
2415 && GET_CODE (XEXP (x, 1)) == UNSPEC
2416 && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_GOT
2417 && XEXP (x, 0) == pic_offset_table_rtx
2418 && (GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == SYMBOL_REF
2419 || GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == LABEL_REF))
2420 return !no_large_offset;
2421 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2422 return false;
2424 off = INTVAL (XEXP (x, 1));
2426 /* If the machine does not have doubleword load/stores, we'll use
2427 word size accesses. */
2428 size1 = size;
2429 if (size == 2 * UNITS_PER_WORD && !TARGET_STDW)
2430 size = UNITS_PER_WORD;
2432 if (((HOST_WIDE_INT)size1 - 1) & off)
2433 return false;
2434 off /= size;
2435 if (off > -32 && off < (size1 == size ? 32 : 28))
2436 return true;
2437 if (no_large_offset || code != PLUS || XEXP (x, 0) != stack_pointer_rtx
2438 || size1 > UNITS_PER_WORD)
2439 return false;
2440 return off >= 0 && off < 32768;
2442 case CONST:
2443 case SYMBOL_REF:
2444 case LABEL_REF:
2445 return (!no_large_offset
2446 /* With -fpic, we must wrap it in an unspec to show the B14
2447 dependency. */
2448 && !flag_pic
2449 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2450 && sdata_symbolic_operand (x, Pmode));
2452 default:
2453 return false;
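/* Editorial sketch (not part of the original source): the heart of
   the PLUS offset check above.  A constant offset is acceptable for
   an ordinary base register only if it is a multiple of the access
   size and the scaled value fits the 5-bit constant field of the
   addressing mode; stack-pointer based accesses may additionally use
   a 15-bit scaled offset.  This is a simplified version with assumed
   names; it ignores the doubleword-splitting adjustment.  */

static int
offset_ok_for_base (long off, int size, int sp_based)
{
  /* SIZE is the access size in bytes (1, 2, 4 or 8).  */
  if (off % size != 0)
    return 0;
  off /= size;
  if (off > -32 && off < 32)
    return 1;
  return sp_based && off >= 0 && off < 32768;
}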
2457 static bool
2458 c6x_legitimate_address_p (machine_mode mode, rtx x, bool strict)
2460 return c6x_legitimate_address_p_1 (mode, x, strict, false);
2463 static bool
2464 c6x_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
2465 rtx x ATTRIBUTE_UNUSED)
2467 return true;
2470 /* Implements TARGET_PREFERRED_RENAME_CLASS. */
2471 static reg_class_t
2472 c6x_preferred_rename_class (reg_class_t cl)
2474 if (cl == A_REGS)
2475 return NONPREDICATE_A_REGS;
2476 if (cl == B_REGS)
2477 return NONPREDICATE_B_REGS;
2478 if (cl == ALL_REGS || cl == GENERAL_REGS)
2479 return NONPREDICATE_REGS;
2480 return NO_REGS;
2483 /* Implements FINAL_PRESCAN_INSN. */
2484 void
2485 c6x_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
2486 int noperands ATTRIBUTE_UNUSED)
2488 c6x_current_insn = insn;
2491 /* A structure to describe the stack layout of a function. The layout is
2492 as follows:
2494 [saved frame pointer (or possibly padding0)]
2495 --> incoming stack pointer, new hard frame pointer
2496 [saved call-used regs]
2497 [optional padding1]
2498 --> soft frame pointer
2499 [frame]
2500 [outgoing arguments]
2501 [optional padding2]
2503 The structure members are laid out in this order. */
2505 struct c6x_frame
2507 int padding0;
2508 /* Number of registers to save. */
2509 int nregs;
2510 int padding1;
2511 HOST_WIDE_INT frame;
2512 int outgoing_arguments_size;
2513 int padding2;
2515 HOST_WIDE_INT to_allocate;
2516 /* The offsets relative to the incoming stack pointer (which
2517 becomes HARD_FRAME_POINTER). */
2518 HOST_WIDE_INT frame_pointer_offset;
2519 HOST_WIDE_INT b3_offset;
2521 /* True if we should call push_rts/pop_rts to save and restore
2522 registers. */
2523 bool push_rts;
2526 /* Return true if we need to save and modify the PIC register in the
2527 prologue. */
2529 static bool
2530 must_reload_pic_reg_p (void)
2532 struct cgraph_local_info *i = NULL;
2534 if (!TARGET_DSBT)
2535 return false;
2537 i = cgraph_node::local_info (current_function_decl);
2539 if ((crtl->uses_pic_offset_table || !crtl->is_leaf) && !i->local)
2540 return true;
2541 return false;
2544 /* Return 1 if we need to save REGNO. */
2545 static int
2546 c6x_save_reg (unsigned int regno)
2548 return ((df_regs_ever_live_p (regno)
2549 && !call_used_regs[regno]
2550 && !fixed_regs[regno])
2551 || (regno == RETURN_ADDR_REGNO
2552 && (df_regs_ever_live_p (regno)
2553 || !crtl->is_leaf))
2554 || (regno == PIC_OFFSET_TABLE_REGNUM && must_reload_pic_reg_p ()));
2557 /* Examine the number of regs NREGS we've determined we must save.
2558 Return true if we should use __c6xabi_push_rts/__c6xabi_pop_rts for
2559 prologue and epilogue. */
2561 static bool
2562 use_push_rts_p (int nregs)
2564 if (TARGET_INSNS_64PLUS && optimize_function_for_size_p (cfun)
2565 && !cfun->machine->contains_sibcall
2566 && !cfun->returns_struct
2567 && !TARGET_LONG_CALLS
2568 && nregs >= 6 && !frame_pointer_needed)
2569 return true;
2570 return false;
2573 /* Return the number of saved general purpose registers. */
2576 c6x_nsaved_regs (void)
2578 int nregs = 0;
2579 int regno;
2581 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2582 if (c6x_save_reg (regno))
2583 nregs++;
2584 return nregs;
2587 /* The safe debug order mandated by the ABI. */
2588 static unsigned reg_save_order[] =
2590 REG_A10, REG_A11, REG_A12, REG_A13,
2591 REG_A14, REG_B3,
2592 REG_B10, REG_B11, REG_B12, REG_B13,
2593 REG_B14, REG_A15
2596 #define N_SAVE_ORDER (sizeof reg_save_order / sizeof *reg_save_order)
2598 /* Compute the layout of the stack frame and store it in FRAME. */
2600 static void
2601 c6x_compute_frame_layout (struct c6x_frame *frame)
2603 HOST_WIDE_INT size = get_frame_size ();
2604 HOST_WIDE_INT offset;
2605 int nregs;
2607 /* We use the four bytes which are technically inside the caller's frame,
2608 usually to save the frame pointer. */
2609 offset = -4;
2610 frame->padding0 = 0;
2611 nregs = c6x_nsaved_regs ();
2612 frame->push_rts = false;
2613 frame->b3_offset = 0;
2614 if (use_push_rts_p (nregs))
2616 frame->push_rts = true;
2617 frame->b3_offset = (TARGET_BIG_ENDIAN ? -12 : -13) * 4;
2618 nregs = 14;
2620 else if (c6x_save_reg (REG_B3))
2622 int idx;
2623 for (idx = N_SAVE_ORDER - 1; reg_save_order[idx] != REG_B3; idx--)
2625 if (c6x_save_reg (reg_save_order[idx]))
2626 frame->b3_offset -= 4;
2629 frame->nregs = nregs;
2631 if (size == 0 && nregs == 0)
2633 frame->padding0 = 4;
2634 frame->padding1 = frame->padding2 = 0;
2635 frame->frame_pointer_offset = frame->to_allocate = 0;
2636 frame->outgoing_arguments_size = 0;
2637 return;
2640 if (!frame->push_rts)
2641 offset += frame->nregs * 4;
2643 if (offset == 0 && size == 0 && crtl->outgoing_args_size == 0
2644 && !crtl->is_leaf)
2645 /* Don't use the bottom of the caller's frame if we have no
2646 allocation of our own and call other functions. */
2647 frame->padding0 = frame->padding1 = 4;
2648 else if (offset & 4)
2649 frame->padding1 = 4;
2650 else
2651 frame->padding1 = 0;
2653 offset += frame->padding0 + frame->padding1;
2654 frame->frame_pointer_offset = offset;
2655 offset += size;
2657 frame->outgoing_arguments_size = crtl->outgoing_args_size;
2658 offset += frame->outgoing_arguments_size;
2660 if ((offset & 4) == 0)
2661 frame->padding2 = 8;
2662 else
2663 frame->padding2 = 4;
2664 frame->to_allocate = offset + frame->padding2;
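/* Editorial sketch (not part of the original source): the final
   padding computation above keeps the total allocation a multiple of
   8; padding2 is always 4 or 8, never 0.  A stand-alone version of
   that rounding, with an assumed name:  */

static long
round_frame_allocation (long offset)
{
  /* OFFSET is assumed to already be a multiple of 4.  */
  long padding2 = ((offset & 4) == 0) ? 8 : 4;
  return offset + padding2;
}

/* round_frame_allocation (16) == 24 and round_frame_allocation (20) == 24.  */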
2667 /* Return the offset between two registers, one to be eliminated, and the other
2668 its replacement, at the start of a routine. */
2670 HOST_WIDE_INT
2671 c6x_initial_elimination_offset (int from, int to)
2673 struct c6x_frame frame;
2674 c6x_compute_frame_layout (&frame);
2676 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
2677 return 0;
2678 else if (from == FRAME_POINTER_REGNUM
2679 && to == HARD_FRAME_POINTER_REGNUM)
2680 return -frame.frame_pointer_offset;
2681 else
2683 gcc_assert (to == STACK_POINTER_REGNUM);
2685 if (from == ARG_POINTER_REGNUM)
2686 return frame.to_allocate + (frame.push_rts ? 56 : 0);
2688 gcc_assert (from == FRAME_POINTER_REGNUM);
2689 return frame.to_allocate - frame.frame_pointer_offset;
2693 /* Given FROM and TO register numbers, say whether this elimination is
2694 allowed. Frame pointer elimination is automatically handled. */
2696 static bool
2697 c6x_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2699 if (to == STACK_POINTER_REGNUM)
2700 return !frame_pointer_needed;
2701 return true;
2704 /* Emit insns to increment the stack pointer by OFFSET. If
2705 FRAME_RELATED_P, set the RTX_FRAME_RELATED_P flag on the insns.
2706 Does nothing if the offset is zero. */
2708 static void
2709 emit_add_sp_const (HOST_WIDE_INT offset, bool frame_related_p)
2711 rtx to_add = GEN_INT (offset);
2712 rtx orig_to_add = to_add;
2713 rtx_insn *insn;
2715 if (offset == 0)
2716 return;
2718 if (offset < -32768 || offset > 32767)
2720 rtx reg = gen_rtx_REG (SImode, REG_A0);
2721 rtx low = GEN_INT (trunc_int_for_mode (offset, HImode));
2723 insn = emit_insn (gen_movsi_high (reg, low));
2724 if (frame_related_p)
2725 RTX_FRAME_RELATED_P (insn) = 1;
2726 insn = emit_insn (gen_movsi_lo_sum (reg, reg, to_add));
2727 if (frame_related_p)
2728 RTX_FRAME_RELATED_P (insn) = 1;
2729 to_add = reg;
2731 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
2732 to_add));
2733 if (frame_related_p)
2735 if (REG_P (to_add))
2736 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
2737 gen_rtx_SET (stack_pointer_rtx,
2738 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
2739 orig_to_add)));
2741 RTX_FRAME_RELATED_P (insn) = 1;
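/* Editorial sketch (not part of the original source): offsets outside
   the signed 16-bit range are first built in a scratch register from
   two 16-bit pieces and only then added to the stack pointer.  The
   sketch below shows the usual MVKL/MVKH-style reconstruction of a
   32-bit value from its halves; it is illustrative only and does not
   claim to match the movsi_high/movsi_lo_sum patterns exactly.  */

#include <stdint.h>

static int32_t
rebuild_from_halves (int32_t v)
{
  int32_t low = (int16_t) (v & 0xffff);   /* sign-extended low half */
  uint32_t high = (uint32_t) v >> 16;     /* upper 16 bits */
  return (int32_t) ((high << 16) | ((uint32_t) low & 0xffff));
}

/* rebuild_from_halves (x) == x for any 32-bit x.  */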
2745 /* Prologue and epilogue. */
2746 void
2747 c6x_expand_prologue (void)
2749 struct c6x_frame frame;
2750 rtx_insn *insn;
2751 rtx mem;
2752 int nsaved = 0;
2753 HOST_WIDE_INT initial_offset, off, added_already;
2755 c6x_compute_frame_layout (&frame);
2757 if (flag_stack_usage_info)
2758 current_function_static_stack_size = frame.to_allocate;
2760 initial_offset = -frame.to_allocate;
2761 if (frame.push_rts)
2763 emit_insn (gen_push_rts ());
2764 nsaved = frame.nregs;
2767 /* If the offsets would be too large for the memory references we will
2768 create to save registers, do the stack allocation in two parts.
2769 Ensure by subtracting 8 that we don't store to the word pointed to
2770 by the stack pointer. */
2771 if (initial_offset < -32768)
2772 initial_offset = -frame.frame_pointer_offset - 8;
2774 if (frame.to_allocate > 0)
2775 gcc_assert (initial_offset != 0);
2777 off = -initial_offset + 4 - frame.padding0;
2779 mem = gen_frame_mem (Pmode, stack_pointer_rtx);
2781 added_already = 0;
2782 if (frame_pointer_needed)
2784 rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
2785 /* We go through some contortions here to both follow the ABI's
2786 recommendation that FP == incoming SP, and to avoid writing or
2787 reading the word pointed to by the stack pointer. */
2788 rtx addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
2789 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
2790 GEN_INT (-8)));
2791 insn = emit_move_insn (gen_frame_mem (Pmode, addr), fp_reg);
2792 RTX_FRAME_RELATED_P (insn) = 1;
2793 nsaved++;
2794 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
2795 GEN_INT (8)));
2796 RTX_FRAME_RELATED_P (insn) = 1;
2797 off -= 4;
2798 added_already = -8;
2801 emit_add_sp_const (initial_offset - added_already, true);
2803 if (nsaved < frame.nregs)
2805 unsigned i;
2807 for (i = 0; i < N_SAVE_ORDER; i++)
2809 int idx = N_SAVE_ORDER - i - 1;
2810 unsigned regno = reg_save_order[idx];
2811 rtx reg;
2812 machine_mode save_mode = SImode;
2814 if (regno == REG_A15 && frame_pointer_needed)
2815 /* Already saved. */
2816 continue;
2817 if (!c6x_save_reg (regno))
2818 continue;
2820 if (TARGET_STDW && (off & 4) == 0 && off <= 256
2821 && (regno & 1) == 1
2822 && i + 1 < N_SAVE_ORDER
2823 && reg_save_order[idx - 1] == regno - 1
2824 && c6x_save_reg (regno - 1))
2826 save_mode = DImode;
2827 regno--;
2828 i++;
2830 reg = gen_rtx_REG (save_mode, regno);
2831 off -= GET_MODE_SIZE (save_mode);
2833 insn = emit_move_insn (adjust_address (mem, save_mode, off),
2834 reg);
2835 RTX_FRAME_RELATED_P (insn) = 1;
2837 nsaved += HARD_REGNO_NREGS (regno, save_mode);
2840 gcc_assert (nsaved == frame.nregs);
2841 emit_add_sp_const (-frame.to_allocate - initial_offset, true);
2842 if (must_reload_pic_reg_p ())
2844 if (dsbt_decl == NULL)
2846 tree t;
2848 t = build_index_type (integer_one_node);
2849 t = build_array_type (integer_type_node, t);
2850 t = build_decl (BUILTINS_LOCATION, VAR_DECL,
2851 get_identifier ("__c6xabi_DSBT_BASE"), t);
2852 DECL_ARTIFICIAL (t) = 1;
2853 DECL_IGNORED_P (t) = 1;
2854 DECL_EXTERNAL (t) = 1;
2855 TREE_STATIC (t) = 1;
2856 TREE_PUBLIC (t) = 1;
2857 TREE_USED (t) = 1;
2859 dsbt_decl = t;
2861 emit_insn (gen_setup_dsbt (pic_offset_table_rtx,
2862 XEXP (DECL_RTL (dsbt_decl), 0)));
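/* Editorial sketch (not part of the original source): the prologue
   merges two adjacent single-word saves into one doubleword store
   when the target has STDW, the slot is 8-byte aligned and close
   enough to the base, and the two registers form an even/odd pair.
   A simplified, stand-alone version of that test with assumed names
   (the epilogue applies the mirrored test starting from the even
   register):  */

static int
can_pair_for_stdw (int have_stdw, long off, int regno, int partner_saved)
{
  return (have_stdw
          && (off & 4) == 0      /* the slot is 8-byte aligned */
          && off <= 256          /* within the offset range used here */
          && (regno & 1) == 1    /* odd register, pairs with regno - 1 */
          && partner_saved);     /* the even partner is also being saved */
}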
2866 void
2867 c6x_expand_epilogue (bool sibcall)
2869 unsigned i;
2870 struct c6x_frame frame;
2871 rtx mem;
2872 HOST_WIDE_INT off;
2873 int nsaved = 0;
2875 c6x_compute_frame_layout (&frame);
2877 mem = gen_frame_mem (Pmode, stack_pointer_rtx);
2879 /* Insert a dummy set/use of the stack pointer. This creates a
2880 scheduler barrier between the prologue saves and epilogue restores. */
2881 emit_insn (gen_epilogue_barrier (stack_pointer_rtx, stack_pointer_rtx));
2883 /* If the offsets would be too large for the memory references we will
2884 create to restore registers, do a preliminary stack adjustment here. */
2885 off = frame.to_allocate - frame.frame_pointer_offset + frame.padding1;
2886 if (frame.push_rts)
2888 nsaved = frame.nregs;
2890 else
2892 if (frame.to_allocate > 32768)
2894 /* Don't add the entire offset so that we leave an unused word
2895 above the stack pointer. */
2896 emit_add_sp_const ((off - 16) & ~7, false);
2897 off &= 7;
2898 off += 16;
2900 for (i = 0; i < N_SAVE_ORDER; i++)
2902 unsigned regno = reg_save_order[i];
2903 rtx reg;
2904 machine_mode save_mode = SImode;
2906 if (!c6x_save_reg (regno))
2907 continue;
2908 if (regno == REG_A15 && frame_pointer_needed)
2909 continue;
2911 if (TARGET_STDW && (off & 4) == 0 && off < 256
2912 && (regno & 1) == 0
2913 && i + 1 < N_SAVE_ORDER
2914 && reg_save_order[i + 1] == regno + 1
2915 && c6x_save_reg (regno + 1))
2917 save_mode = DImode;
2918 i++;
2920 reg = gen_rtx_REG (save_mode, regno);
2922 emit_move_insn (reg, adjust_address (mem, save_mode, off));
2924 off += GET_MODE_SIZE (save_mode);
2925 nsaved += HARD_REGNO_NREGS (regno, save_mode);
2928 if (!frame_pointer_needed)
2929 emit_add_sp_const (off + frame.padding0 - 4, false);
2930 else
2932 rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
2933 rtx addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
2934 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
2935 GEN_INT (8)));
2936 emit_insn (gen_addsi3 (stack_pointer_rtx, hard_frame_pointer_rtx,
2937 GEN_INT (-8)));
2938 emit_move_insn (fp_reg, gen_frame_mem (Pmode, addr));
2939 nsaved++;
2941 gcc_assert (nsaved == frame.nregs);
2942 if (!sibcall)
2944 if (frame.push_rts)
2945 emit_jump_insn (gen_pop_rts ());
2946 else
2947 emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode,
2948 RETURN_ADDR_REGNO)));
2952 /* Return the value of the return address for the frame COUNT steps up
2953 from the current frame, after the prologue.
2954 We punt for everything but the current frame by returning const0_rtx. */
2957 c6x_return_addr_rtx (int count)
2959 if (count != 0)
2960 return const0_rtx;
2962 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNO);
2965 /* Return true iff TYPE is one of the shadow types. */
2966 static bool
2967 shadow_type_p (enum attr_type type)
2969 return (type == TYPE_SHADOW || type == TYPE_LOAD_SHADOW
2970 || type == TYPE_MULT_SHADOW);
2973 /* Return true iff INSN is a shadow pattern. */
2974 static bool
2975 shadow_p (rtx_insn *insn)
2977 if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
2978 return false;
2979 return shadow_type_p (get_attr_type (insn));
2982 /* Return true iff INSN is a shadow or blockage pattern. */
2983 static bool
2984 shadow_or_blockage_p (rtx_insn *insn)
2986 enum attr_type type;
2987 if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
2988 return false;
2989 type = get_attr_type (insn);
2990 return shadow_type_p (type) || type == TYPE_BLOCKAGE;
2993 /* Translate UNITS into a bitmask of units we can reserve for this
2994 insn. */
2995 static int
2996 get_reservation_flags (enum attr_units units)
2998 switch (units)
3000 case UNITS_D:
3001 case UNITS_D_ADDR:
3002 return RESERVATION_FLAG_D;
3003 case UNITS_L:
3004 return RESERVATION_FLAG_L;
3005 case UNITS_S:
3006 return RESERVATION_FLAG_S;
3007 case UNITS_M:
3008 return RESERVATION_FLAG_M;
3009 case UNITS_LS:
3010 return RESERVATION_FLAG_LS;
3011 case UNITS_DL:
3012 return RESERVATION_FLAG_DL;
3013 case UNITS_DS:
3014 return RESERVATION_FLAG_DS;
3015 case UNITS_DLS:
3016 return RESERVATION_FLAG_DLS;
3017 default:
3018 return 0;
3022 /* Compute the side of the machine used by INSN, which reserves UNITS.
3023 This must match the reservations in the scheduling description. */
3024 static int
3025 get_insn_side (rtx_insn *insn, enum attr_units units)
3027 if (units == UNITS_D_ADDR)
3028 return (get_attr_addr_regfile (insn) == ADDR_REGFILE_A ? 0 : 1);
3029 else
3031 enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
3032 if (rf == DEST_REGFILE_ANY)
3033 return get_attr_type (insn) == TYPE_BRANCH ? 0 : 1;
3034 else
3035 return rf == DEST_REGFILE_A ? 0 : 1;
3039 /* After scheduling, walk the insns between HEAD and END and assign unit
3040 reservations. */
3041 static void
3042 assign_reservations (rtx_insn *head, rtx_insn *end)
3044 rtx_insn *insn;
3045 for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn))
3047 unsigned int sched_mask, reserved;
3048 rtx_insn *within, *last;
3049 int pass;
3050 int rsrv[2];
3051 int rsrv_count[2][4];
3052 int i;
3054 if (GET_MODE (insn) != TImode)
3055 continue;
3057 reserved = 0;
3058 last = NULL;
3059 /* Find the last insn in the packet. It has a state recorded for it,
3060 which we can use to determine the units we should be using. */
3061 for (within = insn;
3062 (within != NEXT_INSN (end)
3063 && (within == insn || GET_MODE (within) != TImode));
3064 within = NEXT_INSN (within))
3066 int icode;
3067 if (!NONDEBUG_INSN_P (within))
3068 continue;
3069 icode = recog_memoized (within);
3070 if (icode < 0)
3071 continue;
3072 if (shadow_p (within))
3073 continue;
3074 if (INSN_INFO_ENTRY (INSN_UID (within)).reservation != 0)
3075 reserved |= 1 << INSN_INFO_ENTRY (INSN_UID (within)).reservation;
3076 last = within;
3078 if (last == NULL_RTX)
3079 continue;
3081 sched_mask = INSN_INFO_ENTRY (INSN_UID (last)).unit_mask;
3082 sched_mask &= ~reserved;
3084 memset (rsrv_count, 0, sizeof rsrv_count);
3085 rsrv[0] = rsrv[1] = ~0;
3086 for (i = 0; i < 8; i++)
3088 int side = i / 4;
3089 int unit = i & 3;
3090 unsigned unit_bit = 1 << (unit + side * UNIT_QID_SIDE_OFFSET);
3091 /* Clear the bits which we expect to reserve in the following loop,
3092 leaving the ones set which aren't present in the scheduler's
3093 state and shouldn't be reserved. */
3094 if (sched_mask & unit_bit)
3095 rsrv[i / 4] &= ~(1 << unit);
3098 /* Walk through the insns that occur in the same cycle. We use multiple
3099 passes to assign units, assigning for insns with the most specific
3100 requirements first. */
3101 for (pass = 0; pass < 4; pass++)
3102 for (within = insn;
3103 (within != NEXT_INSN (end)
3104 && (within == insn || GET_MODE (within) != TImode));
3105 within = NEXT_INSN (within))
3107 int uid = INSN_UID (within);
3108 int this_rsrv, side;
3109 int icode;
3110 enum attr_units units;
3111 enum attr_type type;
3112 int j;
3114 if (!NONDEBUG_INSN_P (within))
3115 continue;
3116 icode = recog_memoized (within);
3117 if (icode < 0)
3118 continue;
3119 if (INSN_INFO_ENTRY (uid).reservation != 0)
3120 continue;
3121 units = get_attr_units (within);
3122 type = get_attr_type (within);
3123 this_rsrv = get_reservation_flags (units);
3124 if (this_rsrv == 0)
3125 continue;
3126 side = get_insn_side (within, units);
3128 /* Certain floating point instructions are treated specially. If
3129 an insn can choose between units it can reserve, and its
3130 reservation spans more than one cycle, the reservation contains
3131 special markers in the first cycle to help us reconstruct what
3132 the automaton chose. */
3133 if ((type == TYPE_ADDDP || type == TYPE_FP4)
3134 && units == UNITS_LS)
3136 int test1_code = ((type == TYPE_FP4 ? UNIT_QID_FPL1 : UNIT_QID_ADDDPL1)
3137 + side * UNIT_QID_SIDE_OFFSET);
3138 int test2_code = ((type == TYPE_FP4 ? UNIT_QID_FPS1 : UNIT_QID_ADDDPS1)
3139 + side * UNIT_QID_SIDE_OFFSET);
3140 if ((sched_mask & (1 << test1_code)) != 0)
3142 this_rsrv = RESERVATION_FLAG_L;
3143 sched_mask &= ~(1 << test1_code);
3145 else if ((sched_mask & (1 << test2_code)) != 0)
3147 this_rsrv = RESERVATION_FLAG_S;
3148 sched_mask &= ~(1 << test2_code);
3152 if ((this_rsrv & (this_rsrv - 1)) == 0)
3154 int t = exact_log2 (this_rsrv) + side * UNIT_QID_SIDE_OFFSET;
3155 rsrv[side] |= this_rsrv;
3156 INSN_INFO_ENTRY (uid).reservation = t;
3157 continue;
3160 if (pass == 1)
3162 for (j = 0; j < 4; j++)
3163 if (this_rsrv & (1 << j))
3164 rsrv_count[side][j]++;
3165 continue;
3167 if ((pass == 2 && this_rsrv != RESERVATION_FLAG_DLS)
3168 || (pass == 3 && this_rsrv == RESERVATION_FLAG_DLS))
3170 int best = -1, best_cost = INT_MAX;
3171 for (j = 0; j < 4; j++)
3172 if ((this_rsrv & (1 << j))
3173 && !(rsrv[side] & (1 << j))
3174 && rsrv_count[side][j] < best_cost)
3176 best_cost = rsrv_count[side][j];
3177 best = j;
3179 gcc_assert (best != -1);
3180 rsrv[side] |= 1 << best;
3181 for (j = 0; j < 4; j++)
3182 if ((this_rsrv & (1 << j)) && j != best)
3183 rsrv_count[side][j]--;
3185 INSN_INFO_ENTRY (uid).reservation
3186 = best + side * UNIT_QID_SIDE_OFFSET;
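/* Editorial sketch (not part of the original source): the later
   passes above pick, for an insn that may use several units, the
   allowed unit that is still free and has the lowest demand count.
   A stand-alone version of that selection over the four units of one
   side, with assumed names:  */

static int
pick_least_used_unit (int allowed_mask, int reserved_mask, const int counts[4])
{
  int best = -1, best_cost = 1 << 30;
  int j;

  for (j = 0; j < 4; j++)
    if ((allowed_mask & (1 << j))
        && !(reserved_mask & (1 << j))
        && counts[j] < best_cost)
      {
        best_cost = counts[j];
        best = j;
      }
  return best;   /* -1 if no allowed unit is free */
}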
3192 /* Return a factor by which to weight unit imbalances for a reservation
3193 R. */
3194 static int
3195 unit_req_factor (enum unitreqs r)
3197 switch (r)
3199 case UNIT_REQ_D:
3200 case UNIT_REQ_L:
3201 case UNIT_REQ_S:
3202 case UNIT_REQ_M:
3203 case UNIT_REQ_X:
3204 case UNIT_REQ_T:
3205 return 1;
3206 case UNIT_REQ_DL:
3207 case UNIT_REQ_LS:
3208 case UNIT_REQ_DS:
3209 return 2;
3210 case UNIT_REQ_DLS:
3211 return 3;
3212 default:
3213 gcc_unreachable ();
3217 /* Examine INSN, and store in REQ1/SIDE1 and REQ2/SIDE2 the unit
3218 requirements. Returns zero if INSN can't be handled, otherwise
3219 either one or two to show how many of the two pairs are in use.
3220 REQ1 is always used; it holds what is normally thought of as the
3221 instruction's reservation, e.g. UNIT_REQ_DL. REQ2 is used to either
3222 describe a cross path, or for loads/stores, the T unit. */
3223 static int
3224 get_unit_reqs (rtx_insn *insn, int *req1, int *side1, int *req2, int *side2)
3226 enum attr_units units;
3227 enum attr_cross cross;
3228 int side, req;
3230 if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
3231 return 0;
3232 units = get_attr_units (insn);
3233 if (units == UNITS_UNKNOWN)
3234 return 0;
3235 side = get_insn_side (insn, units);
3236 cross = get_attr_cross (insn);
3238 req = (units == UNITS_D ? UNIT_REQ_D
3239 : units == UNITS_D_ADDR ? UNIT_REQ_D
3240 : units == UNITS_DL ? UNIT_REQ_DL
3241 : units == UNITS_DS ? UNIT_REQ_DS
3242 : units == UNITS_L ? UNIT_REQ_L
3243 : units == UNITS_LS ? UNIT_REQ_LS
3244 : units == UNITS_S ? UNIT_REQ_S
3245 : units == UNITS_M ? UNIT_REQ_M
3246 : units == UNITS_DLS ? UNIT_REQ_DLS
3247 : -1);
3248 gcc_assert (req != -1);
3249 *req1 = req;
3250 *side1 = side;
3251 if (units == UNITS_D_ADDR)
3253 *req2 = UNIT_REQ_T;
3254 *side2 = side ^ (cross == CROSS_Y ? 1 : 0);
3255 return 2;
3257 else if (cross == CROSS_Y)
3259 *req2 = UNIT_REQ_X;
3260 *side2 = side;
3261 return 2;
3263 return 1;
3266 /* Walk the insns between and including HEAD and TAIL, and mark the
3267 resource requirements in the unit_reqs table. */
3268 static void
3269 count_unit_reqs (unit_req_table reqs, rtx_insn *head, rtx_insn *tail)
3271 rtx_insn *insn;
3273 memset (reqs, 0, sizeof (unit_req_table));
3275 for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
3277 int side1, side2, req1, req2;
3279 switch (get_unit_reqs (insn, &req1, &side1, &req2, &side2))
3281 case 2:
3282 reqs[side2][req2]++;
3283 /* fall through */
3284 case 1:
3285 reqs[side1][req1]++;
3286 break;
3291 /* Update the table REQS by merging more specific unit reservations into
3292 more general ones, i.e. counting (for example) UNIT_REQ_D also in
3293 UNIT_REQ_DL, DS, and DLS. */
3294 static void
3295 merge_unit_reqs (unit_req_table reqs)
3297 int side;
3298 for (side = 0; side < 2; side++)
3300 int d = reqs[side][UNIT_REQ_D];
3301 int l = reqs[side][UNIT_REQ_L];
3302 int s = reqs[side][UNIT_REQ_S];
3303 int dl = reqs[side][UNIT_REQ_DL];
3304 int ls = reqs[side][UNIT_REQ_LS];
3305 int ds = reqs[side][UNIT_REQ_DS];
3307 reqs[side][UNIT_REQ_DL] += d;
3308 reqs[side][UNIT_REQ_DL] += l;
3309 reqs[side][UNIT_REQ_DS] += d;
3310 reqs[side][UNIT_REQ_DS] += s;
3311 reqs[side][UNIT_REQ_LS] += l;
3312 reqs[side][UNIT_REQ_LS] += s;
3313 reqs[side][UNIT_REQ_DLS] += ds + dl + ls + d + l + s;
3317 /* Examine the table REQS and return a measure of unit imbalance by comparing
3318 the two sides of the machine. If, for example, D1 is used twice and D2
3319 used not at all, the return value should be 1 in the absence of other
3320 imbalances. */
3321 static int
3322 unit_req_imbalance (unit_req_table reqs)
3324 int val = 0;
3325 int i;
3327 for (i = 0; i < UNIT_REQ_MAX; i++)
3329 int factor = unit_req_factor ((enum unitreqs) i);
3330 int diff = abs (reqs[0][i] - reqs[1][i]);
3331 val += (diff + factor - 1) / factor / 2;
3333 return val;
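/* Editorial sketch (not part of the original source): the imbalance
   metric above, applied to a single requirement kind.  With D1 used
   twice, D2 not at all and a weight factor of 1, the contribution is
   (|2 - 0| + 1 - 1) / 1 / 2 == 1, matching the comment.  Names are
   assumptions.  */

static int
imbalance_contribution (int side0_uses, int side1_uses, int factor)
{
  int diff = side0_uses > side1_uses
             ? side0_uses - side1_uses : side1_uses - side0_uses;
  return (diff + factor - 1) / factor / 2;
}

/* imbalance_contribution (2, 0, 1) == 1.  */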
3336 /* Return the resource-constrained minimum iteration interval given the
3337 data in the REQS table. This must have been processed with
3338 merge_unit_reqs already. */
3339 static int
3340 res_mii (unit_req_table reqs)
3342 int side, req;
3343 int worst = 1;
3344 for (side = 0; side < 2; side++)
3345 for (req = 0; req < UNIT_REQ_MAX; req++)
3347 int factor = unit_req_factor ((enum unitreqs) req);
3348 worst = MAX ((reqs[side][req] + factor - 1) / factor, worst);
3351 return worst;
3354 /* Examine INSN, and store in PMASK1 and PMASK2 bitmasks that represent
3355 the operands that are involved in the (up to) two reservations, as
3356 found by get_unit_reqs. Return true if we did this successfully, false
3357 if we couldn't identify what to do with INSN. */
3358 static bool
3359 get_unit_operand_masks (rtx_insn *insn, unsigned int *pmask1,
3360 unsigned int *pmask2)
3362 enum attr_op_pattern op_pat;
3364 if (recog_memoized (insn) < 0)
3365 return 0;
3366 if (GET_CODE (PATTERN (insn)) == COND_EXEC)
3367 return false;
3368 extract_insn (insn);
3369 op_pat = get_attr_op_pattern (insn);
3370 if (op_pat == OP_PATTERN_DT)
3372 gcc_assert (recog_data.n_operands == 2);
3373 *pmask1 = 1 << 0;
3374 *pmask2 = 1 << 1;
3375 return true;
3377 else if (op_pat == OP_PATTERN_TD)
3379 gcc_assert (recog_data.n_operands == 2);
3380 *pmask1 = 1 << 1;
3381 *pmask2 = 1 << 0;
3382 return true;
3384 else if (op_pat == OP_PATTERN_SXS)
3386 gcc_assert (recog_data.n_operands == 3);
3387 *pmask1 = (1 << 0) | (1 << 2);
3388 *pmask2 = 1 << 1;
3389 return true;
3391 else if (op_pat == OP_PATTERN_SX)
3393 gcc_assert (recog_data.n_operands == 2);
3394 *pmask1 = 1 << 0;
3395 *pmask2 = 1 << 1;
3396 return true;
3398 else if (op_pat == OP_PATTERN_SSX)
3400 gcc_assert (recog_data.n_operands == 3);
3401 *pmask1 = (1 << 0) | (1 << 1);
3402 *pmask2 = 1 << 2;
3403 return true;
3405 return false;
3408 /* Try to replace a register in INSN, which has corresponding rename info
3409 from regrename_analyze in INFO. OP_MASK and ORIG_SIDE provide information
3410 about the operands that must be renamed and the side they are on.
3411 REQS is the table of unit reservations in the loop between HEAD and TAIL.
3412 We recompute this information locally after our transformation, and keep
3413 it only if we managed to improve the balance. */
3414 static void
3415 try_rename_operands (rtx_insn *head, rtx_insn *tail, unit_req_table reqs,
3416 rtx insn,
3417 insn_rr_info *info, unsigned int op_mask, int orig_side)
3419 enum reg_class super_class = orig_side == 0 ? B_REGS : A_REGS;
3420 HARD_REG_SET unavailable;
3421 du_head_p this_head;
3422 struct du_chain *chain;
3423 int i;
3424 unsigned tmp_mask;
3425 int best_reg, old_reg;
3426 vec<du_head_p> involved_chains = vNULL;
3427 unit_req_table new_reqs;
3428 bool ok;
3430 for (i = 0, tmp_mask = op_mask; tmp_mask; i++)
3432 du_head_p op_chain;
3433 if ((tmp_mask & (1 << i)) == 0)
3434 continue;
3435 if (info->op_info[i].n_chains != 1)
3436 goto out_fail;
3437 op_chain = regrename_chain_from_id (info->op_info[i].heads[0]->id);
3438 involved_chains.safe_push (op_chain);
3439 tmp_mask &= ~(1 << i);
3442 if (involved_chains.length () > 1)
3443 goto out_fail;
3445 this_head = involved_chains[0];
3446 if (this_head->cannot_rename)
3447 goto out_fail;
3449 for (chain = this_head->first; chain; chain = chain->next_use)
3451 unsigned int mask1, mask2, mask_changed;
3452 int count, side1, side2, req1, req2;
3453 insn_rr_info *this_rr = &insn_rr[INSN_UID (chain->insn)];
3455 count = get_unit_reqs (chain->insn, &req1, &side1, &req2, &side2);
3457 if (count == 0)
3458 goto out_fail;
3460 if (!get_unit_operand_masks (chain->insn, &mask1, &mask2))
3461 goto out_fail;
3463 extract_insn (chain->insn);
3465 mask_changed = 0;
3466 for (i = 0; i < recog_data.n_operands; i++)
3468 int j;
3469 int n_this_op = this_rr->op_info[i].n_chains;
3470 for (j = 0; j < n_this_op; j++)
3472 du_head_p other = this_rr->op_info[i].heads[j];
3473 if (regrename_chain_from_id (other->id) == this_head)
3474 break;
3476 if (j == n_this_op)
3477 continue;
3479 if (n_this_op != 1)
3480 goto out_fail;
3481 mask_changed |= 1 << i;
3483 gcc_assert (mask_changed != 0);
3484 if (mask_changed != mask1 && mask_changed != mask2)
3485 goto out_fail;
3488 /* If we get here, we can do the renaming. */
3489 COMPL_HARD_REG_SET (unavailable, reg_class_contents[(int) super_class]);
3491 old_reg = this_head->regno;
3492 best_reg =
3493 find_rename_reg (this_head, super_class, &unavailable, old_reg, true);
3495 ok = regrename_do_replace (this_head, best_reg);
3496 gcc_assert (ok);
3498 count_unit_reqs (new_reqs, head, PREV_INSN (tail));
3499 merge_unit_reqs (new_reqs);
3500 if (dump_file)
3502 fprintf (dump_file, "reshuffle for insn %d, op_mask %x, "
3503 "original side %d, new reg %d\n",
3504 INSN_UID (insn), op_mask, orig_side, best_reg);
3505 fprintf (dump_file, " imbalance %d -> %d\n",
3506 unit_req_imbalance (reqs), unit_req_imbalance (new_reqs));
3508 if (unit_req_imbalance (new_reqs) > unit_req_imbalance (reqs))
3510 ok = regrename_do_replace (this_head, old_reg);
3511 gcc_assert (ok);
3513 else
3514 memcpy (reqs, new_reqs, sizeof (unit_req_table));
3516 out_fail:
3517 involved_chains.release ();
3520 /* Find insns in LOOP which would, if shifted to the other side
3521 of the machine, reduce an imbalance in the unit reservations. */
3522 static void
3523 reshuffle_units (basic_block loop)
3525 rtx_insn *head = BB_HEAD (loop);
3526 rtx_insn *tail = BB_END (loop);
3527 rtx_insn *insn;
3528 unit_req_table reqs;
3529 edge e;
3530 edge_iterator ei;
3531 bitmap_head bbs;
3533 count_unit_reqs (reqs, head, PREV_INSN (tail));
3534 merge_unit_reqs (reqs);
3536 regrename_init (true);
3538 bitmap_initialize (&bbs, &bitmap_default_obstack);
3540 FOR_EACH_EDGE (e, ei, loop->preds)
3541 bitmap_set_bit (&bbs, e->src->index);
3543 bitmap_set_bit (&bbs, loop->index);
3544 regrename_analyze (&bbs);
3546 for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
3548 enum attr_units units;
3549 int count, side1, side2, req1, req2;
3550 unsigned int mask1, mask2;
3551 insn_rr_info *info;
3553 if (!NONDEBUG_INSN_P (insn))
3554 continue;
3556 count = get_unit_reqs (insn, &req1, &side1, &req2, &side2);
3558 if (count == 0)
3559 continue;
3561 if (!get_unit_operand_masks (insn, &mask1, &mask2))
3562 continue;
3564 info = &insn_rr[INSN_UID (insn)];
3565 if (info->op_info == NULL)
3566 continue;
3568 if (reqs[side1][req1] > 1
3569 && reqs[side1][req1] > 2 * reqs[side1 ^ 1][req1])
3571 try_rename_operands (head, tail, reqs, insn, info, mask1, side1);
3574 units = get_attr_units (insn);
3575 if (units == UNITS_D_ADDR)
3577 gcc_assert (count == 2);
3578 if (reqs[side2][req2] > 1
3579 && reqs[side2][req2] > 2 * reqs[side2 ^ 1][req2])
3581 try_rename_operands (head, tail, reqs, insn, info, mask2, side2);
3585 regrename_finish ();
3588 /* Backend scheduling state. */
3589 typedef struct c6x_sched_context
3591 /* The current scheduler clock, saved in the sched_reorder hook. */
3592 int curr_sched_clock;
3594 /* Number of insns issued so far in this cycle. */
3595 int issued_this_cycle;
3597 /* We record the time at which each jump occurs in JUMP_CYCLES. The
3598 theoretical maximum number of jumps in flight is 12: 2 every
3599 cycle, with a latency of 6 cycles each. This is a circular
3600 buffer; JUMP_CYCLE_INDEX is the pointer to the start. Earlier
3601 jumps have a higher index. This array should be accessed through
3602 the jump_cycle function. */
3603 int jump_cycles[12];
3604 int jump_cycle_index;
3606 /* In parallel with jump_cycles, this array records the opposite of
3607 the condition used in each pending jump. This is used to
3608 predicate insns that are scheduled in the jump's delay slots. If
3609 this is NULL_RTX no such predication happens. */
3610 rtx jump_cond[12];
3612 /* Similar to the jump_cycles mechanism, but here we take into
3613 account all insns with delay slots, to avoid scheduling asms into
3614 the delay slots. */
3615 int delays_finished_at;
3617 /* The following variable value is the last issued insn. */
3618 rtx_insn *last_scheduled_insn;
3619 /* The last issued insn that isn't a shadow of another. */
3620 rtx_insn *last_scheduled_iter0;
3622 /* The following variable value is DFA state before issuing the
3623 first insn in the current clock cycle. We do not use this member
3624 of the structure directly; we copy the data in and out of
3625 prev_cycle_state. */
3626 state_t prev_cycle_state_ctx;
3628 int reg_n_accesses[FIRST_PSEUDO_REGISTER];
3629 int reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
3630 int reg_set_in_cycle[FIRST_PSEUDO_REGISTER];
3632 int tmp_reg_n_accesses[FIRST_PSEUDO_REGISTER];
3633 int tmp_reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
3634 } *c6x_sched_context_t;
3636 /* The current scheduling state. */
3637 static struct c6x_sched_context ss;
3639 /* The following variable value is DFA state before issuing the first insn
3640 in the current clock cycle. This is used in c6x_variable_issue for
3641 comparison with the state after issuing the last insn in a cycle. */
3642 static state_t prev_cycle_state;
3644 /* Set when we discover while processing an insn that it would lead to too
3645 many accesses of the same register. */
3646 static bool reg_access_stall;
3648 /* The highest insn uid after delayed insns were split, but before loop bodies
3649 were copied by the modulo scheduling code. */
3650 static int sploop_max_uid_iter0;
3652 /* Look up the jump cycle with index N. For an out-of-bounds N, we return 0,
3653 so the caller does not specifically have to test for it. */
3654 static int
3655 get_jump_cycle (int n)
3657 if (n >= 12)
3658 return 0;
3659 n += ss.jump_cycle_index;
3660 if (n >= 12)
3661 n -= 12;
3662 return ss.jump_cycles[n];
3665 /* Look up the jump condition with index N. */
3666 static rtx
3667 get_jump_cond (int n)
3669 if (n >= 12)
3670 return NULL_RTX;
3671 n += ss.jump_cycle_index;
3672 if (n >= 12)
3673 n -= 12;
3674 return ss.jump_cond[n];
3677 /* Return the index of the first jump that occurs after CLOCK_VAR. If no jump
3678 has delay slots beyond CLOCK_VAR, return -1. */
3679 static int
3680 first_jump_index (int clock_var)
3682 int retval = -1;
3683 int n = 0;
3684 for (;;)
3686 int t = get_jump_cycle (n);
3687 if (t <= clock_var)
3688 break;
3689 retval = n;
3690 n++;
3692 return retval;
3695 /* Add a new entry in our scheduling state for a jump that occurs in CYCLE
3696 and has the opposite condition of COND. */
3697 static void
3698 record_jump (int cycle, rtx cond)
3700 if (ss.jump_cycle_index == 0)
3701 ss.jump_cycle_index = 11;
3702 else
3703 ss.jump_cycle_index--;
3704 ss.jump_cycles[ss.jump_cycle_index] = cycle;
3705 ss.jump_cond[ss.jump_cycle_index] = cond;
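/* Editorial sketch (not part of the original source): the jump
   tracking above is a 12-entry circular buffer in which the newest
   entry is stored by decrementing the start index, and out-of-range
   lookups return 0 so callers need no bounds checks.  A stand-alone
   version with assumed names:  */

#define N_SLOTS 12

struct ring
{
  int values[N_SLOTS];
  int start;            /* index of the newest entry */
};

static void
ring_push (struct ring *r, int value)
{
  r->start = (r->start == 0) ? N_SLOTS - 1 : r->start - 1;
  r->values[r->start] = value;
}

static int
ring_get (const struct ring *r, int n)
{
  if (n >= N_SLOTS)
    return 0;
  return r->values[(r->start + n) % N_SLOTS];
}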
3708 /* Set the clock cycle of INSN to CYCLE. Also clears the insn's entry in
3709 new_conditions. */
3710 static void
3711 insn_set_clock (rtx insn, int cycle)
3713 unsigned uid = INSN_UID (insn);
3715 if (uid >= INSN_INFO_LENGTH)
3716 insn_info.safe_grow (uid * 5 / 4 + 10);
3718 INSN_INFO_ENTRY (uid).clock = cycle;
3719 INSN_INFO_ENTRY (uid).new_cond = NULL;
3720 INSN_INFO_ENTRY (uid).reservation = 0;
3721 INSN_INFO_ENTRY (uid).ebb_start = false;
3724 /* Return the clock cycle we set for the insn with uid UID. */
3725 static int
3726 insn_uid_get_clock (int uid)
3728 return INSN_INFO_ENTRY (uid).clock;
3731 /* Return the clock cycle we set for INSN. */
3732 static int
3733 insn_get_clock (rtx insn)
3735 return insn_uid_get_clock (INSN_UID (insn));
3738 /* Examine INSN, and if it is a conditional jump of any kind, return
3739 the opposite of the condition in which it branches. Otherwise,
3740 return NULL_RTX. */
3741 static rtx
3742 condjump_opposite_condition (rtx insn)
3744 rtx pat = PATTERN (insn);
3745 int icode = INSN_CODE (insn);
3746 rtx x = NULL;
3748 if (icode == CODE_FOR_br_true || icode == CODE_FOR_br_false)
3750 x = XEXP (SET_SRC (pat), 0);
3751 if (icode == CODE_FOR_br_false)
3752 return x;
3754 if (GET_CODE (pat) == COND_EXEC)
3756 rtx t = COND_EXEC_CODE (pat);
3757 if ((GET_CODE (t) == PARALLEL
3758 && GET_CODE (XVECEXP (t, 0, 0)) == RETURN)
3759 || (GET_CODE (t) == UNSPEC && XINT (t, 1) == UNSPEC_REAL_JUMP)
3760 || (GET_CODE (t) == SET && SET_DEST (t) == pc_rtx))
3761 x = COND_EXEC_TEST (pat);
3764 if (x != NULL_RTX)
3766 enum rtx_code code = GET_CODE (x);
3767 x = gen_rtx_fmt_ee (code == EQ ? NE : EQ,
3768 GET_MODE (x), XEXP (x, 0),
3769 XEXP (x, 1));
3771 return x;
3774 /* Return true iff COND1 and COND2 are exactly opposite conditions,
3775 one of them NE and the other EQ. */
3776 static bool
3777 conditions_opposite_p (rtx cond1, rtx cond2)
3779 return (rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
3780 && rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))
3781 && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2)));
3784 /* Return true if we can add a predicate COND to INSN, or if INSN
3785 already has that predicate. If DOIT is true, also perform the
3786 modification. */
3787 static bool
3788 predicate_insn (rtx_insn *insn, rtx cond, bool doit)
3790 int icode;
3791 if (cond == NULL_RTX)
3793 gcc_assert (!doit);
3794 return false;
3797 if (get_attr_predicable (insn) == PREDICABLE_YES
3798 && GET_CODE (PATTERN (insn)) != COND_EXEC)
3800 if (doit)
3802 cond = copy_rtx (cond);
3803 rtx newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
3804 PATTERN (insn) = newpat;
3805 INSN_CODE (insn) = -1;
3807 return true;
3809 if (GET_CODE (PATTERN (insn)) == COND_EXEC
3810 && rtx_equal_p (COND_EXEC_TEST (PATTERN (insn)), cond))
3811 return true;
3812 icode = INSN_CODE (insn);
3813 if (icode == CODE_FOR_real_jump
3814 || icode == CODE_FOR_jump
3815 || icode == CODE_FOR_indirect_jump)
3817 rtx pat = PATTERN (insn);
3818 rtx dest = (icode == CODE_FOR_real_jump ? XVECEXP (pat, 0, 0)
3819 : icode == CODE_FOR_jump ? XEXP (SET_SRC (pat), 0)
3820 : SET_SRC (pat));
3821 if (doit)
3823 rtx newpat;
3824 if (REG_P (dest))
3825 newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
3826 else
3827 newpat = gen_br_true (cond, XEXP (cond, 0), dest);
3828 PATTERN (insn) = newpat;
3829 INSN_CODE (insn) = -1;
3831 return true;
3833 if (INSN_CODE (insn) == CODE_FOR_br_true)
3835 rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
3836 return rtx_equal_p (br_cond, cond);
3838 if (INSN_CODE (insn) == CODE_FOR_br_false)
3840 rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
3841 return conditions_opposite_p (br_cond, cond);
3843 return false;
3846 /* Initialize SC. Used by c6x_init_sched_context and c6x_sched_init. */
3847 static void
3848 init_sched_state (c6x_sched_context_t sc)
3850 sc->last_scheduled_insn = NULL;
3851 sc->last_scheduled_iter0 = NULL;
3852 sc->issued_this_cycle = 0;
3853 memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
3854 memset (sc->jump_cond, 0, sizeof sc->jump_cond);
3855 sc->jump_cycle_index = 0;
3856 sc->delays_finished_at = 0;
3857 sc->curr_sched_clock = 0;
3859 sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);
3861 memset (sc->reg_n_accesses, 0, sizeof sc->reg_n_accesses);
3862 memset (sc->reg_n_xaccesses, 0, sizeof sc->reg_n_xaccesses);
3863 memset (sc->reg_set_in_cycle, 0, sizeof sc->reg_set_in_cycle);
3865 state_reset (sc->prev_cycle_state_ctx);
3868 /* Allocate store for new scheduling context. */
3869 static void *
3870 c6x_alloc_sched_context (void)
3872 return xmalloc (sizeof (struct c6x_sched_context));
3875 /* If CLEAN_P is true then initializes _SC with clean data,
3876 and from the global context otherwise. */
3877 static void
3878 c6x_init_sched_context (void *_sc, bool clean_p)
3880 c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
3882 if (clean_p)
3884 init_sched_state (sc);
3886 else
3888 *sc = ss;
3889 sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);
3890 memcpy (sc->prev_cycle_state_ctx, prev_cycle_state, dfa_state_size);
3894 /* Sets the global scheduling context to the one pointed to by _SC. */
3895 static void
3896 c6x_set_sched_context (void *_sc)
3898 c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
3900 gcc_assert (sc != NULL);
3901 ss = *sc;
3902 memcpy (prev_cycle_state, sc->prev_cycle_state_ctx, dfa_state_size);
3905 /* Clear data in _SC. */
3906 static void
3907 c6x_clear_sched_context (void *_sc)
3909 c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
3910 gcc_assert (_sc != NULL);
3912 free (sc->prev_cycle_state_ctx);
3915 /* Free _SC. */
3916 static void
3917 c6x_free_sched_context (void *_sc)
3919 free (_sc);
3922 /* True if we are currently performing a preliminary scheduling
3923 pass before modulo scheduling; we can't allow the scheduler to
3924 modify instruction patterns using packetization assumptions,
3925 since there will be another scheduling pass later if modulo
3926 scheduling fails. */
3927 static bool in_hwloop;
3929 /* Provide information about speculation capabilities, and set the
3930 DO_BACKTRACKING flag. */
3931 static void
3932 c6x_set_sched_flags (spec_info_t spec_info)
3934 unsigned int *flags = &(current_sched_info->flags);
3936 if (*flags & SCHED_EBB)
3938 *flags |= DO_BACKTRACKING | DO_PREDICATION;
3940 if (in_hwloop)
3941 *flags |= DONT_BREAK_DEPENDENCIES;
3943 spec_info->mask = 0;
3946 /* Implement the TARGET_SCHED_ISSUE_RATE hook. */
3948 static int
3949 c6x_issue_rate (void)
3951 return 8;
3954 /* Used together with the collapse_ndfa option, this ensures that we reach a
3955 deterministic automaton state before trying to advance a cycle.
3956 With collapse_ndfa, genautomata creates advance cycle arcs only for
3957 such deterministic states. */
3959 static rtx
3960 c6x_sched_dfa_pre_cycle_insn (void)
3962 return const0_rtx;
3965 /* We're beginning a new block. Initialize data structures as necessary. */
3967 static void
3968 c6x_sched_init (FILE *dump ATTRIBUTE_UNUSED,
3969 int sched_verbose ATTRIBUTE_UNUSED,
3970 int max_ready ATTRIBUTE_UNUSED)
3972 if (prev_cycle_state == NULL)
3974 prev_cycle_state = xmalloc (dfa_state_size);
3976 init_sched_state (&ss);
3977 state_reset (prev_cycle_state);
3980 /* We are about to begin issuing INSN. Return nonzero if we cannot
3981 issue it on the given cycle CLOCK and return zero if we should not sort
3982 the ready queue on the next clock start.
3983 For C6X, we use this function just to copy the previous DFA state
3984 for comparison purposes. */
3986 static int
3987 c6x_dfa_new_cycle (FILE *dump ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
3988 rtx_insn *insn ATTRIBUTE_UNUSED,
3989 int last_clock ATTRIBUTE_UNUSED,
3990 int clock ATTRIBUTE_UNUSED, int *sort_p ATTRIBUTE_UNUSED)
3992 if (clock != last_clock)
3993 memcpy (prev_cycle_state, curr_state, dfa_state_size);
3994 return 0;
3997 static void
3998 c6x_mark_regno_read (int regno, bool cross)
4000 int t = ++ss.tmp_reg_n_accesses[regno];
4002 if (t > 4)
4003 reg_access_stall = true;
4005 if (cross)
4007 int set_cycle = ss.reg_set_in_cycle[regno];
4008 /* This must be done in this way rather than by tweaking things in
4009 adjust_cost, since the stall occurs even for insns with opposite
4010 predicates, and the scheduler may not even see a dependency. */
4011 if (set_cycle > 0 && set_cycle == ss.curr_sched_clock)
4012 reg_access_stall = true;
4013 /* This doesn't quite do anything yet as we're only modeling one
4014 x unit. */
4015 ++ss.tmp_reg_n_xaccesses[regno];
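/* Editorial sketch (not part of the original source): the read
   tracking above allows at most four reads of a register per cycle,
   and a cross-path read of a register written in the same cycle also
   forces a stall.  A stand-alone version of those two checks, with
   assumed names:  */

static int
reg_read_causes_stall (int accesses_this_cycle[], const int set_in_cycle[],
                       int regno, int cross, int current_cycle)
{
  /* More than four reads of one register in a cycle stalls.  */
  if (++accesses_this_cycle[regno] > 4)
    return 1;
  /* A cross-path read of a register written in this same cycle stalls
     even when the scheduler sees no dependence.  */
  if (cross
      && set_in_cycle[regno] > 0
      && set_in_cycle[regno] == current_cycle)
    return 1;
  return 0;
}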
4019 /* Note that REG is read in the insn being examined. If CROSS, it
4020 means the access is through a cross path. Update the temporary reg
4021 access arrays, and set REG_ACCESS_STALL if the insn can't be issued
4022 in the current cycle. */
4024 static void
4025 c6x_mark_reg_read (rtx reg, bool cross)
4027 unsigned regno = REGNO (reg);
4028 unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];
4030 while (nregs-- > 0)
4031 c6x_mark_regno_read (regno + nregs, cross);
4034 /* Note that register REG is written in cycle CYCLES. */
4036 static void
4037 c6x_mark_reg_written (rtx reg, int cycles)
4039 unsigned regno = REGNO (reg);
4040 unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];
4042 while (nregs-- > 0)
4043 ss.reg_set_in_cycle[regno + nregs] = cycles;
4046 /* Update the register state information for an instruction whose
4047 body is X. Return true if the instruction has to be delayed until the
4048 next cycle. */
4050 static bool
4051 c6x_registers_update (rtx_insn *insn)
4053 enum attr_cross cross;
4054 enum attr_dest_regfile destrf;
4055 int i, nops;
4056 rtx x;
4058 if (!reload_completed || recog_memoized (insn) < 0)
4059 return false;
4061 reg_access_stall = false;
4062 memcpy (ss.tmp_reg_n_accesses, ss.reg_n_accesses,
4063 sizeof ss.tmp_reg_n_accesses);
4064 memcpy (ss.tmp_reg_n_xaccesses, ss.reg_n_xaccesses,
4065 sizeof ss.tmp_reg_n_xaccesses);
4067 extract_insn (insn);
4069 cross = get_attr_cross (insn);
4070 destrf = get_attr_dest_regfile (insn);
4072 nops = recog_data.n_operands;
4073 x = PATTERN (insn);
4074 if (GET_CODE (x) == COND_EXEC)
4076 c6x_mark_reg_read (XEXP (XEXP (x, 0), 0), false);
4077 nops -= 2;
4080 for (i = 0; i < nops; i++)
4082 rtx op = recog_data.operand[i];
4083 if (recog_data.operand_type[i] == OP_OUT)
4084 continue;
4085 if (REG_P (op))
4087 bool this_cross = cross;
4088 if (destrf == DEST_REGFILE_A && A_REGNO_P (REGNO (op)))
4089 this_cross = false;
4090 if (destrf == DEST_REGFILE_B && B_REGNO_P (REGNO (op)))
4091 this_cross = false;
4092 c6x_mark_reg_read (op, this_cross);
4094 else if (MEM_P (op))
4096 op = XEXP (op, 0);
4097 switch (GET_CODE (op))
4099 case POST_INC:
4100 case PRE_INC:
4101 case POST_DEC:
4102 case PRE_DEC:
4103 op = XEXP (op, 0);
4104 /* fall through */
4105 case REG:
4106 c6x_mark_reg_read (op, false);
4107 break;
4108 case POST_MODIFY:
4109 case PRE_MODIFY:
4110 op = XEXP (op, 1);
4111 gcc_assert (GET_CODE (op) == PLUS);
4112 /* fall through */
4113 case PLUS:
4114 c6x_mark_reg_read (XEXP (op, 0), false);
4115 if (REG_P (XEXP (op, 1)))
4116 c6x_mark_reg_read (XEXP (op, 1), false);
4117 break;
4118 case SYMBOL_REF:
4119 case LABEL_REF:
4120 case CONST:
4121 c6x_mark_regno_read (REG_B14, false);
4122 break;
4123 default:
4124 gcc_unreachable ();
4127 else if (!CONSTANT_P (op) && strlen (recog_data.constraints[i]) > 0)
4128 gcc_unreachable ();
4130 return reg_access_stall;
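/* Illustrative sketch only, compiled out. It shows the two-phase use of
   the access counters assumed above: c6x_registers_update works on the
   tmp_* copies so that a rejected insn leaves the committed counters
   untouched; the copy back into the committed counters happens in
   c6x_variable_issue once the insn really issues. The sketch_* names are
   placeholders. */
#if 0
static void
sketch_try_insn (int committed[], int tmp[], size_t n)
{
  /* Phase 1: start from the committed per-cycle state, then mark the
     candidate insn's register reads in TMP and check the limits there. */
  memcpy (tmp, committed, n * sizeof (int));
}

static void
sketch_commit_insn (int committed[], int tmp[], size_t n)
{
  /* Phase 2: only reached if the insn was issued this cycle. */
  memcpy (committed, tmp, n * sizeof (int));
}
#endif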
4133 /* Helper function for the TARGET_SCHED_REORDER and
4134 TARGET_SCHED_REORDER2 hooks. If scheduling an insn would be unsafe
4135 in the current cycle, move it down in the ready list and return the
4136 number of non-unsafe insns. */
4138 static int
4139 c6x_sched_reorder_1 (rtx_insn **ready, int *pn_ready, int clock_var)
4141 int n_ready = *pn_ready;
4142 rtx_insn **e_ready = ready + n_ready;
4143 rtx_insn **insnp;
4144 int first_jump;
4146 /* Keep track of conflicts due to a limited number of register accesses,
4147 and due to stalls incurred by too early accesses of registers using
4148 cross paths. */
4150 for (insnp = ready; insnp < e_ready; insnp++)
4152 rtx_insn *insn = *insnp;
4153 int icode = recog_memoized (insn);
4154 bool is_asm = (icode < 0
4155 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
4156 || asm_noperands (PATTERN (insn)) >= 0));
4157 bool no_parallel = (is_asm || icode == CODE_FOR_sploop
4158 || (icode >= 0
4159 && get_attr_type (insn) == TYPE_ATOMIC));
4161 /* We delay asm insns until all delay slots are exhausted. We can't
4162 accurately tell how many cycles an asm takes, and the main scheduling
4163 code always assumes at least 1 cycle, which may be wrong. */
4164 if ((no_parallel
4165 && (ss.issued_this_cycle > 0 || clock_var < ss.delays_finished_at))
4166 || c6x_registers_update (insn)
4167 || (ss.issued_this_cycle > 0 && icode == CODE_FOR_sploop))
4169 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4170 *ready = insn;
4171 n_ready--;
4172 ready++;
4174 else if (shadow_p (insn))
4176 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4177 *ready = insn;
4181 /* Ensure that no other jump is scheduled in jump delay slots, since
4182 it would put the machine into the wrong state. Also, we must
4183 avoid scheduling insns that have a latency longer than the
4184 remaining jump delay slots, as the code at the jump destination
4185 won't be prepared for it.
4187 However, we can relax this condition somewhat. The rest of the
4188 scheduler will automatically avoid scheduling an insn on which
4189 the jump shadow depends so late that its side effect happens
4190 after the jump. This means that if we see an insn with a longer
4191 latency here, it can safely be scheduled if we can ensure that it
4192 has a predicate opposite of the previous jump: the side effect
4193 will happen in what we think of as the same basic block. In
4194 c6x_variable_issue, we will record the necessary predicate in
4195 new_conditions, and after scheduling is finished, we will modify
4196 the insn.
4198 Special care must be taken whenever there is more than one jump
4199 in flight. */
4201 first_jump = first_jump_index (clock_var);
4202 if (first_jump != -1)
4204 int first_cycle = get_jump_cycle (first_jump);
4205 rtx first_cond = get_jump_cond (first_jump);
4206 int second_cycle = 0;
4208 if (first_jump > 0)
4209 second_cycle = get_jump_cycle (first_jump - 1);
4211 for (insnp = ready; insnp < e_ready; insnp++)
4213 rtx_insn *insn = *insnp;
4214 int icode = recog_memoized (insn);
4215 bool is_asm = (icode < 0
4216 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
4217 || asm_noperands (PATTERN (insn)) >= 0));
4218 int this_cycles, rsrv_cycles;
4219 enum attr_type type;
4221 gcc_assert (!is_asm);
4222 if (icode < 0)
4223 continue;
4224 this_cycles = get_attr_cycles (insn);
4225 rsrv_cycles = get_attr_reserve_cycles (insn);
4226 type = get_attr_type (insn);
4227 /* Treat branches specially; there is also a hazard if two jumps
4228 end at the same cycle. */
4229 if (type == TYPE_BRANCH || type == TYPE_CALL)
4230 this_cycles++;
4231 if (clock_var + this_cycles <= first_cycle)
4232 continue;
4233 if ((first_jump > 0 && clock_var + this_cycles > second_cycle)
4234 || clock_var + rsrv_cycles > first_cycle
4235 || !predicate_insn (insn, first_cond, false))
4237 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4238 *ready = insn;
4239 n_ready--;
4240 ready++;
4245 return n_ready;
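/* Illustrative sketch only, compiled out. This is the demotion step used
   several times above: the scheduler picks insns from the end of the ready
   array, so rotating an unsafe insn down into slot 0 keeps it in the list
   but makes it the last choice for this cycle. */
#if 0
static void
sketch_demote_to_front (rtx_insn **ready, rtx_insn **insnp)
{
  rtx_insn *insn = *insnp;
  memmove (ready + 1, ready, (insnp - ready) * sizeof (*ready));
  *ready = insn;
}
#endif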
4248 /* Implement the TARGET_SCHED_REORDER hook. We save the current clock
4249 for later and clear the register access information for the new
4250 cycle. We also move asm statements out of the way if they would be
4251 scheduled in a delay slot. */
4253 static int
4254 c6x_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
4255 int sched_verbose ATTRIBUTE_UNUSED,
4256 rtx_insn **ready ATTRIBUTE_UNUSED,
4257 int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
4259 ss.curr_sched_clock = clock_var;
4260 ss.issued_this_cycle = 0;
4261 memset (ss.reg_n_accesses, 0, sizeof ss.reg_n_accesses);
4262 memset (ss.reg_n_xaccesses, 0, sizeof ss.reg_n_xaccesses);
4264 if (ready == NULL)
4265 return 0;
4267 return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
4270 /* Implement the TARGET_SCHED_REORDER2 hook. We use this to record the clock
4271 cycle for every insn. */
4273 static int
4274 c6x_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
4275 int sched_verbose ATTRIBUTE_UNUSED,
4276 rtx_insn **ready ATTRIBUTE_UNUSED,
4277 int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
4279 /* FIXME: the assembler rejects labels inside an execute packet.
4280 This can occur if prologue insns are scheduled in parallel with
4281 others, so we avoid this here. Also make sure that nothing is
4282 scheduled in parallel with a TYPE_ATOMIC insn or after a jump. */
4283 if (RTX_FRAME_RELATED_P (ss.last_scheduled_insn)
4284 || JUMP_P (ss.last_scheduled_insn)
4285 || (recog_memoized (ss.last_scheduled_insn) >= 0
4286 && get_attr_type (ss.last_scheduled_insn) == TYPE_ATOMIC))
4288 int n_ready = *pn_ready;
4289 rtx_insn **e_ready = ready + n_ready;
4290 rtx_insn **insnp;
4292 for (insnp = ready; insnp < e_ready; insnp++)
4294 rtx_insn *insn = *insnp;
4295 if (!shadow_p (insn))
4297 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
4298 *ready = insn;
4299 n_ready--;
4300 ready++;
4303 return n_ready;
4306 return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
4309 /* Subroutine of maybe_clobber_cond, called through note_stores. */
4311 static void
4312 clobber_cond_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data1)
4314 rtx *cond = (rtx *)data1;
4315 if (*cond != NULL_RTX && reg_overlap_mentioned_p (x, *cond))
4316 *cond = NULL_RTX;
4319 /* Examine INSN, and if it destroys the conditions we have recorded for
4320 any of the jumps in flight, clear that condition so that we don't
4321 predicate any more insns. CLOCK_VAR helps us limit the search to
4322 only those jumps which are still in flight. */
4324 static void
4325 maybe_clobber_cond (rtx insn, int clock_var)
4327 int n, idx;
4328 idx = ss.jump_cycle_index;
4329 for (n = 0; n < 12; n++, idx++)
4331 rtx cond, link;
4332 int cycle;
4334 if (idx >= 12)
4335 idx -= 12;
4336 cycle = ss.jump_cycles[idx];
4337 if (cycle <= clock_var)
4338 return;
4340 cond = ss.jump_cond[idx];
4341 if (cond == NULL_RTX)
4342 continue;
4344 if (CALL_P (insn))
4346 ss.jump_cond[idx] = NULL_RTX;
4347 continue;
4350 note_stores (PATTERN (insn), clobber_cond_1, ss.jump_cond + idx);
4351 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
4352 if (REG_NOTE_KIND (link) == REG_INC)
4353 clobber_cond_1 (XEXP (link, 0), NULL_RTX, ss.jump_cond + idx);
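/* Illustrative sketch only, compiled out. It restates the ring-buffer walk
   above on plain arrays: scan at most twelve recorded jumps starting at
   START, stop at the first one that has already retired, and forget the
   condition of any jump whose condition register overlaps CLOBBERED. The
   sketch_* name and fixed array sizes are placeholders. */
#if 0
static void
sketch_clear_clobbered_conds (int jump_cycles[12], rtx jump_conds[12],
                              int start, int clock, rtx clobbered)
{
  int n, idx;

  for (n = 0, idx = start; n < 12; n++, idx = (idx + 1) % 12)
    {
      if (jump_cycles[idx] <= clock)
        return;
      if (jump_conds[idx] != NULL_RTX
          && reg_overlap_mentioned_p (clobbered, jump_conds[idx]))
        jump_conds[idx] = NULL_RTX;
    }
}
#endif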
4357 /* Implement the TARGET_SCHED_VARIABLE_ISSUE hook. We are about to
4358 issue INSN. Return the number of insns left on the ready queue
4359 that can be issued this cycle.
4360 We use this hook to record clock cycles and reservations for every insn. */
4362 static int
4363 c6x_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
4364 int sched_verbose ATTRIBUTE_UNUSED,
4365 rtx_insn *insn, int can_issue_more ATTRIBUTE_UNUSED)
4367 ss.last_scheduled_insn = insn;
4368 if (INSN_UID (insn) < sploop_max_uid_iter0 && !JUMP_P (insn))
4369 ss.last_scheduled_iter0 = insn;
4370 if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
4371 ss.issued_this_cycle++;
4372 if (insn_info.exists ())
4374 state_t st_after = alloca (dfa_state_size);
4375 int curr_clock = ss.curr_sched_clock;
4376 int uid = INSN_UID (insn);
4377 int icode = recog_memoized (insn);
4378 rtx first_cond;
4379 int first, first_cycle;
4380 unsigned int mask;
4381 int i;
4383 insn_set_clock (insn, curr_clock);
4384 INSN_INFO_ENTRY (uid).ebb_start
4385 = curr_clock == 0 && ss.issued_this_cycle == 1;
4387 first = first_jump_index (ss.curr_sched_clock);
4388 if (first == -1)
4390 first_cycle = 0;
4391 first_cond = NULL_RTX;
4393 else
4395 first_cycle = get_jump_cycle (first);
4396 first_cond = get_jump_cond (first);
4398 if (icode >= 0
4399 && first_cycle > curr_clock
4400 && first_cond != NULL_RTX
4401 && (curr_clock + get_attr_cycles (insn) > first_cycle
4402 || get_attr_type (insn) == TYPE_BRANCH
4403 || get_attr_type (insn) == TYPE_CALL))
4404 INSN_INFO_ENTRY (uid).new_cond = first_cond;
4406 memcpy (st_after, curr_state, dfa_state_size);
4407 state_transition (st_after, const0_rtx);
4409 mask = 0;
4410 for (i = 0; i < 2 * UNIT_QID_SIDE_OFFSET; i++)
4411 if (cpu_unit_reservation_p (st_after, c6x_unit_codes[i])
4412 && !cpu_unit_reservation_p (prev_cycle_state, c6x_unit_codes[i]))
4413 mask |= 1 << i;
4414 INSN_INFO_ENTRY (uid).unit_mask = mask;
4416 maybe_clobber_cond (insn, curr_clock);
4418 if (icode >= 0)
4420 int i, cycles;
4422 c6x_registers_update (insn);
4423 memcpy (ss.reg_n_accesses, ss.tmp_reg_n_accesses,
4424 sizeof ss.reg_n_accesses);
4425 memcpy (ss.reg_n_xaccesses, ss.tmp_reg_n_xaccesses,
4426 sizeof ss.reg_n_xaccesses);
4428 cycles = get_attr_cycles (insn);
4429 if (ss.delays_finished_at < ss.curr_sched_clock + cycles)
4430 ss.delays_finished_at = ss.curr_sched_clock + cycles;
4431 if (get_attr_type (insn) == TYPE_BRANCH
4432 || get_attr_type (insn) == TYPE_CALL)
4434 rtx opposite = condjump_opposite_condition (insn);
4435 record_jump (ss.curr_sched_clock + cycles, opposite);
4438 /* Mark the cycles in which the destination registers are written.
4439 This is used for calculating stalls when using cross units. */
4440 extract_insn (insn);
4441 /* Cross-path stalls don't apply to results of load insns. */
4442 if (get_attr_type (insn) == TYPE_LOAD
4443 || get_attr_type (insn) == TYPE_LOADN
4444 || get_attr_type (insn) == TYPE_LOAD_SHADOW)
4445 cycles--;
4446 for (i = 0; i < recog_data.n_operands; i++)
4448 rtx op = recog_data.operand[i];
4449 if (MEM_P (op))
4451 rtx addr = XEXP (op, 0);
4452 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4453 c6x_mark_reg_written (XEXP (addr, 0),
4454 insn_uid_get_clock (uid) + 1);
4456 if (recog_data.operand_type[i] != OP_IN
4457 && REG_P (op))
4459 c6x_mark_reg_written (op,
4460 insn_uid_get_clock (uid) + cycles);
4465 return can_issue_more;
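/* Illustrative sketch only, compiled out. The unit mask computed above is
   simply the set of DFA units that became reserved when the insn issued,
   i.e. reserved in the state after the transition but not in the state at
   the start of the cycle. BEFORE/AFTER here stand in for queries of
   prev_cycle_state and st_after. */
#if 0
static unsigned int
sketch_units_used_by_insn (const bool before[], const bool after[],
                           int n_units)
{
  unsigned int mask = 0;
  int i;

  for (i = 0; i < n_units; i++)
    if (after[i] && !before[i])
      mask |= 1u << i;
  return mask;
}
#endif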
4468 /* Implement the TARGET_SCHED_ADJUST_COST hook. We need special handling for
4469 anti- and output dependencies. */
4471 static int
4472 c6x_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4473 unsigned int)
4475 enum attr_type insn_type = TYPE_UNKNOWN, dep_insn_type = TYPE_UNKNOWN;
4476 int dep_insn_code_number, insn_code_number;
4477 int shadow_bonus = 0;
4478 enum reg_note kind;
4479 dep_insn_code_number = recog_memoized (dep_insn);
4480 insn_code_number = recog_memoized (insn);
4482 if (dep_insn_code_number >= 0)
4483 dep_insn_type = get_attr_type (dep_insn);
4485 if (insn_code_number >= 0)
4486 insn_type = get_attr_type (insn);
4488 kind = (reg_note) dep_type;
4489 if (kind == 0)
4491 /* If we have a dependency on a load, and it's not for the result of
4492 the load, it must be for an autoincrement. Reduce the cost in that
4493 case. */
4494 if (dep_insn_type == TYPE_LOAD)
4496 rtx set = PATTERN (dep_insn);
4497 if (GET_CODE (set) == COND_EXEC)
4498 set = COND_EXEC_CODE (set);
4499 if (GET_CODE (set) == UNSPEC)
4500 cost = 1;
4501 else
4503 gcc_assert (GET_CODE (set) == SET);
4504 if (!reg_overlap_mentioned_p (SET_DEST (set), PATTERN (insn)))
4505 cost = 1;
4510 /* A jump shadow needs to have its latency decreased by one. Conceptually,
4511 it occurs in between two cycles, but we schedule it at the end of the
4512 first cycle. */
4513 if (shadow_type_p (insn_type))
4514 shadow_bonus = 1;
4516 /* Anti and output dependencies usually have zero cost, but we want
4517 to insert a stall after a jump, and after certain floating point
4518 insns that take more than one cycle to read their inputs. In the
4519 future, we should try to find a better algorithm for scheduling
4520 jumps. */
4521 if (kind != 0)
4523 /* We can get anti-dependencies against shadow insns. Treat these
4524 like output dependencies, so that the insn is entirely finished
4525 before the branch takes place. */
4526 if (kind == REG_DEP_ANTI && insn_type == TYPE_SHADOW)
4527 kind = REG_DEP_OUTPUT;
4528 switch (dep_insn_type)
4530 case TYPE_CALLP:
4531 return 1;
4532 case TYPE_BRANCH:
4533 case TYPE_CALL:
4534 if (get_attr_has_shadow (dep_insn) == HAS_SHADOW_Y)
4535 /* This is a real_jump/real_call insn. These don't have
4536 outputs, and ensuring the validity of scheduling things
4537 in the delay slot is the job of
4538 c6x_sched_reorder_1. */
4539 return 0;
4540 /* Unsplit calls can happen - e.g. for divide insns. */
4541 return 6;
4542 case TYPE_LOAD:
4543 case TYPE_LOADN:
4544 case TYPE_INTDP:
4545 if (kind == REG_DEP_OUTPUT)
4546 return 5 - shadow_bonus;
4547 return 0;
4548 case TYPE_MPY4:
4549 case TYPE_FP4:
4550 if (kind == REG_DEP_OUTPUT)
4551 return 4 - shadow_bonus;
4552 return 0;
4553 case TYPE_MPY2:
4554 if (kind == REG_DEP_OUTPUT)
4555 return 2 - shadow_bonus;
4556 return 0;
4557 case TYPE_CMPDP:
4558 if (kind == REG_DEP_OUTPUT)
4559 return 2 - shadow_bonus;
4560 return 2;
4561 case TYPE_ADDDP:
4562 case TYPE_MPYSPDP:
4563 if (kind == REG_DEP_OUTPUT)
4564 return 7 - shadow_bonus;
4565 return 2;
4566 case TYPE_MPYSP2DP:
4567 if (kind == REG_DEP_OUTPUT)
4568 return 5 - shadow_bonus;
4569 return 2;
4570 case TYPE_MPYI:
4571 if (kind == REG_DEP_OUTPUT)
4572 return 9 - shadow_bonus;
4573 return 4;
4574 case TYPE_MPYID:
4575 case TYPE_MPYDP:
4576 if (kind == REG_DEP_OUTPUT)
4577 return 10 - shadow_bonus;
4578 return 4;
4580 default:
4581 if (insn_type == TYPE_SPKERNEL)
4582 return 0;
4583 if (kind == REG_DEP_OUTPUT)
4584 return 1 - shadow_bonus;
4586 return 0;
4590 return cost - shadow_bonus;
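/* Illustrative sketch only, compiled out. It spells out the shadow-bonus
   arithmetic used above for one case: a shadow insn conceptually sits
   between two cycles, so an output-dependence latency against a load-type
   producer (the TYPE_LOAD/TYPE_LOADN/TYPE_INTDP case) is reduced by one
   when the consumer is a shadow. */
#if 0
static int
sketch_load_dep_latency (bool output_dep, bool consumer_is_shadow)
{
  int shadow_bonus = consumer_is_shadow ? 1 : 0;
  return output_dep ? 5 - shadow_bonus : 0;
}
#endif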
4593 /* Create a SEQUENCE rtx to replace the instructions in SLOT, of which there
4594 are N_FILLED. REAL_FIRST identifies the slot of the insn that appears
4595 first in the original stream. */
4597 static void
4598 gen_one_bundle (rtx_insn **slot, int n_filled, int real_first)
4600 rtx seq;
4601 rtx_insn *bundle;
4602 rtx_insn *t;
4603 int i;
4605 seq = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec_v (n_filled, slot));
4606 bundle = make_insn_raw (seq);
4607 BLOCK_FOR_INSN (bundle) = BLOCK_FOR_INSN (slot[0]);
4608 INSN_LOCATION (bundle) = INSN_LOCATION (slot[0]);
4609 SET_PREV_INSN (bundle) = SET_PREV_INSN (slot[real_first]);
4611 t = NULL;
4613 for (i = 0; i < n_filled; i++)
4615 rtx_insn *insn = slot[i];
4616 remove_insn (insn);
4617 SET_PREV_INSN (insn) = t ? t : PREV_INSN (bundle);
4618 if (t != NULL_RTX)
4619 SET_NEXT_INSN (t) = insn;
4620 t = insn;
4621 if (i > 0)
4622 INSN_LOCATION (slot[i]) = INSN_LOCATION (bundle);
4625 SET_NEXT_INSN (bundle) = NEXT_INSN (PREV_INSN (bundle));
4626 SET_NEXT_INSN (t) = NEXT_INSN (bundle);
4627 SET_NEXT_INSN (PREV_INSN (bundle)) = bundle;
4628 SET_PREV_INSN (NEXT_INSN (bundle)) = bundle;
4631 /* Move all parallel instructions into SEQUENCEs, so that no subsequent passes
4632 try to insert labels in the middle. */
4634 static void
4635 c6x_gen_bundles (void)
4637 basic_block bb;
4638 rtx_insn *insn, *next, *last_call;
4640 FOR_EACH_BB_FN (bb, cfun)
4642 rtx_insn *insn, *next;
4643 /* The machine is eight insns wide. We can have up to six shadow
4644 insns, plus an extra slot for merging the jump shadow. */
4645 rtx_insn *slot[15];
4646 int n_filled = 0;
4647 int first_slot = 0;
4649 for (insn = BB_HEAD (bb);; insn = next)
4651 int at_end;
4652 rtx delete_this = NULL_RTX;
4654 if (NONDEBUG_INSN_P (insn))
4656 /* Put calls at the start of the sequence. */
4657 if (CALL_P (insn))
4659 first_slot++;
4660 if (n_filled)
4662 memmove (&slot[1], &slot[0],
4663 n_filled * sizeof (slot[0]));
4665 if (!shadow_p (insn))
4667 PUT_MODE (insn, TImode);
4668 if (n_filled)
4669 PUT_MODE (slot[1], VOIDmode);
4671 n_filled++;
4672 slot[0] = insn;
4674 else
4676 slot[n_filled++] = insn;
4680 next = NEXT_INSN (insn);
4681 while (next && insn != BB_END (bb)
4682 && !(NONDEBUG_INSN_P (next)
4683 && GET_CODE (PATTERN (next)) != USE
4684 && GET_CODE (PATTERN (next)) != CLOBBER))
4686 insn = next;
4687 next = NEXT_INSN (insn);
4690 at_end = insn == BB_END (bb);
4691 if (delete_this == NULL_RTX
4692 && (at_end || (GET_MODE (next) == TImode
4693 && !(shadow_p (next) && CALL_P (next)))))
4695 if (n_filled >= 2)
4696 gen_one_bundle (slot, n_filled, first_slot);
4698 n_filled = 0;
4699 first_slot = 0;
4701 if (at_end)
4702 break;
4705 /* Bundling, and emitting nops, can separate
4706 NOTE_INSN_CALL_ARG_LOCATION from the corresponding calls. Fix
4707 that up here. */
4708 last_call = NULL;
4709 for (insn = get_insns (); insn; insn = next)
4711 next = NEXT_INSN (insn);
4712 if (CALL_P (insn)
4713 || (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE
4714 && CALL_P (XVECEXP (PATTERN (insn), 0, 0))))
4715 last_call = insn;
4716 if (!NOTE_P (insn) || NOTE_KIND (insn) != NOTE_INSN_CALL_ARG_LOCATION)
4717 continue;
4718 if (NEXT_INSN (last_call) == insn)
4719 continue;
4720 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4721 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4722 SET_PREV_INSN (insn) = last_call;
4723 SET_NEXT_INSN (insn) = NEXT_INSN (last_call);
4724 SET_PREV_INSN (NEXT_INSN (insn)) = insn;
4725 SET_NEXT_INSN (PREV_INSN (insn)) = insn;
4726 last_call = insn;
4730 /* Emit a NOP instruction for CYCLES cycles after insn AFTER. Return it. */
4732 static rtx_insn *
4733 emit_nop_after (int cycles, rtx_insn *after)
4735 rtx_insn *insn;
4737 /* mpydp has 9 delay slots, and we may schedule a stall for a cross-path
4738 operation. We don't need the extra NOP in that case, since the hardware
4739 will automatically insert the required stall. */
4740 if (cycles == 10)
4741 cycles--;
4743 gcc_assert (cycles < 10);
4745 insn = emit_insn_after (gen_nop_count (GEN_INT (cycles)), after);
4746 PUT_MODE (insn, TImode);
4748 return insn;
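/* Illustrative sketch only, compiled out. The clamp used above: the
   multi-cycle NOP insn encodes at most 9 cycles, and the only caller that
   asks for 10 does so for an mpydp whose extra stall the hardware inserts
   by itself. */
#if 0
static int
sketch_clamp_nop_count (int cycles)
{
  if (cycles == 10)
    cycles--;
  /* The result must now be in the range the NOP insn can encode. */
  return cycles;
}
#endif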
4751 /* Determine whether INSN is a call that needs to have a return label
4752 placed. */
4754 static bool
4755 returning_call_p (rtx_insn *insn)
4757 if (CALL_P (insn))
4758 return (!SIBLING_CALL_P (insn)
4759 && get_attr_type (insn) != TYPE_CALLP
4760 && get_attr_type (insn) != TYPE_SHADOW);
4761 if (recog_memoized (insn) < 0)
4762 return false;
4763 if (get_attr_type (insn) == TYPE_CALL)
4764 return true;
4765 return false;
4768 /* Determine whether INSN's pattern can be converted to use callp. */
4769 static bool
4770 can_use_callp (rtx_insn *insn)
4772 int icode = recog_memoized (insn);
4773 if (!TARGET_INSNS_64PLUS
4774 || icode < 0
4775 || GET_CODE (PATTERN (insn)) == COND_EXEC)
4776 return false;
4778 return ((icode == CODE_FOR_real_call
4779 || icode == CODE_FOR_call_internal
4780 || icode == CODE_FOR_call_value_internal)
4781 && get_attr_dest_regfile (insn) == DEST_REGFILE_ANY);
4784 /* Convert the pattern of INSN, which must be a CALL_INSN, into a callp. */
4785 static void
4786 convert_to_callp (rtx_insn *insn)
4788 rtx lab;
4789 extract_insn (insn);
4790 if (GET_CODE (PATTERN (insn)) == SET)
4792 rtx dest = recog_data.operand[0];
4793 lab = recog_data.operand[1];
4794 PATTERN (insn) = gen_callp_value (dest, lab);
4795 INSN_CODE (insn) = CODE_FOR_callp_value;
4797 else
4799 lab = recog_data.operand[0];
4800 PATTERN (insn) = gen_callp (lab);
4801 INSN_CODE (insn) = CODE_FOR_callp;
4805 /* Scan forwards from INSN until we find the next insn that has mode TImode
4806 (indicating it starts a new cycle), and occurs in cycle CLOCK.
4807 Return it if we find such an insn, NULL_RTX otherwise. */
4808 static rtx_insn *
4809 find_next_cycle_insn (rtx_insn *insn, int clock)
4811 rtx_insn *t = insn;
4812 if (GET_MODE (t) == TImode)
4813 t = next_real_insn (t);
4814 while (t && GET_MODE (t) != TImode)
4815 t = next_real_insn (t);
4817 if (t && insn_get_clock (t) == clock)
4818 return t;
4819 return NULL;
4822 /* If COND_INSN has a COND_EXEC condition, wrap the same condition
4823 around PAT. Return PAT either unchanged or modified in this
4824 way. */
4825 static rtx
4826 duplicate_cond (rtx pat, rtx cond_insn)
4828 rtx cond_pat = PATTERN (cond_insn);
4829 if (GET_CODE (cond_pat) == COND_EXEC)
4830 pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (cond_pat)),
4831 pat);
4832 return pat;
4835 /* Walk forward from INSN to find the last insn that issues in the same clock
4836 cycle. */
4837 static rtx_insn *
4838 find_last_same_clock (rtx_insn *insn)
4840 rtx_insn *retval = insn;
4841 rtx_insn *t = next_real_insn (insn);
4843 while (t && GET_MODE (t) != TImode)
4845 if (!DEBUG_INSN_P (t) && recog_memoized (t) >= 0)
4846 retval = t;
4847 t = next_real_insn (t);
4849 return retval;
4852 /* For every call insn in the function, emit code to load the return
4853 address. For each call we create a return label and store it in
4854 CALL_LABELS. If we are not scheduling, we emit the labels here,
4855 otherwise the caller will do it later.
4856 This function is called after final insn scheduling, but before creating
4857 the SEQUENCEs that represent execute packets. */
4859 static void
4860 reorg_split_calls (rtx_insn **call_labels)
4862 unsigned int reservation_mask = 0;
4863 rtx_insn *insn = get_insns ();
4864 gcc_assert (NOTE_P (insn));
4865 insn = next_real_insn (insn);
4866 while (insn)
4868 int uid;
4869 rtx_insn *next = next_real_insn (insn);
4871 if (DEBUG_INSN_P (insn))
4872 goto done;
4874 if (GET_MODE (insn) == TImode)
4875 reservation_mask = 0;
4876 uid = INSN_UID (insn);
4877 if (c6x_flag_schedule_insns2 && recog_memoized (insn) >= 0)
4878 reservation_mask |= 1 << INSN_INFO_ENTRY (uid).reservation;
4880 if (returning_call_p (insn))
4882 rtx_code_label *label = gen_label_rtx ();
4883 rtx labelref = gen_rtx_LABEL_REF (Pmode, label);
4884 rtx reg = gen_rtx_REG (SImode, RETURN_ADDR_REGNO);
4886 LABEL_NUSES (label) = 2;
4887 if (!c6x_flag_schedule_insns2)
4889 if (can_use_callp (insn))
4890 convert_to_callp (insn);
4891 else
4893 rtx t;
4894 rtx_insn *slot[4];
4895 emit_label_after (label, insn);
4897 /* Bundle the call and its delay slots into a single
4898 SEQUENCE. While these do not issue in parallel
4899 we need to group them into a single EH region. */
4900 slot[0] = insn;
4901 PUT_MODE (insn, TImode);
4902 if (TARGET_INSNS_64)
4904 t = gen_addkpc (reg, labelref, GEN_INT (4));
4905 slot[1] = emit_insn_after (duplicate_cond (t, insn),
4906 insn);
4907 PUT_MODE (slot[1], TImode);
4908 gen_one_bundle (slot, 2, 0);
4910 else
4912 slot[3] = emit_insn_after (gen_nop_count (GEN_INT (3)),
4913 insn);
4914 PUT_MODE (slot[3], TImode);
4915 t = gen_movsi_lo_sum (reg, reg, labelref);
4916 slot[2] = emit_insn_after (duplicate_cond (t, insn),
4917 insn);
4918 PUT_MODE (slot[2], TImode);
4919 t = gen_movsi_high (reg, labelref);
4920 slot[1] = emit_insn_after (duplicate_cond (t, insn),
4921 insn);
4922 PUT_MODE (slot[1], TImode);
4923 gen_one_bundle (slot, 4, 0);
4927 else
4929 /* If we scheduled, we reserved the .S2 unit for one or two
4930 cycles after the call. Emit the insns in these slots,
4931 unless it's possible to create a CALLP insn.
4932 Note that this works because the dependencies ensure that
4933 no insn setting/using B3 is scheduled in the delay slots of
4934 a call. */
4935 int this_clock = insn_get_clock (insn);
4936 rtx_insn *after1;
4938 call_labels[INSN_UID (insn)] = label;
4940 rtx_insn *last_same_clock = find_last_same_clock (insn);
4942 if (can_use_callp (insn))
4944 /* Find the first insn of the next execute packet. If it
4945 is the shadow insn corresponding to this call, we may
4946 use a CALLP insn. */
4947 rtx_insn *shadow =
4948 next_nonnote_nondebug_insn (last_same_clock);
4950 if (CALL_P (shadow)
4951 && insn_get_clock (shadow) == this_clock + 5)
4953 convert_to_callp (shadow);
4954 insn_set_clock (shadow, this_clock);
4955 INSN_INFO_ENTRY (INSN_UID (shadow)).reservation
4956 = RESERVATION_S2;
4957 INSN_INFO_ENTRY (INSN_UID (shadow)).unit_mask
4958 = INSN_INFO_ENTRY (INSN_UID (last_same_clock)).unit_mask;
4959 if (GET_MODE (insn) == TImode)
4961 rtx_insn *new_cycle_first = NEXT_INSN (insn);
4962 while (!NONDEBUG_INSN_P (new_cycle_first)
4963 || GET_CODE (PATTERN (new_cycle_first)) == USE
4964 || GET_CODE (PATTERN (new_cycle_first)) == CLOBBER)
4965 new_cycle_first = NEXT_INSN (new_cycle_first);
4966 PUT_MODE (new_cycle_first, TImode);
4967 if (new_cycle_first != shadow)
4968 PUT_MODE (shadow, VOIDmode);
4969 INSN_INFO_ENTRY (INSN_UID (new_cycle_first)).ebb_start
4970 = INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start;
4972 else
4973 PUT_MODE (shadow, VOIDmode);
4974 delete_insn (insn);
4975 goto done;
4978 after1 = find_next_cycle_insn (last_same_clock, this_clock + 1);
4979 if (after1 == NULL_RTX)
4980 after1 = last_same_clock;
4981 else
4982 after1 = find_last_same_clock (after1);
4983 if (TARGET_INSNS_64)
4985 rtx x1 = gen_addkpc (reg, labelref, const0_rtx);
4986 x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
4987 insn_set_clock (x1, this_clock + 1);
4988 INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
4989 if (after1 == last_same_clock)
4990 PUT_MODE (x1, TImode);
4991 else
4992 INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
4993 = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
4995 else
4997 rtx x1, x2;
4998 rtx_insn *after2 = find_next_cycle_insn (after1,
4999 this_clock + 2);
5000 if (after2 == NULL_RTX)
5001 after2 = after1;
5002 x2 = gen_movsi_lo_sum (reg, reg, labelref);
5003 x2 = emit_insn_after (duplicate_cond (x2, insn), after2);
5004 x1 = gen_movsi_high (reg, labelref);
5005 x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
5006 insn_set_clock (x1, this_clock + 1);
5007 insn_set_clock (x2, this_clock + 2);
5008 INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
5009 INSN_INFO_ENTRY (INSN_UID (x2)).reservation = RESERVATION_S2;
5010 if (after1 == last_same_clock)
5011 PUT_MODE (x1, TImode);
5012 else
5013 INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
5014 = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
5015 if (after1 == after2)
5016 PUT_MODE (x2, TImode);
5017 else
5018 INSN_INFO_ENTRY (INSN_UID (x2)).unit_mask
5019 = INSN_INFO_ENTRY (INSN_UID (after2)).unit_mask;
5023 done:
5024 insn = next;
5028 /* Called as part of c6x_reorg. This function emits multi-cycle NOP
5029 insns as required for correctness. CALL_LABELS is the array that
5030 holds the return labels for call insns; we emit these here if
5031 scheduling was run earlier. */
5033 static void
5034 reorg_emit_nops (rtx_insn **call_labels)
5036 bool first;
5037 rtx last_call;
5038 rtx_insn *prev;
5039 int prev_clock, earliest_bb_end;
5040 int prev_implicit_nops;
5041 rtx_insn *insn = get_insns ();
5043 /* We look at one insn (or bundle inside a sequence) in each iteration, storing
5044 its issue time in PREV_CLOCK for the next iteration. If there is a gap in
5045 clocks, we must insert a NOP.
5046 EARLIEST_BB_END tracks in which cycle all insns that have been issued in the
5047 current basic block will finish. We must not allow the next basic block to
5048 begin before this cycle.
5049 PREV_IMPLICIT_NOPS tells us whether we've seen an insn that implicitly contains
5050 a multi-cycle nop. The code is scheduled such that subsequent insns will
5051 show the cycle gap, but we needn't insert a real NOP instruction. */
5052 insn = next_real_insn (insn);
5053 last_call = prev = NULL;
5054 prev_clock = -1;
5055 earliest_bb_end = 0;
5056 prev_implicit_nops = 0;
5057 first = true;
5058 while (insn)
5060 int this_clock = -1;
5061 rtx_insn *next;
5062 int max_cycles = 0;
5064 next = next_real_insn (insn);
5066 if (DEBUG_INSN_P (insn)
5067 || GET_CODE (PATTERN (insn)) == USE
5068 || GET_CODE (PATTERN (insn)) == CLOBBER
5069 || shadow_or_blockage_p (insn)
5070 || JUMP_TABLE_DATA_P (insn))
5071 goto next_insn;
5073 if (!c6x_flag_schedule_insns2)
5074 /* No scheduling; ensure that no parallel issue happens. */
5075 PUT_MODE (insn, TImode);
5076 else
5078 int cycles;
5080 this_clock = insn_get_clock (insn);
5081 if (this_clock != prev_clock)
5083 PUT_MODE (insn, TImode);
5085 if (!first)
5087 cycles = this_clock - prev_clock;
5089 cycles -= prev_implicit_nops;
5090 if (cycles > 1)
5092 rtx nop = emit_nop_after (cycles - 1, prev);
5093 insn_set_clock (nop, prev_clock + prev_implicit_nops + 1);
5096 prev_clock = this_clock;
5098 if (last_call
5099 && insn_get_clock (last_call) + 6 <= this_clock)
5101 emit_label_before (call_labels[INSN_UID (last_call)], insn);
5102 last_call = NULL_RTX;
5104 prev_implicit_nops = 0;
5108 /* Examine how many cycles the current insn takes, and adjust
5109 LAST_CALL, EARLIEST_BB_END and PREV_IMPLICIT_NOPS. */
5110 if (recog_memoized (insn) >= 0
5111 /* If not scheduling, we've emitted NOPs after calls already. */
5112 && (c6x_flag_schedule_insns2 || !returning_call_p (insn)))
5114 max_cycles = get_attr_cycles (insn);
5115 if (get_attr_type (insn) == TYPE_CALLP)
5116 prev_implicit_nops = 5;
5118 else
5119 max_cycles = 1;
5120 if (returning_call_p (insn))
5121 last_call = insn;
5123 if (c6x_flag_schedule_insns2)
5125 gcc_assert (this_clock >= 0);
5126 if (earliest_bb_end < this_clock + max_cycles)
5127 earliest_bb_end = this_clock + max_cycles;
5129 else if (max_cycles > 1)
5130 emit_nop_after (max_cycles - 1, insn);
5132 prev = insn;
5133 first = false;
5135 next_insn:
5136 if (c6x_flag_schedule_insns2
5137 && (next == NULL_RTX
5138 || (GET_MODE (next) == TImode
5139 && INSN_INFO_ENTRY (INSN_UID (next)).ebb_start))
5140 && earliest_bb_end > 0)
5142 int cycles = earliest_bb_end - prev_clock;
5143 if (cycles > 1)
5145 prev = emit_nop_after (cycles - 1, prev);
5146 insn_set_clock (prev, prev_clock + prev_implicit_nops + 1);
5148 earliest_bb_end = 0;
5149 prev_clock = -1;
5150 first = true;
5152 if (last_call)
5153 emit_label_after (call_labels[INSN_UID (last_call)], prev);
5154 last_call = NULL_RTX;
5156 insn = next;
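/* Illustrative sketch only, compiled out. The gap computation from the
   scheduled-code path above: given the issue cycles of two consecutive
   execute packets and the number of cycles already covered by an implicit
   multi-cycle nop (e.g. from a CALLP), return the operand of the NOP to
   emit, or 0 if none is needed. */
#if 0
static int
sketch_nop_count_for_gap (int prev_clock, int this_clock, int implicit_nops)
{
  int cycles = this_clock - prev_clock - implicit_nops;
  return cycles > 1 ? cycles - 1 : 0;
}
#endif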
5160 /* If possible, split INSN, which we know is either a jump or a call, into a real
5161 insn and its shadow. */
5162 static void
5163 split_delayed_branch (rtx_insn *insn)
5165 int code = recog_memoized (insn);
5166 rtx_insn *i1;
5167 rtx newpat;
5168 rtx pat = PATTERN (insn);
5170 if (GET_CODE (pat) == COND_EXEC)
5171 pat = COND_EXEC_CODE (pat);
5173 if (CALL_P (insn))
5175 rtx src = pat, dest = NULL_RTX;
5176 rtx callee;
5177 if (GET_CODE (pat) == SET)
5179 dest = SET_DEST (pat);
5180 src = SET_SRC (pat);
5182 callee = XEXP (XEXP (src, 0), 0);
5183 if (SIBLING_CALL_P (insn))
5185 if (REG_P (callee))
5186 newpat = gen_indirect_sibcall_shadow ();
5187 else
5188 newpat = gen_sibcall_shadow (callee);
5189 pat = gen_real_jump (callee);
5191 else if (dest != NULL_RTX)
5193 if (REG_P (callee))
5194 newpat = gen_indirect_call_value_shadow (dest);
5195 else
5196 newpat = gen_call_value_shadow (dest, callee);
5197 pat = gen_real_call (callee);
5199 else
5201 if (REG_P (callee))
5202 newpat = gen_indirect_call_shadow ();
5203 else
5204 newpat = gen_call_shadow (callee);
5205 pat = gen_real_call (callee);
5207 pat = duplicate_cond (pat, insn);
5208 newpat = duplicate_cond (newpat, insn);
5210 else
5212 rtx src, op;
5213 if (GET_CODE (pat) == PARALLEL
5214 && GET_CODE (XVECEXP (pat, 0, 0)) == RETURN)
5216 newpat = gen_return_shadow ();
5217 pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
5218 newpat = duplicate_cond (newpat, insn);
5220 else
5221 switch (code)
5223 case CODE_FOR_br_true:
5224 case CODE_FOR_br_false:
5225 src = SET_SRC (pat);
5226 op = XEXP (src, code == CODE_FOR_br_true ? 1 : 2);
5227 newpat = gen_condjump_shadow (op);
5228 pat = gen_real_jump (op);
5229 if (code == CODE_FOR_br_true)
5230 pat = gen_rtx_COND_EXEC (VOIDmode, XEXP (src, 0), pat);
5231 else
5232 pat = gen_rtx_COND_EXEC (VOIDmode,
5233 reversed_comparison (XEXP (src, 0),
5234 VOIDmode),
5235 pat);
5236 break;
5238 case CODE_FOR_jump:
5239 op = SET_SRC (pat);
5240 newpat = gen_jump_shadow (op);
5241 break;
5243 case CODE_FOR_indirect_jump:
5244 newpat = gen_indirect_jump_shadow ();
5245 break;
5247 case CODE_FOR_return_internal:
5248 newpat = gen_return_shadow ();
5249 pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
5250 break;
5252 default:
5253 return;
5256 i1 = emit_insn_before (pat, insn);
5257 PATTERN (insn) = newpat;
5258 INSN_CODE (insn) = -1;
5259 record_delay_slot_pair (i1, insn, 5, 0);
5262 /* If INSN is a multi-cycle insn that should be handled properly in
5263 modulo-scheduling, split it into a real insn and a shadow.
5264 Return true if we made a change.
5266 It is valid for us to fail to split an insn; the caller has to deal
5267 with the possibility. Currently we handle loads and most mpy2 and
5268 mpy4 insns. */
5269 static bool
5270 split_delayed_nonbranch (rtx_insn *insn)
5272 int code = recog_memoized (insn);
5273 enum attr_type type;
5274 rtx_insn *i1;
5275 rtx newpat, src, dest;
5276 rtx pat = PATTERN (insn);
5277 rtvec rtv;
5278 int delay;
5280 if (GET_CODE (pat) == COND_EXEC)
5281 pat = COND_EXEC_CODE (pat);
5283 if (code < 0 || GET_CODE (pat) != SET)
5284 return false;
5285 src = SET_SRC (pat);
5286 dest = SET_DEST (pat);
5287 if (!REG_P (dest))
5288 return false;
5290 type = get_attr_type (insn);
5291 if (code >= 0
5292 && (type == TYPE_LOAD
5293 || type == TYPE_LOADN))
5295 if (!MEM_P (src)
5296 && (GET_CODE (src) != ZERO_EXTEND
5297 || !MEM_P (XEXP (src, 0))))
5298 return false;
5300 if (GET_MODE_SIZE (GET_MODE (dest)) > 4
5301 && (GET_MODE_SIZE (GET_MODE (dest)) != 8 || !TARGET_LDDW))
5302 return false;
5304 rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
5305 SET_SRC (pat));
5306 newpat = gen_load_shadow (SET_DEST (pat));
5307 pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_LOAD);
5308 delay = 4;
5310 else if (code >= 0
5311 && (type == TYPE_MPY2
5312 || type == TYPE_MPY4))
5314 /* We don't handle floating point multiplies yet. */
5315 if (GET_MODE (dest) == SFmode)
5316 return false;
5318 rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
5319 SET_SRC (pat));
5320 newpat = gen_mult_shadow (SET_DEST (pat));
5321 pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_MULT);
5322 delay = type == TYPE_MPY2 ? 1 : 3;
5324 else
5325 return false;
5327 pat = duplicate_cond (pat, insn);
5328 newpat = duplicate_cond (newpat, insn);
5329 i1 = emit_insn_before (pat, insn);
5330 PATTERN (insn) = newpat;
5331 INSN_CODE (insn) = -1;
5332 recog_memoized (insn);
5333 recog_memoized (i1);
5334 record_delay_slot_pair (i1, insn, delay, 0);
5335 return true;
5338 /* Examine if INSN is the result of splitting a load into a real load and a
5339 shadow, and if so, undo the transformation. */
5340 static void
5341 undo_split_delayed_nonbranch (rtx_insn *insn)
5343 int icode = recog_memoized (insn);
5344 enum attr_type type;
5345 rtx prev_pat, insn_pat;
5346 rtx_insn *prev;
5348 if (icode < 0)
5349 return;
5350 type = get_attr_type (insn);
5351 if (type != TYPE_LOAD_SHADOW && type != TYPE_MULT_SHADOW)
5352 return;
5353 prev = PREV_INSN (insn);
5354 prev_pat = PATTERN (prev);
5355 insn_pat = PATTERN (insn);
5356 if (GET_CODE (prev_pat) == COND_EXEC)
5358 prev_pat = COND_EXEC_CODE (prev_pat);
5359 insn_pat = COND_EXEC_CODE (insn_pat);
5362 gcc_assert (GET_CODE (prev_pat) == UNSPEC
5363 && ((XINT (prev_pat, 1) == UNSPEC_REAL_LOAD
5364 && type == TYPE_LOAD_SHADOW)
5365 || (XINT (prev_pat, 1) == UNSPEC_REAL_MULT
5366 && type == TYPE_MULT_SHADOW)));
5367 insn_pat = gen_rtx_SET (SET_DEST (insn_pat),
5368 XVECEXP (prev_pat, 0, 1));
5369 insn_pat = duplicate_cond (insn_pat, prev);
5370 PATTERN (insn) = insn_pat;
5371 INSN_CODE (insn) = -1;
5372 delete_insn (prev);
5375 /* Split every insn (i.e. jumps and calls) which can have delay slots into
5376 two parts: the first one is scheduled normally and emits the instruction,
5377 while the second one is a shadow insn which shows the side effect taking
5378 place. The second one is placed in the right cycle by the scheduler, but
5379 not emitted as an assembly instruction. */
5381 static void
5382 split_delayed_insns (void)
5384 rtx_insn *insn;
5385 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5387 if (JUMP_P (insn) || CALL_P (insn))
5388 split_delayed_branch (insn);
5392 /* For every insn that has an entry in the new_conditions vector, give it
5393 the appropriate predicate. */
5394 static void
5395 conditionalize_after_sched (void)
5397 basic_block bb;
5398 rtx_insn *insn;
5399 FOR_EACH_BB_FN (bb, cfun)
5400 FOR_BB_INSNS (bb, insn)
5402 unsigned uid = INSN_UID (insn);
5403 rtx cond;
5404 if (!NONDEBUG_INSN_P (insn) || uid >= INSN_INFO_LENGTH)
5405 continue;
5406 cond = INSN_INFO_ENTRY (uid).new_cond;
5407 if (cond == NULL_RTX)
5408 continue;
5409 if (dump_file)
5410 fprintf (dump_file, "Conditionalizing insn %d\n", uid);
5411 predicate_insn (insn, cond, true);
5415 /* A callback for the hw-doloop pass. This function examines INSN; if
5416 it is a loop_end pattern we recognize, return the reg rtx for the
5417 loop counter. Otherwise, return NULL_RTX. */
5419 static rtx
5420 hwloop_pattern_reg (rtx_insn *insn)
5422 rtx pat, reg;
5424 if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
5425 return NULL_RTX;
5427 pat = PATTERN (insn);
5428 reg = SET_DEST (XVECEXP (pat, 0, 1));
5429 if (!REG_P (reg))
5430 return NULL_RTX;
5431 return reg;
5434 /* Return the number of cycles taken by BB, as computed by scheduling,
5435 including the latencies of all insns with delay slots. IGNORE is
5436 an insn we should ignore in the calculation, usually the final
5437 branch. */
5438 static int
5439 bb_earliest_end_cycle (basic_block bb, rtx ignore)
5441 int earliest = 0;
5442 rtx_insn *insn;
5444 FOR_BB_INSNS (bb, insn)
5446 int cycles, this_clock;
5448 if (LABEL_P (insn) || NOTE_P (insn) || DEBUG_INSN_P (insn)
5449 || GET_CODE (PATTERN (insn)) == USE
5450 || GET_CODE (PATTERN (insn)) == CLOBBER
5451 || insn == ignore)
5452 continue;
5454 this_clock = insn_get_clock (insn);
5455 cycles = get_attr_cycles (insn);
5457 if (earliest < this_clock + cycles)
5458 earliest = this_clock + cycles;
5460 return earliest;
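/* Illustrative sketch only, compiled out. The same computation on plain
   arrays: the end cycle of a block is the maximum over all insns of issue
   clock plus latency. */
#if 0
static int
sketch_earliest_end_cycle (const int clocks[], const int latencies[], int n)
{
  int i, earliest = 0;

  for (i = 0; i < n; i++)
    if (earliest < clocks[i] + latencies[i])
      earliest = clocks[i] + latencies[i];
  return earliest;
}
#endif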
5463 /* Examine the insns in BB and remove all whose uid is greater than or
5464 equal to MAX_UID. */
5465 static void
5466 filter_insns_above (basic_block bb, int max_uid)
5468 rtx_insn *insn, *next;
5469 bool prev_ti = false;
5470 int prev_cycle = -1;
5472 FOR_BB_INSNS_SAFE (bb, insn, next)
5474 int this_cycle;
5475 if (!NONDEBUG_INSN_P (insn))
5476 continue;
5477 if (insn == BB_END (bb))
5478 return;
5479 this_cycle = insn_get_clock (insn);
5480 if (prev_ti && this_cycle == prev_cycle)
5482 gcc_assert (GET_MODE (insn) != TImode);
5483 PUT_MODE (insn, TImode);
5485 prev_ti = false;
5486 if (INSN_UID (insn) >= max_uid)
5488 if (GET_MODE (insn) == TImode)
5490 prev_ti = true;
5491 prev_cycle = this_cycle;
5493 delete_insn (insn);
5498 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
5500 static void
5501 c6x_asm_emit_except_personality (rtx personality)
5503 fputs ("\t.personality\t", asm_out_file);
5504 output_addr_const (asm_out_file, personality);
5505 fputc ('\n', asm_out_file);
5508 /* Use a special assembly directive rather than a regular section for
5509 unwind table data. */
5511 static void
5512 c6x_asm_init_sections (void)
5514 exception_section = get_unnamed_section (0, output_section_asm_op,
5515 "\t.handlerdata");
5518 /* A callback for the hw-doloop pass. Called to optimize LOOP in a
5519 machine-specific fashion; returns true if successful and false if
5520 the hwloop_fail function should be called. */
5522 static bool
5523 hwloop_optimize (hwloop_info loop)
5525 basic_block entry_bb, bb;
5526 rtx_insn *seq, *insn, *prev, *entry_after, *end_packet;
5527 rtx_insn *head_insn, *tail_insn, *new_insns, *last_insn;
5528 int loop_earliest;
5529 int n_execute_packets;
5530 edge entry_edge;
5531 unsigned ix;
5532 int max_uid_before, delayed_splits;
5533 int i, sp_ii, min_ii, max_ii, max_parallel, n_insns, n_real_insns, stages;
5534 rtx_insn **orig_vec;
5535 rtx_insn **copies;
5536 rtx_insn ***insn_copies;
5538 if (!c6x_flag_modulo_sched || !c6x_flag_schedule_insns2
5539 || !TARGET_INSNS_64PLUS)
5540 return false;
5542 if (loop->iter_reg_used || loop->depth > 1)
5543 return false;
5544 if (loop->has_call || loop->has_asm)
5545 return false;
5547 if (loop->head != loop->tail)
5548 return false;
5550 gcc_assert (loop->incoming_dest == loop->head);
5552 entry_edge = NULL;
5553 FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
5554 if (entry_edge->flags & EDGE_FALLTHRU)
5555 break;
5556 if (entry_edge == NULL)
5557 return false;
5559 reshuffle_units (loop->head);
5561 in_hwloop = true;
5562 schedule_ebbs_init ();
5563 schedule_ebb (BB_HEAD (loop->tail), loop->loop_end, true);
5564 schedule_ebbs_finish ();
5565 in_hwloop = false;
5567 bb = loop->head;
5568 loop_earliest = bb_earliest_end_cycle (bb, loop->loop_end) + 1;
5570 max_uid_before = get_max_uid ();
5572 /* Split all multi-cycle operations, such as loads. For normal
5573 scheduling, we only do this for branches, as the generated code
5574 would otherwise not be interrupt-safe. When using sploop, it is
5575 safe and beneficial to split them. If any multi-cycle operations
5576 remain after splitting (because we don't handle them yet), we
5577 cannot pipeline the loop. */
5578 delayed_splits = 0;
5579 FOR_BB_INSNS (bb, insn)
5581 if (NONDEBUG_INSN_P (insn))
5583 recog_memoized (insn);
5584 if (split_delayed_nonbranch (insn))
5585 delayed_splits++;
5586 else if (INSN_CODE (insn) >= 0
5587 && get_attr_cycles (insn) > 1)
5588 goto undo_splits;
5592 /* Count the number of insns as well as the number of real insns, and save
5593 the original sequence of insns in case we must restore it later. */
5594 n_insns = n_real_insns = 0;
5595 FOR_BB_INSNS (bb, insn)
5597 n_insns++;
5598 if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
5599 n_real_insns++;
5601 orig_vec = XNEWVEC (rtx_insn *, n_insns);
5602 n_insns = 0;
5603 FOR_BB_INSNS (bb, insn)
5604 orig_vec[n_insns++] = insn;
5606 /* Count the unit reservations, and compute a minimum II from that
5607 table. */
5608 count_unit_reqs (unit_reqs, loop->start_label,
5609 PREV_INSN (loop->loop_end));
5610 merge_unit_reqs (unit_reqs);
5612 min_ii = res_mii (unit_reqs);
5613 max_ii = loop_earliest < 15 ? loop_earliest : 14;
5615 /* Make copies of the loop body, up to a maximum number of stages we want
5616 to handle. */
5617 max_parallel = loop_earliest / min_ii + 1;
5619 copies = XCNEWVEC (rtx_insn *, (max_parallel + 1) * n_real_insns);
5620 insn_copies = XNEWVEC (rtx_insn **, max_parallel + 1);
5621 for (i = 0; i < max_parallel + 1; i++)
5622 insn_copies[i] = copies + i * n_real_insns;
5624 head_insn = next_nonnote_nondebug_insn (loop->start_label);
5625 tail_insn = prev_real_insn (BB_END (bb));
5627 i = 0;
5628 FOR_BB_INSNS (bb, insn)
5629 if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
5630 insn_copies[0][i++] = insn;
5632 sploop_max_uid_iter0 = get_max_uid ();
5634 /* Generate the copies of the loop body, and save them in the
5635 INSN_COPIES array. */
5636 start_sequence ();
5637 for (i = 0; i < max_parallel; i++)
5639 int j;
5640 rtx_insn *this_iter;
5642 this_iter = duplicate_insn_chain (head_insn, tail_insn);
5643 j = 0;
5644 while (this_iter)
5646 rtx_insn *prev_stage_insn = insn_copies[i][j];
5647 gcc_assert (INSN_CODE (this_iter) == INSN_CODE (prev_stage_insn));
5649 if (INSN_CODE (this_iter) >= 0
5650 && (get_attr_type (this_iter) == TYPE_LOAD_SHADOW
5651 || get_attr_type (this_iter) == TYPE_MULT_SHADOW))
5653 rtx_insn *prev = PREV_INSN (this_iter);
5654 record_delay_slot_pair (prev, this_iter,
5655 get_attr_cycles (prev) - 1, 0);
5657 else
5658 record_delay_slot_pair (prev_stage_insn, this_iter, i, 1);
5660 insn_copies[i + 1][j] = this_iter;
5661 j++;
5662 this_iter = next_nonnote_nondebug_insn (this_iter);
5665 new_insns = get_insns ();
5666 last_insn = insn_copies[max_parallel][n_real_insns - 1];
5667 end_sequence ();
5668 emit_insn_before (new_insns, BB_END (bb));
5670 /* Try to schedule the loop using varying initiation intervals,
5671 starting with the smallest possible and incrementing it
5672 on failure. */
5673 for (sp_ii = min_ii; sp_ii <= max_ii; sp_ii++)
5675 basic_block tmp_bb;
5676 if (dump_file)
5677 fprintf (dump_file, "Trying to schedule for II %d\n", sp_ii);
5679 df_clear_flags (DF_LR_RUN_DCE);
5681 schedule_ebbs_init ();
5682 set_modulo_params (sp_ii, max_parallel, n_real_insns,
5683 sploop_max_uid_iter0);
5684 tmp_bb = schedule_ebb (BB_HEAD (bb), last_insn, true);
5685 schedule_ebbs_finish ();
5687 if (tmp_bb)
5689 if (dump_file)
5690 fprintf (dump_file, "Found schedule with II %d\n", sp_ii);
5691 break;
5695 discard_delay_pairs_above (max_uid_before);
5697 if (sp_ii > max_ii)
5698 goto restore_loop;
5700 stages = insn_get_clock (ss.last_scheduled_iter0) / sp_ii + 1;
5702 if (stages == 1 && sp_ii > 5)
5703 goto restore_loop;
5705 /* At this point, we know we've been successful, unless we find later that
5706 there are too many execute packets for the loop buffer to hold. */
5708 /* Assign reservations to the instructions in the loop. We must find
5709 the stage that contains the full loop kernel, and transfer the
5710 reservations of the instructions contained in it to the corresponding
5711 instructions from iteration 0, which are the only ones we'll keep. */
5712 assign_reservations (BB_HEAD (bb), ss.last_scheduled_insn);
5713 SET_PREV_INSN (BB_END (bb)) = ss.last_scheduled_iter0;
5714 SET_NEXT_INSN (ss.last_scheduled_iter0) = BB_END (bb);
5715 filter_insns_above (bb, sploop_max_uid_iter0);
5717 for (i = 0; i < n_real_insns; i++)
5719 rtx insn = insn_copies[0][i];
5720 int uid = INSN_UID (insn);
5721 int stage = insn_uid_get_clock (uid) / sp_ii;
5723 if (stage + 1 < stages)
5725 int copy_uid;
5726 stage = stages - stage - 1;
5727 copy_uid = INSN_UID (insn_copies[stage][i]);
5728 INSN_INFO_ENTRY (uid).reservation
5729 = INSN_INFO_ENTRY (copy_uid).reservation;
5732 if (stages == 1)
5733 stages++;
5735 /* Compute the number of execute packets the pipelined form of the loop will
5736 require. */
5737 prev = NULL;
5738 n_execute_packets = 0;
5739 for (insn = loop->start_label;
5740 insn != loop->loop_end;
5741 insn = NEXT_INSN (insn))
5743 if (NONDEBUG_INSN_P (insn) && GET_MODE (insn) == TImode
5744 && !shadow_p (insn))
5746 n_execute_packets++;
5747 if (prev && insn_get_clock (prev) + 1 != insn_get_clock (insn))
5748 /* We need an extra NOP instruction. */
5749 n_execute_packets++;
5751 prev = insn;
5755 end_packet = ss.last_scheduled_iter0;
5756 while (!NONDEBUG_INSN_P (end_packet) || GET_MODE (end_packet) != TImode)
5757 end_packet = PREV_INSN (end_packet);
5759 /* The earliest cycle in which we can emit the SPKERNEL instruction. */
5760 loop_earliest = (stages - 1) * sp_ii;
5761 if (loop_earliest > insn_get_clock (end_packet))
5763 n_execute_packets++;
5764 end_packet = loop->loop_end;
5766 else
5767 loop_earliest = insn_get_clock (end_packet);
5769 if (n_execute_packets > 14)
5770 goto restore_loop;
5772 /* Generate the spkernel instruction, and place it at the appropriate
5773 spot. */
5774 PUT_MODE (end_packet, VOIDmode);
5776 insn = emit_jump_insn_before (
5777 gen_spkernel (GEN_INT (stages - 1),
5778 const0_rtx, JUMP_LABEL (loop->loop_end)),
5779 end_packet);
5780 JUMP_LABEL (insn) = JUMP_LABEL (loop->loop_end);
5781 insn_set_clock (insn, loop_earliest);
5782 PUT_MODE (insn, TImode);
5783 INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start = false;
5784 delete_insn (loop->loop_end);
5786 /* Place the mvc and sploop instructions before the loop. */
5787 entry_bb = entry_edge->src;
5789 start_sequence ();
5791 insn = emit_insn (gen_mvilc (loop->iter_reg));
5792 if (loop->iter_reg_used_outside)
5793 insn = emit_move_insn (loop->iter_reg, const0_rtx);
5794 insn = emit_insn (gen_sploop (GEN_INT (sp_ii)));
5795 seq = get_insns ();
5797 if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
5799 basic_block new_bb;
5800 edge e;
5801 edge_iterator ei;
5803 emit_insn_before (seq, BB_HEAD (loop->head));
5804 seq = emit_label_before (gen_label_rtx (), seq);
5806 new_bb = create_basic_block (seq, insn, entry_bb);
5807 FOR_EACH_EDGE (e, ei, loop->incoming)
5809 if (!(e->flags & EDGE_FALLTHRU))
5810 redirect_edge_and_branch_force (e, new_bb);
5811 else
5812 redirect_edge_succ (e, new_bb);
5814 make_edge (new_bb, loop->head, 0);
5816 else
5818 entry_after = BB_END (entry_bb);
5819 while (DEBUG_INSN_P (entry_after)
5820 || (NOTE_P (entry_after)
5821 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK))
5822 entry_after = PREV_INSN (entry_after);
5823 emit_insn_after (seq, entry_after);
5826 end_sequence ();
5828 /* Make sure we don't try to schedule this loop again. */
5829 for (ix = 0; loop->blocks.iterate (ix, &bb); ix++)
5830 bb->flags |= BB_DISABLE_SCHEDULE;
5832 return true;
5834 restore_loop:
5835 if (dump_file)
5836 fprintf (dump_file, "Unable to pipeline loop.\n");
5838 for (i = 1; i < n_insns; i++)
5840 SET_NEXT_INSN (orig_vec[i - 1]) = orig_vec[i];
5841 SET_PREV_INSN (orig_vec[i]) = orig_vec[i - 1];
5843 SET_PREV_INSN (orig_vec[0]) = PREV_INSN (BB_HEAD (bb));
5844 SET_NEXT_INSN (PREV_INSN (BB_HEAD (bb))) = orig_vec[0];
5845 SET_NEXT_INSN (orig_vec[n_insns - 1]) = NEXT_INSN (BB_END (bb));
5846 SET_PREV_INSN (NEXT_INSN (BB_END (bb))) = orig_vec[n_insns - 1];
5847 BB_HEAD (bb) = orig_vec[0];
5848 BB_END (bb) = orig_vec[n_insns - 1];
5849 undo_splits:
5850 free_delay_pairs ();
5851 FOR_BB_INSNS (bb, insn)
5852 if (NONDEBUG_INSN_P (insn))
5853 undo_split_delayed_nonbranch (insn);
5854 return false;
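/* Illustrative sketch only, compiled out. Two pieces of the modulo
   scheduling logic above, restated in isolation: the linear search for the
   smallest feasible initiation interval, and the stage count derived from
   the issue clock of the last insn of iteration 0. TRY_SCHEDULE stands in
   for the call to schedule_ebb with modulo parameters. */
#if 0
static int
sketch_find_ii (int min_ii, int max_ii, bool (*try_schedule) (int))
{
  int ii;

  for (ii = min_ii; ii <= max_ii; ii++)
    if (try_schedule (ii))
      return ii;
  return -1;   /* The loop cannot be pipelined. */
}

static int
sketch_stage_count (int last_iter0_clock, int ii)
{
  return last_iter0_clock / ii + 1;
}
#endif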
5857 /* A callback for the hw-doloop pass. Called when a loop we have discovered
5858 turns out not to be optimizable; we have to split the doloop_end pattern
5859 into a subtract and a test. */
5860 static void
5861 hwloop_fail (hwloop_info loop)
5863 rtx insn, test, testreg;
5865 if (dump_file)
5866 fprintf (dump_file, "splitting doloop insn %d\n",
5867 INSN_UID (loop->loop_end));
5868 insn = gen_addsi3 (loop->iter_reg, loop->iter_reg, constm1_rtx);
5869 /* See if we can emit the add at the head of the loop rather than at the
5870 end. */
5871 if (loop->head == NULL
5872 || loop->iter_reg_used_outside
5873 || loop->iter_reg_used
5874 || TEST_HARD_REG_BIT (loop->regs_set_in_loop, REGNO (loop->iter_reg))
5875 || loop->incoming_dest != loop->head
5876 || EDGE_COUNT (loop->head->preds) != 2)
5877 emit_insn_before (insn, loop->loop_end);
5878 else
5880 rtx_insn *t = loop->start_label;
5881 while (!NOTE_P (t) || NOTE_KIND (t) != NOTE_INSN_BASIC_BLOCK)
5882 t = NEXT_INSN (t);
5883 emit_insn_after (insn, t);
5886 testreg = SET_DEST (XVECEXP (PATTERN (loop->loop_end), 0, 2));
5887 if (GET_CODE (testreg) == SCRATCH)
5888 testreg = loop->iter_reg;
5889 else
5890 emit_insn_before (gen_movsi (testreg, loop->iter_reg), loop->loop_end);
5892 test = gen_rtx_NE (VOIDmode, testreg, const0_rtx);
5893 insn = emit_jump_insn_before (gen_cbranchsi4 (test, testreg, const0_rtx,
5894 loop->start_label),
5895 loop->loop_end);
5897 JUMP_LABEL (insn) = loop->start_label;
5898 LABEL_NUSES (loop->start_label)++;
5899 delete_insn (loop->loop_end);
5902 static struct hw_doloop_hooks c6x_doloop_hooks =
5904 hwloop_pattern_reg,
5905 hwloop_optimize,
5906 hwloop_fail
5909 /* Run the hw-doloop pass to modulo-schedule hardware loops, or split the
5910 doloop_end patterns where such optimizations are impossible. */
5911 static void
5912 c6x_hwloops (void)
5914 if (optimize)
5915 reorg_loops (true, &c6x_doloop_hooks);
5918 /* Implement the TARGET_MACHINE_DEPENDENT_REORG pass. We split call insns here
5919 into a sequence that loads the return register and performs the call,
5920 and emit the return label.
5921 If scheduling after reload is requested, it happens here. */
5923 static void
5924 c6x_reorg (void)
5926 basic_block bb;
5927 bool do_selsched = (c6x_flag_schedule_insns2 && flag_selective_scheduling2
5928 && !maybe_skip_selective_scheduling ());
5930 /* We are freeing block_for_insn in the toplev to keep compatibility
5931 with old MDEP_REORGS that are not CFG based. Recompute it now. */
5932 compute_bb_for_insn ();
5934 df_clear_flags (DF_LR_RUN_DCE);
5935 df_note_add_problem ();
5937 /* If optimizing, we'll have split before scheduling. */
5938 if (optimize == 0)
5939 split_all_insns ();
5941 df_analyze ();
5943 if (c6x_flag_schedule_insns2)
5945 int sz = get_max_uid () * 3 / 2 + 1;
5947 insn_info.create (sz);
5950 /* Make sure the real-jump insns we create are not deleted. When modulo-
5951 scheduling, situations where a reg is only stored in a loop can also
5952 cause dead code when doing the initial unrolling. */
5953 sched_no_dce = true;
5955 c6x_hwloops ();
5957 if (c6x_flag_schedule_insns2)
5959 split_delayed_insns ();
5960 timevar_push (TV_SCHED2);
5961 if (do_selsched)
5962 run_selective_scheduling ();
5963 else
5964 schedule_ebbs ();
5965 conditionalize_after_sched ();
5966 timevar_pop (TV_SCHED2);
5968 free_delay_pairs ();
5970 sched_no_dce = false;
5972 rtx_insn **call_labels = XCNEWVEC (rtx_insn *, get_max_uid () + 1);
5974 reorg_split_calls (call_labels);
5976 if (c6x_flag_schedule_insns2)
5978 FOR_EACH_BB_FN (bb, cfun)
5979 if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
5980 assign_reservations (BB_HEAD (bb), BB_END (bb));
5983 if (c6x_flag_var_tracking)
5985 timevar_push (TV_VAR_TRACKING);
5986 variable_tracking_main ();
5987 timevar_pop (TV_VAR_TRACKING);
5990 reorg_emit_nops (call_labels);
5992 /* Post-process the schedule to move parallel insns into SEQUENCEs. */
5993 if (c6x_flag_schedule_insns2)
5995 free_delay_pairs ();
5996 c6x_gen_bundles ();
5999 df_finish_pass (false);
6002 /* Called when a function has been assembled. It should perform all the
6003 tasks of ASM_DECLARE_FUNCTION_SIZE in elfos.h, plus target-specific
6004 tasks.
6005 We free the reservation (and other scheduling) information here now that
6006 all insns have been output. */
6007 void
6008 c6x_function_end (FILE *file, const char *fname)
6010 c6x_output_fn_unwind (file);
6012 insn_info.release ();
6014 if (!flag_inhibit_size_directive)
6015 ASM_OUTPUT_MEASURED_SIZE (file, fname);
6018 /* Determine whether X is a shift with code CODE and an integer amount
6019 AMOUNT. */
6020 static bool
6021 shift_p (rtx x, enum rtx_code code, int amount)
6023 return (GET_CODE (x) == code && GET_CODE (XEXP (x, 1)) == CONST_INT
6024 && INTVAL (XEXP (x, 1)) == amount);
6027 /* Compute a (partial) cost for rtx X. Return true if the complete
6028 cost has been computed, and false if subexpressions should be
6029 scanned. In either case, *TOTAL contains the cost result. */
6031 static bool
6032 c6x_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
6033 bool speed)
6035 int cost2 = COSTS_N_INSNS (1);
6036 rtx op0, op1;
6037 int code = GET_CODE (x);
6039 switch (code)
6041 case CONST_INT:
6042 if (outer_code == SET || outer_code == PLUS)
6043 *total = satisfies_constraint_IsB (x) ? 0 : cost2;
6044 else if (outer_code == AND || outer_code == IOR || outer_code == XOR
6045 || outer_code == MINUS)
6046 *total = satisfies_constraint_Is5 (x) ? 0 : cost2;
6047 else if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
6048 || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
6049 *total = satisfies_constraint_Iu4 (x) ? 0 : cost2;
6050 else if (outer_code == ASHIFT || outer_code == ASHIFTRT
6051 || outer_code == LSHIFTRT)
6052 *total = satisfies_constraint_Iu5 (x) ? 0 : cost2;
6053 else
6054 *total = cost2;
6055 return true;
6057 case CONST:
6058 case LABEL_REF:
6059 case SYMBOL_REF:
6060 case CONST_DOUBLE:
6061 *total = COSTS_N_INSNS (2);
6062 return true;
6064 case TRUNCATE:
6065 /* Recognize a mult_highpart operation. */
6066 if ((mode == HImode || mode == SImode)
6067 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
6068 && GET_MODE (XEXP (x, 0)) == GET_MODE_2XWIDER_MODE (mode).require ()
6069 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6070 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6071 && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode))
6073 rtx mul = XEXP (XEXP (x, 0), 0);
6074 rtx op0 = XEXP (mul, 0);
6075 rtx op1 = XEXP (mul, 1);
6076 enum rtx_code code0 = GET_CODE (op0);
6077 enum rtx_code code1 = GET_CODE (op1);
6079 if ((code0 == code1
6080 && (code0 == SIGN_EXTEND || code0 == ZERO_EXTEND))
6081 || (mode == HImode
6082 && code0 == ZERO_EXTEND && code1 == SIGN_EXTEND))
6084 if (mode == HImode)
6085 *total = COSTS_N_INSNS (2);
6086 else
6087 *total = COSTS_N_INSNS (12);
6088 mode = GET_MODE (XEXP (op0, 0));
6089 *total += rtx_cost (XEXP (op0, 0), mode, code0, 0, speed);
6090 *total += rtx_cost (XEXP (op1, 0), mode, code1, 0, speed);
6091 return true;
6094 return false;
6096 case ASHIFT:
6097 case ASHIFTRT:
6098 case LSHIFTRT:
6099 if (mode == DImode)
6100 *total = COSTS_N_INSNS (CONSTANT_P (XEXP (x, 1)) ? 4 : 15);
6101 else
6102 *total = COSTS_N_INSNS (1);
6103 return false;
6105 case PLUS:
6106 case MINUS:
6107 *total = COSTS_N_INSNS (1);
6108 op0 = code == PLUS ? XEXP (x, 0) : XEXP (x, 1);
6109 op1 = code == PLUS ? XEXP (x, 1) : XEXP (x, 0);
6110 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6111 && INTEGRAL_MODE_P (mode)
6112 && GET_CODE (op0) == MULT
6113 && GET_CODE (XEXP (op0, 1)) == CONST_INT
6114 && (INTVAL (XEXP (op0, 1)) == 2
6115 || INTVAL (XEXP (op0, 1)) == 4
6116 || (code == PLUS && INTVAL (XEXP (op0, 1)) == 8)))
6118 *total += rtx_cost (XEXP (op0, 0), mode, ASHIFT, 0, speed);
6119 *total += rtx_cost (op1, mode, (enum rtx_code) code, 1, speed);
6120 return true;
6122 return false;
6124 case MULT:
6125 op0 = XEXP (x, 0);
6126 op1 = XEXP (x, 1);
6127 if (mode == DFmode)
6129 if (TARGET_FP)
6130 *total = COSTS_N_INSNS (speed ? 10 : 1);
6131 else
6132 *total = COSTS_N_INSNS (speed ? 200 : 4);
6134 else if (mode == SFmode)
6136 if (TARGET_FP)
6137 *total = COSTS_N_INSNS (speed ? 4 : 1);
6138 else
6139 *total = COSTS_N_INSNS (speed ? 100 : 4);
6141 else if (mode == DImode)
6143 if (TARGET_MPY32
6144 && GET_CODE (op0) == GET_CODE (op1)
6145 && (GET_CODE (op0) == ZERO_EXTEND
6146 || GET_CODE (op0) == SIGN_EXTEND))
6148 *total = COSTS_N_INSNS (speed ? 2 : 1);
6149 op0 = XEXP (op0, 0);
6150 op1 = XEXP (op1, 0);
6152 else
6153 	/* Maybe improve this later.  */
6154 *total = COSTS_N_INSNS (20);
6156 else if (mode == SImode)
6158 if (((GET_CODE (op0) == ZERO_EXTEND
6159 || GET_CODE (op0) == SIGN_EXTEND
6160 || shift_p (op0, LSHIFTRT, 16))
6161 && (GET_CODE (op1) == SIGN_EXTEND
6162 || GET_CODE (op1) == ZERO_EXTEND
6163 || scst5_operand (op1, SImode)
6164 || shift_p (op1, ASHIFTRT, 16)
6165 || shift_p (op1, LSHIFTRT, 16)))
6166 || (shift_p (op0, ASHIFTRT, 16)
6167 && (GET_CODE (op1) == SIGN_EXTEND
6168 || shift_p (op1, ASHIFTRT, 16))))
6170 *total = COSTS_N_INSNS (speed ? 2 : 1);
6171 op0 = XEXP (op0, 0);
6172 if (scst5_operand (op1, SImode))
6173 op1 = NULL_RTX;
6174 else
6175 op1 = XEXP (op1, 0);
6177 else if (!speed)
6178 *total = COSTS_N_INSNS (1);
6179 else if (TARGET_MPY32)
6180 *total = COSTS_N_INSNS (4);
6181 else
6182 *total = COSTS_N_INSNS (6);
6184 else if (mode == HImode)
6185 *total = COSTS_N_INSNS (speed ? 2 : 1);
6187 if (GET_CODE (op0) != REG
6188 && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
6189 *total += rtx_cost (op0, mode, MULT, 0, speed);
6190 if (op1 && GET_CODE (op1) != REG
6191 && (GET_CODE (op1) != SUBREG || GET_CODE (SUBREG_REG (op1)) != REG))
6192 *total += rtx_cost (op1, mode, MULT, 1, speed);
6193 return true;
6195 case UDIV:
6196 case DIV:
6197 /* This is a bit random; assuming on average there'll be 16 leading
6198 zeros. FIXME: estimate better for constant dividends. */
6199 *total = COSTS_N_INSNS (6 + 3 * 16);
6200 return false;
6202 case IF_THEN_ELSE:
6203 /* Recognize the cmp_and/ior patterns. */
6204 op0 = XEXP (x, 0);
6205 if ((GET_CODE (op0) == EQ || GET_CODE (op0) == NE)
6206 && REG_P (XEXP (op0, 0))
6207 && XEXP (op0, 1) == const0_rtx
6208 && rtx_equal_p (XEXP (x, 1), XEXP (op0, 0)))
6210 *total = rtx_cost (XEXP (x, 1), VOIDmode, (enum rtx_code) outer_code,
6211 opno, speed);
6212 return false;
6214 return false;
6216 default:
6217 return false;
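/* Worked example (added for illustration): the TRUNCATE case above is meant
   to recognize highpart multiplies such as

     int mulhi (int a, int b)
     {
       return ((long long) a * b) >> 32;
     }

   whose RTL has the shape (truncate:SI (lshiftrt:DI (mult:DI
   (sign_extend:DI ...) (sign_extend:DI ...)) (const_int 32))); it is then
   costed as a multiply of the narrow operands rather than as a full
   DImode multiplication.  */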
6221 /* Implements target hook vector_mode_supported_p. */
6223 static bool
6224 c6x_vector_mode_supported_p (machine_mode mode)
6226 switch (mode)
6228 case E_V2HImode:
6229 case E_V4QImode:
6230 case E_V2SImode:
6231 case E_V4HImode:
6232 case E_V8QImode:
6233 return true;
6234 default:
6235 return false;
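/* Example (added): with the GNU vector extension, declarations such as

     typedef short         v2hi __attribute__ ((vector_size (4)));
     typedef unsigned char v8qi __attribute__ ((vector_size (8)));

   map onto V2HImode and V8QImode respectively, both of which this hook
   reports as supported.  */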
6239 /* Implements TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
6240 static machine_mode
6241 c6x_preferred_simd_mode (scalar_mode mode)
6243 switch (mode)
6245 case E_HImode:
6246 return V2HImode;
6247 case E_QImode:
6248 return V4QImode;
6250 default:
6251 return word_mode;
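/* Sketch (added): for a loop over 16-bit data, e.g.

     void vadd (short *a, short *b, short *c, int n)
     {
       for (int i = 0; i < n; i++)
         c[i] = a[i] + b[i];
     }

   the vectorizer queries this hook with HImode and is pointed at V2HImode,
   i.e. two lanes per 32-bit register, matching the add2-style V2HI patterns
   in this port.  */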
6255 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
6257 static bool
6258 c6x_scalar_mode_supported_p (scalar_mode mode)
6260 if (ALL_FIXED_POINT_MODE_P (mode)
6261 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
6262 return true;
6264 return default_scalar_mode_supported_p (mode);
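/* Example (added): accepting the fixed-point modes here is what makes the
   embedded-C fixed-point types usable on this target, e.g.

     _Fract      f;
     _Sat _Accum a;

   any fixed-point mode wider than two words falls through to the default
   hook instead.  */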
6267 /* Output a reference from a function exception table to the type_info
6268 object X. Output these via a special assembly directive. */
6270 static bool
6271 c6x_output_ttype (rtx x)
6273 /* Use special relocations for symbol references. */
6274 if (GET_CODE (x) != CONST_INT)
6275 fputs ("\t.ehtype\t", asm_out_file);
6276 else
6277 fputs ("\t.word\t", asm_out_file);
6278 output_addr_const (asm_out_file, x);
6279 fputc ('\n', asm_out_file);
6281   return true;
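/* Sample output (added, illustrative only; the symbol name is made up): a
   type_info reference in the exception table comes out as

     .ehtype  _ZTI9my_error

   whereas a plain integer filter value is emitted as

     .word    1

   the .ehtype directive lets the assembler apply the special relocation
   used for symbol references in C6x unwind tables.  */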
6284 /* Modify the return address of the current function. */
6286 void
6287 c6x_set_return_address (rtx source, rtx scratch)
6289 struct c6x_frame frame;
6290 rtx addr;
6291 HOST_WIDE_INT offset;
6293 c6x_compute_frame_layout (&frame);
6294 if (! c6x_save_reg (RETURN_ADDR_REGNO))
6295 emit_move_insn (gen_rtx_REG (Pmode, RETURN_ADDR_REGNO), source);
6296 else
6299 if (frame_pointer_needed)
6301 addr = hard_frame_pointer_rtx;
6302 offset = frame.b3_offset;
6304 else
6306 addr = stack_pointer_rtx;
6307 offset = frame.to_allocate - frame.b3_offset;
6310 /* TODO: Use base+offset loads where possible. */
6311 if (offset)
6313 HOST_WIDE_INT low = trunc_int_for_mode (offset, HImode);
6315 emit_insn (gen_movsi_high (scratch, GEN_INT (low)));
6316 if (low != offset)
6317 	  emit_insn (gen_movsi_lo_sum (scratch, scratch, GEN_INT (offset)));
6318 emit_insn (gen_addsi3 (scratch, addr, scratch));
6319 addr = scratch;
6322 emit_move_insn (gen_frame_mem (Pmode, addr), source);
6326 /* We save pairs of registers using a DImode store. Describe the component
6327 registers for DWARF generation code. */
6329 static rtx
6330 c6x_dwarf_register_span (rtx rtl)
6332 unsigned regno;
6333 unsigned real_regno;
6334 int nregs;
6335 int i;
6336 rtx p;
6338 regno = REGNO (rtl);
6339 nregs = HARD_REGNO_NREGS (regno, GET_MODE (rtl));
6340 if (nregs == 1)
6341 return NULL_RTX;
6343   p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
6344 for (i = 0; i < nregs; i++)
6346 if (TARGET_BIG_ENDIAN)
6347 real_regno = regno + nregs - (i + 1);
6348 else
6349 real_regno = regno + i;
6351 XVECEXP (p, 0, i) = gen_rtx_REG (SImode, real_regno);
6354 return p;
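/* Worked example (added): for an 8-byte value held in the register pair
   starting at A10, the span returned is

     (parallel [(reg:SI A10) (reg:SI A11)])

   with the component order reversed when TARGET_BIG_ENDIAN, so the DWARF
   output lists the two 4-byte halves in the correct order.  */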
6357 /* Codes for all the C6X builtins. */
6358 enum c6x_builtins
6360 C6X_BUILTIN_SADD,
6361 C6X_BUILTIN_SSUB,
6362 C6X_BUILTIN_ADD2,
6363 C6X_BUILTIN_SUB2,
6364 C6X_BUILTIN_ADD4,
6365 C6X_BUILTIN_SUB4,
6366 C6X_BUILTIN_SADD2,
6367 C6X_BUILTIN_SSUB2,
6368 C6X_BUILTIN_SADDU4,
6370 C6X_BUILTIN_SMPY,
6371 C6X_BUILTIN_SMPYH,
6372 C6X_BUILTIN_SMPYHL,
6373 C6X_BUILTIN_SMPYLH,
6374 C6X_BUILTIN_MPY2,
6375 C6X_BUILTIN_SMPY2,
6377 C6X_BUILTIN_CLRR,
6378 C6X_BUILTIN_EXTR,
6379 C6X_BUILTIN_EXTRU,
6381 C6X_BUILTIN_SSHL,
6382 C6X_BUILTIN_SUBC,
6383 C6X_BUILTIN_ABS,
6384 C6X_BUILTIN_ABS2,
6385 C6X_BUILTIN_AVG2,
6386 C6X_BUILTIN_AVGU4,
6388 C6X_BUILTIN_MAX
6392 static GTY(()) tree c6x_builtin_decls[C6X_BUILTIN_MAX];
6394 /* Return the C6X builtin for CODE. */
6395 static tree
6396 c6x_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6398 if (code >= C6X_BUILTIN_MAX)
6399 return error_mark_node;
6401 return c6x_builtin_decls[code];
6404 #define def_builtin(NAME, TYPE, CODE) \
6405 do { \
6406 tree bdecl; \
6407 bdecl = add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
6408 NULL, NULL_TREE); \
6409 c6x_builtin_decls[CODE] = bdecl; \
6410 } while (0)
6412 /* Set up all builtin functions for this target. */
6413 static void
6414 c6x_init_builtins (void)
6416 tree V4QI_type_node = build_vector_type (unsigned_intQI_type_node, 4);
6417 tree V2HI_type_node = build_vector_type (intHI_type_node, 2);
6418 tree V2SI_type_node = build_vector_type (intSI_type_node, 2);
6419 tree int_ftype_int
6420 = build_function_type_list (integer_type_node, integer_type_node,
6421 NULL_TREE);
6422 tree int_ftype_int_int
6423 = build_function_type_list (integer_type_node, integer_type_node,
6424 integer_type_node, NULL_TREE);
6425 tree v2hi_ftype_v2hi
6426 = build_function_type_list (V2HI_type_node, V2HI_type_node, NULL_TREE);
6427 tree v4qi_ftype_v4qi_v4qi
6428 = build_function_type_list (V4QI_type_node, V4QI_type_node,
6429 V4QI_type_node, NULL_TREE);
6430 tree v2hi_ftype_v2hi_v2hi
6431 = build_function_type_list (V2HI_type_node, V2HI_type_node,
6432 V2HI_type_node, NULL_TREE);
6433 tree v2si_ftype_v2hi_v2hi
6434 = build_function_type_list (V2SI_type_node, V2HI_type_node,
6435 V2HI_type_node, NULL_TREE);
6437 def_builtin ("__builtin_c6x_sadd", int_ftype_int_int,
6438 C6X_BUILTIN_SADD);
6439 def_builtin ("__builtin_c6x_ssub", int_ftype_int_int,
6440 C6X_BUILTIN_SSUB);
6441 def_builtin ("__builtin_c6x_add2", v2hi_ftype_v2hi_v2hi,
6442 C6X_BUILTIN_ADD2);
6443 def_builtin ("__builtin_c6x_sub2", v2hi_ftype_v2hi_v2hi,
6444 C6X_BUILTIN_SUB2);
6445 def_builtin ("__builtin_c6x_add4", v4qi_ftype_v4qi_v4qi,
6446 C6X_BUILTIN_ADD4);
6447 def_builtin ("__builtin_c6x_sub4", v4qi_ftype_v4qi_v4qi,
6448 C6X_BUILTIN_SUB4);
6449 def_builtin ("__builtin_c6x_mpy2", v2si_ftype_v2hi_v2hi,
6450 C6X_BUILTIN_MPY2);
6451 def_builtin ("__builtin_c6x_sadd2", v2hi_ftype_v2hi_v2hi,
6452 C6X_BUILTIN_SADD2);
6453 def_builtin ("__builtin_c6x_ssub2", v2hi_ftype_v2hi_v2hi,
6454 C6X_BUILTIN_SSUB2);
6455 def_builtin ("__builtin_c6x_saddu4", v4qi_ftype_v4qi_v4qi,
6456 C6X_BUILTIN_SADDU4);
6457 def_builtin ("__builtin_c6x_smpy2", v2si_ftype_v2hi_v2hi,
6458 C6X_BUILTIN_SMPY2);
6460 def_builtin ("__builtin_c6x_smpy", int_ftype_int_int,
6461 C6X_BUILTIN_SMPY);
6462 def_builtin ("__builtin_c6x_smpyh", int_ftype_int_int,
6463 C6X_BUILTIN_SMPYH);
6464 def_builtin ("__builtin_c6x_smpyhl", int_ftype_int_int,
6465 C6X_BUILTIN_SMPYHL);
6466 def_builtin ("__builtin_c6x_smpylh", int_ftype_int_int,
6467 C6X_BUILTIN_SMPYLH);
6469 def_builtin ("__builtin_c6x_sshl", int_ftype_int_int,
6470 C6X_BUILTIN_SSHL);
6471 def_builtin ("__builtin_c6x_subc", int_ftype_int_int,
6472 C6X_BUILTIN_SUBC);
6474 def_builtin ("__builtin_c6x_avg2", v2hi_ftype_v2hi_v2hi,
6475 C6X_BUILTIN_AVG2);
6476 def_builtin ("__builtin_c6x_avgu4", v4qi_ftype_v4qi_v4qi,
6477 C6X_BUILTIN_AVGU4);
6479 def_builtin ("__builtin_c6x_clrr", int_ftype_int_int,
6480 C6X_BUILTIN_CLRR);
6481 def_builtin ("__builtin_c6x_extr", int_ftype_int_int,
6482 C6X_BUILTIN_EXTR);
6483 def_builtin ("__builtin_c6x_extru", int_ftype_int_int,
6484 C6X_BUILTIN_EXTRU);
6486 def_builtin ("__builtin_c6x_abs", int_ftype_int, C6X_BUILTIN_ABS);
6487 def_builtin ("__builtin_c6x_abs2", v2hi_ftype_v2hi, C6X_BUILTIN_ABS2);
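/* Usage sketch (added; the typedef is an assumption chosen to match the
   builtin signatures above):

     typedef short v2hi __attribute__ ((vector_size (4)));

     int  s = __builtin_c6x_sadd (x, y);    saturating 32-bit add
     v2hi p = __builtin_c6x_add2 (a, b);    two 16-bit adds in one operation

   Each builtin is expanded via the bdesc_2arg/bdesc_1arg tables below into
   the insn pattern named by its CODE_FOR_ entry.  */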
6491 struct builtin_description
6493 const enum insn_code icode;
6494 const char *const name;
6495 const enum c6x_builtins code;
6498 static const struct builtin_description bdesc_2arg[] =
6500 { CODE_FOR_saddsi3, "__builtin_c6x_sadd", C6X_BUILTIN_SADD },
6501 { CODE_FOR_ssubsi3, "__builtin_c6x_ssub", C6X_BUILTIN_SSUB },
6502 { CODE_FOR_addv2hi3, "__builtin_c6x_add2", C6X_BUILTIN_ADD2 },
6503 { CODE_FOR_subv2hi3, "__builtin_c6x_sub2", C6X_BUILTIN_SUB2 },
6504 { CODE_FOR_addv4qi3, "__builtin_c6x_add4", C6X_BUILTIN_ADD4 },
6505 { CODE_FOR_subv4qi3, "__builtin_c6x_sub4", C6X_BUILTIN_SUB4 },
6506 { CODE_FOR_ss_addv2hi3, "__builtin_c6x_sadd2", C6X_BUILTIN_SADD2 },
6507 { CODE_FOR_ss_subv2hi3, "__builtin_c6x_ssub2", C6X_BUILTIN_SSUB2 },
6508 { CODE_FOR_us_addv4qi3, "__builtin_c6x_saddu4", C6X_BUILTIN_SADDU4 },
6510 { CODE_FOR_subcsi3, "__builtin_c6x_subc", C6X_BUILTIN_SUBC },
6511 { CODE_FOR_ss_ashlsi3, "__builtin_c6x_sshl", C6X_BUILTIN_SSHL },
6513 { CODE_FOR_avgv2hi3, "__builtin_c6x_avg2", C6X_BUILTIN_AVG2 },
6514 { CODE_FOR_uavgv4qi3, "__builtin_c6x_avgu4", C6X_BUILTIN_AVGU4 },
6516 { CODE_FOR_mulhqsq3, "__builtin_c6x_smpy", C6X_BUILTIN_SMPY },
6517 { CODE_FOR_mulhqsq3_hh, "__builtin_c6x_smpyh", C6X_BUILTIN_SMPYH },
6518 { CODE_FOR_mulhqsq3_lh, "__builtin_c6x_smpylh", C6X_BUILTIN_SMPYLH },
6519 { CODE_FOR_mulhqsq3_hl, "__builtin_c6x_smpyhl", C6X_BUILTIN_SMPYHL },
6521 { CODE_FOR_mulv2hqv2sq3, "__builtin_c6x_smpy2", C6X_BUILTIN_SMPY2 },
6523 { CODE_FOR_clrr, "__builtin_c6x_clrr", C6X_BUILTIN_CLRR },
6524 { CODE_FOR_extr, "__builtin_c6x_extr", C6X_BUILTIN_EXTR },
6525 { CODE_FOR_extru, "__builtin_c6x_extru", C6X_BUILTIN_EXTRU }
6528 static const struct builtin_description bdesc_1arg[] =
6530 { CODE_FOR_ssabssi2, "__builtin_c6x_abs", C6X_BUILTIN_ABS },
6531 { CODE_FOR_ssabsv2hi2, "__builtin_c6x_abs2", C6X_BUILTIN_ABS2 }
6534 /* Errors in the source file can cause expand_expr to return const0_rtx
6535 where we expect a vector. To avoid crashing, use one of the vector
6536 clear instructions. */
6537 static rtx
6538 safe_vector_operand (rtx x, machine_mode mode)
6540 if (x != const0_rtx)
6541 return x;
6542 x = gen_reg_rtx (SImode);
6544 emit_insn (gen_movsi (x, CONST0_RTX (SImode)));
6545 return gen_lowpart (mode, x);
6548 /* Subroutine of c6x_expand_builtin to take care of binop insns. MACFLAG is -1
6549 if this is a normal binary op, or one of the MACFLAG_xxx constants. */
6551 static rtx
6552 c6x_expand_binop_builtin (enum insn_code icode, tree exp, rtx target,
6553 bool match_op)
6555 int offs = match_op ? 1 : 0;
6556 rtx pat;
6557 tree arg0 = CALL_EXPR_ARG (exp, 0);
6558 tree arg1 = CALL_EXPR_ARG (exp, 1);
6559 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6560 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6561 machine_mode op0mode = GET_MODE (op0);
6562 machine_mode op1mode = GET_MODE (op1);
6563 machine_mode tmode = insn_data[icode].operand[0].mode;
6564 machine_mode mode0 = insn_data[icode].operand[1 + offs].mode;
6565 machine_mode mode1 = insn_data[icode].operand[2 + offs].mode;
6566 rtx ret = target;
6568 if (VECTOR_MODE_P (mode0))
6569 op0 = safe_vector_operand (op0, mode0);
6570 if (VECTOR_MODE_P (mode1))
6571 op1 = safe_vector_operand (op1, mode1);
6573 if (! target
6574 || GET_MODE (target) != tmode
6575 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
6577 if (tmode == SQmode || tmode == V2SQmode)
6579 ret = gen_reg_rtx (tmode == SQmode ? SImode : V2SImode);
6580 target = gen_lowpart (tmode, ret);
6582 else
6583 target = gen_reg_rtx (tmode);
6586 if ((op0mode == V2HImode || op0mode == SImode || op0mode == VOIDmode)
6587 && (mode0 == V2HQmode || mode0 == HQmode || mode0 == SQmode))
6589 op0mode = mode0;
6590 op0 = gen_lowpart (mode0, op0);
6592 if ((op1mode == V2HImode || op1mode == SImode || op1mode == VOIDmode)
6593 && (mode1 == V2HQmode || mode1 == HQmode || mode1 == SQmode))
6595 op1mode = mode1;
6596 op1 = gen_lowpart (mode1, op1);
6598 /* In case the insn wants input operands in modes different from
6599 the result, abort. */
6600 gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
6601 && (op1mode == mode1 || op1mode == VOIDmode));
6603 if (! (*insn_data[icode].operand[1 + offs].predicate) (op0, mode0))
6604 op0 = copy_to_mode_reg (mode0, op0);
6605 if (! (*insn_data[icode].operand[2 + offs].predicate) (op1, mode1))
6606 op1 = copy_to_mode_reg (mode1, op1);
6608 if (match_op)
6609 pat = GEN_FCN (icode) (target, target, op0, op1);
6610 else
6611 pat = GEN_FCN (icode) (target, op0, op1);
6613 if (! pat)
6614 return 0;
6616 emit_insn (pat);
6618 return ret;
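/* Note (added): when match_op is true the insn's output operand doubles as
   an extra input, so the operand predicates are checked starting at index
   1 + offs and the generator is invoked as

     GEN_FCN (icode) (target, target, op0, op1);

   c6x_expand_builtin below requests this for __builtin_c6x_clrr.  */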
6621 /* Subroutine of c6x_expand_builtin to take care of unop insns. */
6623 static rtx
6624 c6x_expand_unop_builtin (enum insn_code icode, tree exp,
6625 rtx target)
6627 rtx pat;
6628 tree arg0 = CALL_EXPR_ARG (exp, 0);
6629 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
6630 machine_mode op0mode = GET_MODE (op0);
6631 machine_mode tmode = insn_data[icode].operand[0].mode;
6632 machine_mode mode0 = insn_data[icode].operand[1].mode;
6634 if (! target
6635 || GET_MODE (target) != tmode
6636 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
6637 target = gen_reg_rtx (tmode);
6639 if (VECTOR_MODE_P (mode0))
6640 op0 = safe_vector_operand (op0, mode0);
6642 if (op0mode == SImode && mode0 == HImode)
6644 op0mode = HImode;
6645 op0 = gen_lowpart (HImode, op0);
6647 gcc_assert (op0mode == mode0 || op0mode == VOIDmode);
6649 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
6650 op0 = copy_to_mode_reg (mode0, op0);
6652 pat = GEN_FCN (icode) (target, op0);
6653 if (! pat)
6654 return 0;
6655 emit_insn (pat);
6656 return target;
6659 /* Expand an expression EXP that calls a built-in function,
6660 with result going to TARGET if that's convenient
6661 (and in mode MODE if that's convenient).
6662 SUBTARGET may be used as the target for computing one of EXP's operands.
6663 IGNORE is nonzero if the value is to be ignored. */
6665 static rtx
6666 c6x_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
6667 rtx subtarget ATTRIBUTE_UNUSED,
6668 machine_mode mode ATTRIBUTE_UNUSED,
6669 int ignore ATTRIBUTE_UNUSED)
6671 size_t i;
6672 const struct builtin_description *d;
6673 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6674 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6676 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
6677 if (d->code == fcode)
6678 return c6x_expand_binop_builtin (d->icode, exp, target,
6679 fcode == C6X_BUILTIN_CLRR);
6681 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
6682 if (d->code == fcode)
6683 return c6x_expand_unop_builtin (d->icode, exp, target);
6685 gcc_unreachable ();
6688 /* Target unwind frame info is generated from dwarf CFI directives, so
6689 always output dwarf2 unwind info. */
6691 static enum unwind_info_type
6692 c6x_debug_unwind_info (void)
6694 if (flag_unwind_tables || flag_exceptions)
6695 return UI_DWARF2;
6697 return default_debug_unwind_info ();
6700 /* Implement TARGET_HARD_REGNO_MODE_OK. */
6702 static bool
6703 c6x_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
6705 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD || (regno & 1) == 0;
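/* Example (added): word-sized or smaller values (SImode, HImode, ...) are
   allowed in any register, while an 8-byte DImode or DFmode value is only
   allowed at an even REGNO, i.e. in an aligned register pair.  */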
6708 /* Target Structure. */
6710 /* Initialize the GCC target structure. */
6711 #undef TARGET_FUNCTION_ARG
6712 #define TARGET_FUNCTION_ARG c6x_function_arg
6713 #undef TARGET_FUNCTION_ARG_ADVANCE
6714 #define TARGET_FUNCTION_ARG_ADVANCE c6x_function_arg_advance
6715 #undef TARGET_FUNCTION_ARG_BOUNDARY
6716 #define TARGET_FUNCTION_ARG_BOUNDARY c6x_function_arg_boundary
6717 #undef TARGET_FUNCTION_ARG_ROUND_BOUNDARY
6718 #define TARGET_FUNCTION_ARG_ROUND_BOUNDARY \
6719 c6x_function_arg_round_boundary
6720 #undef TARGET_FUNCTION_VALUE_REGNO_P
6721 #define TARGET_FUNCTION_VALUE_REGNO_P c6x_function_value_regno_p
6722 #undef TARGET_FUNCTION_VALUE
6723 #define TARGET_FUNCTION_VALUE c6x_function_value
6724 #undef TARGET_LIBCALL_VALUE
6725 #define TARGET_LIBCALL_VALUE c6x_libcall_value
6726 #undef TARGET_RETURN_IN_MEMORY
6727 #define TARGET_RETURN_IN_MEMORY c6x_return_in_memory
6728 #undef TARGET_RETURN_IN_MSB
6729 #define TARGET_RETURN_IN_MSB c6x_return_in_msb
6730 #undef TARGET_PASS_BY_REFERENCE
6731 #define TARGET_PASS_BY_REFERENCE c6x_pass_by_reference
6732 #undef TARGET_CALLEE_COPIES
6733 #define TARGET_CALLEE_COPIES c6x_callee_copies
6734 #undef TARGET_STRUCT_VALUE_RTX
6735 #define TARGET_STRUCT_VALUE_RTX c6x_struct_value_rtx
6736 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
6737 #define TARGET_FUNCTION_OK_FOR_SIBCALL c6x_function_ok_for_sibcall
6739 #undef TARGET_ASM_OUTPUT_MI_THUNK
6740 #define TARGET_ASM_OUTPUT_MI_THUNK c6x_output_mi_thunk
6741 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
6742 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK c6x_can_output_mi_thunk
6744 #undef TARGET_BUILD_BUILTIN_VA_LIST
6745 #define TARGET_BUILD_BUILTIN_VA_LIST c6x_build_builtin_va_list
6747 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
6748 #define TARGET_ASM_TRAMPOLINE_TEMPLATE c6x_asm_trampoline_template
6749 #undef TARGET_TRAMPOLINE_INIT
6750 #define TARGET_TRAMPOLINE_INIT c6x_initialize_trampoline
6752 #undef TARGET_LEGITIMATE_CONSTANT_P
6753 #define TARGET_LEGITIMATE_CONSTANT_P c6x_legitimate_constant_p
6754 #undef TARGET_LEGITIMATE_ADDRESS_P
6755 #define TARGET_LEGITIMATE_ADDRESS_P c6x_legitimate_address_p
6757 #undef TARGET_LRA_P
6758 #define TARGET_LRA_P hook_bool_void_false
6760 #undef TARGET_IN_SMALL_DATA_P
6761 #define TARGET_IN_SMALL_DATA_P c6x_in_small_data_p
6762 #undef TARGET_ASM_SELECT_RTX_SECTION
6763 #define TARGET_ASM_SELECT_RTX_SECTION c6x_select_rtx_section
6764 #undef TARGET_ASM_SELECT_SECTION
6765 #define TARGET_ASM_SELECT_SECTION c6x_elf_select_section
6766 #undef TARGET_ASM_UNIQUE_SECTION
6767 #define TARGET_ASM_UNIQUE_SECTION c6x_elf_unique_section
6768 #undef TARGET_SECTION_TYPE_FLAGS
6769 #define TARGET_SECTION_TYPE_FLAGS c6x_section_type_flags
6770 #undef TARGET_HAVE_SRODATA_SECTION
6771 #define TARGET_HAVE_SRODATA_SECTION true
6772 #undef TARGET_ASM_MERGEABLE_RODATA_PREFIX
6773 #define TARGET_ASM_MERGEABLE_RODATA_PREFIX ".const"
6775 #undef TARGET_OPTION_OVERRIDE
6776 #define TARGET_OPTION_OVERRIDE c6x_option_override
6777 #undef TARGET_CONDITIONAL_REGISTER_USAGE
6778 #define TARGET_CONDITIONAL_REGISTER_USAGE c6x_conditional_register_usage
6780 #undef TARGET_INIT_LIBFUNCS
6781 #define TARGET_INIT_LIBFUNCS c6x_init_libfuncs
6782 #undef TARGET_LIBFUNC_GNU_PREFIX
6783 #define TARGET_LIBFUNC_GNU_PREFIX true
6785 #undef TARGET_SCALAR_MODE_SUPPORTED_P
6786 #define TARGET_SCALAR_MODE_SUPPORTED_P c6x_scalar_mode_supported_p
6787 #undef TARGET_VECTOR_MODE_SUPPORTED_P
6788 #define TARGET_VECTOR_MODE_SUPPORTED_P c6x_vector_mode_supported_p
6789 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
6790 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE c6x_preferred_simd_mode
6792 #undef TARGET_RTX_COSTS
6793 #define TARGET_RTX_COSTS c6x_rtx_costs
6795 #undef TARGET_SCHED_INIT
6796 #define TARGET_SCHED_INIT c6x_sched_init
6797 #undef TARGET_SCHED_SET_SCHED_FLAGS
6798 #define TARGET_SCHED_SET_SCHED_FLAGS c6x_set_sched_flags
6799 #undef TARGET_SCHED_ADJUST_COST
6800 #define TARGET_SCHED_ADJUST_COST c6x_adjust_cost
6801 #undef TARGET_SCHED_ISSUE_RATE
6802 #define TARGET_SCHED_ISSUE_RATE c6x_issue_rate
6803 #undef TARGET_SCHED_VARIABLE_ISSUE
6804 #define TARGET_SCHED_VARIABLE_ISSUE c6x_variable_issue
6805 #undef TARGET_SCHED_REORDER
6806 #define TARGET_SCHED_REORDER c6x_sched_reorder
6807 #undef TARGET_SCHED_REORDER2
6808 #define TARGET_SCHED_REORDER2 c6x_sched_reorder2
6809 #undef TARGET_SCHED_DFA_NEW_CYCLE
6810 #define TARGET_SCHED_DFA_NEW_CYCLE c6x_dfa_new_cycle
6811 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
6812 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN c6x_sched_dfa_pre_cycle_insn
6813 #undef TARGET_SCHED_EXPOSED_PIPELINE
6814 #define TARGET_SCHED_EXPOSED_PIPELINE true
6816 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
6817 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT c6x_alloc_sched_context
6818 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
6819 #define TARGET_SCHED_INIT_SCHED_CONTEXT c6x_init_sched_context
6820 #undef TARGET_SCHED_SET_SCHED_CONTEXT
6821 #define TARGET_SCHED_SET_SCHED_CONTEXT c6x_set_sched_context
6822 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
6823 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT c6x_clear_sched_context
6824 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
6825 #define TARGET_SCHED_FREE_SCHED_CONTEXT c6x_free_sched_context
6827 #undef TARGET_CAN_ELIMINATE
6828 #define TARGET_CAN_ELIMINATE c6x_can_eliminate
6830 #undef TARGET_PREFERRED_RENAME_CLASS
6831 #define TARGET_PREFERRED_RENAME_CLASS c6x_preferred_rename_class
6833 #undef TARGET_MACHINE_DEPENDENT_REORG
6834 #define TARGET_MACHINE_DEPENDENT_REORG c6x_reorg
6836 #undef TARGET_ASM_FILE_START
6837 #define TARGET_ASM_FILE_START c6x_file_start
6839 #undef TARGET_PRINT_OPERAND
6840 #define TARGET_PRINT_OPERAND c6x_print_operand
6841 #undef TARGET_PRINT_OPERAND_ADDRESS
6842 #define TARGET_PRINT_OPERAND_ADDRESS c6x_print_operand_address
6843 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
6844 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P c6x_print_operand_punct_valid_p
6846 /* C6x unwinding tables use a different format for the typeinfo tables. */
6847 #undef TARGET_ASM_TTYPE
6848 #define TARGET_ASM_TTYPE c6x_output_ttype
6850 /* The C6x ABI follows the ARM EABI exception handling rules. */
6851 #undef TARGET_ARM_EABI_UNWINDER
6852 #define TARGET_ARM_EABI_UNWINDER true
6854 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
6855 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY c6x_asm_emit_except_personality
6857 #undef TARGET_ASM_INIT_SECTIONS
6858 #define TARGET_ASM_INIT_SECTIONS c6x_asm_init_sections
6860 #undef TARGET_DEBUG_UNWIND_INFO
6861 #define TARGET_DEBUG_UNWIND_INFO c6x_debug_unwind_info
6863 #undef TARGET_DWARF_REGISTER_SPAN
6864 #define TARGET_DWARF_REGISTER_SPAN c6x_dwarf_register_span
6866 #undef TARGET_INIT_BUILTINS
6867 #define TARGET_INIT_BUILTINS c6x_init_builtins
6868 #undef TARGET_EXPAND_BUILTIN
6869 #define TARGET_EXPAND_BUILTIN c6x_expand_builtin
6870 #undef TARGET_BUILTIN_DECL
6871 #define TARGET_BUILTIN_DECL c6x_builtin_decl
6873 #undef TARGET_HARD_REGNO_MODE_OK
6874 #define TARGET_HARD_REGNO_MODE_OK c6x_hard_regno_mode_ok
6876 struct gcc_target targetm = TARGET_INITIALIZER;
6878 #include "gt-c6x.h"