1 /* Target Code for TI C6X
2 Copyright (C) 2010-2017 Free Software Foundation, Inc.
3 Contributed by Andrew Jenner <andrew@codesourcery.com>
4 Contributed by Bernd Schmidt <bernds@codesourcery.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
29 #include "gimple-expr.h"
34 #include "stringpool.h"
41 #include "diagnostic-core.h"
42 #include "stor-layout.h"
46 #include "insn-attr.h"
50 #include "sched-int.h"
51 #include "tm-constrs.h"
52 #include "langhooks.h"
53 #include "sel-sched.h"
55 #include "hw-doloop.h"
56 #include "regrename.h"
60 /* This file should be included last. */
61 #include "target-def.h"
63 /* Table of supported architecture variants. */
67 enum c6x_cpu_type type
;
68 unsigned short features
;
71 /* A list of all ISAs, mapping each one to a representative device.
72 Used for -march selection. */
73 static const c6x_arch_table all_isas
[] =
75 #define C6X_ISA(NAME,DEVICE,FLAGS) \
76 { NAME, DEVICE, FLAGS },
77 #include "c6x-isas.def"
79 { NULL
, C6X_CPU_C62X
, 0 }
82 /* This is the parsed result of the "-march=" option, if given. */
83 enum c6x_cpu_type c6x_arch
= C6X_DEFAULT_ARCH
;
85 /* A mask of insn types that are allowed by the architecture selected by
87 unsigned long c6x_insn_mask
= C6X_DEFAULT_INSN_MASK
;
89 /* The instruction that is being output (as obtained from FINAL_PRESCAN_INSN).
91 static rtx_insn
*c6x_current_insn
= NULL
;
93 /* A decl we build to access __c6xabi_DSBT_base. */
94 static GTY(()) tree dsbt_decl
;
96 /* Determines whether we run our final scheduling pass or not. We always
97 avoid the normal second scheduling pass. */
98 static int c6x_flag_schedule_insns2
;
100 /* Determines whether we run variable tracking in machine dependent
102 static int c6x_flag_var_tracking
;
104 /* Determines whether we use modulo scheduling. */
105 static int c6x_flag_modulo_sched
;
107 /* Record the state of flag_pic before we set it to 1 for DSBT. */
108 int c6x_initial_flag_pic
;
112 /* We record the clock cycle for every insn during scheduling. */
114 /* After scheduling, we run assign_reservations to choose unit
115 reservations for all insns. These are recorded here. */
117 /* Records the new condition for insns which must be made
118 conditional after scheduling. An entry of NULL_RTX means no such
119 change is necessary. */
121 /* True for the first insn that was scheduled in an ebb. */
123 /* The scheduler state after the insn, transformed into a mask of UNIT_QID
124 bits rather than storing the state. Meaningful only for the last
126 unsigned int unit_mask
;
127 } c6x_sched_insn_info
;
130 /* Record a c6x_sched_insn_info structure for every insn in the function. */
131 static vec
<c6x_sched_insn_info
> insn_info
;
133 #define INSN_INFO_LENGTH (insn_info).length ()
134 #define INSN_INFO_ENTRY(N) (insn_info[(N)])
136 static bool done_cfi_sections
;
138 #define RESERVATION_FLAG_D 1
139 #define RESERVATION_FLAG_L 2
140 #define RESERVATION_FLAG_S 4
141 #define RESERVATION_FLAG_M 8
142 #define RESERVATION_FLAG_DL (RESERVATION_FLAG_D | RESERVATION_FLAG_L)
143 #define RESERVATION_FLAG_DS (RESERVATION_FLAG_D | RESERVATION_FLAG_S)
144 #define RESERVATION_FLAG_LS (RESERVATION_FLAG_L | RESERVATION_FLAG_S)
145 #define RESERVATION_FLAG_DLS (RESERVATION_FLAG_D | RESERVATION_FLAG_LS)
147 /* The DFA names of the units. */
148 static const char *const c6x_unit_names
[] =
150 "d1", "l1", "s1", "m1", "fps1", "fpl1", "adddps1", "adddpl1",
151 "d2", "l2", "s2", "m2", "fps2", "fpl2", "adddps2", "adddpl2"
154 /* The DFA unit number for each unit in c6x_unit_names[]. */
155 static int c6x_unit_codes
[ARRAY_SIZE (c6x_unit_names
)];
157 /* Unit query IDs. */
158 #define UNIT_QID_D1 0
159 #define UNIT_QID_L1 1
160 #define UNIT_QID_S1 2
161 #define UNIT_QID_M1 3
162 #define UNIT_QID_FPS1 4
163 #define UNIT_QID_FPL1 5
164 #define UNIT_QID_ADDDPS1 6
165 #define UNIT_QID_ADDDPL1 7
166 #define UNIT_QID_SIDE_OFFSET 8
168 #define RESERVATION_S1 2
169 #define RESERVATION_S2 10
171 /* An enum for the unit requirements we count in the UNIT_REQS table. */
187 /* A table used to count unit requirements. Used when computing minimum
188 iteration intervals. */
189 typedef int unit_req_table
[2][UNIT_REQ_MAX
];
190 static unit_req_table unit_reqs
;
192 /* Register map for debugging. */
193 unsigned const dbx_register_map
[FIRST_PSEUDO_REGISTER
] =
195 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* A0 - A15. */
196 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, /* A16 - A32. */
198 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, /* B0 - B15. */
200 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, /* B16 - B32. */
202 -1, -1, -1 /* FP, ARGP, ILC. */
205 /* Allocate a new, cleared machine_function structure. */
207 static struct machine_function
*
208 c6x_init_machine_status (void)
210 return ggc_cleared_alloc
<machine_function
> ();
213 /* Implement TARGET_OPTION_OVERRIDE. */
216 c6x_option_override (void)
220 if (global_options_set
.x_c6x_arch_option
)
222 c6x_arch
= all_isas
[c6x_arch_option
].type
;
223 c6x_insn_mask
&= ~C6X_INSNS_ALL_CPU_BITS
;
224 c6x_insn_mask
|= all_isas
[c6x_arch_option
].features
;
227 c6x_flag_schedule_insns2
= flag_schedule_insns_after_reload
;
228 flag_schedule_insns_after_reload
= 0;
230 c6x_flag_modulo_sched
= flag_modulo_sched
;
231 flag_modulo_sched
= 0;
233 init_machine_status
= c6x_init_machine_status
;
235 for (i
= 0; i
< ARRAY_SIZE (c6x_unit_names
); i
++)
236 c6x_unit_codes
[i
] = get_cpu_unit_code (c6x_unit_names
[i
]);
238 if (flag_pic
&& !TARGET_DSBT
)
240 error ("-fpic and -fPIC not supported without -mdsbt on this target");
243 c6x_initial_flag_pic
= flag_pic
;
244 if (TARGET_DSBT
&& !flag_pic
)
249 /* Implement the TARGET_CONDITIONAL_REGISTER_USAGE hook. */
252 c6x_conditional_register_usage (void)
255 if (c6x_arch
== C6X_CPU_C62X
|| c6x_arch
== C6X_CPU_C67X
)
256 for (i
= 16; i
< 32; i
++)
259 fixed_regs
[32 + i
] = 1;
263 SET_HARD_REG_BIT (reg_class_contents
[(int)PREDICATE_A_REGS
],
265 SET_HARD_REG_BIT (reg_class_contents
[(int)PREDICATE_REGS
],
267 CLEAR_HARD_REG_BIT (reg_class_contents
[(int)NONPREDICATE_A_REGS
],
269 CLEAR_HARD_REG_BIT (reg_class_contents
[(int)NONPREDICATE_REGS
],
274 static GTY(()) rtx eqdf_libfunc
;
275 static GTY(()) rtx nedf_libfunc
;
276 static GTY(()) rtx ledf_libfunc
;
277 static GTY(()) rtx ltdf_libfunc
;
278 static GTY(()) rtx gedf_libfunc
;
279 static GTY(()) rtx gtdf_libfunc
;
280 static GTY(()) rtx eqsf_libfunc
;
281 static GTY(()) rtx nesf_libfunc
;
282 static GTY(()) rtx lesf_libfunc
;
283 static GTY(()) rtx ltsf_libfunc
;
284 static GTY(()) rtx gesf_libfunc
;
285 static GTY(()) rtx gtsf_libfunc
;
286 static GTY(()) rtx strasgi_libfunc
;
287 static GTY(()) rtx strasgi64p_libfunc
;
289 /* Implement the TARGET_INIT_LIBFUNCS macro. We use this to rename library
290 functions to match the C6x ABI. */
293 c6x_init_libfuncs (void)
295 /* Double-precision floating-point arithmetic. */
296 set_optab_libfunc (add_optab
, DFmode
, "__c6xabi_addd");
297 set_optab_libfunc (sdiv_optab
, DFmode
, "__c6xabi_divd");
298 set_optab_libfunc (smul_optab
, DFmode
, "__c6xabi_mpyd");
299 set_optab_libfunc (neg_optab
, DFmode
, "__c6xabi_negd");
300 set_optab_libfunc (sub_optab
, DFmode
, "__c6xabi_subd");
302 /* Single-precision floating-point arithmetic. */
303 set_optab_libfunc (add_optab
, SFmode
, "__c6xabi_addf");
304 set_optab_libfunc (sdiv_optab
, SFmode
, "__c6xabi_divf");
305 set_optab_libfunc (smul_optab
, SFmode
, "__c6xabi_mpyf");
306 set_optab_libfunc (neg_optab
, SFmode
, "__c6xabi_negf");
307 set_optab_libfunc (sub_optab
, SFmode
, "__c6xabi_subf");
309 /* Floating-point comparisons. */
310 eqsf_libfunc
= init_one_libfunc ("__c6xabi_eqf");
311 nesf_libfunc
= init_one_libfunc ("__c6xabi_neqf");
312 lesf_libfunc
= init_one_libfunc ("__c6xabi_lef");
313 ltsf_libfunc
= init_one_libfunc ("__c6xabi_ltf");
314 gesf_libfunc
= init_one_libfunc ("__c6xabi_gef");
315 gtsf_libfunc
= init_one_libfunc ("__c6xabi_gtf");
316 eqdf_libfunc
= init_one_libfunc ("__c6xabi_eqd");
317 nedf_libfunc
= init_one_libfunc ("__c6xabi_neqd");
318 ledf_libfunc
= init_one_libfunc ("__c6xabi_led");
319 ltdf_libfunc
= init_one_libfunc ("__c6xabi_ltd");
320 gedf_libfunc
= init_one_libfunc ("__c6xabi_ged");
321 gtdf_libfunc
= init_one_libfunc ("__c6xabi_gtd");
323 set_optab_libfunc (eq_optab
, SFmode
, NULL
);
324 set_optab_libfunc (ne_optab
, SFmode
, "__c6xabi_neqf");
325 set_optab_libfunc (gt_optab
, SFmode
, NULL
);
326 set_optab_libfunc (ge_optab
, SFmode
, NULL
);
327 set_optab_libfunc (lt_optab
, SFmode
, NULL
);
328 set_optab_libfunc (le_optab
, SFmode
, NULL
);
329 set_optab_libfunc (unord_optab
, SFmode
, "__c6xabi_unordf");
330 set_optab_libfunc (eq_optab
, DFmode
, NULL
);
331 set_optab_libfunc (ne_optab
, DFmode
, "__c6xabi_neqd");
332 set_optab_libfunc (gt_optab
, DFmode
, NULL
);
333 set_optab_libfunc (ge_optab
, DFmode
, NULL
);
334 set_optab_libfunc (lt_optab
, DFmode
, NULL
);
335 set_optab_libfunc (le_optab
, DFmode
, NULL
);
336 set_optab_libfunc (unord_optab
, DFmode
, "__c6xabi_unordd");
338 /* Floating-point to integer conversions. */
339 set_conv_libfunc (sfix_optab
, SImode
, DFmode
, "__c6xabi_fixdi");
340 set_conv_libfunc (ufix_optab
, SImode
, DFmode
, "__c6xabi_fixdu");
341 set_conv_libfunc (sfix_optab
, DImode
, DFmode
, "__c6xabi_fixdlli");
342 set_conv_libfunc (ufix_optab
, DImode
, DFmode
, "__c6xabi_fixdull");
343 set_conv_libfunc (sfix_optab
, SImode
, SFmode
, "__c6xabi_fixfi");
344 set_conv_libfunc (ufix_optab
, SImode
, SFmode
, "__c6xabi_fixfu");
345 set_conv_libfunc (sfix_optab
, DImode
, SFmode
, "__c6xabi_fixflli");
346 set_conv_libfunc (ufix_optab
, DImode
, SFmode
, "__c6xabi_fixfull");
348 /* Conversions between floating types. */
349 set_conv_libfunc (trunc_optab
, SFmode
, DFmode
, "__c6xabi_cvtdf");
350 set_conv_libfunc (sext_optab
, DFmode
, SFmode
, "__c6xabi_cvtfd");
352 /* Integer to floating-point conversions. */
353 set_conv_libfunc (sfloat_optab
, DFmode
, SImode
, "__c6xabi_fltid");
354 set_conv_libfunc (ufloat_optab
, DFmode
, SImode
, "__c6xabi_fltud");
355 set_conv_libfunc (sfloat_optab
, DFmode
, DImode
, "__c6xabi_fltllid");
356 set_conv_libfunc (ufloat_optab
, DFmode
, DImode
, "__c6xabi_fltulld");
357 set_conv_libfunc (sfloat_optab
, SFmode
, SImode
, "__c6xabi_fltif");
358 set_conv_libfunc (ufloat_optab
, SFmode
, SImode
, "__c6xabi_fltuf");
359 set_conv_libfunc (sfloat_optab
, SFmode
, DImode
, "__c6xabi_fltllif");
360 set_conv_libfunc (ufloat_optab
, SFmode
, DImode
, "__c6xabi_fltullf");
363 set_optab_libfunc (smul_optab
, DImode
, "__c6xabi_mpyll");
364 set_optab_libfunc (ashl_optab
, DImode
, "__c6xabi_llshl");
365 set_optab_libfunc (lshr_optab
, DImode
, "__c6xabi_llshru");
366 set_optab_libfunc (ashr_optab
, DImode
, "__c6xabi_llshr");
368 set_optab_libfunc (sdiv_optab
, SImode
, "__c6xabi_divi");
369 set_optab_libfunc (udiv_optab
, SImode
, "__c6xabi_divu");
370 set_optab_libfunc (smod_optab
, SImode
, "__c6xabi_remi");
371 set_optab_libfunc (umod_optab
, SImode
, "__c6xabi_remu");
372 set_optab_libfunc (sdivmod_optab
, SImode
, "__c6xabi_divremi");
373 set_optab_libfunc (udivmod_optab
, SImode
, "__c6xabi_divremu");
374 set_optab_libfunc (sdiv_optab
, DImode
, "__c6xabi_divlli");
375 set_optab_libfunc (udiv_optab
, DImode
, "__c6xabi_divull");
376 set_optab_libfunc (smod_optab
, DImode
, "__c6xabi_remlli");
377 set_optab_libfunc (umod_optab
, DImode
, "__c6xabi_remull");
378 set_optab_libfunc (udivmod_optab
, DImode
, "__c6xabi_divremull");
381 strasgi_libfunc
= init_one_libfunc ("__c6xabi_strasgi");
382 strasgi64p_libfunc
= init_one_libfunc ("__c6xabi_strasgi_64plus");
385 /* Begin the assembly file. */
388 c6x_file_start (void)
390 /* Variable tracking should be run after all optimizations which change order
391 of insns. It also needs a valid CFG. This can't be done in
392 c6x_override_options, because flag_var_tracking is finalized after
394 c6x_flag_var_tracking
= flag_var_tracking
;
395 flag_var_tracking
= 0;
397 done_cfi_sections
= false;
398 default_file_start ();
400 /* Arrays are aligned to 8-byte boundaries. */
401 asm_fprintf (asm_out_file
,
402 "\t.c6xabi_attribute Tag_ABI_array_object_alignment, 0\n");
403 asm_fprintf (asm_out_file
,
404 "\t.c6xabi_attribute Tag_ABI_array_object_align_expected, 0\n");
406 /* Stack alignment is 8 bytes. */
407 asm_fprintf (asm_out_file
,
408 "\t.c6xabi_attribute Tag_ABI_stack_align_needed, 0\n");
409 asm_fprintf (asm_out_file
,
410 "\t.c6xabi_attribute Tag_ABI_stack_align_preserved, 0\n");
412 #if 0 /* FIXME: Reenable when TI's tools are fixed. */
413 /* ??? Ideally we'd check flag_short_wchar somehow. */
414 asm_fprintf (asm_out_file
, "\t.c6xabi_attribute Tag_ABI_wchar_t, %d\n", 2);
417 /* We conform to version 1.0 of the ABI. */
418 asm_fprintf (asm_out_file
,
419 "\t.c6xabi_attribute Tag_ABI_conformance, \"1.0\"\n");
423 /* The LTO frontend only enables exceptions when it sees a function that
424 uses it. This changes the return value of dwarf2out_do_frame, so we
425 have to check before every function. */
428 c6x_output_file_unwind (FILE * f
)
430 if (done_cfi_sections
)
433 /* Output a .cfi_sections directive. */
434 if (dwarf2out_do_frame ())
436 if (flag_unwind_tables
|| flag_exceptions
)
438 if (write_symbols
== DWARF2_DEBUG
439 || write_symbols
== VMS_AND_DWARF2_DEBUG
)
440 asm_fprintf (f
, "\t.cfi_sections .debug_frame, .c6xabi.exidx\n");
442 asm_fprintf (f
, "\t.cfi_sections .c6xabi.exidx\n");
445 asm_fprintf (f
, "\t.cfi_sections .debug_frame\n");
446 done_cfi_sections
= true;
450 /* Output unwind directives at the end of a function. */
453 c6x_output_fn_unwind (FILE * f
)
455 /* Return immediately if we are not generating unwinding tables. */
456 if (! (flag_unwind_tables
|| flag_exceptions
))
459 /* If this function will never be unwound, then mark it as such. */
460 if (!(flag_unwind_tables
|| crtl
->uses_eh_lsda
)
461 && (TREE_NOTHROW (current_function_decl
)
462 || crtl
->all_throwers_are_sibcalls
))
463 fputs("\t.cantunwind\n", f
);
465 fputs ("\t.endp\n", f
);
469 /* Stack and Calling. */
471 int argument_registers
[10] =
480 /* Implements the macro INIT_CUMULATIVE_ARGS defined in c6x.h. */
483 c6x_init_cumulative_args (CUMULATIVE_ARGS
*cum
, const_tree fntype
, rtx libname
,
484 int n_named_args ATTRIBUTE_UNUSED
)
488 if (!libname
&& fntype
)
490 /* We need to find out the number of named arguments. Unfortunately,
491 for incoming arguments, N_NAMED_ARGS is set to -1. */
492 if (stdarg_p (fntype
))
493 cum
->nregs
= type_num_arguments (fntype
) - 1;
499 /* Implements the macro FUNCTION_ARG defined in c6x.h. */
502 c6x_function_arg (cumulative_args_t cum_v
, machine_mode mode
,
503 const_tree type
, bool named ATTRIBUTE_UNUSED
)
505 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
506 if (cum
->count
>= cum
->nregs
)
510 HOST_WIDE_INT size
= int_size_in_bytes (type
);
511 if (TARGET_BIG_ENDIAN
&& AGGREGATE_TYPE_P (type
))
515 rtx reg1
= gen_rtx_REG (SImode
, argument_registers
[cum
->count
] + 1);
516 rtx reg2
= gen_rtx_REG (SImode
, argument_registers
[cum
->count
]);
517 rtvec vec
= gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode
, reg1
, const0_rtx
),
518 gen_rtx_EXPR_LIST (VOIDmode
, reg2
, GEN_INT (4)));
519 return gen_rtx_PARALLEL (mode
, vec
);
523 return gen_rtx_REG (mode
, argument_registers
[cum
->count
]);
527 c6x_function_arg_advance (cumulative_args_t cum_v
,
528 machine_mode mode ATTRIBUTE_UNUSED
,
529 const_tree type ATTRIBUTE_UNUSED
,
530 bool named ATTRIBUTE_UNUSED
)
532 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
537 /* Return true if BLOCK_REG_PADDING (MODE, TYPE, FIRST) should return
538 upward rather than downward. */
541 c6x_block_reg_pad_upward (machine_mode mode ATTRIBUTE_UNUSED
,
542 const_tree type
, bool first
)
546 if (!TARGET_BIG_ENDIAN
)
552 size
= int_size_in_bytes (type
);
556 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. */
559 c6x_function_arg_boundary (machine_mode mode
, const_tree type
)
561 unsigned int boundary
= type
? TYPE_ALIGN (type
) : GET_MODE_BITSIZE (mode
);
563 if (boundary
> BITS_PER_WORD
)
564 return 2 * BITS_PER_WORD
;
568 HOST_WIDE_INT size
= int_size_in_bytes (type
);
570 return 2 * BITS_PER_WORD
;
571 if (boundary
< BITS_PER_WORD
)
574 return BITS_PER_WORD
;
576 return 2 * BITS_PER_UNIT
;
582 /* Implement TARGET_FUNCTION_ARG_ROUND_BOUNDARY. */
584 c6x_function_arg_round_boundary (machine_mode mode
, const_tree type
)
586 return c6x_function_arg_boundary (mode
, type
);
589 /* TARGET_FUNCTION_VALUE implementation. Returns an RTX representing the place
590 where function FUNC returns or receives a value of data type TYPE. */
593 c6x_function_value (const_tree type
, const_tree func ATTRIBUTE_UNUSED
,
594 bool outgoing ATTRIBUTE_UNUSED
)
596 /* Functions return values in register A4. When returning aggregates, we may
597 have to adjust for endianness. */
598 if (TARGET_BIG_ENDIAN
&& type
&& AGGREGATE_TYPE_P (type
))
600 HOST_WIDE_INT size
= int_size_in_bytes (type
);
604 rtx reg1
= gen_rtx_REG (SImode
, REG_A4
+ 1);
605 rtx reg2
= gen_rtx_REG (SImode
, REG_A4
);
606 rtvec vec
= gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode
, reg1
, const0_rtx
),
607 gen_rtx_EXPR_LIST (VOIDmode
, reg2
, GEN_INT (4)));
608 return gen_rtx_PARALLEL (TYPE_MODE (type
), vec
);
611 return gen_rtx_REG (TYPE_MODE (type
), REG_A4
);
614 /* Implement TARGET_LIBCALL_VALUE. */
617 c6x_libcall_value (machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
619 return gen_rtx_REG (mode
, REG_A4
);
622 /* TARGET_STRUCT_VALUE_RTX implementation. */
625 c6x_struct_value_rtx (tree type ATTRIBUTE_UNUSED
, int incoming ATTRIBUTE_UNUSED
)
627 return gen_rtx_REG (Pmode
, REG_A3
);
630 /* Implement TARGET_FUNCTION_VALUE_REGNO_P. */
633 c6x_function_value_regno_p (const unsigned int regno
)
635 return regno
== REG_A4
;
638 /* Types larger than 64 bit, and variable sized types, are passed by
639 reference. The callee must copy them; see c6x_callee_copies. */
642 c6x_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED
,
643 machine_mode mode
, const_tree type
,
644 bool named ATTRIBUTE_UNUSED
)
648 size
= int_size_in_bytes (type
);
649 else if (mode
!= VOIDmode
)
650 size
= GET_MODE_SIZE (mode
);
651 return size
> 2 * UNITS_PER_WORD
|| size
== -1;
654 /* Decide whether a type should be returned in memory (true)
655 or in a register (false). This is called by the macro
656 TARGET_RETURN_IN_MEMORY. */
659 c6x_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
661 int size
= int_size_in_bytes (type
);
662 return size
> 2 * UNITS_PER_WORD
|| size
== -1;
665 /* Values which must be returned in the most-significant end of the return
669 c6x_return_in_msb (const_tree valtype
)
671 HOST_WIDE_INT size
= int_size_in_bytes (valtype
);
672 return TARGET_BIG_ENDIAN
&& AGGREGATE_TYPE_P (valtype
) && size
== 3;
675 /* Implement TARGET_CALLEE_COPIES. */
678 c6x_callee_copies (cumulative_args_t cum_v ATTRIBUTE_UNUSED
,
679 machine_mode mode ATTRIBUTE_UNUSED
,
680 const_tree type ATTRIBUTE_UNUSED
,
681 bool named ATTRIBUTE_UNUSED
)
686 /* Return the type to use as __builtin_va_list. */
688 c6x_build_builtin_va_list (void)
690 return build_pointer_type (char_type_node
);
694 c6x_asm_trampoline_template (FILE *f
)
696 fprintf (f
, "\t.long\t0x0000002b\n"); /* mvkl .s2 fnlow,B0 */
697 fprintf (f
, "\t.long\t0x01000028\n"); /* || mvkl .s1 sclow,A2 */
698 fprintf (f
, "\t.long\t0x0000006b\n"); /* mvkh .s2 fnhigh,B0 */
699 fprintf (f
, "\t.long\t0x01000068\n"); /* || mvkh .s1 schigh,A2 */
700 fprintf (f
, "\t.long\t0x00000362\n"); /* b .s2 B0 */
701 fprintf (f
, "\t.long\t0x00008000\n"); /* nop 5 */
702 fprintf (f
, "\t.long\t0x00000000\n"); /* nop */
703 fprintf (f
, "\t.long\t0x00000000\n"); /* nop */
706 /* Emit RTL insns to initialize the variable parts of a trampoline at
707 TRAMP. FNADDR is an RTX for the address of the function's pure
708 code. CXT is an RTX for the static chain value for the function. */
711 c6x_initialize_trampoline (rtx tramp
, tree fndecl
, rtx cxt
)
713 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
714 rtx t1
= copy_to_reg (fnaddr
);
715 rtx t2
= copy_to_reg (cxt
);
716 rtx mask
= gen_reg_rtx (SImode
);
719 emit_block_move (tramp
, assemble_trampoline_template (),
720 GEN_INT (TRAMPOLINE_SIZE
), BLOCK_OP_NORMAL
);
722 emit_move_insn (mask
, GEN_INT (0xffff << 7));
724 for (i
= 0; i
< 4; i
++)
726 rtx mem
= adjust_address (tramp
, SImode
, i
* 4);
727 rtx t
= (i
& 1) ? t2
: t1
;
728 rtx v1
= gen_reg_rtx (SImode
);
729 rtx v2
= gen_reg_rtx (SImode
);
730 emit_move_insn (v1
, mem
);
732 emit_insn (gen_ashlsi3 (v2
, t
, GEN_INT (7)));
734 emit_insn (gen_lshrsi3 (v2
, t
, GEN_INT (9)));
735 emit_insn (gen_andsi3 (v2
, v2
, mask
));
736 emit_insn (gen_iorsi3 (v2
, v2
, v1
));
737 emit_move_insn (mem
, v2
);
739 #ifdef CLEAR_INSN_CACHE
740 tramp
= XEXP (tramp
, 0);
741 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__gnu_clear_cache"),
742 LCT_NORMAL
, VOIDmode
, 2, tramp
, Pmode
,
743 plus_constant (Pmode
, tramp
, TRAMPOLINE_SIZE
),
748 /* Determine whether c6x_output_mi_thunk can succeed. */
751 c6x_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED
,
752 HOST_WIDE_INT delta ATTRIBUTE_UNUSED
,
753 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
754 const_tree function ATTRIBUTE_UNUSED
)
756 return !TARGET_LONG_CALLS
;
759 /* Output the assembler code for a thunk function. THUNK is the
760 declaration for the thunk function itself, FUNCTION is the decl for
761 the target function. DELTA is an immediate constant offset to be
762 added to THIS. If VCALL_OFFSET is nonzero, the word at
763 *(*this + vcall_offset) should be added to THIS. */
766 c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED
,
767 tree thunk ATTRIBUTE_UNUSED
, HOST_WIDE_INT delta
,
768 HOST_WIDE_INT vcall_offset
, tree function
)
771 /* The this parameter is passed as the first argument. */
772 rtx this_rtx
= gen_rtx_REG (Pmode
, REG_A4
);
774 c6x_current_insn
= NULL
;
776 xops
[4] = XEXP (DECL_RTL (function
), 0);
779 output_asm_insn ("b .s2 \t%4", xops
);
781 output_asm_insn ("nop 5", xops
);
784 /* Adjust the this parameter by a fixed constant. */
787 xops
[0] = GEN_INT (delta
);
789 if (delta
>= -16 && delta
<= 15)
791 output_asm_insn ("add .s1 %0, %1, %1", xops
);
793 output_asm_insn ("nop 4", xops
);
795 else if (delta
>= 16 && delta
< 32)
797 output_asm_insn ("add .d1 %0, %1, %1", xops
);
799 output_asm_insn ("nop 4", xops
);
801 else if (delta
>= -32768 && delta
< 32768)
803 output_asm_insn ("mvk .s1 %0, A0", xops
);
804 output_asm_insn ("add .d1 %1, A0, %1", xops
);
806 output_asm_insn ("nop 3", xops
);
810 output_asm_insn ("mvkl .s1 %0, A0", xops
);
811 output_asm_insn ("mvkh .s1 %0, A0", xops
);
812 output_asm_insn ("add .d1 %1, A0, %1", xops
);
814 output_asm_insn ("nop 3", xops
);
818 /* Adjust the this parameter by a value stored in the vtable. */
821 rtx a0tmp
= gen_rtx_REG (Pmode
, REG_A0
);
822 rtx a3tmp
= gen_rtx_REG (Pmode
, REG_A3
);
826 xops
[3] = gen_rtx_MEM (Pmode
, a0tmp
);
827 output_asm_insn ("mv .s1 a4, %2", xops
);
828 output_asm_insn ("ldw .d1t1 %3, %2", xops
);
830 /* Adjust the this parameter. */
831 xops
[0] = gen_rtx_MEM (Pmode
, plus_constant (Pmode
, a0tmp
,
833 if (!memory_operand (xops
[0], Pmode
))
835 rtx tmp2
= gen_rtx_REG (Pmode
, REG_A1
);
836 xops
[0] = GEN_INT (vcall_offset
);
838 output_asm_insn ("mvkl .s1 %0, %1", xops
);
839 output_asm_insn ("mvkh .s1 %0, %1", xops
);
840 output_asm_insn ("nop 2", xops
);
841 output_asm_insn ("add .d1 %2, %1, %2", xops
);
842 xops
[0] = gen_rtx_MEM (Pmode
, a0tmp
);
845 output_asm_insn ("nop 4", xops
);
847 output_asm_insn ("ldw .d1t1 %0, %1", xops
);
848 output_asm_insn ("|| b .s2 \t%4", xops
);
849 output_asm_insn ("nop 4", xops
);
850 output_asm_insn ("add .d1 %2, %1, %2", xops
);
854 /* Return true if EXP goes in small data/bss. */
857 c6x_in_small_data_p (const_tree exp
)
859 /* We want to merge strings, so we never consider them small data. */
860 if (TREE_CODE (exp
) == STRING_CST
)
863 /* Functions are never small data. */
864 if (TREE_CODE (exp
) == FUNCTION_DECL
)
867 if (TREE_CODE (exp
) == VAR_DECL
&& DECL_WEAK (exp
))
870 if (TREE_CODE (exp
) == VAR_DECL
&& DECL_SECTION_NAME (exp
))
872 const char *section
= DECL_SECTION_NAME (exp
);
874 if (strcmp (section
, ".neardata") == 0
875 || strncmp (section
, ".neardata.", 10) == 0
876 || strncmp (section
, ".gnu.linkonce.s.", 16) == 0
877 || strcmp (section
, ".bss") == 0
878 || strncmp (section
, ".bss.", 5) == 0
879 || strncmp (section
, ".gnu.linkonce.sb.", 17) == 0
880 || strcmp (section
, ".rodata") == 0
881 || strncmp (section
, ".rodata.", 8) == 0
882 || strncmp (section
, ".gnu.linkonce.s2.", 17) == 0)
886 return PLACE_IN_SDATA_P (exp
);
891 /* Return a section for X. The only special thing we do here is to
892 honor small data. We don't have a tree type, so we can't use the
893 PLACE_IN_SDATA_P macro we use everywhere else; we choose to place
894 everything sized 8 bytes or smaller into small data. */
897 c6x_select_rtx_section (machine_mode mode
, rtx x
,
898 unsigned HOST_WIDE_INT align
)
900 if (c6x_sdata_mode
== C6X_SDATA_ALL
901 || (c6x_sdata_mode
!= C6X_SDATA_NONE
&& GET_MODE_SIZE (mode
) <= 8))
902 /* ??? Consider using mergeable sdata sections. */
903 return sdata_section
;
905 return default_elf_select_rtx_section (mode
, x
, align
);
909 c6x_elf_select_section (tree decl
, int reloc
,
910 unsigned HOST_WIDE_INT align
)
912 const char *sname
= NULL
;
913 unsigned int flags
= SECTION_WRITE
;
914 if (c6x_in_small_data_p (decl
))
916 switch (categorize_decl_for_section (decl
, reloc
))
927 flags
|= SECTION_BSS
;
934 switch (categorize_decl_for_section (decl
, reloc
))
939 case SECCAT_DATA_REL
:
940 sname
= ".fardata.rel";
942 case SECCAT_DATA_REL_LOCAL
:
943 sname
= ".fardata.rel.local";
945 case SECCAT_DATA_REL_RO
:
946 sname
= ".fardata.rel.ro";
948 case SECCAT_DATA_REL_RO_LOCAL
:
949 sname
= ".fardata.rel.ro.local";
953 flags
|= SECTION_BSS
;
969 /* We might get called with string constants, but get_named_section
970 doesn't like them as they are not DECLs. Also, we need to set
971 flags in that case. */
973 return get_section (sname
, flags
, NULL
);
974 return get_named_section (decl
, sname
, reloc
);
977 return default_elf_select_section (decl
, reloc
, align
);
980 /* Build up a unique section name, expressed as a
981 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
982 RELOC indicates whether the initial value of EXP requires
983 link-time relocations. */
985 static void ATTRIBUTE_UNUSED
986 c6x_elf_unique_section (tree decl
, int reloc
)
988 const char *prefix
= NULL
;
989 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
990 bool one_only
= DECL_COMDAT_GROUP (decl
) && !HAVE_COMDAT_GROUP
;
992 if (c6x_in_small_data_p (decl
))
994 switch (categorize_decl_for_section (decl
, reloc
))
997 prefix
= one_only
? ".s" : ".neardata";
1000 prefix
= one_only
? ".sb" : ".bss";
1002 case SECCAT_SRODATA
:
1003 prefix
= one_only
? ".s2" : ".rodata";
1005 case SECCAT_RODATA_MERGE_STR
:
1006 case SECCAT_RODATA_MERGE_STR_INIT
:
1007 case SECCAT_RODATA_MERGE_CONST
:
1010 case SECCAT_DATA_REL
:
1011 case SECCAT_DATA_REL_LOCAL
:
1012 case SECCAT_DATA_REL_RO
:
1013 case SECCAT_DATA_REL_RO_LOCAL
:
1016 /* Everything else we place into default sections and hope for the
1023 switch (categorize_decl_for_section (decl
, reloc
))
1026 case SECCAT_DATA_REL
:
1027 case SECCAT_DATA_REL_LOCAL
:
1028 case SECCAT_DATA_REL_RO
:
1029 case SECCAT_DATA_REL_RO_LOCAL
:
1030 prefix
= one_only
? ".fd" : ".fardata";
1033 prefix
= one_only
? ".fb" : ".far";
1036 case SECCAT_RODATA_MERGE_STR
:
1037 case SECCAT_RODATA_MERGE_STR_INIT
:
1038 case SECCAT_RODATA_MERGE_CONST
:
1039 prefix
= one_only
? ".fr" : ".const";
1041 case SECCAT_SRODATA
:
1052 const char *name
, *linkonce
;
1055 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
1056 name
= targetm
.strip_name_encoding (name
);
1058 /* If we're using one_only, then there needs to be a .gnu.linkonce
1059 prefix to the section name. */
1060 linkonce
= one_only
? ".gnu.linkonce" : "";
1062 string
= ACONCAT ((linkonce
, prefix
, ".", name
, NULL
));
1064 set_decl_section_name (decl
, string
);
1067 default_unique_section (decl
, reloc
);
1071 c6x_section_type_flags (tree decl
, const char *name
, int reloc
)
1073 unsigned int flags
= 0;
1075 if (strcmp (name
, ".far") == 0
1076 || strncmp (name
, ".far.", 5) == 0)
1077 flags
|= SECTION_BSS
;
1079 flags
|= default_section_type_flags (decl
, name
, reloc
);
1084 /* Checks whether the given CALL_EXPR would use a caller saved
1085 register. This is used to decide whether sibling call optimization
1086 could be performed on the respective function call. */
1089 c6x_call_saved_register_used (tree call_expr
)
1091 CUMULATIVE_ARGS cum_v
;
1092 cumulative_args_t cum
;
1093 HARD_REG_SET call_saved_regset
;
1100 INIT_CUMULATIVE_ARGS (cum_v
, NULL
, NULL
, 0, 0);
1101 cum
= pack_cumulative_args (&cum_v
);
1103 COMPL_HARD_REG_SET (call_saved_regset
, call_used_reg_set
);
1104 for (i
= 0; i
< call_expr_nargs (call_expr
); i
++)
1106 parameter
= CALL_EXPR_ARG (call_expr
, i
);
1107 gcc_assert (parameter
);
1109 /* For an undeclared variable passed as parameter we will get
1110 an ERROR_MARK node here. */
1111 if (TREE_CODE (parameter
) == ERROR_MARK
)
1114 type
= TREE_TYPE (parameter
);
1117 mode
= TYPE_MODE (type
);
1120 if (pass_by_reference (&cum_v
, mode
, type
, true))
1123 type
= build_pointer_type (type
);
1126 parm_rtx
= c6x_function_arg (cum
, mode
, type
, 0);
1128 c6x_function_arg_advance (cum
, mode
, type
, 0);
1133 if (REG_P (parm_rtx
)
1134 && overlaps_hard_reg_set_p (call_saved_regset
, GET_MODE (parm_rtx
),
1137 if (GET_CODE (parm_rtx
) == PARALLEL
)
1139 int n
= XVECLEN (parm_rtx
, 0);
1142 rtx x
= XEXP (XVECEXP (parm_rtx
, 0, n
), 0);
1144 && overlaps_hard_reg_set_p (call_saved_regset
,
1145 GET_MODE (x
), REGNO (x
)))
1153 /* Decide whether we can make a sibling call to a function. DECL is the
1154 declaration of the function being targeted by the call and EXP is the
1155 CALL_EXPR representing the call. */
1158 c6x_function_ok_for_sibcall (tree decl
, tree exp
)
1160 /* Registers A10, A12, B10 and B12 are available as arguments
1161 register but unfortunately caller saved. This makes functions
1162 needing these registers for arguments not suitable for
1164 if (c6x_call_saved_register_used (exp
))
1172 /* When compiling for DSBT, the calling function must be local,
1173 so that when we reload B14 in the sibcall epilogue, it will
1174 not change its value. */
1175 struct cgraph_local_info
*this_func
;
1178 /* Not enough information. */
1181 this_func
= cgraph_node::local_info (current_function_decl
);
1182 return this_func
->local
;
1188 /* Return true if DECL is known to be linked into section SECTION. */
1191 c6x_function_in_section_p (tree decl
, section
*section
)
1193 /* We can only be certain about functions defined in the same
1194 compilation unit. */
1195 if (!TREE_STATIC (decl
))
1198 /* Make sure that SYMBOL always binds to the definition in this
1199 compilation unit. */
1200 if (!targetm
.binds_local_p (decl
))
1203 /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
1204 if (!DECL_SECTION_NAME (decl
))
1206 /* Make sure that we will not create a unique section for DECL. */
1207 if (flag_function_sections
|| DECL_COMDAT_GROUP (decl
))
1211 return function_section (decl
) == section
;
1214 /* Return true if a call to OP, which is a SYMBOL_REF, must be expanded
1217 c6x_long_call_p (rtx op
)
1221 if (!TARGET_LONG_CALLS
)
1224 decl
= SYMBOL_REF_DECL (op
);
1226 /* Try to determine whether the symbol is in the same section as the current
1227 function. Be conservative, and only cater for cases in which the
1228 whole of the current function is placed in the same section. */
1229 if (decl
!= NULL_TREE
1230 && !flag_reorder_blocks_and_partition
1231 && TREE_CODE (decl
) == FUNCTION_DECL
1232 && c6x_function_in_section_p (decl
, current_function_section ()))
1238 /* Emit the sequence for a call. */
1240 c6x_expand_call (rtx retval
, rtx address
, bool sibcall
)
1242 rtx callee
= XEXP (address
, 0);
1245 if (!c6x_call_operand (callee
, Pmode
))
1247 callee
= force_reg (Pmode
, callee
);
1248 address
= change_address (address
, Pmode
, callee
);
1250 call_insn
= gen_rtx_CALL (VOIDmode
, address
, const0_rtx
);
1253 call_insn
= emit_call_insn (call_insn
);
1254 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
1255 gen_rtx_REG (Pmode
, REG_B3
));
1259 if (retval
== NULL_RTX
)
1260 call_insn
= emit_call_insn (call_insn
);
1262 call_insn
= emit_call_insn (gen_rtx_SET (retval
, call_insn
));
1265 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
), pic_offset_table_rtx
);
1268 /* Legitimize PIC addresses. If the address is already position-independent,
1269 we return ORIG. Newly generated position-independent addresses go into a
1270 reg. This is REG if nonzero, otherwise we allocate register(s) as
1271 necessary. PICREG is the register holding the pointer to the PIC offset
1275 legitimize_pic_address (rtx orig
, rtx reg
, rtx picreg
)
1280 if (GET_CODE (addr
) == SYMBOL_REF
|| GET_CODE (addr
) == LABEL_REF
)
1282 int unspec
= UNSPEC_LOAD_GOT
;
1287 gcc_assert (can_create_pseudo_p ());
1288 reg
= gen_reg_rtx (Pmode
);
1292 if (can_create_pseudo_p ())
1293 tmp
= gen_reg_rtx (Pmode
);
1296 emit_insn (gen_movsi_gotoff_high (tmp
, addr
));
1297 emit_insn (gen_movsi_gotoff_lo_sum (tmp
, tmp
, addr
));
1298 emit_insn (gen_load_got_gotoff (reg
, picreg
, tmp
));
1302 tmp
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, addr
), unspec
);
1303 new_rtx
= gen_const_mem (Pmode
, gen_rtx_PLUS (Pmode
, picreg
, tmp
));
1305 emit_move_insn (reg
, new_rtx
);
1307 if (picreg
== pic_offset_table_rtx
)
1308 crtl
->uses_pic_offset_table
= 1;
1312 else if (GET_CODE (addr
) == CONST
|| GET_CODE (addr
) == PLUS
)
1316 if (GET_CODE (addr
) == CONST
)
1318 addr
= XEXP (addr
, 0);
1319 gcc_assert (GET_CODE (addr
) == PLUS
);
1322 if (XEXP (addr
, 0) == picreg
)
1327 gcc_assert (can_create_pseudo_p ());
1328 reg
= gen_reg_rtx (Pmode
);
1331 base
= legitimize_pic_address (XEXP (addr
, 0), reg
, picreg
);
1332 addr
= legitimize_pic_address (XEXP (addr
, 1),
1333 base
== reg
? NULL_RTX
: reg
,
1336 if (GET_CODE (addr
) == CONST_INT
)
1338 gcc_assert (! reload_in_progress
&& ! reload_completed
);
1339 addr
= force_reg (Pmode
, addr
);
1342 if (GET_CODE (addr
) == PLUS
&& CONSTANT_P (XEXP (addr
, 1)))
1344 base
= gen_rtx_PLUS (Pmode
, base
, XEXP (addr
, 0));
1345 addr
= XEXP (addr
, 1);
1348 return gen_rtx_PLUS (Pmode
, base
, addr
);
1354 /* Expand a move operation in mode MODE. The operands are in OPERANDS.
1355 Returns true if no further code must be generated, false if the caller
1356 should generate an insn to move OPERANDS[1] to OPERANDS[0]. */
1359 expand_move (rtx
*operands
, machine_mode mode
)
1361 rtx dest
= operands
[0];
1362 rtx op
= operands
[1];
1364 if ((reload_in_progress
| reload_completed
) == 0
1365 && GET_CODE (dest
) == MEM
&& GET_CODE (op
) != REG
)
1366 operands
[1] = force_reg (mode
, op
);
1367 else if (mode
== SImode
&& symbolic_operand (op
, SImode
))
1371 if (sdata_symbolic_operand (op
, SImode
))
1373 emit_insn (gen_load_sdata_pic (dest
, pic_offset_table_rtx
, op
));
1374 crtl
->uses_pic_offset_table
= 1;
1379 rtx temp
= (reload_completed
|| reload_in_progress
1380 ? dest
: gen_reg_rtx (Pmode
));
1382 operands
[1] = legitimize_pic_address (op
, temp
,
1383 pic_offset_table_rtx
);
1386 else if (reload_completed
1387 && !sdata_symbolic_operand (op
, SImode
))
1389 emit_insn (gen_movsi_high (dest
, op
));
1390 emit_insn (gen_movsi_lo_sum (dest
, dest
, op
));
1397 /* This function is called when we're about to expand an integer compare
1398 operation which performs COMPARISON. It examines the second operand,
1399 and if it is an integer constant that cannot be used directly on the
1400 current machine in a comparison insn, it returns true. */
1402 c6x_force_op_for_comparison_p (enum rtx_code code
, rtx op
)
1404 if (!CONST_INT_P (op
) || satisfies_constraint_Iu4 (op
))
1407 if ((code
== EQ
|| code
== LT
|| code
== GT
)
1408 && !satisfies_constraint_Is5 (op
))
1410 if ((code
== GTU
|| code
== LTU
)
1411 && (!TARGET_INSNS_64
|| !satisfies_constraint_Iu5 (op
)))
1417 /* Emit comparison instruction if necessary, returning the expression
1418 that holds the compare result in the proper mode. Return the comparison
1419 that should be used in the jump insn. */
1422 c6x_expand_compare (rtx comparison
, machine_mode mode
)
1424 enum rtx_code code
= GET_CODE (comparison
);
1425 rtx op0
= XEXP (comparison
, 0);
1426 rtx op1
= XEXP (comparison
, 1);
1428 enum rtx_code jump_code
= code
;
1429 machine_mode op_mode
= GET_MODE (op0
);
1431 if (op_mode
== DImode
&& (code
== NE
|| code
== EQ
) && op1
== const0_rtx
)
1433 rtx t
= gen_reg_rtx (SImode
);
1434 emit_insn (gen_iorsi3 (t
, gen_lowpart (SImode
, op0
),
1435 gen_highpart (SImode
, op0
)));
1439 else if (op_mode
== DImode
)
1444 if (code
== NE
|| code
== GEU
|| code
== LEU
|| code
== GE
|| code
== LE
)
1446 code
= reverse_condition (code
);
1452 split_di (&op0
, 1, lo
, high
);
1453 split_di (&op1
, 1, lo
+ 1, high
+ 1);
1455 if (c6x_force_op_for_comparison_p (code
, high
[1])
1456 || c6x_force_op_for_comparison_p (EQ
, high
[1]))
1457 high
[1] = force_reg (SImode
, high
[1]);
1459 cmp1
= gen_reg_rtx (SImode
);
1460 cmp2
= gen_reg_rtx (SImode
);
1461 emit_insn (gen_rtx_SET (cmp1
, gen_rtx_fmt_ee (code
, SImode
,
1462 high
[0], high
[1])));
1465 if (c6x_force_op_for_comparison_p (code
, lo
[1]))
1466 lo
[1] = force_reg (SImode
, lo
[1]);
1467 emit_insn (gen_rtx_SET (cmp2
, gen_rtx_fmt_ee (code
, SImode
,
1469 emit_insn (gen_andsi3 (cmp1
, cmp1
, cmp2
));
1473 emit_insn (gen_rtx_SET (cmp2
, gen_rtx_EQ (SImode
, high
[0],
1477 else if (code
== LT
)
1479 if (c6x_force_op_for_comparison_p (code
, lo
[1]))
1480 lo
[1] = force_reg (SImode
, lo
[1]);
1481 emit_insn (gen_cmpsi_and (cmp2
, gen_rtx_fmt_ee (code
, SImode
,
1483 lo
[0], lo
[1], cmp2
));
1484 emit_insn (gen_iorsi3 (cmp1
, cmp1
, cmp2
));
1488 else if (TARGET_FP
&& !flag_finite_math_only
1489 && (op_mode
== DFmode
|| op_mode
== SFmode
)
1490 && code
!= EQ
&& code
!= NE
&& code
!= LT
&& code
!= GT
1491 && code
!= UNLE
&& code
!= UNGE
)
1493 enum rtx_code code1
, code2
, code3
;
1494 rtx (*fn
) (rtx
, rtx
, rtx
, rtx
, rtx
);
1506 code1
= code
== LE
|| code
== UNGT
? LT
: GT
;
1531 cmp
= gen_reg_rtx (SImode
);
1532 emit_insn (gen_rtx_SET (cmp
, gen_rtx_fmt_ee (code1
, SImode
, op0
, op1
)));
1533 fn
= op_mode
== DFmode
? gen_cmpdf_ior
: gen_cmpsf_ior
;
1534 emit_insn (fn (cmp
, gen_rtx_fmt_ee (code2
, SImode
, op0
, op1
),
1536 if (code3
!= UNKNOWN
)
1537 emit_insn (fn (cmp
, gen_rtx_fmt_ee (code3
, SImode
, op0
, op1
),
1540 else if (op_mode
== SImode
&& (code
== NE
|| code
== EQ
) && op1
== const0_rtx
)
1545 is_fp_libfunc
= !TARGET_FP
&& (op_mode
== DFmode
|| op_mode
== SFmode
);
1547 if ((code
== NE
|| code
== GEU
|| code
== LEU
|| code
== GE
|| code
== LE
)
1550 code
= reverse_condition (code
);
1553 else if (code
== UNGE
)
1558 else if (code
== UNLE
)
1573 libfunc
= op_mode
== DFmode
? eqdf_libfunc
: eqsf_libfunc
;
1576 libfunc
= op_mode
== DFmode
? nedf_libfunc
: nesf_libfunc
;
1579 libfunc
= op_mode
== DFmode
? gtdf_libfunc
: gtsf_libfunc
;
1582 libfunc
= op_mode
== DFmode
? gedf_libfunc
: gesf_libfunc
;
1585 libfunc
= op_mode
== DFmode
? ltdf_libfunc
: ltsf_libfunc
;
1588 libfunc
= op_mode
== DFmode
? ledf_libfunc
: lesf_libfunc
;
1595 cmp
= emit_library_call_value (libfunc
, 0, LCT_CONST
, SImode
, 2,
1596 op0
, op_mode
, op1
, op_mode
);
1597 insns
= get_insns ();
1600 emit_libcall_block (insns
, cmp
, cmp
,
1601 gen_rtx_fmt_ee (code
, SImode
, op0
, op1
));
1605 cmp
= gen_reg_rtx (SImode
);
1606 if (c6x_force_op_for_comparison_p (code
, op1
))
1607 op1
= force_reg (SImode
, op1
);
1608 emit_insn (gen_rtx_SET (cmp
, gen_rtx_fmt_ee (code
, SImode
,
1613 return gen_rtx_fmt_ee (jump_code
, mode
, cmp
, const0_rtx
);
1616 /* Return one word of double-word value OP. HIGH_P is true to select the
1617 high part, false to select the low part. When encountering auto-increment
1618 addressing, we make the assumption that the low part is going to be accessed
1622 c6x_subword (rtx op
, bool high_p
)
1627 mode
= GET_MODE (op
);
1628 if (mode
== VOIDmode
)
1631 if (TARGET_BIG_ENDIAN
? !high_p
: high_p
)
1632 byte
= UNITS_PER_WORD
;
1638 rtx addr
= XEXP (op
, 0);
1639 if (GET_CODE (addr
) == PLUS
|| REG_P (addr
))
1640 return adjust_address (op
, word_mode
, byte
);
1641 /* FIXME: should really support autoincrement addressing for
1642 multi-word modes. */
1646 return simplify_gen_subreg (word_mode
, op
, mode
, byte
);
1649 /* Split one or more DImode RTL references into pairs of SImode
1650 references. The RTL can be REG, offsettable MEM, integer constant, or
1651 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
1652 split and "num" is its length. lo_half and hi_half are output arrays
1653 that parallel "operands". */
1656 split_di (rtx operands
[], int num
, rtx lo_half
[], rtx hi_half
[])
1660 rtx op
= operands
[num
];
1662 lo_half
[num
] = c6x_subword (op
, false);
1663 hi_half
[num
] = c6x_subword (op
, true);
1667 /* Return true if VAL is a mask valid for a clr instruction. */
1669 c6x_valid_mask_p (HOST_WIDE_INT val
)
1672 for (i
= 0; i
< 32; i
++)
1673 if (!(val
& ((unsigned HOST_WIDE_INT
)1 << i
)))
1676 if (val
& ((unsigned HOST_WIDE_INT
)1 << i
))
1679 if (!(val
& ((unsigned HOST_WIDE_INT
)1 << i
)))
1684 /* Expand a block move for a movmemM pattern. */
1687 c6x_expand_movmem (rtx dst
, rtx src
, rtx count_exp
, rtx align_exp
,
1688 rtx expected_align_exp ATTRIBUTE_UNUSED
,
1689 rtx expected_size_exp ATTRIBUTE_UNUSED
)
1691 unsigned HOST_WIDE_INT align
= 1;
1692 unsigned HOST_WIDE_INT src_mem_align
, dst_mem_align
, min_mem_align
;
1693 unsigned HOST_WIDE_INT count
= 0, offset
= 0;
1694 unsigned int biggest_move
= TARGET_STDW
? 8 : 4;
1696 if (CONST_INT_P (align_exp
))
1697 align
= INTVAL (align_exp
);
1699 src_mem_align
= MEM_ALIGN (src
) / BITS_PER_UNIT
;
1700 dst_mem_align
= MEM_ALIGN (dst
) / BITS_PER_UNIT
;
1701 min_mem_align
= MIN (src_mem_align
, dst_mem_align
);
1703 if (min_mem_align
> align
)
1704 align
= min_mem_align
/ BITS_PER_UNIT
;
1705 if (src_mem_align
< align
)
1706 src_mem_align
= align
;
1707 if (dst_mem_align
< align
)
1708 dst_mem_align
= align
;
1710 if (CONST_INT_P (count_exp
))
1711 count
= INTVAL (count_exp
);
1715 /* Make sure we don't need to care about overflow later on. */
1716 if (count
> ((unsigned HOST_WIDE_INT
) 1 << 30))
1719 if (count
>= 28 && (count
& 3) == 0 && align
>= 4)
1721 tree dst_expr
= MEM_EXPR (dst
);
1722 tree src_expr
= MEM_EXPR (src
);
1723 rtx fn
= TARGET_INSNS_64PLUS
? strasgi64p_libfunc
: strasgi_libfunc
;
1724 rtx srcreg
= force_reg (Pmode
, XEXP (src
, 0));
1725 rtx dstreg
= force_reg (Pmode
, XEXP (dst
, 0));
1728 mark_addressable (src_expr
);
1730 mark_addressable (dst_expr
);
1731 emit_library_call (fn
, LCT_NORMAL
, VOIDmode
, 3,
1732 dstreg
, Pmode
, srcreg
, Pmode
, count_exp
, SImode
);
1736 if (biggest_move
> align
&& !TARGET_INSNS_64
)
1737 biggest_move
= align
;
1739 if (count
/ biggest_move
> 7)
1744 rtx reg
, reg_lowpart
;
1745 machine_mode srcmode
, dstmode
;
1746 unsigned HOST_WIDE_INT src_size
, dst_size
, src_left
;
1750 while (biggest_move
> count
)
1753 src_size
= dst_size
= biggest_move
;
1754 if (src_size
> src_mem_align
&& src_size
== 2)
1756 if (dst_size
> dst_mem_align
&& dst_size
== 2)
1759 if (dst_size
> src_size
)
1760 dst_size
= src_size
;
1762 srcmode
= mode_for_size (src_size
* BITS_PER_UNIT
, MODE_INT
, 0);
1763 dstmode
= mode_for_size (dst_size
* BITS_PER_UNIT
, MODE_INT
, 0);
1765 reg_lowpart
= reg
= gen_reg_rtx (srcmode
);
1768 reg
= gen_reg_rtx (SImode
);
1769 reg_lowpart
= gen_lowpart (srcmode
, reg
);
1772 srcmem
= adjust_address (copy_rtx (src
), srcmode
, offset
);
1774 if (src_size
> src_mem_align
)
1776 enum insn_code icode
= (srcmode
== SImode
? CODE_FOR_movmisalignsi
1777 : CODE_FOR_movmisaligndi
);
1778 emit_insn (GEN_FCN (icode
) (reg_lowpart
, srcmem
));
1781 emit_move_insn (reg_lowpart
, srcmem
);
1783 src_left
= src_size
;
1784 shift
= TARGET_BIG_ENDIAN
? (src_size
- dst_size
) * BITS_PER_UNIT
: 0;
1785 while (src_left
> 0)
1787 rtx dstreg
= reg_lowpart
;
1789 if (src_size
> dst_size
)
1792 int shift_amount
= shift
& (BITS_PER_WORD
- 1);
1794 srcword
= operand_subword_force (srcword
, src_left
>= 4 ? 0 : 4,
1796 if (shift_amount
> 0)
1798 dstreg
= gen_reg_rtx (SImode
);
1799 emit_insn (gen_lshrsi3 (dstreg
, srcword
,
1800 GEN_INT (shift_amount
)));
1804 dstreg
= gen_lowpart (dstmode
, dstreg
);
1807 dstmem
= adjust_address (copy_rtx (dst
), dstmode
, offset
);
1808 if (dst_size
> dst_mem_align
)
1810 enum insn_code icode
= (dstmode
== SImode
? CODE_FOR_movmisalignsi
1811 : CODE_FOR_movmisaligndi
);
1812 emit_insn (GEN_FCN (icode
) (dstmem
, dstreg
));
1815 emit_move_insn (dstmem
, dstreg
);
1817 if (TARGET_BIG_ENDIAN
)
1818 shift
-= dst_size
* BITS_PER_UNIT
;
1820 shift
+= dst_size
* BITS_PER_UNIT
;
1822 src_left
-= dst_size
;
1829 /* Subroutine of print_address_operand, print a single address offset OFF for
1830 a memory access of mode MEM_MODE, choosing between normal form and scaled
1831 form depending on the type of the insn. Misaligned memory references must
1832 use the scaled form. */
1835 print_address_offset (FILE *file
, rtx off
, machine_mode mem_mode
)
1839 if (c6x_current_insn
!= NULL_RTX
)
1841 pat
= PATTERN (c6x_current_insn
);
1842 if (GET_CODE (pat
) == COND_EXEC
)
1843 pat
= COND_EXEC_CODE (pat
);
1844 if (GET_CODE (pat
) == PARALLEL
)
1845 pat
= XVECEXP (pat
, 0, 0);
1847 if (GET_CODE (pat
) == SET
1848 && GET_CODE (SET_SRC (pat
)) == UNSPEC
1849 && XINT (SET_SRC (pat
), 1) == UNSPEC_MISALIGNED_ACCESS
)
1851 gcc_assert (CONST_INT_P (off
)
1852 && (INTVAL (off
) & (GET_MODE_SIZE (mem_mode
) - 1)) == 0);
1853 fprintf (file
, "[" HOST_WIDE_INT_PRINT_DEC
"]",
1854 INTVAL (off
) / GET_MODE_SIZE (mem_mode
));
1859 output_address (mem_mode
, off
);
1864 c6x_print_operand_punct_valid_p (unsigned char c
)
1866 return c
== '$' || c
== '.' || c
== '|';
1869 static void c6x_print_operand (FILE *, rtx
, int);
1871 /* Subroutine of c6x_print_operand; used to print a memory reference X to FILE. */
1874 c6x_print_address_operand (FILE *file
, rtx x
, machine_mode mem_mode
)
1877 switch (GET_CODE (x
))
1881 if (GET_CODE (x
) == POST_MODIFY
)
1882 output_address (mem_mode
, XEXP (x
, 0));
1883 off
= XEXP (XEXP (x
, 1), 1);
1884 if (XEXP (x
, 0) == stack_pointer_rtx
)
1886 if (GET_CODE (x
) == PRE_MODIFY
)
1887 gcc_assert (INTVAL (off
) > 0);
1889 gcc_assert (INTVAL (off
) < 0);
1891 if (CONST_INT_P (off
) && INTVAL (off
) < 0)
1893 fprintf (file
, "--");
1894 off
= GEN_INT (-INTVAL (off
));
1897 fprintf (file
, "++");
1898 if (GET_CODE (x
) == PRE_MODIFY
)
1899 output_address (mem_mode
, XEXP (x
, 0));
1900 print_address_offset (file
, off
, mem_mode
);
1905 if (CONST_INT_P (off
) && INTVAL (off
) < 0)
1907 fprintf (file
, "-");
1908 off
= GEN_INT (-INTVAL (off
));
1911 fprintf (file
, "+");
1912 output_address (mem_mode
, XEXP (x
, 0));
1913 print_address_offset (file
, off
, mem_mode
);
1917 gcc_assert (XEXP (x
, 0) != stack_pointer_rtx
);
1918 fprintf (file
, "--");
1919 output_address (mem_mode
, XEXP (x
, 0));
1920 fprintf (file
, "[1]");
1923 fprintf (file
, "++");
1924 output_address (mem_mode
, XEXP (x
, 0));
1925 fprintf (file
, "[1]");
1928 gcc_assert (XEXP (x
, 0) != stack_pointer_rtx
);
1929 output_address (mem_mode
, XEXP (x
, 0));
1930 fprintf (file
, "++[1]");
1933 output_address (mem_mode
, XEXP (x
, 0));
1934 fprintf (file
, "--[1]");
1940 gcc_assert (sdata_symbolic_operand (x
, Pmode
));
1941 fprintf (file
, "+B14(");
1942 output_addr_const (file
, x
);
1943 fprintf (file
, ")");
1947 switch (XINT (x
, 1))
1949 case UNSPEC_LOAD_GOT
:
1950 fputs ("$GOT(", file
);
1951 output_addr_const (file
, XVECEXP (x
, 0, 0));
1954 case UNSPEC_LOAD_SDATA
:
1955 output_addr_const (file
, XVECEXP (x
, 0, 0));
1963 gcc_assert (GET_CODE (x
) != MEM
);
1964 c6x_print_operand (file
, x
, 0);
1969 /* Return a single character, which is either 'l', 's', 'd' or 'm', which
1970 specifies the functional unit used by INSN. */
1973 c6x_get_unit_specifier (rtx_insn
*insn
)
1975 enum attr_units units
;
1977 if (insn_info
.exists ())
1979 int unit
= INSN_INFO_ENTRY (INSN_UID (insn
)).reservation
;
1980 return c6x_unit_names
[unit
][0];
1983 units
= get_attr_units (insn
);
2004 /* Prints the unit specifier field. */
2006 c6x_print_unit_specifier_field (FILE *file
, rtx_insn
*insn
)
2008 enum attr_units units
= get_attr_units (insn
);
2009 enum attr_cross cross
= get_attr_cross (insn
);
2010 enum attr_dest_regfile rf
= get_attr_dest_regfile (insn
);
2014 if (units
== UNITS_D_ADDR
)
2016 enum attr_addr_regfile arf
= get_attr_addr_regfile (insn
);
2018 gcc_assert (arf
!= ADDR_REGFILE_UNKNOWN
);
2019 half
= arf
== ADDR_REGFILE_A
? 1 : 2;
2020 t_half
= rf
== DEST_REGFILE_A
? 1 : 2;
2021 fprintf (file
, ".d%dt%d", half
, t_half
);
2025 if (insn_info
.exists ())
2027 int unit
= INSN_INFO_ENTRY (INSN_UID (insn
)).reservation
;
2029 fputs (c6x_unit_names
[unit
], file
);
2030 if (cross
== CROSS_Y
)
2035 gcc_assert (rf
!= DEST_REGFILE_UNKNOWN
);
2036 unitspec
= c6x_get_unit_specifier (insn
);
2037 half
= rf
== DEST_REGFILE_A
? 1 : 2;
2038 fprintf (file
, ".%c%d%s", unitspec
, half
, cross
== CROSS_Y
? "x" : "");
2041 /* Output assembly language output for the address ADDR to FILE. */
2043 c6x_print_operand_address (FILE *file
, machine_mode mode
, rtx addr
)
2045 c6x_print_address_operand (file
, addr
, mode
);
2048 /* Print an operand, X, to FILE, with an optional modifier in CODE.
2051 $ -- print the unit specifier field for the instruction.
2052 . -- print the predicate for the instruction or an emptry string for an
2054 | -- print "||" if the insn should be issued in parallel with the previous
2057 C -- print an opcode suffix for a reversed condition
2058 d -- H, W or D as a suffix for ADDA, based on the factor given by the
2060 D -- print either B, H, W or D as a suffix for ADDA, based on the size of
2062 J -- print a predicate
2063 j -- like J, but use reverse predicate
2064 k -- treat a CONST_INT as a register number and print it as a register
2065 k -- like k, but print out a doubleword register
2066 n -- print an integer operand, negated
2067 p -- print the low part of a DImode register
2068 P -- print the high part of a DImode register
2069 r -- print the absolute value of an integer operand, shifted right by 1
2070 R -- print the absolute value of an integer operand, shifted right by 2
2071 f -- the first clear bit in an integer operand assumed to be a mask for
2073 F -- the last clear bit in such a mask
2074 s -- the first set bit in an integer operand assumed to be a mask for
2076 S -- the last set bit in such a mask
2077 U -- print either 1 or 2, depending on the side of the machine used by
2081 c6x_print_operand (FILE *file
, rtx x
, int code
)
2090 if (GET_MODE (c6x_current_insn
) != TImode
)
2096 c6x_print_unit_specifier_field (file
, c6x_current_insn
);
2102 x
= current_insn_predicate
;
2105 unsigned int regno
= REGNO (XEXP (x
, 0));
2107 if (GET_CODE (x
) == EQ
)
2109 fputs (reg_names
[regno
], file
);
2115 mode
= GET_MODE (x
);
2122 enum rtx_code c
= GET_CODE (x
);
2124 c
= swap_condition (c
);
2125 fputs (GET_RTX_NAME (c
), file
);
2132 unsigned int regno
= REGNO (XEXP (x
, 0));
2133 if ((GET_CODE (x
) == EQ
) == (code
== 'J'))
2135 fputs (reg_names
[regno
], file
);
2140 gcc_assert (GET_CODE (x
) == CONST_INT
);
2142 fprintf (file
, "%s", reg_names
[v
]);
2145 gcc_assert (GET_CODE (x
) == CONST_INT
);
2147 gcc_assert ((v
& 1) == 0);
2148 fprintf (file
, "%s:%s", reg_names
[v
+ 1], reg_names
[v
]);
2155 gcc_assert (GET_CODE (x
) == CONST_INT
);
2157 for (i
= 0; i
< 32; i
++)
2159 HOST_WIDE_INT tst
= v
& 1;
2160 if (((code
== 'f' || code
== 'F') && !tst
)
2161 || ((code
== 's' || code
== 'S') && tst
))
2165 if (code
== 'f' || code
== 's')
2167 fprintf (file
, "%d", i
);
2172 HOST_WIDE_INT tst
= v
& 1;
2173 if ((code
== 'F' && tst
) || (code
== 'S' && !tst
))
2177 fprintf (file
, "%d", i
- 1);
2181 gcc_assert (GET_CODE (x
) == CONST_INT
);
2182 output_addr_const (file
, GEN_INT (-INTVAL (x
)));
2186 gcc_assert (GET_CODE (x
) == CONST_INT
);
2190 output_addr_const (file
, GEN_INT (v
>> 1));
2194 gcc_assert (GET_CODE (x
) == CONST_INT
);
2198 output_addr_const (file
, GEN_INT (v
>> 2));
2202 gcc_assert (GET_CODE (x
) == CONST_INT
);
2204 fputs (v
== 2 ? "h" : v
== 4 ? "w" : "d", file
);
2209 gcc_assert (GET_CODE (x
) == REG
);
2213 fputs (reg_names
[v
], file
);
2218 if (GET_CODE (x
) == CONST
)
2221 gcc_assert (GET_CODE (x
) == PLUS
);
2222 gcc_assert (GET_CODE (XEXP (x
, 1)) == CONST_INT
);
2223 v
= INTVAL (XEXP (x
, 1));
2227 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
2229 t
= SYMBOL_REF_DECL (x
);
2231 v
|= DECL_ALIGN_UNIT (t
);
2233 v
|= TYPE_ALIGN_UNIT (TREE_TYPE (t
));
2246 if (GET_CODE (x
) == PLUS
2247 || GET_RTX_CLASS (GET_CODE (x
)) == RTX_AUTOINC
)
2249 if (GET_CODE (x
) == CONST
|| GET_CODE (x
) == SYMBOL_REF
)
2251 gcc_assert (sdata_symbolic_operand (x
, Pmode
));
2256 gcc_assert (REG_P (x
));
2257 if (A_REGNO_P (REGNO (x
)))
2259 if (B_REGNO_P (REGNO (x
)))
2264 switch (GET_CODE (x
))
2267 if (GET_MODE_SIZE (mode
) == 8)
2268 fprintf (file
, "%s:%s", reg_names
[REGNO (x
) + 1],
2269 reg_names
[REGNO (x
)]);
2271 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
2276 gcc_assert (XEXP (x
, 0) != stack_pointer_rtx
);
2277 c6x_print_address_operand (file
, XEXP (x
, 0), GET_MODE (x
));
2282 output_addr_const (file
, x
);
2287 output_addr_const (file
, x
);
2291 output_operand_lossage ("invalid const_double operand");
2295 output_addr_const (file
, x
);
2300 /* Return TRUE if OP is a valid memory address with a base register of
2301 class C. If SMALL_OFFSET is true, we disallow memory references which would
2302 require a long offset with B14/B15. */
2305 c6x_mem_operand (rtx op
, enum reg_class c
, bool small_offset
)
2307 machine_mode mode
= GET_MODE (op
);
2308 rtx base
= XEXP (op
, 0);
2309 switch (GET_CODE (base
))
2315 && (XEXP (base
, 0) == stack_pointer_rtx
2316 || XEXP (base
, 0) == pic_offset_table_rtx
))
2318 if (!c6x_legitimate_address_p_1 (mode
, base
, true, true))
2329 base
= XEXP (base
, 0);
2335 gcc_assert (sdata_symbolic_operand (base
, Pmode
));
2336 return !small_offset
&& c
== B_REGS
;
2341 return TEST_HARD_REG_BIT (reg_class_contents
[ (int) (c
)], REGNO (base
));
/* Returns true if X is a valid address for use in a memory reference
   of mode MODE.  If STRICT is true, we do not allow pseudo registers
   in the address.  NO_LARGE_OFFSET is true if we are examining an
   address for use in a load or store misaligned instruction, or
   recursively examining an operand inside a PRE/POST_MODIFY.  */

bool
c6x_legitimate_address_p_1 (machine_mode mode, rtx x, bool strict,
                            bool no_large_offset)
{
  int size, size1;
  HOST_WIDE_INT off;
  enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case PRE_MODIFY:
    case POST_MODIFY:
      /* We can't split these into word-sized pieces yet.  */
      if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        return false;
      if (GET_CODE (XEXP (x, 1)) != PLUS)
        return false;
      if (!c6x_legitimate_address_p_1 (mode, XEXP (x, 1), strict, true))
        return false;
      if (!rtx_equal_p (XEXP (x, 0), XEXP (XEXP (x, 1), 0)))
        return false;
      /* fall through */

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
      /* We can't split these into word-sized pieces yet.  */
      if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
        return false;
      x = XEXP (x, 0);
      /* fall through */

    case REG:
      if (strict)
        return REGNO_OK_FOR_BASE_STRICT_P (REGNO (x));
      else
        return REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x));

    case PLUS:
      if (!REG_P (XEXP (x, 0))
          || !c6x_legitimate_address_p_1 (mode, XEXP (x, 0), strict, false))
        return false;
      /* We cannot ensure currently that both registers end up in the
         same register file.  */
      if (REG_P (XEXP (x, 1)))
        return false;

      if (mode == BLKmode)
        size = 4;
      else if (mode == VOIDmode)
        /* ??? This can happen during ivopts.  */
        size = 1;
      else
        size = GET_MODE_SIZE (mode);

      if (flag_pic
          && GET_CODE (XEXP (x, 1)) == UNSPEC
          && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_SDATA
          && XEXP (x, 0) == pic_offset_table_rtx
          && sdata_symbolic_operand (XVECEXP (XEXP (x, 1), 0, 0), SImode))
        return !no_large_offset && size <= 4;
      if (flag_pic
          && GET_CODE (XEXP (x, 1)) == UNSPEC
          && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_GOT
          && XEXP (x, 0) == pic_offset_table_rtx
          && (GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == SYMBOL_REF
              || GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == LABEL_REF))
        return !no_large_offset;

      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
        return false;

      off = INTVAL (XEXP (x, 1));

      /* If the machine does not have doubleword load/stores, we'll use
         word size accesses.  */
      size1 = size;
      if (size == 2 * UNITS_PER_WORD && !TARGET_STDW)
        size1 = UNITS_PER_WORD;

      if (((HOST_WIDE_INT)size1 - 1) & off)
        return false;
      off /= size1;
      if (off > -32 && off < (size1 == size ? 32 : 28))
        return true;
      if (no_large_offset || code != PLUS || XEXP (x, 0) != stack_pointer_rtx
          || size1 > UNITS_PER_WORD)
        return false;
      return off >= 0 && off < 32768;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return (!no_large_offset
              /* With -fpic, we must wrap it in an unspec to show the B14
                 dependency.  */
              && !flag_pic
              && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
              && sdata_symbolic_operand (x, Pmode));

    default:
      return false;
    }
}

static bool
c6x_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  return c6x_legitimate_address_p_1 (mode, x, strict, false);
}
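
/* A minimal standalone sketch (editorial addition, not part of GCC) of the
   constant-offset rule checked above, for the common case where the access
   is not SP-relative and size1 == size: the offset must be naturally
   aligned, and the scaled offset must fit the instruction's small constant
   field.  */
static int ATTRIBUTE_UNUSED
sketch_small_offset_ok (long off, long size)
{
  if ((size - 1) & off)         /* Must be a multiple of the access size.  */
    return 0;
  off /= size;                  /* The hardware scales the constant.  */
  return off > -32 && off < 32; /* Mirrors the size1 == size test above.  */
}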
static bool
c6x_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
                           rtx x ATTRIBUTE_UNUSED)
{
  return true;
}

/* Implements TARGET_PREFERRED_RENAME_CLASS.  */
static reg_class_t
c6x_preferred_rename_class (reg_class_t cl)
{
  if (cl == A_REGS)
    return NONPREDICATE_A_REGS;
  if (cl == B_REGS)
    return NONPREDICATE_B_REGS;
  if (cl == ALL_REGS || cl == GENERAL_REGS)
    return NONPREDICATE_REGS;
  return NO_REGS;
}
/* Implements FINAL_PRESCAN_INSN.  */
void
c6x_final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
                        int noperands ATTRIBUTE_UNUSED)
{
  c6x_current_insn = insn;
}
/* A structure to describe the stack layout of a function.  The layout is
   as follows:

   [saved frame pointer (or possibly padding0)]
   --> incoming stack pointer, new hard frame pointer
   [saved call-used regs]
   [optional padding1]
   --> soft frame pointer
   [frame]
   [outgoing arguments]
   [optional padding2]

   The structure members are laid out in this order.  */

struct c6x_frame
{
  int padding0;
  /* Number of registers to save.  */
  int nregs;
  int padding1;
  HOST_WIDE_INT frame;
  int outgoing_arguments_size;
  int padding2;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to the incoming stack pointer (which
     becomes HARD_FRAME_POINTER).  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT b3_offset;

  /* True if we should call push_rts/pop_rts to save and restore
     registers.  */
  bool push_rts;
};
/* Return true if we need to save and modify the PIC register in the
   prologue.  */

static bool
must_reload_pic_reg_p (void)
{
  struct cgraph_local_info *i = NULL;

  if (!TARGET_DSBT || !flag_pic)
    return false;

  i = cgraph_node::local_info (current_function_decl);

  if ((crtl->uses_pic_offset_table || !crtl->is_leaf) && !i->local)
    return true;
  return false;
}
/* Return 1 if we need to save REGNO.  */
static int
c6x_save_reg (unsigned int regno)
{
  return ((df_regs_ever_live_p (regno)
           && !call_used_regs[regno]
           && !fixed_regs[regno])
          || (regno == RETURN_ADDR_REGNO
              && (df_regs_ever_live_p (regno)
                  || !crtl->is_leaf))
          || (regno == PIC_OFFSET_TABLE_REGNUM
              && must_reload_pic_reg_p ()));
}
/* Examine the number of regs NREGS we've determined we must save.
   Return true if we should use __c6xabi_push_rts/__c6xabi_pop_rts for
   prologue and epilogue.  */

static bool
use_push_rts_p (int nregs)
{
  if (TARGET_INSNS_64PLUS && optimize_function_for_size_p (cfun)
      && !cfun->machine->contains_sibcall
      && !cfun->returns_struct
      && !TARGET_LONG_CALLS
      && nregs >= 6 && !frame_pointer_needed)
    return true;
  return false;
}
/* Return the number of saved general-purpose registers.  */

static int
c6x_nsaved_regs (void)
{
  int nregs = 0;
  int regno;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (c6x_save_reg (regno))
      nregs++;
  return nregs;
}
/* The safe debug order mandated by the ABI.  */
static unsigned reg_save_order[] =
{
  REG_A10, REG_A11, REG_A12, REG_A13,
  REG_A14, REG_B3,
  REG_B10, REG_B11, REG_B12, REG_B13,
  REG_B14, REG_A15
};

#define N_SAVE_ORDER (sizeof reg_save_order / sizeof *reg_save_order)
/* Compute the layout of the stack frame and store it in FRAME.  */

static void
c6x_compute_frame_layout (struct c6x_frame *frame)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int nregs;

  /* We use the four bytes which are technically inside the caller's frame,
     usually to save the frame pointer.  */
  offset = -4;
  frame->padding0 = 0;
  nregs = c6x_nsaved_regs ();
  frame->push_rts = false;
  frame->b3_offset = 0;
  if (use_push_rts_p (nregs))
    {
      frame->push_rts = true;
      frame->b3_offset = (TARGET_BIG_ENDIAN ? -12 : -13) * 4;
      nregs = 14;
    }
  else if (c6x_save_reg (REG_B3))
    {
      int idx;
      for (idx = N_SAVE_ORDER - 1; reg_save_order[idx] != REG_B3; idx--)
        {
          if (c6x_save_reg (reg_save_order[idx]))
            frame->b3_offset -= 4;
        }
    }
  frame->nregs = nregs;

  if (size == 0 && nregs == 0)
    {
      frame->padding0 = 4;
      frame->padding1 = frame->padding2 = 0;
      frame->frame_pointer_offset = frame->to_allocate = 0;
      frame->outgoing_arguments_size = 0;
      return;
    }

  if (!frame->push_rts)
    offset += frame->nregs * 4;

  if (offset == 0 && size == 0 && crtl->outgoing_args_size == 0
      && !crtl->is_leaf)
    /* Don't use the bottom of the caller's frame if we have no
       allocation of our own and call other functions.  */
    frame->padding0 = frame->padding1 = 4;
  else if (offset & 4)
    frame->padding1 = 4;
  else
    frame->padding1 = 0;

  offset += frame->padding0 + frame->padding1;
  frame->frame_pointer_offset = offset;
  offset += size;

  frame->outgoing_arguments_size = crtl->outgoing_args_size;
  offset += frame->outgoing_arguments_size;

  if ((offset & 4) == 0)
    frame->padding2 = 8;
  else
    frame->padding2 = 4;
  frame->to_allocate = offset + frame->padding2;
}
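
/* Editorial sketch of the padding arithmetic above, assuming hypothetical
   inputs and the common path only: no push_rts, padding0 == 0, and no
   special leaf handling.  For nregs == 2 and size == 8 with no outgoing
   arguments this yields 24, i.e. the saved registers plus locals padded so
   the stack stays 8-byte aligned.  */
static long ATTRIBUTE_UNUSED
sketch_to_allocate (int nregs, long size, long outgoing)
{
  long offset = -4;                     /* Word inside the caller's frame.  */
  offset += nregs * 4;                  /* Callee-saved register area.  */
  long padding1 = (offset & 4) ? 4 : 0;
  offset += padding1;                   /* This is frame_pointer_offset.  */
  offset += size + outgoing;            /* Locals and outgoing arguments.  */
  long padding2 = ((offset & 4) == 0) ? 8 : 4;
  return offset + padding2;
}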
/* Return the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

HOST_WIDE_INT
c6x_initial_elimination_offset (int from, int to)
{
  struct c6x_frame frame;
  c6x_compute_frame_layout (&frame);

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return 0;
  else if (from == FRAME_POINTER_REGNUM
           && to == HARD_FRAME_POINTER_REGNUM)
    return -frame.frame_pointer_offset;
  else
    {
      gcc_assert (to == STACK_POINTER_REGNUM);

      if (from == ARG_POINTER_REGNUM)
        return frame.to_allocate + (frame.push_rts ? 56 : 0);

      gcc_assert (from == FRAME_POINTER_REGNUM);
      return frame.to_allocate - frame.frame_pointer_offset;
    }
}
/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  Frame pointer elimination is automatically handled.  */

static bool
c6x_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  if (to == STACK_POINTER_REGNUM)
    return !frame_pointer_needed;
  return true;
}
/* Emit insns to increment the stack pointer by OFFSET.  If
   FRAME_RELATED_P, set the RTX_FRAME_RELATED_P flag on the insns.
   Does nothing if the offset is zero.  */

static void
emit_add_sp_const (HOST_WIDE_INT offset, bool frame_related_p)
{
  rtx to_add = GEN_INT (offset);
  rtx orig_to_add = to_add;
  rtx_insn *insn;

  if (offset == 0)
    return;

  if (offset < -32768 || offset > 32767)
    {
      rtx reg = gen_rtx_REG (SImode, REG_A0);
      rtx low = GEN_INT (trunc_int_for_mode (offset, HImode));

      insn = emit_insn (gen_movsi_high (reg, low));
      if (frame_related_p)
        RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_insn (gen_movsi_lo_sum (reg, reg, to_add));
      if (frame_related_p)
        RTX_FRAME_RELATED_P (insn) = 1;
      to_add = reg;
    }
  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                                to_add));
  if (frame_related_p)
    {
      if (REG_P (to_add))
        add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                      gen_rtx_SET (stack_pointer_rtx,
                                   gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                                 orig_to_add)));

      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
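
/* Editorial sketch of the constant splitting done above for out-of-range
   offsets, in plain C: movsi_high is assumed to set the upper half and
   movsi_lo_sum to add the sign-extended low 16 bits, so the low part
   computed here must pair with a high part such that high + low == offset.  */
static int ATTRIBUTE_UNUSED
sketch_lo16 (int offset)
{
  /* Same value trunc_int_for_mode (offset, HImode) produces: the low
     16 bits, sign-extended.  */
  return (short) (offset & 0xffff);
}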
/* Prologue and epilogue.  */
void
c6x_expand_prologue (void)
{
  struct c6x_frame frame;
  rtx_insn *insn;
  rtx mem;
  int nsaved = 0;
  HOST_WIDE_INT initial_offset, off, added_already;

  c6x_compute_frame_layout (&frame);

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame.to_allocate;

  initial_offset = -frame.to_allocate;
  if (frame.push_rts)
    {
      emit_insn (gen_push_rts ());
      nsaved = frame.nregs;
    }

  /* If the offsets would be too large for the memory references we will
     create to save registers, do the stack allocation in two parts.
     Ensure by subtracting 8 that we don't store to the word pointed to
     by the stack pointer.  */
  if (initial_offset < -32768)
    initial_offset = -frame.frame_pointer_offset - 8;

  if (frame.to_allocate > 0)
    gcc_assert (initial_offset != 0);

  off = -initial_offset + 4 - frame.padding0;

  mem = gen_frame_mem (Pmode, stack_pointer_rtx);

  added_already = 0;
  if (frame_pointer_needed)
    {
      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
      /* We go through some contortions here to both follow the ABI's
         recommendation that FP == incoming SP, and to avoid writing or
         reading the word pointed to by the stack pointer.  */
      rtx addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
                                      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                                    GEN_INT (-8)));
      insn = emit_move_insn (gen_frame_mem (Pmode, addr), fp_reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      nsaved++;
      insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
                                    GEN_INT (8)));
      RTX_FRAME_RELATED_P (insn) = 1;
      off -= 4;
      added_already = -8;
    }

  emit_add_sp_const (initial_offset - added_already, true);

  if (nsaved < frame.nregs)
    {
      unsigned i;

      for (i = 0; i < N_SAVE_ORDER; i++)
        {
          int idx = N_SAVE_ORDER - i - 1;
          unsigned regno = reg_save_order[idx];
          rtx reg;
          machine_mode save_mode = SImode;

          if (regno == REG_A15 && frame_pointer_needed)
            /* Already saved.  */
            continue;
          if (!c6x_save_reg (regno))
            continue;

          if (TARGET_STDW && (off & 4) == 0 && off <= 256
              && (regno & 1) == 1
              && i + 1 < N_SAVE_ORDER
              && reg_save_order[idx - 1] == regno - 1
              && c6x_save_reg (regno - 1))
            {
              save_mode = DImode;
              regno--;
              i++;
            }
          reg = gen_rtx_REG (save_mode, regno);
          off -= GET_MODE_SIZE (save_mode);

          insn = emit_move_insn (adjust_address (mem, save_mode, off),
                                 reg);
          RTX_FRAME_RELATED_P (insn) = 1;

          nsaved += HARD_REGNO_NREGS (regno, save_mode);
        }
    }
  gcc_assert (nsaved == frame.nregs);
  emit_add_sp_const (-frame.to_allocate - initial_offset, true);
  if (must_reload_pic_reg_p ())
    {
      if (dsbt_decl == NULL)
        {
          tree t;

          t = build_index_type (integer_one_node);
          t = build_array_type (integer_type_node, t);
          t = build_decl (BUILTINS_LOCATION, VAR_DECL,
                          get_identifier ("__c6xabi_DSBT_BASE"), t);
          DECL_ARTIFICIAL (t) = 1;
          DECL_IGNORED_P (t) = 1;
          DECL_EXTERNAL (t) = 1;
          TREE_STATIC (t) = 1;
          TREE_PUBLIC (t) = 1;
          TREE_USED (t) = 1;

          dsbt_decl = t;
        }
      emit_insn (gen_setup_dsbt (pic_offset_table_rtx,
                                 XEXP (DECL_RTL (dsbt_decl), 0)));
    }
}
void
c6x_expand_epilogue (bool sibcall)
{
  unsigned i;
  struct c6x_frame frame;
  rtx mem;
  HOST_WIDE_INT off;
  int nsaved = 0;

  c6x_compute_frame_layout (&frame);

  mem = gen_frame_mem (Pmode, stack_pointer_rtx);

  /* Insert a dummy set/use of the stack pointer.  This creates a
     scheduler barrier between the prologue saves and epilogue restores.  */
  emit_insn (gen_epilogue_barrier (stack_pointer_rtx, stack_pointer_rtx));

  /* If the offsets would be too large for the memory references we will
     create to restore registers, do a preliminary stack adjustment here.  */
  off = frame.to_allocate - frame.frame_pointer_offset + frame.padding1;
  if (frame.push_rts)
    {
      nsaved = frame.nregs;
    }
  else
    {
      if (frame.to_allocate > 32768)
        {
          /* Don't add the entire offset so that we leave an unused word
             above the stack pointer.  */
          emit_add_sp_const ((off - 16) & ~7, false);
          off = (off & 7) + 16;
        }
      for (i = 0; i < N_SAVE_ORDER; i++)
        {
          unsigned regno = reg_save_order[i];
          rtx reg;
          machine_mode save_mode = SImode;

          if (!c6x_save_reg (regno))
            continue;
          if (regno == REG_A15 && frame_pointer_needed)
            /* Already restored below.  */
            continue;

          if (TARGET_STDW && (off & 4) == 0 && off < 256
              && (regno & 1) == 0
              && i + 1 < N_SAVE_ORDER
              && reg_save_order[i + 1] == regno + 1
              && c6x_save_reg (regno + 1))
            {
              save_mode = DImode;
              i++;
            }
          reg = gen_rtx_REG (save_mode, regno);

          emit_move_insn (reg, adjust_address (mem, save_mode, off));

          off += GET_MODE_SIZE (save_mode);
          nsaved += HARD_REGNO_NREGS (regno, save_mode);
        }
    }
  if (!frame_pointer_needed)
    emit_add_sp_const (off + frame.padding0 - 4, false);
  else
    {
      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
      rtx addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
                                     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
                                                   GEN_INT (8)));
      emit_insn (gen_addsi3 (stack_pointer_rtx, hard_frame_pointer_rtx,
                             GEN_INT (-8)));
      emit_move_insn (fp_reg, gen_frame_mem (Pmode, addr));
      nsaved++;
    }
  gcc_assert (nsaved == frame.nregs);
  if (!sibcall)
    {
      if (frame.push_rts)
        emit_jump_insn (gen_pop_rts ());
      else
        emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode,
                                                          RETURN_ADDR_REGNO)));
    }
}
/* Return the value of the return address for the frame COUNT steps up
   from the current frame, after the prologue.
   We punt for everything but the current frame by returning const0_rtx.  */

rtx
c6x_return_addr_rtx (int count)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNO);
}
/* Return true iff TYPE is one of the shadow types.  */
static bool
shadow_type_p (enum attr_type type)
{
  return (type == TYPE_SHADOW || type == TYPE_LOAD_SHADOW
          || type == TYPE_MULT_SHADOW);
}

/* Return true iff INSN is a shadow pattern.  */
static bool
shadow_p (rtx_insn *insn)
{
  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
    return false;
  return shadow_type_p (get_attr_type (insn));
}

/* Return true iff INSN is a shadow or blockage pattern.  */
static bool
shadow_or_blockage_p (rtx_insn *insn)
{
  enum attr_type type;
  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
    return false;
  type = get_attr_type (insn);
  return shadow_type_p (type) || type == TYPE_BLOCKAGE;
}
/* Translate UNITS into a bitmask of units we can reserve for this
   insn.  */
static unsigned int
get_reservation_flags (enum attr_units units)
{
  switch (units)
    {
    case UNITS_D:
    case UNITS_D_ADDR:
      return RESERVATION_FLAG_D;
    case UNITS_L:
      return RESERVATION_FLAG_L;
    case UNITS_S:
      return RESERVATION_FLAG_S;
    case UNITS_M:
      return RESERVATION_FLAG_M;
    case UNITS_LS:
      return RESERVATION_FLAG_LS;
    case UNITS_DL:
      return RESERVATION_FLAG_DL;
    case UNITS_DS:
      return RESERVATION_FLAG_DS;
    case UNITS_DLS:
      return RESERVATION_FLAG_DLS;
    default:
      return 0;
    }
}
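
/* Editorial note: the multi-unit reservation flags are unions of the
   single-unit bits (D, L, S, M), so a sketch of the "is the unit choice
   already forced?" test used later in assign_reservations is just a
   power-of-two check on the flag word.  */
static int ATTRIBUTE_UNUSED
sketch_single_unit_p (unsigned int flags)
{
  return flags != 0 && (flags & (flags - 1)) == 0;
}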
/* Compute the side of the machine used by INSN, which reserves UNITS.
   This must match the reservations in the scheduling description.  */
static int
get_insn_side (rtx_insn *insn, enum attr_units units)
{
  if (units == UNITS_D_ADDR)
    return (get_attr_addr_regfile (insn) == ADDR_REGFILE_A ? 0 : 1);
  else
    {
      enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
      if (rf == DEST_REGFILE_ANY)
        return get_attr_type (insn) == TYPE_BRANCH ? 0 : 1;
      else
        return rf == DEST_REGFILE_A ? 0 : 1;
    }
}
/* After scheduling, walk the insns between HEAD and END and assign unit
   reservations.  */
static void
assign_reservations (rtx_insn *head, rtx_insn *end)
{
  rtx_insn *insn;
  for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn))
    {
      unsigned int sched_mask, reserved;
      rtx_insn *within, *last;
      int pass;
      int rsrv[2];
      int rsrv_count[2][4];
      int i;

      if (GET_MODE (insn) != TImode)
        continue;

      reserved = 0;
      last = NULL;
      /* Find the last insn in the packet.  It has a state recorded for it,
         which we can use to determine the units we should be using.  */
      for (within = insn;
           (within != NEXT_INSN (end)
            && (within == insn || GET_MODE (within) != TImode));
           within = NEXT_INSN (within))
        {
          int icode;
          if (!NONDEBUG_INSN_P (within))
            continue;
          icode = recog_memoized (within);
          if (icode < 0)
            continue;
          if (shadow_p (within))
            continue;
          if (INSN_INFO_ENTRY (INSN_UID (within)).reservation != 0)
            reserved |= 1 << INSN_INFO_ENTRY (INSN_UID (within)).reservation;
          last = within;
        }
      if (last == NULL_RTX)
        continue;

      sched_mask = INSN_INFO_ENTRY (INSN_UID (last)).unit_mask;
      sched_mask &= ~reserved;

      memset (rsrv_count, 0, sizeof rsrv_count);
      rsrv[0] = rsrv[1] = ~0;
      for (i = 0; i < 8; i++)
        {
          int side = i / 4;
          int unit = i & 3;
          unsigned unit_bit = 1 << (unit + side * UNIT_QID_SIDE_OFFSET);
          /* Clear the bits which we expect to reserve in the following loop,
             leaving the ones set which aren't present in the scheduler's
             state and shouldn't be reserved.  */
          if (sched_mask & unit_bit)
            rsrv[i / 4] &= ~(1 << unit);
        }

      /* Walk through the insns that occur in the same cycle.  We use multiple
         passes to assign units, assigning for insns with the most specific
         requirements first.  */
      for (pass = 0; pass < 4; pass++)
        for (within = insn;
             (within != NEXT_INSN (end)
              && (within == insn || GET_MODE (within) != TImode));
             within = NEXT_INSN (within))
          {
            int uid = INSN_UID (within);
            int this_rsrv, side;
            int icode;
            enum attr_units units;
            enum attr_type type;
            int j;

            if (!NONDEBUG_INSN_P (within))
              continue;
            icode = recog_memoized (within);
            if (icode < 0)
              continue;
            if (INSN_INFO_ENTRY (uid).reservation != 0)
              continue;
            units = get_attr_units (within);
            type = get_attr_type (within);
            this_rsrv = get_reservation_flags (units);
            if (this_rsrv == 0)
              continue;
            side = get_insn_side (within, units);

            /* Certain floating point instructions are treated specially.  If
               an insn can choose between units it can reserve, and its
               reservation spans more than one cycle, the reservation contains
               special markers in the first cycle to help us reconstruct what
               the automaton chose.  */
            if ((type == TYPE_ADDDP || type == TYPE_FP4)
                && units == UNITS_LS)
              {
                int test1_code = ((type == TYPE_FP4 ? UNIT_QID_FPL1
                                   : UNIT_QID_ADDDPL1)
                                  + side * UNIT_QID_SIDE_OFFSET);
                int test2_code = ((type == TYPE_FP4 ? UNIT_QID_FPS1
                                   : UNIT_QID_ADDDPS1)
                                  + side * UNIT_QID_SIDE_OFFSET);
                if ((sched_mask & (1 << test1_code)) != 0)
                  {
                    this_rsrv = RESERVATION_FLAG_L;
                    sched_mask &= ~(1 << test1_code);
                  }
                else if ((sched_mask & (1 << test2_code)) != 0)
                  {
                    this_rsrv = RESERVATION_FLAG_S;
                    sched_mask &= ~(1 << test2_code);
                  }
              }

            if ((this_rsrv & (this_rsrv - 1)) == 0)
              {
                int t = exact_log2 (this_rsrv) + side * UNIT_QID_SIDE_OFFSET;
                rsrv[side] |= this_rsrv;
                INSN_INFO_ENTRY (uid).reservation = t;
                continue;
              }

            if (pass == 1)
              {
                for (j = 0; j < 4; j++)
                  if (this_rsrv & (1 << j))
                    rsrv_count[side][j]++;
                continue;
              }

            if ((pass == 2 && this_rsrv != RESERVATION_FLAG_DLS)
                || (pass == 3 && this_rsrv == RESERVATION_FLAG_DLS))
              {
                int best = -1, best_cost = INT_MAX;
                for (j = 0; j < 4; j++)
                  if ((this_rsrv & (1 << j))
                      && !(rsrv[side] & (1 << j))
                      && rsrv_count[side][j] < best_cost)
                    {
                      best_cost = rsrv_count[side][j];
                      best = j;
                    }
                gcc_assert (best != -1);
                rsrv[side] |= 1 << best;
                for (j = 0; j < 4; j++)
                  if ((this_rsrv & (1 << j)) && j != best)
                    rsrv_count[side][j]--;

                INSN_INFO_ENTRY (uid).reservation
                  = best + side * UNIT_QID_SIDE_OFFSET;
              }
          }
    }
}
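
/* Editorial sketch of the final selection pass above, assuming a 4-bit
   candidate mask and per-unit demand counts: pick the allowed unit that is
   not yet reserved and has the fewest other insns wanting it, which is
   exactly the best/best_cost scan in assign_reservations.  */
static int ATTRIBUTE_UNUSED
sketch_pick_unit (unsigned int candidates, unsigned int reserved,
                  const int counts[4])
{
  int j, best = -1, best_cost = INT_MAX;
  for (j = 0; j < 4; j++)
    if ((candidates & (1 << j))
        && !(reserved & (1 << j))
        && counts[j] < best_cost)
      {
        best_cost = counts[j];
        best = j;
      }
  return best;                  /* -1 if no unit is available.  */
}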
/* Return a factor by which to weight unit imbalances for a reservation
   R.  */
static int
unit_req_factor (enum unitreqs r)
{
  switch (r)
    {
    case UNIT_REQ_D:
    case UNIT_REQ_L:
    case UNIT_REQ_S:
    case UNIT_REQ_M:
    case UNIT_REQ_X:
    case UNIT_REQ_T:
      return 1;
    case UNIT_REQ_DL:
    case UNIT_REQ_LS:
    case UNIT_REQ_DS:
      return 2;
    case UNIT_REQ_DLS:
      return 3;
    default:
      gcc_unreachable ();
    }
}
/* Examine INSN, and store in REQ1/SIDE1 and REQ2/SIDE2 the unit
   requirements.  Returns zero if INSN can't be handled, otherwise
   either one or two to show how many of the two pairs are in use.
   REQ1 is always used, it holds what is normally thought of as the
   instruction's reservation, e.g. UNIT_REQ_DL.  REQ2 is used to either
   describe a cross path, or for loads/stores, the T unit.  */
static int
get_unit_reqs (rtx_insn *insn, int *req1, int *side1, int *req2, int *side2)
{
  enum attr_units units;
  enum attr_cross cross;
  int side, req;

  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
    return 0;
  units = get_attr_units (insn);
  if (units == UNITS_UNKNOWN)
    return 0;
  side = get_insn_side (insn, units);
  cross = get_attr_cross (insn);

  req = (units == UNITS_D ? UNIT_REQ_D
         : units == UNITS_D_ADDR ? UNIT_REQ_D
         : units == UNITS_DL ? UNIT_REQ_DL
         : units == UNITS_DS ? UNIT_REQ_DS
         : units == UNITS_L ? UNIT_REQ_L
         : units == UNITS_LS ? UNIT_REQ_LS
         : units == UNITS_S ? UNIT_REQ_S
         : units == UNITS_M ? UNIT_REQ_M
         : units == UNITS_DLS ? UNIT_REQ_DLS
         : -1);
  gcc_assert (req != -1);
  *req1 = req;
  *side1 = side;
  if (units == UNITS_D_ADDR)
    {
      *req2 = UNIT_REQ_T;
      *side2 = side ^ (cross == CROSS_Y ? 1 : 0);
      return 2;
    }
  else if (cross == CROSS_Y)
    {
      *req2 = UNIT_REQ_X;
      *side2 = side;
      return 2;
    }
  return 1;
}
/* Walk the insns between and including HEAD and TAIL, and mark the
   resource requirements in the unit_reqs table.  */
static void
count_unit_reqs (unit_req_table reqs, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  memset (reqs, 0, sizeof (unit_req_table));

  for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    {
      int side1, side2, req1, req2;

      switch (get_unit_reqs (insn, &req1, &side1, &req2, &side2))
        {
        case 2:
          reqs[side2][req2]++;
          /* fall through */
        case 1:
          reqs[side1][req1]++;
          break;
        }
    }
}
/* Update the table REQS by merging more specific unit reservations into
   more general ones, i.e. counting (for example) UNIT_REQ_D also in
   UNIT_REQ_DL, DS, and DLS.  */
static void
merge_unit_reqs (unit_req_table reqs)
{
  int side;
  for (side = 0; side < 2; side++)
    {
      int d = reqs[side][UNIT_REQ_D];
      int l = reqs[side][UNIT_REQ_L];
      int s = reqs[side][UNIT_REQ_S];
      int dl = reqs[side][UNIT_REQ_DL];
      int ls = reqs[side][UNIT_REQ_LS];
      int ds = reqs[side][UNIT_REQ_DS];

      reqs[side][UNIT_REQ_DL] += d;
      reqs[side][UNIT_REQ_DL] += l;
      reqs[side][UNIT_REQ_DS] += d;
      reqs[side][UNIT_REQ_DS] += s;
      reqs[side][UNIT_REQ_LS] += l;
      reqs[side][UNIT_REQ_LS] += s;
      reqs[side][UNIT_REQ_DLS] += ds + dl + ls + d + l + s;
    }
}
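
/* Editorial worked example for the merge above, with hypothetical counts
   d = 2, l = 1, s = 0 and no multi-unit requests on one side: the result is
   DL = 3, DS = 2, LS = 1 and DLS = 3, i.e. each general class counts every
   insn that one of its units could execute.  */
static void ATTRIBUTE_UNUSED
sketch_merge_example (int *dl, int *ds, int *ls, int *dls)
{
  int d = 2, l = 1, s = 0;
  *dl = d + l;
  *ds = d + s;
  *ls = l + s;
  *dls = d + l + s;
}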
/* Examine the table REQS and return a measure of unit imbalance by comparing
   the two sides of the machine.  If, for example, D1 is used twice and D2
   used not at all, the return value should be 1 in the absence of other
   imbalances.  */
static int
unit_req_imbalance (unit_req_table reqs)
{
  int val = 0;
  int i;

  for (i = 0; i < UNIT_REQ_MAX; i++)
    {
      int factor = unit_req_factor ((enum unitreqs) i);
      int diff = abs (reqs[0][i] - reqs[1][i]);
      val += (diff + factor - 1) / factor / 2;
    }

  return val;
}
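
/* Editorial sketch of one term of the imbalance sum above, assuming the
   factor comes from unit_req_factor: two uses of D1 against none of D2 with
   factor 1 gives (2 + 1 - 1) / 1 / 2 == 1, matching the example in the
   comment.  */
static int ATTRIBUTE_UNUSED
sketch_imbalance_term (int side0, int side1, int factor)
{
  int diff = side0 > side1 ? side0 - side1 : side1 - side0;
  return (diff + factor - 1) / factor / 2;
}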
/* Return the resource-constrained minimum iteration interval given the
   data in the REQS table.  This must have been processed with
   merge_unit_reqs already.  */
static int
res_mii (unit_req_table reqs)
{
  int side, req;
  int worst = 1;
  for (side = 0; side < 2; side++)
    for (req = 0; req < UNIT_REQ_MAX; req++)
      {
        int factor = unit_req_factor ((enum unitreqs) req);
        worst = MAX ((reqs[side][req] + factor - 1) / factor, worst);
      }

  return worst;
}
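
/* Editorial sketch: each (side, class) pair bounds the iteration interval
   by a ceiling division of its request count by the number of units that
   can serve the class; e.g. 7 DLS requests with factor 3 force an II of at
   least 3.  */
static int ATTRIBUTE_UNUSED
sketch_mii_bound (int req_count, int factor)
{
  return (req_count + factor - 1) / factor;
}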
/* Examine INSN, and store in PMASK1 and PMASK2 bitmasks that represent
   the operands that are involved in the (up to) two reservations, as
   found by get_unit_reqs.  Return true if we did this successfully, false
   if we couldn't identify what to do with INSN.  */
static bool
get_unit_operand_masks (rtx_insn *insn, unsigned int *pmask1,
                        unsigned int *pmask2)
{
  enum attr_op_pattern op_pat;

  if (recog_memoized (insn) < 0)
    return false;
  if (GET_CODE (PATTERN (insn)) == COND_EXEC)
    return false;
  extract_insn (insn);
  op_pat = get_attr_op_pattern (insn);
  if (op_pat == OP_PATTERN_DT)
    {
      gcc_assert (recog_data.n_operands == 2);
      *pmask1 = 1 << 0;
      *pmask2 = 1 << 1;
      return true;
    }
  else if (op_pat == OP_PATTERN_TD)
    {
      gcc_assert (recog_data.n_operands == 2);
      *pmask1 = 1 << 1;
      *pmask2 = 1 << 0;
      return true;
    }
  else if (op_pat == OP_PATTERN_SXS)
    {
      gcc_assert (recog_data.n_operands == 3);
      *pmask1 = (1 << 0) | (1 << 2);
      *pmask2 = 1 << 1;
      return true;
    }
  else if (op_pat == OP_PATTERN_SX)
    {
      gcc_assert (recog_data.n_operands == 2);
      *pmask1 = 1 << 0;
      *pmask2 = 1 << 1;
      return true;
    }
  else if (op_pat == OP_PATTERN_SSX)
    {
      gcc_assert (recog_data.n_operands == 3);
      *pmask1 = (1 << 0) | (1 << 1);
      *pmask2 = 1 << 2;
      return true;
    }
  return false;
}
/* Try to replace a register in INSN, which has corresponding rename info
   from regrename_analyze in INFO.  OP_MASK and ORIG_SIDE provide information
   about the operands that must be renamed and the side they are on.
   REQS is the table of unit reservations in the loop between HEAD and TAIL.
   We recompute this information locally after our transformation, and keep
   it only if we managed to improve the balance.  */
static void
try_rename_operands (rtx_insn *head, rtx_insn *tail, unit_req_table reqs,
                     rtx_insn *insn,
                     insn_rr_info *info, unsigned int op_mask, int orig_side)
{
  enum reg_class super_class = orig_side == 0 ? B_REGS : A_REGS;
  HARD_REG_SET unavailable;
  du_head_p this_head;
  struct du_chain *chain;
  int i;
  unsigned tmp_mask;
  int best_reg, old_reg;
  vec<du_head_p> involved_chains = vNULL;
  unit_req_table new_reqs;
  bool ok;

  for (i = 0, tmp_mask = op_mask; tmp_mask; i++)
    {
      du_head_p op_chain;
      if ((tmp_mask & (1 << i)) == 0)
        continue;
      if (info->op_info[i].n_chains != 1)
        goto out_fail;
      op_chain = regrename_chain_from_id (info->op_info[i].heads[0]->id);
      involved_chains.safe_push (op_chain);
      tmp_mask &= ~(1 << i);
    }

  if (involved_chains.length () > 1)
    goto out_fail;

  this_head = involved_chains[0];
  if (this_head->cannot_rename)
    goto out_fail;

  for (chain = this_head->first; chain; chain = chain->next_use)
    {
      unsigned int mask1, mask2, mask_changed;
      int count, side1, side2, req1, req2;
      insn_rr_info *this_rr = &insn_rr[INSN_UID (chain->insn)];

      count = get_unit_reqs (chain->insn, &req1, &side1, &req2, &side2);

      if (count == 0)
        goto out_fail;

      if (!get_unit_operand_masks (chain->insn, &mask1, &mask2))
        goto out_fail;

      extract_insn (chain->insn);

      mask_changed = 0;
      for (i = 0; i < recog_data.n_operands; i++)
        {
          int j;
          int n_this_op = this_rr->op_info[i].n_chains;
          for (j = 0; j < n_this_op; j++)
            {
              du_head_p other = this_rr->op_info[i].heads[j];
              if (regrename_chain_from_id (other->id) == this_head)
                break;
            }
          if (j == n_this_op)
            continue;

          if (n_this_op != 1)
            goto out_fail;
          mask_changed |= 1 << i;
        }
      gcc_assert (mask_changed != 0);
      if (mask_changed != mask1 && mask_changed != mask2)
        goto out_fail;
    }

  /* If we get here, we can do the renaming.  */
  COMPL_HARD_REG_SET (unavailable, reg_class_contents[(int) super_class]);

  old_reg = this_head->regno;
  best_reg =
    find_rename_reg (this_head, super_class, &unavailable, old_reg, true);

  ok = regrename_do_replace (this_head, best_reg);
  gcc_assert (ok);

  count_unit_reqs (new_reqs, head, PREV_INSN (tail));
  merge_unit_reqs (new_reqs);
  if (dump_file)
    {
      fprintf (dump_file, "reshuffle for insn %d, op_mask %x, "
               "original side %d, new reg %d\n",
               INSN_UID (insn), op_mask, orig_side, best_reg);
      fprintf (dump_file, "  imbalance %d -> %d\n",
               unit_req_imbalance (reqs), unit_req_imbalance (new_reqs));
    }
  if (unit_req_imbalance (new_reqs) > unit_req_imbalance (reqs))
    {
      ok = regrename_do_replace (this_head, old_reg);
      gcc_assert (ok);
    }
  else
    memcpy (reqs, new_reqs, sizeof (unit_req_table));

 out_fail:
  involved_chains.release ();
}
/* Find insns in LOOP which would, if shifted to the other side
   of the machine, reduce an imbalance in the unit reservations.  */
static void
reshuffle_units (basic_block loop)
{
  rtx_insn *head = BB_HEAD (loop);
  rtx_insn *tail = BB_END (loop);
  rtx_insn *insn;
  unit_req_table reqs;
  edge e;
  edge_iterator ei;
  bitmap_head bbs;

  count_unit_reqs (reqs, head, PREV_INSN (tail));
  merge_unit_reqs (reqs);

  regrename_init (true);

  bitmap_initialize (&bbs, &bitmap_default_obstack);

  FOR_EACH_EDGE (e, ei, loop->preds)
    bitmap_set_bit (&bbs, e->src->index);

  bitmap_set_bit (&bbs, loop->index);
  regrename_analyze (&bbs);

  for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    {
      enum attr_units units;
      int count, side1, side2, req1, req2;
      unsigned int mask1, mask2;
      insn_rr_info *info;

      if (!NONDEBUG_INSN_P (insn))
        continue;

      count = get_unit_reqs (insn, &req1, &side1, &req2, &side2);

      if (count == 0)
        continue;

      if (!get_unit_operand_masks (insn, &mask1, &mask2))
        continue;

      info = &insn_rr[INSN_UID (insn)];
      if (info->op_info == NULL)
        continue;

      if (reqs[side1][req1] > 1
          && reqs[side1][req1] > 2 * reqs[side1 ^ 1][req1])
        try_rename_operands (head, tail, reqs, insn, info, mask1, side1);

      units = get_attr_units (insn);
      if (units == UNITS_D_ADDR)
        {
          gcc_assert (count == 2);
          if (reqs[side2][req2] > 1
              && reqs[side2][req2] > 2 * reqs[side2 ^ 1][req2])
            try_rename_operands (head, tail, reqs, insn, info, mask2, side2);
        }
    }
  regrename_finish ();
}
/* Backend scheduling state.  */
typedef struct c6x_sched_context
{
  /* The current scheduler clock, saved in the sched_reorder hook.  */
  int curr_sched_clock;

  /* Number of insns issued so far in this cycle.  */
  int issued_this_cycle;

  /* We record the time at which each jump occurs in JUMP_CYCLES.  The
     theoretical maximum for number of jumps in flight is 12: 2 every
     cycle, with a latency of 6 cycles each.  This is a circular
     buffer; JUMP_CYCLE_INDEX is the pointer to the start.  Earlier
     jumps have a higher index.  This array should be accessed through
     the jump_cycle function.  */
  int jump_cycles[12];
  int jump_cycle_index;

  /* In parallel with jump_cycles, this array records the opposite of
     the condition used in each pending jump.  This is used to
     predicate insns that are scheduled in the jump's delay slots.  If
     this is NULL_RTX no such predication happens.  */
  rtx jump_cond[12];

  /* Similar to the jump_cycles mechanism, but here we take into
     account all insns with delay slots, to avoid scheduling asms into
     the delay slots.  */
  int delays_finished_at;

  /* The following variable value is the last issued insn.  */
  rtx_insn *last_scheduled_insn;
  /* The last issued insn that isn't a shadow of another.  */
  rtx_insn *last_scheduled_iter0;

  /* The following variable value is DFA state before issuing the
     first insn in the current clock cycle.  We do not use this member
     of the structure directly; we copy the data in and out of
     prev_cycle_state.  */
  state_t prev_cycle_state_ctx;

  int reg_n_accesses[FIRST_PSEUDO_REGISTER];
  int reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
  int reg_set_in_cycle[FIRST_PSEUDO_REGISTER];

  int tmp_reg_n_accesses[FIRST_PSEUDO_REGISTER];
  int tmp_reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
} *c6x_sched_context_t;

/* The current scheduling state.  */
static struct c6x_sched_context ss;

/* The following variable value is DFA state before issuing the first insn
   in the current clock cycle.  This is used in c6x_variable_issue for
   comparison with the state after issuing the last insn in a cycle.  */
static state_t prev_cycle_state;

/* Set when we discover while processing an insn that it would lead to too
   many accesses of the same register.  */
static bool reg_access_stall;

/* The highest insn uid after delayed insns were split, but before loop bodies
   were copied by the modulo scheduling code.  */
static int sploop_max_uid_iter0;
/* Look up the jump cycle with index N.  For an out-of-bounds N, we return 0,
   so the caller does not specifically have to test for it.  */
static int
get_jump_cycle (int n)
{
  if (n >= 12)
    return 0;
  n += ss.jump_cycle_index;
  if (n >= 12)
    n -= 12;
  return ss.jump_cycles[n];
}

/* Look up the jump condition with index N.  */
static rtx
get_jump_cond (int n)
{
  if (n >= 12)
    return NULL_RTX;
  n += ss.jump_cycle_index;
  if (n >= 12)
    n -= 12;
  return ss.jump_cond[n];
}

/* Return the index of the first jump that occurs after CLOCK_VAR.  If no jump
   has delay slots beyond CLOCK_VAR, return -1.  */
static int
first_jump_index (int clock_var)
{
  int retval = -1;
  int n = 0;
  for (;;)
    {
      int t = get_jump_cycle (n);
      if (t <= clock_var)
        break;
      retval = n;
      n++;
    }
  return retval;
}
/* Add a new entry in our scheduling state for a jump that occurs in CYCLE
   and has the opposite condition of COND.  */
static void
record_jump (int cycle, rtx cond)
{
  if (ss.jump_cycle_index == 0)
    ss.jump_cycle_index = 11;
  else
    ss.jump_cycle_index--;
  ss.jump_cycles[ss.jump_cycle_index] = cycle;
  ss.jump_cond[ss.jump_cycle_index] = cond;
}
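
/* Editorial sketch of the 12-entry circular buffer maintained by
   record_jump and read by get_jump_cycle above, using a plain int array:
   pushing steps the start index backwards (wrapping), so offset 0 is
   always the most recently recorded jump.  */
static int ATTRIBUTE_UNUSED
sketch_ring_push_and_read (int *ring, int *start, int cycle, int n)
{
  *start = (*start + 11) % 12;          /* Step back, wrapping at zero.  */
  ring[*start] = cycle;                 /* Record the newest entry.  */
  return n >= 12 ? 0 : ring[(*start + n) % 12];
}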
/* Set the clock cycle of INSN to CYCLE.  Also clears the insn's entry in
   new_conditions.  */
static void
insn_set_clock (rtx insn, int cycle)
{
  unsigned uid = INSN_UID (insn);

  if (uid >= INSN_INFO_LENGTH)
    insn_info.safe_grow (uid * 5 / 4 + 10);

  INSN_INFO_ENTRY (uid).clock = cycle;
  INSN_INFO_ENTRY (uid).new_cond = NULL;
  INSN_INFO_ENTRY (uid).reservation = 0;
  INSN_INFO_ENTRY (uid).ebb_start = false;
}

/* Return the clock cycle we set for the insn with uid UID.  */
static int
insn_uid_get_clock (int uid)
{
  return INSN_INFO_ENTRY (uid).clock;
}

/* Return the clock cycle we set for INSN.  */
static int
insn_get_clock (rtx insn)
{
  return insn_uid_get_clock (INSN_UID (insn));
}
/* Examine INSN, and if it is a conditional jump of any kind, return
   the opposite of the condition in which it branches.  Otherwise,
   return NULL_RTX.  */
static rtx
condjump_opposite_condition (rtx insn)
{
  rtx pat = PATTERN (insn);
  int icode = INSN_CODE (insn);
  rtx x = NULL;

  if (icode == CODE_FOR_br_true || icode == CODE_FOR_br_false)
    {
      x = XEXP (SET_SRC (pat), 0);
      if (icode == CODE_FOR_br_false)
        return x;
    }
  if (GET_CODE (pat) == COND_EXEC)
    {
      rtx t = COND_EXEC_CODE (pat);
      if ((GET_CODE (t) == PARALLEL
           && GET_CODE (XVECEXP (t, 0, 0)) == RETURN)
          || (GET_CODE (t) == UNSPEC && XINT (t, 1) == UNSPEC_REAL_JUMP)
          || (GET_CODE (t) == SET && SET_DEST (t) == pc_rtx))
        x = COND_EXEC_TEST (pat);
    }

  if (x != NULL_RTX)
    {
      enum rtx_code code = GET_CODE (x);
      x = gen_rtx_fmt_ee (code == EQ ? NE : EQ,
                          GET_MODE (x), XEXP (x, 0),
                          XEXP (x, 1));
    }
  return x;
}
/* Return true iff COND1 and COND2 are exactly opposite conditions,
   one of them NE and the other EQ.  */
static bool
conditions_opposite_p (rtx cond1, rtx cond2)
{
  return (rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
          && rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))
          && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2)));
}
/* Return true if we can add a predicate COND to INSN, or if INSN
   already has that predicate.  If DOIT is true, also perform the
   modification.  */
static bool
predicate_insn (rtx_insn *insn, rtx cond, bool doit)
{
  int icode;
  if (cond == NULL_RTX)
    {
      gcc_assert (!doit);
      return false;
    }

  if (get_attr_predicable (insn) == PREDICABLE_YES
      && GET_CODE (PATTERN (insn)) != COND_EXEC)
    {
      if (doit)
        {
          cond = copy_rtx (cond);
          rtx newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
          PATTERN (insn) = newpat;
          INSN_CODE (insn) = -1;
        }
      return true;
    }
  if (GET_CODE (PATTERN (insn)) == COND_EXEC
      && rtx_equal_p (COND_EXEC_TEST (PATTERN (insn)), cond))
    return true;
  icode = INSN_CODE (insn);
  if (icode == CODE_FOR_real_jump
      || icode == CODE_FOR_jump
      || icode == CODE_FOR_indirect_jump)
    {
      rtx pat = PATTERN (insn);
      rtx dest = (icode == CODE_FOR_real_jump ? XVECEXP (pat, 0, 0)
                  : icode == CODE_FOR_jump ? XEXP (SET_SRC (pat), 0)
                  : SET_SRC (pat));
      if (doit)
        {
          rtx newpat;
          if (REG_P (dest))
            newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
          else
            newpat = gen_br_true (cond, XEXP (cond, 0), dest);
          PATTERN (insn) = newpat;
          INSN_CODE (insn) = -1;
        }
      return true;
    }
  if (INSN_CODE (insn) == CODE_FOR_br_true)
    {
      rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
      return rtx_equal_p (br_cond, cond);
    }
  if (INSN_CODE (insn) == CODE_FOR_br_false)
    {
      rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
      return conditions_opposite_p (br_cond, cond);
    }
  return false;
}
/* Initialize SC.  Used by c6x_init_sched_context and c6x_sched_init.  */
static void
init_sched_state (c6x_sched_context_t sc)
{
  sc->last_scheduled_insn = NULL;
  sc->last_scheduled_iter0 = NULL;
  sc->issued_this_cycle = 0;
  memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
  memset (sc->jump_cond, 0, sizeof sc->jump_cond);
  sc->jump_cycle_index = 0;
  sc->delays_finished_at = 0;
  sc->curr_sched_clock = 0;

  sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);

  memset (sc->reg_n_accesses, 0, sizeof sc->reg_n_accesses);
  memset (sc->reg_n_xaccesses, 0, sizeof sc->reg_n_xaccesses);
  memset (sc->reg_set_in_cycle, 0, sizeof sc->reg_set_in_cycle);

  state_reset (sc->prev_cycle_state_ctx);
}
/* Allocate store for new scheduling context.  */
static void *
c6x_alloc_sched_context (void)
{
  return xmalloc (sizeof (struct c6x_sched_context));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
c6x_init_sched_context (void *_sc, bool clean_p)
{
  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;

  if (clean_p)
    {
      init_sched_state (sc);
    }
  else
    {
      *sc = ss;
      sc->prev_cycle_state_ctx = xmalloc (dfa_state_size);
      memcpy (sc->prev_cycle_state_ctx, prev_cycle_state, dfa_state_size);
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
c6x_set_sched_context (void *_sc)
{
  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;

  gcc_assert (sc != NULL);
  ss = *sc;
  memcpy (prev_cycle_state, sc->prev_cycle_state_ctx, dfa_state_size);
}

/* Clear data in _SC.  */
static void
c6x_clear_sched_context (void *_sc)
{
  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
  gcc_assert (_sc != NULL);

  free (sc->prev_cycle_state_ctx);
}

/* Free _SC.  */
static void
c6x_free_sched_context (void *_sc)
{
  free (_sc);
}
/* True if we are currently performing a preliminary scheduling
   pass before modulo scheduling; we can't allow the scheduler to
   modify instruction patterns using packetization assumptions,
   since there will be another scheduling pass later if modulo
   scheduling fails.  */
static bool in_hwloop;

/* Provide information about speculation capabilities, and set the
   DO_BACKTRACKING flag.  */
static void
c6x_set_sched_flags (spec_info_t spec_info)
{
  unsigned int *flags = &(current_sched_info->flags);

  if (*flags & SCHED_EBB)
    {
      *flags |= DO_BACKTRACKING | DO_PREDICATION;
      if (in_hwloop)
        *flags |= DONT_BREAK_DEPENDENCIES;
    }

  spec_info->mask = 0;
}

/* Implement the TARGET_SCHED_ISSUE_RATE hook.  */
static int
c6x_issue_rate (void)
{
  return 8;
}

/* Used together with the collapse_ndfa option, this ensures that we reach a
   deterministic automaton state before trying to advance a cycle.
   With collapse_ndfa, genautomata creates advance cycle arcs only for
   such deterministic states.  */
static rtx
c6x_sched_dfa_pre_cycle_insn (void)
{
  return const0_rtx;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */
static void
c6x_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                int sched_verbose ATTRIBUTE_UNUSED,
                int max_ready ATTRIBUTE_UNUSED)
{
  if (prev_cycle_state == NULL)
    prev_cycle_state = xmalloc (dfa_state_size);
  init_sched_state (&ss);
  state_reset (prev_cycle_state);
}

/* We are about to begin issuing INSN.  Return nonzero if we cannot
   issue it on given cycle CLOCK and return zero if we should not sort
   the ready queue on the next clock start.
   For C6X, we use this function just to copy the previous DFA state
   for comparison purposes.  */
static int
c6x_dfa_new_cycle (FILE *dump ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
                   rtx_insn *insn ATTRIBUTE_UNUSED,
                   int last_clock ATTRIBUTE_UNUSED,
                   int clock ATTRIBUTE_UNUSED, int *sort_p ATTRIBUTE_UNUSED)
{
  if (clock != last_clock)
    memcpy (prev_cycle_state, curr_state, dfa_state_size);
  return 0;
}
static void
c6x_mark_regno_read (int regno, bool cross)
{
  int t = ++ss.tmp_reg_n_accesses[regno];

  if (t > 4)
    reg_access_stall = true;

  if (cross)
    {
      int set_cycle = ss.reg_set_in_cycle[regno];
      /* This must be done in this way rather than by tweaking things in
         adjust_cost, since the stall occurs even for insns with opposite
         predicates, and the scheduler may not even see a dependency.  */
      if (set_cycle > 0 && set_cycle == ss.curr_sched_clock)
        reg_access_stall = true;
      /* This doesn't quite do anything yet as we're only modeling one
         X unit.  */
      ++ss.tmp_reg_n_xaccesses[regno];
    }
}

/* Note that REG is read in the insn being examined.  If CROSS, it
   means the access is through a cross path.  Update the temporary reg
   access arrays, and set REG_ACCESS_STALL if the insn can't be issued
   in the current cycle.  */
static void
c6x_mark_reg_read (rtx reg, bool cross)
{
  unsigned regno = REGNO (reg);
  unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];

  while (nregs-- > 0)
    c6x_mark_regno_read (regno + nregs, cross);
}

/* Note that register REG is written in cycle CYCLES.  */
static void
c6x_mark_reg_written (rtx reg, int cycles)
{
  unsigned regno = REGNO (reg);
  unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];

  while (nregs-- > 0)
    ss.reg_set_in_cycle[regno + nregs] = cycles;
}
/* Update the register state information for an instruction whose
   body is X.  Return true if the instruction has to be delayed until the
   next cycle.  */
static bool
c6x_registers_update (rtx_insn *insn)
{
  enum attr_cross cross;
  enum attr_dest_regfile destrf;
  int i, nops;
  rtx x;

  if (!reload_completed || recog_memoized (insn) < 0)
    return false;

  reg_access_stall = false;
  memcpy (ss.tmp_reg_n_accesses, ss.reg_n_accesses,
          sizeof ss.tmp_reg_n_accesses);
  memcpy (ss.tmp_reg_n_xaccesses, ss.reg_n_xaccesses,
          sizeof ss.tmp_reg_n_xaccesses);

  extract_insn (insn);

  cross = get_attr_cross (insn);
  destrf = get_attr_dest_regfile (insn);

  nops = recog_data.n_operands;
  x = PATTERN (insn);
  if (GET_CODE (x) == COND_EXEC)
    {
      c6x_mark_reg_read (XEXP (XEXP (x, 0), 0), false);
      nops -= 2;
    }

  for (i = 0; i < nops; i++)
    {
      rtx op = recog_data.operand[i];
      if (recog_data.operand_type[i] == OP_OUT)
        continue;
      if (REG_P (op))
        {
          bool this_cross = cross;
          if (destrf == DEST_REGFILE_A && A_REGNO_P (REGNO (op)))
            this_cross = false;
          if (destrf == DEST_REGFILE_B && B_REGNO_P (REGNO (op)))
            this_cross = false;
          c6x_mark_reg_read (op, this_cross);
        }
      else if (MEM_P (op))
        {
          op = XEXP (op, 0);
          switch (GET_CODE (op))
            {
            case POST_INC:
            case PRE_INC:
            case POST_DEC:
            case PRE_DEC:
              op = XEXP (op, 0);
              /* fall through */
            case REG:
              c6x_mark_reg_read (op, false);
              break;
            case POST_MODIFY:
            case PRE_MODIFY:
              op = XEXP (op, 1);
              gcc_assert (GET_CODE (op) == PLUS);
              /* fall through */
            case PLUS:
              c6x_mark_reg_read (XEXP (op, 0), false);
              if (REG_P (XEXP (op, 1)))
                c6x_mark_reg_read (XEXP (op, 1), false);
              break;
            case SYMBOL_REF:
            case LABEL_REF:
            case CONST:
              c6x_mark_regno_read (REG_B14, false);
              break;
            default:
              gcc_unreachable ();
            }
        }
      else if (!CONSTANT_P (op) && strlen (recog_data.constraints[i]) > 0)
        gcc_unreachable ();
    }
  return reg_access_stall;
}
/* Helper function for the TARGET_SCHED_REORDER and
   TARGET_SCHED_REORDER2 hooks.  If scheduling an insn would be unsafe
   in the current cycle, move it down in the ready list and return the
   number of non-unsafe insns.  */
static int
c6x_sched_reorder_1 (rtx_insn **ready, int *pn_ready, int clock_var)
{
  int n_ready = *pn_ready;
  rtx_insn **e_ready = ready + n_ready;
  rtx_insn **insnp;
  int first_jump;

  /* Keep track of conflicts due to a limited number of register accesses,
     and due to stalls incurred by too early accesses of registers using
     cross paths.  */

  for (insnp = ready; insnp < e_ready; insnp++)
    {
      rtx_insn *insn = *insnp;
      int icode = recog_memoized (insn);
      bool is_asm = (icode < 0
                     && (GET_CODE (PATTERN (insn)) == ASM_INPUT
                         || asm_noperands (PATTERN (insn)) >= 0));
      bool no_parallel = (is_asm || icode == CODE_FOR_sploop
                          || (icode >= 0
                              && get_attr_type (insn) == TYPE_ATOMIC));

      /* We delay asm insns until all delay slots are exhausted.  We can't
         accurately tell how many cycles an asm takes, and the main scheduling
         code always assumes at least 1 cycle, which may be wrong.  */
      if ((no_parallel
           && (ss.issued_this_cycle > 0 || clock_var < ss.delays_finished_at))
          || c6x_registers_update (insn)
          || (ss.issued_this_cycle > 0 && icode == CODE_FOR_sploop))
        {
          memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
          *ready = insn;
          n_ready--;
          ready++;
        }
      else if (shadow_p (insn))
        {
          memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
          *ready = insn;
        }
    }

  /* Ensure that no other jump is scheduled in jump delay slots, since
     it would put the machine into the wrong state.  Also, we must
     avoid scheduling insns that have a latency longer than the
     remaining jump delay slots, as the code at the jump destination
     won't be prepared for it.

     However, we can relax this condition somewhat.  The rest of the
     scheduler will automatically avoid scheduling an insn on which
     the jump shadow depends so late that its side effect happens
     after the jump.  This means that if we see an insn with a longer
     latency here, it can safely be scheduled if we can ensure that it
     has a predicate opposite of the previous jump: the side effect
     will happen in what we think of as the same basic block.  In
     c6x_variable_issue, we will record the necessary predicate in
     new_conditions, and after scheduling is finished, we will modify
     the insn.

     Special care must be taken whenever there is more than one jump
     in flight.  */

  first_jump = first_jump_index (clock_var);
  if (first_jump != -1)
    {
      int first_cycle = get_jump_cycle (first_jump);
      rtx first_cond = get_jump_cond (first_jump);
      int second_cycle = 0;

      if (first_jump > 0)
        second_cycle = get_jump_cycle (first_jump - 1);

      for (insnp = ready; insnp < e_ready; insnp++)
        {
          rtx_insn *insn = *insnp;
          int icode = recog_memoized (insn);
          bool is_asm = (icode < 0
                         && (GET_CODE (PATTERN (insn)) == ASM_INPUT
                             || asm_noperands (PATTERN (insn)) >= 0));
          int this_cycles, rsrv_cycles;
          enum attr_type type;

          gcc_assert (!is_asm);
          if (icode < 0)
            continue;
          this_cycles = get_attr_cycles (insn);
          rsrv_cycles = get_attr_reserve_cycles (insn);
          type = get_attr_type (insn);
          /* Treat branches specially; there is also a hazard if two jumps
             end at the same cycle.  */
          if (type == TYPE_BRANCH || type == TYPE_CALL)
            this_cycles++;
          if (clock_var + this_cycles <= first_cycle)
            continue;
          if ((first_jump > 0 && clock_var + this_cycles > second_cycle)
              || clock_var + rsrv_cycles > first_cycle
              || !predicate_insn (insn, first_cond, false))
            {
              memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
              *ready = insn;
              n_ready--;
              ready++;
            }
        }
    }

  return n_ready;
}
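
/* Editorial sketch, on a plain int array, of the memmove rotation used by
   c6x_sched_reorder_1 above: the element at POS moves to index 0 and the
   prefix shifts up one slot, which pushes an unsafe insn to the
   low-priority end of the ready list.  */
static void ATTRIBUTE_UNUSED
sketch_rotate_to_front (int *arr, int pos)
{
  int i, tmp = arr[pos];
  for (i = pos; i > 0; i--)     /* Equivalent of the memmove calls above.  */
    arr[i] = arr[i - 1];
  arr[0] = tmp;
}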
/* Implement the TARGET_SCHED_REORDER hook.  We save the current clock
   for later and clear the register access information for the new
   cycle.  We also move asm statements out of the way if they would be
   scheduled in a delay slot.  */
static int
c6x_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   rtx_insn **ready ATTRIBUTE_UNUSED,
                   int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
{
  ss.curr_sched_clock = clock_var;
  ss.issued_this_cycle = 0;
  memset (ss.reg_n_accesses, 0, sizeof ss.reg_n_accesses);
  memset (ss.reg_n_xaccesses, 0, sizeof ss.reg_n_xaccesses);

  if (!insn_info.exists ())
    return 0;

  return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
}
/* Implement the TARGET_SCHED_REORDER2 hook.  We use this to record the clock
   cycle for every insn.  */
static int
c6x_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
                    int sched_verbose ATTRIBUTE_UNUSED,
                    rtx_insn **ready ATTRIBUTE_UNUSED,
                    int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
{
  /* FIXME: the assembler rejects labels inside an execute packet.
     This can occur if prologue insns are scheduled in parallel with
     others, so we avoid this here.  Also make sure that nothing is
     scheduled in parallel with a TYPE_ATOMIC insn or after a jump.  */
  if (RTX_FRAME_RELATED_P (ss.last_scheduled_insn)
      || JUMP_P (ss.last_scheduled_insn)
      || (recog_memoized (ss.last_scheduled_insn) >= 0
          && get_attr_type (ss.last_scheduled_insn) == TYPE_ATOMIC))
    {
      int n_ready = *pn_ready;
      rtx_insn **e_ready = ready + n_ready;
      rtx_insn **insnp;

      for (insnp = ready; insnp < e_ready; insnp++)
        {
          rtx_insn *insn = *insnp;
          if (!shadow_p (insn))
            {
              memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
              *ready = insn;
              n_ready--;
              ready++;
            }
        }
      return n_ready;
    }

  return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
}
/* Subroutine of maybe_clobber_cond, called through note_stores.  */
static void
clobber_cond_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data1)
{
  rtx *cond = (rtx *)data1;
  if (*cond != NULL_RTX && reg_overlap_mentioned_p (x, *cond))
    *cond = NULL_RTX;
}
/* Examine INSN, and if it destroys the conditions we have recorded for
   any of the jumps in flight, clear that condition so that we don't
   predicate any more insns.  CLOCK_VAR helps us limit the search to
   only those jumps which are still in flight.  */
static void
maybe_clobber_cond (rtx insn, int clock_var)
{
  int n, idx;
  idx = ss.jump_cycle_index;
  for (n = 0; n < 12; n++, idx++)
    {
      rtx cond, link;
      int cycle;

      if (idx >= 12)
        idx -= 12;
      cycle = ss.jump_cycles[idx];
      if (cycle <= clock_var)
        break;
      cond = ss.jump_cond[idx];
      if (cond == NULL_RTX)
        continue;

      if (CALL_P (insn))
        {
          ss.jump_cond[idx] = NULL_RTX;
          continue;
        }

      note_stores (PATTERN (insn), clobber_cond_1, ss.jump_cond + idx);
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == REG_INC)
          clobber_cond_1 (XEXP (link, 0), NULL_RTX, ss.jump_cond + idx);
    }
}
/* Implement the TARGET_SCHED_VARIABLE_ISSUE hook.  We are about to
   issue INSN.  Return the number of insns left on the ready queue
   that can be issued this cycle.
   We use this hook to record clock cycles and reservations for every insn.  */
static int
c6x_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
                    int sched_verbose ATTRIBUTE_UNUSED,
                    rtx_insn *insn, int can_issue_more ATTRIBUTE_UNUSED)
{
  ss.last_scheduled_insn = insn;
  if (INSN_UID (insn) < sploop_max_uid_iter0 && !JUMP_P (insn))
    ss.last_scheduled_iter0 = insn;
  if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
    ss.issued_this_cycle++;
  if (insn_info.exists ())
    {
      state_t st_after = alloca (dfa_state_size);
      int curr_clock = ss.curr_sched_clock;
      int uid = INSN_UID (insn);
      int icode = recog_memoized (insn);
      rtx first_cond;
      int first, first_cycle;
      unsigned int mask;
      int i;

      insn_set_clock (insn, curr_clock);
      INSN_INFO_ENTRY (uid).ebb_start
        = curr_clock == 0 && ss.issued_this_cycle == 1;

      first = first_jump_index (ss.curr_sched_clock);
      if (first == -1)
        {
          first_cycle = 0;
          first_cond = NULL_RTX;
        }
      else
        {
          first_cycle = get_jump_cycle (first);
          first_cond = get_jump_cond (first);
        }
      if (icode >= 0
          && first_cycle > curr_clock
          && first_cond != NULL_RTX
          && (curr_clock + get_attr_cycles (insn) > first_cycle
              || get_attr_type (insn) == TYPE_BRANCH
              || get_attr_type (insn) == TYPE_CALL))
        INSN_INFO_ENTRY (uid).new_cond = first_cond;

      memcpy (st_after, curr_state, dfa_state_size);
      state_transition (st_after, const0_rtx);

      mask = 0;
      for (i = 0; i < 2 * UNIT_QID_SIDE_OFFSET; i++)
        if (cpu_unit_reservation_p (st_after, c6x_unit_codes[i])
            && !cpu_unit_reservation_p (prev_cycle_state, c6x_unit_codes[i]))
          mask |= 1 << i;
      INSN_INFO_ENTRY (uid).unit_mask = mask;

      maybe_clobber_cond (insn, curr_clock);

      if (icode >= 0)
        {
          int i, cycles;

          c6x_registers_update (insn);
          memcpy (ss.reg_n_accesses, ss.tmp_reg_n_accesses,
                  sizeof ss.reg_n_accesses);
          memcpy (ss.reg_n_xaccesses, ss.tmp_reg_n_xaccesses,
                  sizeof ss.reg_n_xaccesses);

          cycles = get_attr_cycles (insn);
          if (ss.delays_finished_at < ss.curr_sched_clock + cycles)
            ss.delays_finished_at = ss.curr_sched_clock + cycles;
          if (get_attr_type (insn) == TYPE_BRANCH
              || get_attr_type (insn) == TYPE_CALL)
            {
              rtx opposite = condjump_opposite_condition (insn);
              record_jump (ss.curr_sched_clock + cycles, opposite);
            }

          /* Mark the cycles in which the destination registers are written.
             This is used for calculating stalls when using cross units.  */
          extract_insn (insn);
          /* Cross-path stalls don't apply to results of load insns.  */
          if (get_attr_type (insn) == TYPE_LOAD
              || get_attr_type (insn) == TYPE_LOADN
              || get_attr_type (insn) == TYPE_LOAD_SHADOW)
            cycles = 0;
          for (i = 0; i < recog_data.n_operands; i++)
            {
              rtx op = recog_data.operand[i];
              if (MEM_P (op))
                {
                  rtx addr = XEXP (op, 0);
                  if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
                    c6x_mark_reg_written (XEXP (addr, 0),
                                          insn_uid_get_clock (uid) + 1);
                }
              if (recog_data.operand_type[i] != OP_IN
                  && REG_P (op))
                c6x_mark_reg_written (op,
                                      insn_uid_get_clock (uid) + cycles);
            }
        }
    }
  return can_issue_more;
}
/* Implement the TARGET_SCHED_ADJUST_COST hook.  We need special handling for
   anti- and output dependencies.  */
static int
c6x_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
                 unsigned int)
{
  enum attr_type insn_type = TYPE_UNKNOWN, dep_insn_type = TYPE_UNKNOWN;
  int dep_insn_code_number, insn_code_number;
  int shadow_bonus = 0;
  enum reg_note kind;

  dep_insn_code_number = recog_memoized (dep_insn);
  insn_code_number = recog_memoized (insn);

  if (dep_insn_code_number >= 0)
    dep_insn_type = get_attr_type (dep_insn);

  if (insn_code_number >= 0)
    insn_type = get_attr_type (insn);

  kind = (reg_note) dep_type;
  if (kind == 0)
    {
      /* If we have a dependency on a load, and it's not for the result of
         the load, it must be for an autoincrement.  Reduce the cost in that
         case.  */
      if (dep_insn_type == TYPE_LOAD)
        {
          rtx set = PATTERN (dep_insn);
          if (GET_CODE (set) == COND_EXEC)
            set = COND_EXEC_CODE (set);
          if (GET_CODE (set) == UNSPEC)
            cost = 1;
          else
            {
              gcc_assert (GET_CODE (set) == SET);
              if (!reg_overlap_mentioned_p (SET_DEST (set), PATTERN (insn)))
                cost = 1;
            }
        }
    }

  /* A jump shadow needs to have its latency decreased by one.  Conceptually,
     it occurs in between two cycles, but we schedule it at the end of the
     first cycle.  */
  if (shadow_type_p (insn_type))
    shadow_bonus = 1;

  /* Anti and output dependencies usually have zero cost, but we want
     to insert a stall after a jump, and after certain floating point
     insns that take more than one cycle to read their inputs.  In the
     future, we should try to find a better algorithm for scheduling
     jumps.  */
  if (kind != 0)
    {
      /* We can get anti-dependencies against shadow insns.  Treat these
         like output dependencies, so that the insn is entirely finished
         before the branch takes place.  */
      if (kind == REG_DEP_ANTI && insn_type == TYPE_SHADOW)
        kind = REG_DEP_OUTPUT;
      switch (dep_insn_type)
        {
        case TYPE_CALLP:
          return 1;
        case TYPE_BRANCH:
        case TYPE_CALL:
          if (get_attr_has_shadow (dep_insn) == HAS_SHADOW_Y)
            /* This is a real_jump/real_call insn.  These don't have
               outputs, and ensuring the validity of scheduling things
               in the delay slot is the job of
               c6x_sched_reorder_1.  */
            return 0;
          /* Unsplit calls can happen - e.g. for divide insns.  */
          return 6;
        case TYPE_LOAD:
        case TYPE_LOADN:
        case TYPE_INTDP:
          if (kind == REG_DEP_OUTPUT)
            return 5 - shadow_bonus;
          return 0;
        case TYPE_MPY4:
        case TYPE_FP4:
          if (kind == REG_DEP_OUTPUT)
            return 4 - shadow_bonus;
          return 0;
        case TYPE_MPY2:
          if (kind == REG_DEP_OUTPUT)
            return 2 - shadow_bonus;
          return 0;
        case TYPE_CMPDP:
          if (kind == REG_DEP_OUTPUT)
            return 2 - shadow_bonus;
          return 2;
        case TYPE_ADDDP:
        case TYPE_MPYSPDP:
          if (kind == REG_DEP_OUTPUT)
            return 7 - shadow_bonus;
          return 2;
        case TYPE_MPYSP2DP:
          if (kind == REG_DEP_OUTPUT)
            return 5 - shadow_bonus;
          return 2;
        case TYPE_MPYI:
          if (kind == REG_DEP_OUTPUT)
            return 9 - shadow_bonus;
          return 4;
        case TYPE_MPYID:
        case TYPE_MPYDP:
          if (kind == REG_DEP_OUTPUT)
            return 10 - shadow_bonus;
          return 4;
        default:
          if (insn_type == TYPE_SPKERNEL)
            return 0;
          if (kind == REG_DEP_OUTPUT)
            return 1 - shadow_bonus;
          return 0;
        }
    }

  return cost - shadow_bonus;
}
/* Create a SEQUENCE rtx to replace the instructions in SLOT, of which there
   are N_FILLED.  REAL_FIRST identifies the slot of the insn that appears
   first in the original stream.  */
static void
gen_one_bundle (rtx_insn **slot, int n_filled, int real_first)
{
  rtx seq;
  rtx_insn *bundle;
  rtx_insn *t;
  int i;

  seq = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec_v (n_filled, slot));
  bundle = make_insn_raw (seq);
  BLOCK_FOR_INSN (bundle) = BLOCK_FOR_INSN (slot[0]);
  INSN_LOCATION (bundle) = INSN_LOCATION (slot[0]);
  SET_PREV_INSN (bundle) = SET_PREV_INSN (slot[real_first]);

  t = NULL;

  for (i = 0; i < n_filled; i++)
    {
      rtx_insn *insn = slot[i];
      remove_insn (insn);
      SET_PREV_INSN (insn) = t ? t : PREV_INSN (bundle);
      if (t != NULL_RTX)
        SET_NEXT_INSN (t) = insn;
      t = insn;
      if (i > 0)
        INSN_LOCATION (slot[i]) = INSN_LOCATION (bundle);
    }

  SET_NEXT_INSN (bundle) = NEXT_INSN (PREV_INSN (bundle));
  SET_NEXT_INSN (t) = NEXT_INSN (bundle);
  SET_NEXT_INSN (PREV_INSN (bundle)) = bundle;
  SET_PREV_INSN (NEXT_INSN (bundle)) = bundle;
}
/* Move all parallel instructions into SEQUENCEs, so that no subsequent passes
   try to insert labels in the middle.  */
static void
c6x_gen_bundles (void)
{
  basic_block bb;
  rtx_insn *insn, *next, *last_call;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;
      /* The machine is eight insns wide.  We can have up to six shadow
         insns, plus an extra slot for merging the jump shadow.  */
      rtx_insn *slot[15];
      int n_filled = 0;
      int first_slot = 0;

      for (insn = BB_HEAD (bb);; insn = next)
        {
          int at_end;
          rtx delete_this = NULL_RTX;

          if (NONDEBUG_INSN_P (insn))
            {
              /* Put calls at the start of the sequence.  */
              if (CALL_P (insn))
                {
                  first_slot++;
                  if (n_filled)
                    memmove (&slot[1], &slot[0],
                             n_filled * sizeof (slot[0]));
                  slot[0] = insn;
                  n_filled++;
                  if (!shadow_p (insn))
                    {
                      PUT_MODE (insn, TImode);
                      if (n_filled > 1)
                        PUT_MODE (slot[1], VOIDmode);
                    }
                }
              else
                slot[n_filled++] = insn;
            }

          next = NEXT_INSN (insn);
          while (next && insn != BB_END (bb)
                 && !(NONDEBUG_INSN_P (next)
                      && GET_CODE (PATTERN (next)) != USE
                      && GET_CODE (PATTERN (next)) != CLOBBER))
            {
              insn = next;
              next = NEXT_INSN (insn);
            }

          at_end = insn == BB_END (bb);
          if (delete_this == NULL_RTX
              && (at_end || (GET_MODE (next) == TImode
                             && !(shadow_p (next) && CALL_P (next)))))
            {
              if (n_filled >= 2)
                gen_one_bundle (slot, n_filled, first_slot);
              n_filled = 0;
              first_slot = 0;
            }
          if (at_end)
            break;
        }
    }
  /* Bundling, and emitting nops, can separate
     NOTE_INSN_CALL_ARG_LOCATION from the corresponding calls.  Fix
     that up here.  */
  last_call = NULL;
  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);
      if (CALL_P (insn)
          || (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE
              && CALL_P (XVECEXP (PATTERN (insn), 0, 0))))
        last_call = insn;
      if (!NOTE_P (insn) || NOTE_KIND (insn) != NOTE_INSN_CALL_ARG_LOCATION)
        continue;
      if (NEXT_INSN (last_call) == insn)
        continue;
      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
      SET_PREV_INSN (insn) = last_call;
      SET_NEXT_INSN (insn) = NEXT_INSN (last_call);
      SET_PREV_INSN (NEXT_INSN (insn)) = insn;
      SET_NEXT_INSN (PREV_INSN (insn)) = insn;
      last_call = insn;
    }
}
/* Emit a NOP instruction for CYCLES cycles after insn AFTER.  Return it.  */

static rtx_insn *
emit_nop_after (int cycles, rtx_insn *after)
{
  rtx_insn *insn;

  /* mpydp has 9 delay slots, and we may schedule a stall for a cross-path
     operation.  We don't need the extra NOP since in this case, the hardware
     will automatically insert the required stall.  */
  gcc_assert (cycles < 10);

  insn = emit_insn_after (gen_nop_count (GEN_INT (cycles)), after);
  PUT_MODE (insn, TImode);

  return insn;
}
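
/* Illustrative note (added): "NOP n" is a single multi-cycle instruction, so
   a gap of several idle cycles is filled by one insn, e.g. "NOP 4" instead
   of four separate NOPs; the assertion above reflects that the hardware
   form caps the count below 10.  */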
/* Determine whether INSN is a call that needs to have a return label
   placed.  */

static bool
returning_call_p (rtx_insn *insn)
{
  if (CALL_P (insn))
    return (!SIBLING_CALL_P (insn)
	    && get_attr_type (insn) != TYPE_CALLP
	    && get_attr_type (insn) != TYPE_SHADOW);
  if (recog_memoized (insn) < 0)
    return false;
  if (get_attr_type (insn) == TYPE_CALL)
    return true;
  return false;
}
/* Determine whether INSN's pattern can be converted to use callp.  */

static bool
can_use_callp (rtx_insn *insn)
{
  int icode = recog_memoized (insn);
  if (!TARGET_INSNS_64PLUS
      || icode < 0
      || GET_CODE (PATTERN (insn)) == COND_EXEC)
    return false;

  return ((icode == CODE_FOR_real_call
	   || icode == CODE_FOR_call_internal
	   || icode == CODE_FOR_call_value_internal)
	  && get_attr_dest_regfile (insn) == DEST_REGFILE_ANY);
}
/* Convert the pattern of INSN, which must be a CALL_INSN, into a callp.  */

static void
convert_to_callp (rtx_insn *insn)
{
  rtx lab;
  extract_insn (insn);
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx dest = recog_data.operand[0];
      lab = recog_data.operand[1];
      PATTERN (insn) = gen_callp_value (dest, lab);
      INSN_CODE (insn) = CODE_FOR_callp_value;
    }
  else
    {
      lab = recog_data.operand[0];
      PATTERN (insn) = gen_callp (lab);
      INSN_CODE (insn) = CODE_FOR_callp;
    }
}
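
/* Background note (added): CALLP is the C64x+ "protected" call; it
   implicitly executes its five delay slots and stores the return address in
   B3, so no separate return-address load or trailing NOPs are required.
   The conversion above therefore only rewrites the pattern and insn code in
   place.  */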
/* Scan forwards from INSN until we find the next insn that has mode TImode
   (indicating it starts a new cycle), and occurs in cycle CLOCK.
   Return it if we find such an insn, NULL_RTX otherwise.  */

static rtx_insn *
find_next_cycle_insn (rtx_insn *insn, int clock)
{
  rtx_insn *t = insn;

  if (GET_MODE (t) == TImode)
    t = next_real_insn (t);
  while (t && GET_MODE (t) != TImode)
    t = next_real_insn (t);

  if (t && insn_get_clock (t) == clock)
    return t;
  return NULL;
}
/* If COND_INSN has a COND_EXEC condition, wrap the same condition
   around PAT.  Return PAT either unchanged or modified in this
   way.  */

static rtx
duplicate_cond (rtx pat, rtx cond_insn)
{
  rtx cond_pat = PATTERN (cond_insn);
  if (GET_CODE (cond_pat) == COND_EXEC)
    pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (cond_pat)),
			     pat);
  return pat;
}
/* Walk forward from INSN to find the last insn that issues in the same clock
   cycle.  */

static rtx_insn *
find_last_same_clock (rtx_insn *insn)
{
  rtx_insn *retval = insn;
  rtx_insn *t = next_real_insn (insn);

  while (t && GET_MODE (t) != TImode)
    {
      if (!DEBUG_INSN_P (t) && recog_memoized (t) >= 0)
	retval = t;
      t = next_real_insn (t);
    }
  return retval;
}
/* For every call insn in the function, emit code to load the return
   address.  For each call we create a return label and store it in
   CALL_LABELS.  If we are not scheduling, we emit the labels here,
   otherwise the caller will do it later.
   This function is called after final insn scheduling, but before creating
   the SEQUENCEs that represent execute packets.  */

static void
reorg_split_calls (rtx_insn **call_labels)
{
  unsigned int reservation_mask = 0;
  rtx_insn *insn = get_insns ();
  gcc_assert (NOTE_P (insn));
  insn = next_real_insn (insn);
  while (insn)
    {
      int uid;
      rtx_insn *next = next_real_insn (insn);

      if (DEBUG_INSN_P (insn))
	goto done;

      if (GET_MODE (insn) == TImode)
	reservation_mask = 0;
      uid = INSN_UID (insn);
      if (c6x_flag_schedule_insns2 && recog_memoized (insn) >= 0)
	reservation_mask |= 1 << INSN_INFO_ENTRY (uid).reservation;

      if (returning_call_p (insn))
	{
	  rtx_code_label *label = gen_label_rtx ();
	  rtx labelref = gen_rtx_LABEL_REF (Pmode, label);
	  rtx reg = gen_rtx_REG (SImode, RETURN_ADDR_REGNO);

	  LABEL_NUSES (label) = 2;
	  if (!c6x_flag_schedule_insns2)
	    {
	      if (can_use_callp (insn))
		convert_to_callp (insn);
	      else
		{
		  rtx t;
		  rtx_insn *slot[4];
		  emit_label_after (label, insn);

		  /* Bundle the call and its delay slots into a single
		     SEQUENCE.  While these do not issue in parallel
		     we need to group them into a single EH region.  */
		  slot[0] = insn;
		  PUT_MODE (insn, TImode);
		  if (TARGET_INSNS_64)
		    {
		      t = gen_addkpc (reg, labelref, GEN_INT (4));
		      slot[1] = emit_insn_after (duplicate_cond (t, insn),
						 insn);
		      PUT_MODE (slot[1], TImode);
		      gen_one_bundle (slot, 2, 0);
		    }
		  else
		    {
		      slot[3] = emit_insn_after (gen_nop_count (GEN_INT (3)),
						 insn);
		      PUT_MODE (slot[3], TImode);
		      t = gen_movsi_lo_sum (reg, reg, labelref);
		      slot[2] = emit_insn_after (duplicate_cond (t, insn),
						 insn);
		      PUT_MODE (slot[2], TImode);
		      t = gen_movsi_high (reg, labelref);
		      slot[1] = emit_insn_after (duplicate_cond (t, insn),
						 insn);
		      PUT_MODE (slot[1], TImode);
		      gen_one_bundle (slot, 4, 0);
		    }
		}
	    }
	  else
	    {
	      /* If we scheduled, we reserved the .S2 unit for one or two
		 cycles after the call.  Emit the insns in these slots,
		 unless it's possible to create a CALLP insn.
		 Note that this works because the dependencies ensure that
		 no insn setting/using B3 is scheduled in the delay slots of
		 a call.  */
	      int this_clock = insn_get_clock (insn);
	      rtx_insn *after1;

	      call_labels[INSN_UID (insn)] = label;

	      rtx_insn *last_same_clock = find_last_same_clock (insn);

	      if (can_use_callp (insn))
		{
		  /* Find the first insn of the next execute packet.  If it
		     is the shadow insn corresponding to this call, we may
		     use a CALLP insn.  */
		  rtx_insn *shadow =
		    next_nonnote_nondebug_insn (last_same_clock);

		  if (CALL_P (shadow)
		      && insn_get_clock (shadow) == this_clock + 5)
		    {
		      convert_to_callp (shadow);
		      insn_set_clock (shadow, this_clock);
		      INSN_INFO_ENTRY (INSN_UID (shadow)).reservation
			= RESERVATION_S2;
		      INSN_INFO_ENTRY (INSN_UID (shadow)).unit_mask
			= INSN_INFO_ENTRY (INSN_UID (last_same_clock)).unit_mask;
		      if (GET_MODE (insn) == TImode)
			{
			  rtx_insn *new_cycle_first = NEXT_INSN (insn);
			  while (!NONDEBUG_INSN_P (new_cycle_first)
				 || GET_CODE (PATTERN (new_cycle_first)) == USE
				 || GET_CODE (PATTERN (new_cycle_first)) == CLOBBER)
			    new_cycle_first = NEXT_INSN (new_cycle_first);
			  PUT_MODE (new_cycle_first, TImode);
			  if (new_cycle_first != shadow)
			    PUT_MODE (shadow, VOIDmode);
			  INSN_INFO_ENTRY (INSN_UID (new_cycle_first)).ebb_start
			    = INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start;
			}
		      else
			PUT_MODE (shadow, VOIDmode);
		      delete_insn (insn);
		      goto done;
		    }
		}
	      after1 = find_next_cycle_insn (last_same_clock, this_clock + 1);
	      if (after1 == NULL_RTX)
		after1 = last_same_clock;
	      else
		after1 = find_last_same_clock (after1);
	      if (TARGET_INSNS_64)
		{
		  rtx x1 = gen_addkpc (reg, labelref, const0_rtx);
		  x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
		  insn_set_clock (x1, this_clock + 1);
		  INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
		  if (after1 == last_same_clock)
		    PUT_MODE (x1, TImode);
		  else
		    INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
		      = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
		}
	      else
		{
		  rtx x1, x2;
		  rtx_insn *after2 = find_next_cycle_insn (after1,
							   this_clock + 2);
		  if (after2 == NULL_RTX)
		    after2 = after1;
		  x2 = gen_movsi_lo_sum (reg, reg, labelref);
		  x2 = emit_insn_after (duplicate_cond (x2, insn), after2);
		  x1 = gen_movsi_high (reg, labelref);
		  x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
		  insn_set_clock (x1, this_clock + 1);
		  insn_set_clock (x2, this_clock + 2);
		  INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
		  INSN_INFO_ENTRY (INSN_UID (x2)).reservation = RESERVATION_S2;
		  if (after1 == last_same_clock)
		    PUT_MODE (x1, TImode);
		  else
		    INSN_INFO_ENTRY (INSN_UID (x1)).unit_mask
		      = INSN_INFO_ENTRY (INSN_UID (after1)).unit_mask;
		  if (after1 == after2)
		    PUT_MODE (x2, TImode);
		  else
		    INSN_INFO_ENTRY (INSN_UID (x2)).unit_mask
		      = INSN_INFO_ENTRY (INSN_UID (after2)).unit_mask;
		}
	    }
	}
    done:
      insn = next;
    }
}
/* Called as part of c6x_reorg.  This function emits multi-cycle NOP
   insns as required for correctness.  CALL_LABELS is the array that
   holds the return labels for call insns; we emit these here if
   scheduling was run earlier.  */

static void
reorg_emit_nops (rtx_insn **call_labels)
{
  bool first;
  rtx_insn *prev, *last_call;
  int prev_clock, earliest_bb_end;
  int prev_implicit_nops;
  rtx_insn *insn = get_insns ();

  /* We look at one insn (or bundle inside a sequence) in each iteration,
     storing its issue time in PREV_CLOCK for the next iteration.  If there
     is a gap in clocks, we must insert a NOP.
     EARLIEST_BB_END tracks in which cycle all insns that have been issued in
     the current basic block will finish.  We must not allow the next basic
     block to begin before this cycle.
     PREV_IMPLICIT_NOPS tells us whether we've seen an insn that implicitly
     contains a multi-cycle nop.  The code is scheduled such that subsequent
     insns will show the cycle gap, but we needn't insert a real NOP
     instruction.  */
  insn = next_real_insn (insn);
  last_call = prev = NULL;
  prev_clock = -1;
  earliest_bb_end = 0;
  prev_implicit_nops = 0;
  first = true;
  while (insn)
    {
      int this_clock = -1;
      rtx_insn *next;
      int max_cycles = 0;

      next = next_real_insn (insn);

      if (DEBUG_INSN_P (insn)
	  || GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER
	  || shadow_or_blockage_p (insn)
	  || JUMP_TABLE_DATA_P (insn))
	goto next_insn;

      if (!c6x_flag_schedule_insns2)
	/* No scheduling; ensure that no parallel issue happens.  */
	PUT_MODE (insn, TImode);
      else
	{
	  int cycles;

	  this_clock = insn_get_clock (insn);
	  if (this_clock != prev_clock)
	    {
	      PUT_MODE (insn, TImode);

	      if (!first)
		{
		  cycles = this_clock - prev_clock;

		  cycles -= prev_implicit_nops;
		  if (cycles > 1)
		    {
		      rtx nop = emit_nop_after (cycles - 1, prev);
		      insn_set_clock (nop, prev_clock + prev_implicit_nops + 1);
		    }
		}
	      prev_clock = this_clock;

	      if (last_call
		  && insn_get_clock (last_call) + 6 <= this_clock)
		{
		  emit_label_before (call_labels[INSN_UID (last_call)], insn);
		  last_call = NULL_RTX;
		}
	      prev_implicit_nops = 0;
	    }
	}

      /* Examine how many cycles the current insn takes, and adjust
	 LAST_CALL, EARLIEST_BB_END and PREV_IMPLICIT_NOPS.  */
      if (recog_memoized (insn) >= 0
	  /* If not scheduling, we've emitted NOPs after calls already.  */
	  && (c6x_flag_schedule_insns2 || !returning_call_p (insn)))
	{
	  max_cycles = get_attr_cycles (insn);
	  if (get_attr_type (insn) == TYPE_CALLP)
	    prev_implicit_nops = 5;
	}
      else
	max_cycles = 1;

      if (returning_call_p (insn))
	last_call = insn;

      if (c6x_flag_schedule_insns2)
	{
	  gcc_assert (this_clock >= 0);
	  if (earliest_bb_end < this_clock + max_cycles)
	    earliest_bb_end = this_clock + max_cycles;
	}
      else if (max_cycles > 1)
	emit_nop_after (max_cycles - 1, insn);

      prev = insn;
      first = false;

    next_insn:
      if (c6x_flag_schedule_insns2
	  && (next == NULL_RTX
	      || (GET_MODE (next) == TImode
		  && INSN_INFO_ENTRY (INSN_UID (next)).ebb_start))
	  && earliest_bb_end > 0)
	{
	  int cycles = earliest_bb_end - prev_clock;
	  if (cycles > 1)
	    {
	      prev = emit_nop_after (cycles - 1, prev);
	      insn_set_clock (prev, prev_clock + prev_implicit_nops + 1);
	    }

	  earliest_bb_end = 0;
	  prev_clock = -1;
	  first = true;

	  if (last_call)
	    emit_label_after (call_labels[INSN_UID (last_call)], prev);
	  last_call = NULL_RTX;
	}
      insn = next;
    }
}
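
/* Worked example (added): suppose the previous execute packet issued at
   cycle 2 and the next insn is scheduled at cycle 6.  Then CYCLES is 4; if
   no implicit NOPs intervene we emit emit_nop_after (3, prev), a single
   "NOP 3" covering cycles 3-5, and the next packet issues at cycle 6 as
   scheduled.  */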
/* If possible, split INSN, which we know is either a jump or a call, into a
   real insn and its shadow.  */

static void
split_delayed_branch (rtx_insn *insn)
{
  int code = recog_memoized (insn);
  rtx_insn *i1;
  rtx newpat;
  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (CALL_P (insn))
    {
      rtx src = pat, dest = NULL_RTX;
      rtx callee;
      if (GET_CODE (pat) == SET)
	{
	  dest = SET_DEST (pat);
	  src = SET_SRC (pat);
	}
      callee = XEXP (XEXP (src, 0), 0);
      if (SIBLING_CALL_P (insn))
	{
	  if (REG_P (callee))
	    newpat = gen_indirect_sibcall_shadow ();
	  else
	    newpat = gen_sibcall_shadow (callee);
	  pat = gen_real_jump (callee);
	}
      else if (dest != NULL_RTX)
	{
	  if (REG_P (callee))
	    newpat = gen_indirect_call_value_shadow (dest);
	  else
	    newpat = gen_call_value_shadow (dest, callee);
	  pat = gen_real_call (callee);
	}
      else
	{
	  if (REG_P (callee))
	    newpat = gen_indirect_call_shadow ();
	  else
	    newpat = gen_call_shadow (callee);
	  pat = gen_real_call (callee);
	}
      pat = duplicate_cond (pat, insn);
      newpat = duplicate_cond (newpat, insn);
    }
  else
    {
      rtx src, op;
      if (GET_CODE (pat) == PARALLEL
	  && GET_CODE (XVECEXP (pat, 0, 0)) == RETURN)
	{
	  newpat = gen_return_shadow ();
	  pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
	  newpat = duplicate_cond (newpat, insn);
	}
      else
	switch (code)
	  {
	  case CODE_FOR_br_true:
	  case CODE_FOR_br_false:
	    src = SET_SRC (pat);
	    op = XEXP (src, code == CODE_FOR_br_true ? 1 : 2);
	    newpat = gen_condjump_shadow (op);
	    pat = gen_real_jump (op);
	    if (code == CODE_FOR_br_true)
	      pat = gen_rtx_COND_EXEC (VOIDmode, XEXP (src, 0), pat);
	    else
	      pat = gen_rtx_COND_EXEC (VOIDmode,
				       reversed_comparison (XEXP (src, 0),
							    VOIDmode),
				       pat);
	    break;

	  case CODE_FOR_jump:
	    op = SET_SRC (pat);
	    pat = gen_real_jump (op);
	    newpat = gen_jump_shadow (op);
	    break;

	  case CODE_FOR_indirect_jump:
	    newpat = gen_indirect_jump_shadow ();
	    break;

	  case CODE_FOR_return_internal:
	    newpat = gen_return_shadow ();
	    pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
	    break;

	  default:
	    return;
	  }
    }
  i1 = emit_insn_before (pat, insn);
  PATTERN (insn) = newpat;
  INSN_CODE (insn) = -1;
  record_delay_slot_pair (i1, insn, 5, 0);
}
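
/* Illustrative note (added): C6X branches have five delay slots, hence the
   constant 5 passed to record_delay_slot_pair above.  The real insn
   occupies a functional unit at its issue cycle, while the shadow is pinned
   five cycles later where the branch takes effect; the scheduler is then
   free to fill the intervening cycles with useful insns instead of NOPs.  */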
/* If INSN is a multi-cycle insn that should be handled properly in
   modulo-scheduling, split it into a real insn and a shadow.
   Return true if we made a change.

   It is valid for us to fail to split an insn; the caller has to deal
   with the possibility.  Currently we handle loads and most mpy2 and
   mpy4 insns.  */

static bool
split_delayed_nonbranch (rtx_insn *insn)
{
  int code = recog_memoized (insn);
  enum attr_type type;
  rtx_insn *i1;
  rtx newpat, src, dest;
  rtx pat = PATTERN (insn);
  rtvec rtv;
  int delay;

  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (code < 0 || GET_CODE (pat) != SET)
    return false;
  src = SET_SRC (pat);
  dest = SET_DEST (pat);
  if (!REG_P (dest))
    return false;

  type = get_attr_type (insn);
  if (code >= 0
      && (type == TYPE_LOAD
	  || type == TYPE_LOADN))
    {
      if (!MEM_P (src)
	  && (GET_CODE (src) != ZERO_EXTEND
	      || !MEM_P (XEXP (src, 0))))
	return false;

      if (GET_MODE_SIZE (GET_MODE (dest)) > 4
	  && (GET_MODE_SIZE (GET_MODE (dest)) != 8 || !TARGET_LDDW))
	return false;

      rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
		       SET_SRC (pat));
      newpat = gen_load_shadow (SET_DEST (pat));
      pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_LOAD);
      delay = 4;
    }
  else if (code >= 0
	   && (type == TYPE_MPY2
	       || type == TYPE_MPY4))
    {
      /* We don't handle floating point multiplies yet.  */
      if (GET_MODE (dest) == SFmode)
	return false;

      rtv = gen_rtvec (2, GEN_INT (REGNO (SET_DEST (pat))),
		       SET_SRC (pat));
      newpat = gen_mult_shadow (SET_DEST (pat));
      pat = gen_rtx_UNSPEC (VOIDmode, rtv, UNSPEC_REAL_MULT);
      delay = type == TYPE_MPY2 ? 1 : 3;
    }
  else
    return false;

  pat = duplicate_cond (pat, insn);
  newpat = duplicate_cond (newpat, insn);
  i1 = emit_insn_before (pat, insn);
  PATTERN (insn) = newpat;
  INSN_CODE (insn) = -1;
  recog_memoized (insn);
  recog_memoized (i1);
  record_delay_slot_pair (i1, insn, delay, 0);
  return true;
}
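
/* Illustrative note (added): the DELAY values mirror the latencies of the
   split insns: loads use 4, matching the four delay slots of LDW-class
   insns, while 16-bit multiplies (mpy2) use 1 and 32-bit multiplies (mpy4)
   use 3, as set just above.  */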
/* Examine if INSN is the result of splitting a load into a real load and a
   shadow, and if so, undo the transformation.  */

static void
undo_split_delayed_nonbranch (rtx_insn *insn)
{
  int icode = recog_memoized (insn);
  enum attr_type type;
  rtx prev_pat, insn_pat;
  rtx_insn *prev;

  if (icode < 0)
    return;
  type = get_attr_type (insn);
  if (type != TYPE_LOAD_SHADOW && type != TYPE_MULT_SHADOW)
    return;
  prev = PREV_INSN (insn);
  prev_pat = PATTERN (prev);
  insn_pat = PATTERN (insn);
  if (GET_CODE (prev_pat) == COND_EXEC)
    {
      prev_pat = COND_EXEC_CODE (prev_pat);
      insn_pat = COND_EXEC_CODE (insn_pat);
    }

  gcc_assert (GET_CODE (prev_pat) == UNSPEC
	      && ((XINT (prev_pat, 1) == UNSPEC_REAL_LOAD
		   && type == TYPE_LOAD_SHADOW)
		  || (XINT (prev_pat, 1) == UNSPEC_REAL_MULT
		      && type == TYPE_MULT_SHADOW)));
  insn_pat = gen_rtx_SET (SET_DEST (insn_pat),
			  XVECEXP (prev_pat, 0, 1));
  insn_pat = duplicate_cond (insn_pat, prev);
  PATTERN (insn) = insn_pat;
  INSN_CODE (insn) = -1;
  delete_insn (prev);
}
/* Split every insn (i.e. jumps and calls) which can have delay slots into
   two parts: the first one is scheduled normally and emits the instruction,
   while the second one is a shadow insn which shows the side effect taking
   place.  The second one is placed in the right cycle by the scheduler, but
   not emitted as an assembly instruction.  */

static void
split_delayed_insns (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (JUMP_P (insn) || CALL_P (insn))
	split_delayed_branch (insn);
    }
}
/* For every insn that has an entry in the new_conditions vector, give it
   the appropriate predicate.  */

static void
conditionalize_after_sched (void)
{
  basic_block bb;
  rtx_insn *insn;
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      {
	unsigned uid = INSN_UID (insn);
	rtx cond;
	if (!NONDEBUG_INSN_P (insn) || uid >= INSN_INFO_LENGTH)
	  continue;
	cond = INSN_INFO_ENTRY (uid).new_cond;
	if (cond == NULL_RTX)
	  continue;
	if (dump_file)
	  fprintf (dump_file, "Conditionalizing insn %d\n", uid);
	predicate_insn (insn, cond, true);
      }
}
/* A callback for the hw-doloop pass.  This function examines INSN; if
   it is a loop_end pattern we recognize, return the reg rtx for the
   loop counter.  Otherwise, return NULL_RTX.  */

static rtx
hwloop_pattern_reg (rtx_insn *insn)
{
  rtx pat, reg;

  if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
    return NULL_RTX;

  pat = PATTERN (insn);
  reg = SET_DEST (XVECEXP (pat, 0, 1));
  if (!REG_P (reg))
    return NULL_RTX;
  return reg;
}
/* Return the number of cycles taken by BB, as computed by scheduling,
   including the latencies of all insns with delay slots.  IGNORE is
   an insn we should ignore in the calculation, usually the final
   branch.  */

static int
bb_earliest_end_cycle (basic_block bb, rtx ignore)
{
  int earliest = 0;
  rtx_insn *insn;

  FOR_BB_INSNS (bb, insn)
    {
      int cycles, this_clock;

      if (LABEL_P (insn) || NOTE_P (insn) || DEBUG_INSN_P (insn)
	  || GET_CODE (PATTERN (insn)) == USE
	  || GET_CODE (PATTERN (insn)) == CLOBBER
	  || insn == ignore)
	continue;

      this_clock = insn_get_clock (insn);
      cycles = get_attr_cycles (insn);

      if (earliest < this_clock + cycles)
	earliest = this_clock + cycles;
    }
  return earliest;
}
/* Examine the insns in BB and remove all which have a uid greater or
   equal to MAX_UID.  */

static void
filter_insns_above (basic_block bb, int max_uid)
{
  rtx_insn *insn, *next;
  bool prev_ti = false;
  int prev_cycle = -1;

  FOR_BB_INSNS_SAFE (bb, insn, next)
    {
      int this_cycle;
      if (!NONDEBUG_INSN_P (insn))
	continue;
      if (insn == BB_END (bb))
	return;
      this_cycle = insn_get_clock (insn);
      if (prev_ti && this_cycle == prev_cycle)
	{
	  gcc_assert (GET_MODE (insn) != TImode);
	  PUT_MODE (insn, TImode);
	}
      prev_ti = false;
      if (INSN_UID (insn) >= max_uid)
	{
	  if (GET_MODE (insn) == TImode)
	    {
	      prev_ti = true;
	      prev_cycle = this_cycle;
	    }
	  delete_insn (insn);
	}
    }
}
/* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY.  */

static void
c6x_asm_emit_except_personality (rtx personality)
{
  fputs ("\t.personality\t", asm_out_file);
  output_addr_const (asm_out_file, personality);
  fputc ('\n', asm_out_file);
}
/* Use a special assembly directive rather than a regular section for
   unwind table data.  */

static void
c6x_asm_init_sections (void)
{
  exception_section = get_unnamed_section (0, output_section_asm_op,
					   "\t.handlerdata");
}
/* A callback for the hw-doloop pass.  Called to optimize LOOP in a
   machine-specific fashion; returns true if successful and false if
   the hwloop_fail function should be called.  */

static bool
hwloop_optimize (hwloop_info loop)
{
  basic_block entry_bb, bb;
  rtx_insn *seq, *insn, *prev, *entry_after, *end_packet;
  rtx_insn *head_insn, *tail_insn, *new_insns, *last_insn;
  int loop_earliest;
  int n_execute_packets;
  edge entry_edge;
  unsigned ix;
  int max_uid_before, delayed_splits;
  int i, sp_ii, min_ii, max_ii, max_parallel, n_insns, n_real_insns, stages;
  rtx_insn **orig_vec;
  rtx_insn **copies;
  rtx_insn ***insn_copies;
  unit_req_table unit_reqs;

  if (!c6x_flag_modulo_sched || !c6x_flag_schedule_insns2
      || !TARGET_INSNS_64PLUS)
    return false;

  if (loop->iter_reg_used || loop->depth > 1)
    return false;
  if (loop->has_call || loop->has_asm)
    return false;

  if (loop->head != loop->tail)
    return false;

  gcc_assert (loop->incoming_dest == loop->head);

  FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
    if (entry_edge->flags & EDGE_FALLTHRU)
      break;
  if (entry_edge == NULL)
    return false;

  reshuffle_units (loop->head);

  schedule_ebbs_init ();
  schedule_ebb (BB_HEAD (loop->tail), loop->loop_end, true);
  schedule_ebbs_finish ();

  bb = loop->head;
  loop_earliest = bb_earliest_end_cycle (bb, loop->loop_end) + 1;

  max_uid_before = get_max_uid ();

  /* Split all multi-cycle operations, such as loads.  For normal
     scheduling, we only do this for branches, as the generated code
     would otherwise not be interrupt-safe.  When using sploop, it is
     safe and beneficial to split them.  If any multi-cycle operations
     remain after splitting (because we don't handle them yet), we
     cannot pipeline the loop.  */
  delayed_splits = 0;
  FOR_BB_INSNS (bb, insn)
    {
      if (NONDEBUG_INSN_P (insn))
	{
	  recog_memoized (insn);
	  if (split_delayed_nonbranch (insn))
	    delayed_splits++;
	  else if (INSN_CODE (insn) >= 0
		   && get_attr_cycles (insn) > 1)
	    goto undo_splits;
	}
    }

  /* Count the number of insns as well as the number of real insns, and save
     the original sequence of insns in case we must restore it later.  */
  n_insns = n_real_insns = 0;
  FOR_BB_INSNS (bb, insn)
    {
      n_insns++;
      if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
	n_real_insns++;
    }
  orig_vec = XNEWVEC (rtx_insn *, n_insns);
  n_insns = 0;
  FOR_BB_INSNS (bb, insn)
    orig_vec[n_insns++] = insn;

  /* Count the unit reservations, and compute a minimum II from that
     table.  */
  count_unit_reqs (unit_reqs, loop->start_label,
		   PREV_INSN (loop->loop_end));
  merge_unit_reqs (unit_reqs);

  min_ii = res_mii (unit_reqs);
  max_ii = loop_earliest < 15 ? loop_earliest : 14;

  /* Make copies of the loop body, up to a maximum number of stages we want
     to handle.  */
  max_parallel = loop_earliest / min_ii + 1;

  copies = XCNEWVEC (rtx_insn *, (max_parallel + 1) * n_real_insns);
  insn_copies = XNEWVEC (rtx_insn **, max_parallel + 1);
  for (i = 0; i < max_parallel + 1; i++)
    insn_copies[i] = copies + i * n_real_insns;

  head_insn = next_nonnote_nondebug_insn (loop->start_label);
  tail_insn = prev_real_insn (BB_END (bb));

  i = 0;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
      insn_copies[0][i++] = insn;

  sploop_max_uid_iter0 = get_max_uid ();

  /* Generate the copies of the loop body, and save them in the
     INSN_COPIES array.  */
  start_sequence ();
  for (i = 0; i < max_parallel; i++)
    {
      int j = 0;
      rtx_insn *this_iter;

      this_iter = duplicate_insn_chain (head_insn, tail_insn);
      while (this_iter)
	{
	  rtx_insn *prev_stage_insn = insn_copies[i][j];
	  gcc_assert (INSN_CODE (this_iter) == INSN_CODE (prev_stage_insn));

	  if (INSN_CODE (this_iter) >= 0
	      && (get_attr_type (this_iter) == TYPE_LOAD_SHADOW
		  || get_attr_type (this_iter) == TYPE_MULT_SHADOW))
	    {
	      rtx_insn *prev = PREV_INSN (this_iter);
	      record_delay_slot_pair (prev, this_iter,
				      get_attr_cycles (prev) - 1, 0);
	    }
	  else
	    record_delay_slot_pair (prev_stage_insn, this_iter, i, 1);

	  insn_copies[i + 1][j] = this_iter;
	  j++;
	  this_iter = next_nonnote_nondebug_insn (this_iter);
	}
    }
  new_insns = get_insns ();
  last_insn = insn_copies[max_parallel][n_real_insns - 1];
  end_sequence ();
  emit_insn_before (new_insns, BB_END (bb));

  /* Try to schedule the loop using varying initiation intervals,
     starting with the smallest possible and incrementing it
     on failure.  */
  for (sp_ii = min_ii; sp_ii <= max_ii; sp_ii++)
    {
      basic_block tmp_bb;
      if (dump_file)
	fprintf (dump_file, "Trying to schedule for II %d\n", sp_ii);

      df_clear_flags (DF_LR_RUN_DCE);

      schedule_ebbs_init ();
      set_modulo_params (sp_ii, max_parallel, n_real_insns,
			 sploop_max_uid_iter0);
      tmp_bb = schedule_ebb (BB_HEAD (bb), last_insn, true);
      schedule_ebbs_finish ();

      if (tmp_bb)
	{
	  if (dump_file)
	    fprintf (dump_file, "Found schedule with II %d\n", sp_ii);
	  break;
	}
    }

  discard_delay_pairs_above (max_uid_before);
  if (sp_ii > max_ii)
    goto restore_loop;

  stages = insn_get_clock (ss.last_scheduled_iter0) / sp_ii + 1;

  if (stages == 1 && sp_ii > 5)
    goto restore_loop;

  /* At this point, we know we've been successful, unless we find later that
     there are too many execute packets for the loop buffer to hold.  */

  /* Assign reservations to the instructions in the loop.  We must find
     the stage that contains the full loop kernel, and transfer the
     reservations of the instructions contained in it to the corresponding
     instructions from iteration 0, which are the only ones we'll keep.  */
  assign_reservations (BB_HEAD (bb), ss.last_scheduled_insn);
  SET_PREV_INSN (BB_END (bb)) = ss.last_scheduled_iter0;
  SET_NEXT_INSN (ss.last_scheduled_iter0) = BB_END (bb);
  filter_insns_above (bb, sploop_max_uid_iter0);

  for (i = 0; i < n_real_insns; i++)
    {
      rtx insn = insn_copies[0][i];
      int uid = INSN_UID (insn);
      int stage = insn_uid_get_clock (uid) / sp_ii;

      if (stage + 1 < stages)
	{
	  int copy_uid;
	  stage = stages - stage - 1;
	  copy_uid = INSN_UID (insn_copies[stage][i]);
	  INSN_INFO_ENTRY (uid).reservation
	    = INSN_INFO_ENTRY (copy_uid).reservation;
	}
    }

  /* Compute the number of execute packets the pipelined form of the loop will
     require.  */
  prev = NULL;
  n_execute_packets = 0;
  for (insn = loop->start_label;
       insn != loop->loop_end;
       insn = NEXT_INSN (insn))
    {
      if (NONDEBUG_INSN_P (insn) && GET_MODE (insn) == TImode
	  && !shadow_p (insn))
	{
	  n_execute_packets++;
	  if (prev && insn_get_clock (prev) + 1 != insn_get_clock (insn))
	    /* We need an extra NOP instruction.  */
	    n_execute_packets++;

	  prev = insn;
	}
    }

  end_packet = ss.last_scheduled_iter0;
  while (!NONDEBUG_INSN_P (end_packet) || GET_MODE (end_packet) != TImode)
    end_packet = PREV_INSN (end_packet);

  /* The earliest cycle in which we can emit the SPKERNEL instruction.  */
  loop_earliest = (stages - 1) * sp_ii;
  if (loop_earliest > insn_get_clock (end_packet))
    {
      n_execute_packets++;
      end_packet = loop->loop_end;
    }
  else
    loop_earliest = insn_get_clock (end_packet);

  if (n_execute_packets > 14)
    goto restore_loop;

  /* Generate the spkernel instruction, and place it at the appropriate
     spot.  */
  PUT_MODE (end_packet, VOIDmode);

  insn = emit_jump_insn_before (
	   gen_spkernel (GEN_INT (stages - 1),
			 const0_rtx, JUMP_LABEL (loop->loop_end)),
	   end_packet);
  JUMP_LABEL (insn) = JUMP_LABEL (loop->loop_end);
  insn_set_clock (insn, loop_earliest);
  PUT_MODE (insn, TImode);
  INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start = false;
  delete_insn (loop->loop_end);

  /* Place the mvc and sploop instructions before the loop.  */
  entry_bb = entry_edge->src;

  start_sequence ();

  insn = emit_insn (gen_mvilc (loop->iter_reg));
  if (loop->iter_reg_used_outside)
    insn = emit_move_insn (loop->iter_reg, const0_rtx);
  insn = emit_insn (gen_sploop (GEN_INT (sp_ii)));
  seq = get_insns ();

  if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
    {
      basic_block new_bb;
      edge e;
      edge_iterator ei;

      emit_insn_before (seq, BB_HEAD (loop->head));
      seq = emit_label_before (gen_label_rtx (), seq);

      new_bb = create_basic_block (seq, insn, entry_bb);
      FOR_EACH_EDGE (e, ei, loop->incoming)
	{
	  if (!(e->flags & EDGE_FALLTHRU))
	    redirect_edge_and_branch_force (e, new_bb);
	  else
	    redirect_edge_succ (e, new_bb);
	}
      make_edge (new_bb, loop->head, 0);
    }
  else
    {
      entry_after = BB_END (entry_bb);
      while (DEBUG_INSN_P (entry_after)
	     || (NOTE_P (entry_after)
		 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK))
	entry_after = PREV_INSN (entry_after);
      emit_insn_after (seq, entry_after);
    }

  end_sequence ();

  /* Make sure we don't try to schedule this loop again.  */
  for (ix = 0; loop->blocks.iterate (ix, &bb); ix++)
    bb->flags |= BB_DISABLE_SCHEDULE;

  return true;

 restore_loop:
  if (dump_file)
    fprintf (dump_file, "Unable to pipeline loop.\n");

  for (i = 1; i < n_insns; i++)
    {
      SET_NEXT_INSN (orig_vec[i - 1]) = orig_vec[i];
      SET_PREV_INSN (orig_vec[i]) = orig_vec[i - 1];
    }
  SET_PREV_INSN (orig_vec[0]) = PREV_INSN (BB_HEAD (bb));
  SET_NEXT_INSN (PREV_INSN (BB_HEAD (bb))) = orig_vec[0];
  SET_NEXT_INSN (orig_vec[n_insns - 1]) = NEXT_INSN (BB_END (bb));
  SET_PREV_INSN (NEXT_INSN (BB_END (bb))) = orig_vec[n_insns - 1];
  BB_HEAD (bb) = orig_vec[0];
  BB_END (bb) = orig_vec[n_insns - 1];

 undo_splits:
  free_delay_pairs ();
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      undo_split_delayed_nonbranch (insn);
  return false;
}
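
/* Worked example (added): if the single-iteration schedule for a loop ends
   at cycle 11, LOOP_EARLIEST is 12.  With unit reservations giving
   MIN_II = 2, the II search tries sp_ii = 2..12 and we generate up to
   MAX_PARALLEL = 12 / 2 + 1 = 7 copies of the loop body.  A successful
   schedule at II = 3 with STAGES = 4 overlaps four iterations started three
   cycles apart inside the SPLOOP buffer.  */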
/* A callback for the hw-doloop pass.  Called when a loop we have discovered
   turns out not to be optimizable; we have to split the doloop_end pattern
   into a subtract and a test.  */

static void
hwloop_fail (hwloop_info loop)
{
  rtx insn, test, testreg;

  if (dump_file)
    fprintf (dump_file, "splitting doloop insn %d\n",
	     INSN_UID (loop->loop_end));
  insn = gen_addsi3 (loop->iter_reg, loop->iter_reg, constm1_rtx);
  /* See if we can emit the add at the head of the loop rather than at the
     end.  */
  if (loop->head == NULL
      || loop->iter_reg_used_outside
      || loop->iter_reg_used
      || TEST_HARD_REG_BIT (loop->regs_set_in_loop, REGNO (loop->iter_reg))
      || loop->incoming_dest != loop->head
      || EDGE_COUNT (loop->head->preds) != 2)
    emit_insn_before (insn, loop->loop_end);
  else
    {
      rtx_insn *t = loop->start_label;
      while (!NOTE_P (t) || NOTE_KIND (t) != NOTE_INSN_BASIC_BLOCK)
	t = NEXT_INSN (t);
      emit_insn_after (insn, t);
    }

  testreg = SET_DEST (XVECEXP (PATTERN (loop->loop_end), 0, 2));
  if (GET_CODE (testreg) == SCRATCH)
    testreg = loop->iter_reg;
  else
    emit_insn_before (gen_movsi (testreg, loop->iter_reg), loop->loop_end);

  test = gen_rtx_NE (VOIDmode, testreg, const0_rtx);
  insn = emit_jump_insn_before (gen_cbranchsi4 (test, testreg, const0_rtx,
						loop->start_label),
				loop->loop_end);

  JUMP_LABEL (insn) = loop->start_label;
  LABEL_NUSES (loop->start_label)++;
  delete_insn (loop->loop_end);
}
static struct hw_doloop_hooks c6x_doloop_hooks =
{
  hwloop_pattern_reg,
  hwloop_optimize,
  hwloop_fail
};

/* Run the hw-doloop pass to modulo-schedule hardware loops, or split the
   doloop_end patterns where such optimizations are impossible.  */

static void
c6x_hwloops (void)
{
  if (optimize)
    reorg_loops (true, &c6x_doloop_hooks);
}
/* Implement the TARGET_MACHINE_DEPENDENT_REORG pass.  We split call insns
   here into a sequence that loads the return register and performs the call,
   and emit the return label.
   If scheduling after reload is requested, it happens here.  */

static void
c6x_reorg (void)
{
  basic_block bb;
  bool do_selsched = (c6x_flag_schedule_insns2 && flag_selective_scheduling2
		      && !maybe_skip_selective_scheduling ());

  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  df_clear_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();

  /* If optimizing, we'll have split before scheduling.  */
  if (optimize == 0)
    split_all_insns ();

  df_analyze ();

  if (c6x_flag_schedule_insns2)
    {
      int sz = get_max_uid () * 3 / 2 + 1;

      insn_info.create (sz);
    }

  /* Make sure the real-jump insns we create are not deleted.  When modulo-
     scheduling, situations where a reg is only stored in a loop can also
     cause dead code when doing the initial unrolling.  */
  sched_no_dce = true;

  c6x_hwloops ();

  if (c6x_flag_schedule_insns2)
    {
      split_delayed_insns ();
      timevar_push (TV_SCHED2);
      if (do_selsched)
	run_selective_scheduling ();
      else
	schedule_ebbs ();
      conditionalize_after_sched ();
      timevar_pop (TV_SCHED2);

      free_delay_pairs ();
    }
  sched_no_dce = false;

  rtx_insn **call_labels = XCNEWVEC (rtx_insn *, get_max_uid () + 1);

  reorg_split_calls (call_labels);

  if (c6x_flag_schedule_insns2)
    {
      FOR_EACH_BB_FN (bb, cfun)
	if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
	  assign_reservations (BB_HEAD (bb), BB_END (bb));
    }

  if (c6x_flag_var_tracking)
    {
      timevar_push (TV_VAR_TRACKING);
      variable_tracking_main ();
      timevar_pop (TV_VAR_TRACKING);
    }

  reorg_emit_nops (call_labels);

  /* Post-process the schedule to move parallel insns into SEQUENCEs.  */
  if (c6x_flag_schedule_insns2)
    {
      free_delay_pairs ();
      c6x_gen_bundles ();
    }

  df_finish_pass (false);
}
/* Called when a function has been assembled.  It should perform all the
   tasks of ASM_DECLARE_FUNCTION_SIZE in elfos.h, plus target-specific
   tasks.
   We free the reservation (and other scheduling) information here now that
   all insns have been output.  */

void
c6x_function_end (FILE *file, const char *fname)
{
  c6x_output_fn_unwind (file);

  insn_info.release ();

  if (!flag_inhibit_size_directive)
    ASM_OUTPUT_MEASURED_SIZE (file, fname);
}
/* Determine whether X is a shift with code CODE and an integer amount
   AMOUNT.  */

static bool
shift_p (rtx x, enum rtx_code code, int amount)
{
  return (GET_CODE (x) == code && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) == amount);
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
c6x_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
	       bool speed)
{
  int cost2 = COSTS_N_INSNS (1);
  rtx op0, op1;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (outer_code == SET || outer_code == PLUS)
	*total = satisfies_constraint_IsB (x) ? 0 : cost2;
      else if (outer_code == AND || outer_code == IOR || outer_code == XOR
	       || outer_code == MINUS)
	*total = satisfies_constraint_Is5 (x) ? 0 : cost2;
      else if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
	       || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
	*total = satisfies_constraint_Iu4 (x) ? 0 : cost2;
      else if (outer_code == ASHIFT || outer_code == ASHIFTRT
	       || outer_code == LSHIFTRT)
	*total = satisfies_constraint_Iu5 (x) ? 0 : cost2;
      else
	*total = cost2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      *total = COSTS_N_INSNS (2);
      return true;

    case TRUNCATE:
      /* Recognize a mult_highpart operation.  */
      if ((mode == HImode || mode == SImode)
	  && GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && GET_MODE (XEXP (x, 0)) == GET_MODE_2XWIDER_MODE (mode).require ()
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (mode))
	{
	  rtx mul = XEXP (XEXP (x, 0), 0);
	  rtx op0 = XEXP (mul, 0);
	  rtx op1 = XEXP (mul, 1);
	  enum rtx_code code0 = GET_CODE (op0);
	  enum rtx_code code1 = GET_CODE (op1);

	  if ((code0 == code1
	       && (code0 == SIGN_EXTEND || code0 == ZERO_EXTEND))
	      || (mode == HImode
		  && code0 == ZERO_EXTEND && code1 == SIGN_EXTEND))
	    {
	      if (mode == HImode)
		*total = COSTS_N_INSNS (2);
	      else
		*total = COSTS_N_INSNS (12);
	      mode = GET_MODE (XEXP (op0, 0));
	      *total += rtx_cost (XEXP (op0, 0), mode, code0, 0, speed);
	      *total += rtx_cost (XEXP (op1, 0), mode, code1, 0, speed);
	      return true;
	    }
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (mode == DImode)
	*total = COSTS_N_INSNS (CONSTANT_P (XEXP (x, 1)) ? 4 : 15);
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (1);
      op0 = code == PLUS ? XEXP (x, 0) : XEXP (x, 1);
      op1 = code == PLUS ? XEXP (x, 1) : XEXP (x, 0);
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	  && INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) == 2
	      || INTVAL (XEXP (op0, 1)) == 4
	      || (code == PLUS && INTVAL (XEXP (op0, 1)) == 8)))
	{
	  *total += rtx_cost (XEXP (op0, 0), mode, ASHIFT, 0, speed);
	  *total += rtx_cost (op1, mode, (enum rtx_code) code, 1, speed);
	  return true;
	}
      return false;

    case MULT:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      if (mode == DFmode)
	{
	  if (TARGET_FP)
	    *total = COSTS_N_INSNS (speed ? 10 : 1);
	  else
	    *total = COSTS_N_INSNS (speed ? 200 : 4);
	}
      else if (mode == SFmode)
	{
	  if (TARGET_FP)
	    *total = COSTS_N_INSNS (speed ? 4 : 1);
	  else
	    *total = COSTS_N_INSNS (speed ? 100 : 4);
	}
      else if (mode == DImode)
	{
	  if (TARGET_MPY32
	      && GET_CODE (op0) == GET_CODE (op1)
	      && (GET_CODE (op0) == ZERO_EXTEND
		  || GET_CODE (op0) == SIGN_EXTEND))
	    {
	      *total = COSTS_N_INSNS (speed ? 2 : 1);
	      op0 = XEXP (op0, 0);
	      op1 = XEXP (op1, 0);
	    }
	  else
	    /* Maybe improve this later.  */
	    *total = COSTS_N_INSNS (20);
	}
      else if (mode == SImode)
	{
	  if (((GET_CODE (op0) == ZERO_EXTEND
		|| GET_CODE (op0) == SIGN_EXTEND
		|| shift_p (op0, LSHIFTRT, 16))
	       && (GET_CODE (op1) == SIGN_EXTEND
		   || GET_CODE (op1) == ZERO_EXTEND
		   || scst5_operand (op1, SImode)
		   || shift_p (op1, ASHIFTRT, 16)
		   || shift_p (op1, LSHIFTRT, 16)))
	      || (shift_p (op0, ASHIFTRT, 16)
		  && (GET_CODE (op1) == SIGN_EXTEND
		      || shift_p (op1, ASHIFTRT, 16))))
	    {
	      *total = COSTS_N_INSNS (speed ? 2 : 1);
	      op0 = XEXP (op0, 0);
	      if (scst5_operand (op1, SImode))
		op1 = NULL_RTX;
	      else
		op1 = XEXP (op1, 0);
	    }
	  else if (!speed)
	    *total = COSTS_N_INSNS (1);
	  else if (TARGET_MPY32)
	    *total = COSTS_N_INSNS (4);
	  else
	    *total = COSTS_N_INSNS (6);
	}
      else if (mode == HImode)
	*total = COSTS_N_INSNS (speed ? 2 : 1);

      if (GET_CODE (op0) != REG
	  && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
	*total += rtx_cost (op0, mode, MULT, 0, speed);
      if (op1 && GET_CODE (op1) != REG
	  && (GET_CODE (op1) != SUBREG || GET_CODE (SUBREG_REG (op1)) != REG))
	*total += rtx_cost (op1, mode, MULT, 1, speed);
      return true;

    case UDIV:
    case DIV:
      /* This is a bit random; assuming on average there'll be 16 leading
	 zeros.  FIXME: estimate better for constant dividends.  */
      *total = COSTS_N_INSNS (6 + 3 * 16);
      return false;

    case IF_THEN_ELSE:
      /* Recognize the cmp_and/ior patterns.  */
      op0 = XEXP (x, 0);
      if ((GET_CODE (op0) == EQ || GET_CODE (op0) == NE)
	  && REG_P (XEXP (op0, 0))
	  && XEXP (op0, 1) == const0_rtx
	  && rtx_equal_p (XEXP (x, 1), XEXP (op0, 0)))
	{
	  *total = rtx_cost (XEXP (x, 1), VOIDmode, (enum rtx_code) outer_code,
			     opno, speed);
	  return false;
	}
      return false;

    default:
      return false;
    }
}
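
/* Illustrative note (added): the PLUS/MINUS case makes scaled-index address
   arithmetic look cheap.  For an SImode (plus (mult r1 4) r2), only the
   costs of the two register operands are accumulated, reflecting that the
   scaling folds into a single ADDAH/ADDAW-style insn instead of a separate
   shift followed by an add.  */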
/* Implements target hook vector_mode_supported_p.  */

static bool
c6x_vector_mode_supported_p (machine_mode mode)

/* Implements TARGET_VECTORIZE_PREFERRED_SIMD_MODE.  */

static machine_mode
c6x_preferred_simd_mode (machine_mode mode)
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */

static bool
c6x_scalar_mode_supported_p (machine_mode mode)
{
  if (ALL_FIXED_POINT_MODE_P (mode)
      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
    return true;

  return default_scalar_mode_supported_p (mode);
}
/* Output a reference from a function exception table to the type_info
   object X.  Output these via a special assembly directive.  */

static bool
c6x_output_ttype (rtx x)
{
  /* Use special relocations for symbol references.  */
  if (GET_CODE (x) != CONST_INT)
    fputs ("\t.ehtype\t", asm_out_file);
  else
    fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, x);
  fputc ('\n', asm_out_file);

  return true;
}
/* Modify the return address of the current function.  */

void
c6x_set_return_address (rtx source, rtx scratch)
{
  struct c6x_frame frame;
  rtx addr;
  HOST_WIDE_INT offset;

  c6x_compute_frame_layout (&frame);
  if (! c6x_save_reg (RETURN_ADDR_REGNO))
    emit_move_insn (gen_rtx_REG (Pmode, RETURN_ADDR_REGNO), source);
  else
    {
      if (frame_pointer_needed)
	{
	  addr = hard_frame_pointer_rtx;
	  offset = frame.b3_offset;
	}
      else
	{
	  addr = stack_pointer_rtx;
	  offset = frame.to_allocate - frame.b3_offset;
	}

      /* TODO: Use base+offset loads where possible.  */
      if (offset)
	{
	  HOST_WIDE_INT low = trunc_int_for_mode (offset, HImode);

	  emit_insn (gen_movsi_high (scratch, GEN_INT (low)));
	  if (low != offset)
	    emit_insn (gen_movsi_lo_sum (scratch, scratch, GEN_INT (offset)));
	  emit_insn (gen_addsi3 (scratch, addr, scratch));
	  addr = scratch;
	}

      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
}
/* We save pairs of registers using a DImode store.  Describe the component
   registers for DWARF generation code.  */

static rtx
c6x_dwarf_register_span (rtx rtl)
{
  unsigned regno;
  unsigned real_regno;
  int nregs;
  int i;
  rtx p;

  regno = REGNO (rtl);
  nregs = HARD_REGNO_NREGS (regno, GET_MODE (rtl));
  if (nregs == 1)
    return NULL_RTX;

  p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
  for (i = 0; i < nregs; i++)
    {
      if (TARGET_BIG_ENDIAN)
	real_regno = regno + nregs - (i + 1);
      else
	real_regno = regno + i;

      XVECEXP (p, 0, i) = gen_rtx_REG (SImode, real_regno);
    }

  return p;
}
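
/* Worked example (added): a DImode value whose low word lives in A4
   occupies the pair A5:A4.  In little-endian mode the span lists A4 then
   A5; with TARGET_BIG_ENDIAN the order is reversed so the DWARF consumer
   sees the register holding the most significant word first.  */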
/* Codes for all the C6X builtins.  */

static GTY(()) tree c6x_builtin_decls[C6X_BUILTIN_MAX];

/* Return the C6X builtin for CODE.  */

static tree
c6x_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= C6X_BUILTIN_MAX)
    return error_mark_node;

  return c6x_builtin_decls[code];
}
#define def_builtin(NAME, TYPE, CODE)					\
do {									\
  tree bdecl;								\
  bdecl = add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD,	\
				NULL, NULL_TREE);			\
  c6x_builtin_decls[CODE] = bdecl;					\
} while (0)

/* Set up all builtin functions for this target.  */

static void
c6x_init_builtins (void)
{
  tree V4QI_type_node = build_vector_type (unsigned_intQI_type_node, 4);
  tree V2HI_type_node = build_vector_type (intHI_type_node, 2);
  tree V2SI_type_node = build_vector_type (intSI_type_node, 2);
  tree int_ftype_int
    = build_function_type_list (integer_type_node, integer_type_node,
				NULL_TREE);
  tree int_ftype_int_int
    = build_function_type_list (integer_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  tree v2hi_ftype_v2hi
    = build_function_type_list (V2HI_type_node, V2HI_type_node, NULL_TREE);
  tree v4qi_ftype_v4qi_v4qi
    = build_function_type_list (V4QI_type_node, V4QI_type_node,
				V4QI_type_node, NULL_TREE);
  tree v2hi_ftype_v2hi_v2hi
    = build_function_type_list (V2HI_type_node, V2HI_type_node,
				V2HI_type_node, NULL_TREE);
  tree v2si_ftype_v2hi_v2hi
    = build_function_type_list (V2SI_type_node, V2HI_type_node,
				V2HI_type_node, NULL_TREE);

  def_builtin ("__builtin_c6x_sadd", int_ftype_int_int,
	       C6X_BUILTIN_SADD);
  def_builtin ("__builtin_c6x_ssub", int_ftype_int_int,
	       C6X_BUILTIN_SSUB);
  def_builtin ("__builtin_c6x_add2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_ADD2);
  def_builtin ("__builtin_c6x_sub2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SUB2);
  def_builtin ("__builtin_c6x_add4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_ADD4);
  def_builtin ("__builtin_c6x_sub4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_SUB4);
  def_builtin ("__builtin_c6x_mpy2", v2si_ftype_v2hi_v2hi,
	       C6X_BUILTIN_MPY2);
  def_builtin ("__builtin_c6x_sadd2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SADD2);
  def_builtin ("__builtin_c6x_ssub2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SSUB2);
  def_builtin ("__builtin_c6x_saddu4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_SADDU4);
  def_builtin ("__builtin_c6x_smpy2", v2si_ftype_v2hi_v2hi,
	       C6X_BUILTIN_SMPY2);

  def_builtin ("__builtin_c6x_smpy", int_ftype_int_int,
	       C6X_BUILTIN_SMPY);
  def_builtin ("__builtin_c6x_smpyh", int_ftype_int_int,
	       C6X_BUILTIN_SMPYH);
  def_builtin ("__builtin_c6x_smpyhl", int_ftype_int_int,
	       C6X_BUILTIN_SMPYHL);
  def_builtin ("__builtin_c6x_smpylh", int_ftype_int_int,
	       C6X_BUILTIN_SMPYLH);

  def_builtin ("__builtin_c6x_sshl", int_ftype_int_int,
	       C6X_BUILTIN_SSHL);
  def_builtin ("__builtin_c6x_subc", int_ftype_int_int,
	       C6X_BUILTIN_SUBC);

  def_builtin ("__builtin_c6x_avg2", v2hi_ftype_v2hi_v2hi,
	       C6X_BUILTIN_AVG2);
  def_builtin ("__builtin_c6x_avgu4", v4qi_ftype_v4qi_v4qi,
	       C6X_BUILTIN_AVGU4);

  def_builtin ("__builtin_c6x_clrr", int_ftype_int_int,
	       C6X_BUILTIN_CLRR);
  def_builtin ("__builtin_c6x_extr", int_ftype_int_int,
	       C6X_BUILTIN_EXTR);
  def_builtin ("__builtin_c6x_extru", int_ftype_int_int,
	       C6X_BUILTIN_EXTRU);

  def_builtin ("__builtin_c6x_abs", int_ftype_int, C6X_BUILTIN_ABS);
  def_builtin ("__builtin_c6x_abs2", v2hi_ftype_v2hi, C6X_BUILTIN_ABS2);
}
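
/* Usage sketch (added; the function and variable names below are invented
   for illustration): user code calls these builtins directly, e.g.

     int saturating_sum (int a, int b)
     {
       return __builtin_c6x_sadd (a, b);   (expands to the SADD insn)
     }

   The vector variants operate on the V4QI/V2HI vector types constructed
   above.  */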
struct builtin_description
{
  const enum insn_code icode;
  const char *const name;
  const enum c6x_builtins code;
};

static const struct builtin_description bdesc_2arg[] =
{
  { CODE_FOR_saddsi3, "__builtin_c6x_sadd", C6X_BUILTIN_SADD },
  { CODE_FOR_ssubsi3, "__builtin_c6x_ssub", C6X_BUILTIN_SSUB },
  { CODE_FOR_addv2hi3, "__builtin_c6x_add2", C6X_BUILTIN_ADD2 },
  { CODE_FOR_subv2hi3, "__builtin_c6x_sub2", C6X_BUILTIN_SUB2 },
  { CODE_FOR_addv4qi3, "__builtin_c6x_add4", C6X_BUILTIN_ADD4 },
  { CODE_FOR_subv4qi3, "__builtin_c6x_sub4", C6X_BUILTIN_SUB4 },
  { CODE_FOR_ss_addv2hi3, "__builtin_c6x_sadd2", C6X_BUILTIN_SADD2 },
  { CODE_FOR_ss_subv2hi3, "__builtin_c6x_ssub2", C6X_BUILTIN_SSUB2 },
  { CODE_FOR_us_addv4qi3, "__builtin_c6x_saddu4", C6X_BUILTIN_SADDU4 },
  { CODE_FOR_mulv2hiv2si3, "__builtin_c6x_mpy2", C6X_BUILTIN_MPY2 },

  { CODE_FOR_subcsi3, "__builtin_c6x_subc", C6X_BUILTIN_SUBC },
  { CODE_FOR_ss_ashlsi3, "__builtin_c6x_sshl", C6X_BUILTIN_SSHL },

  { CODE_FOR_avgv2hi3, "__builtin_c6x_avg2", C6X_BUILTIN_AVG2 },
  { CODE_FOR_uavgv4qi3, "__builtin_c6x_avgu4", C6X_BUILTIN_AVGU4 },

  { CODE_FOR_mulhqsq3, "__builtin_c6x_smpy", C6X_BUILTIN_SMPY },
  { CODE_FOR_mulhqsq3_hh, "__builtin_c6x_smpyh", C6X_BUILTIN_SMPYH },
  { CODE_FOR_mulhqsq3_lh, "__builtin_c6x_smpylh", C6X_BUILTIN_SMPYLH },
  { CODE_FOR_mulhqsq3_hl, "__builtin_c6x_smpyhl", C6X_BUILTIN_SMPYHL },

  { CODE_FOR_mulv2hqv2sq3, "__builtin_c6x_smpy2", C6X_BUILTIN_SMPY2 },

  { CODE_FOR_clrr, "__builtin_c6x_clrr", C6X_BUILTIN_CLRR },
  { CODE_FOR_extr, "__builtin_c6x_extr", C6X_BUILTIN_EXTR },
  { CODE_FOR_extru, "__builtin_c6x_extru", C6X_BUILTIN_EXTRU }
};

static const struct builtin_description bdesc_1arg[] =
{
  { CODE_FOR_ssabssi2, "__builtin_c6x_abs", C6X_BUILTIN_ABS },
  { CODE_FOR_ssabsv2hi2, "__builtin_c6x_abs2", C6X_BUILTIN_ABS2 }
};
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, machine_mode mode)
{
  if (x != const0_rtx)
    return x;
  x = gen_reg_rtx (SImode);

  emit_insn (gen_movsi (x, CONST0_RTX (SImode)));
  return gen_lowpart (mode, x);
}
/* Subroutine of c6x_expand_builtin to take care of binop insns.  MATCH_OP is
   true if the insn pattern requires an extra input operand that matches the
   output operand (operand 0 is tied to an input).  */

static rtx
c6x_expand_binop_builtin (enum insn_code icode, tree exp, rtx target,
			  bool match_op)
{
  int offs = match_op ? 1 : 0;
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  machine_mode op0mode = GET_MODE (op0);
  machine_mode op1mode = GET_MODE (op1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1 + offs].mode;
  machine_mode mode1 = insn_data[icode].operand[2 + offs].mode;
  rtx ret = target;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    {
      if (tmode == SQmode || tmode == V2SQmode)
	{
	  ret = gen_reg_rtx (tmode == SQmode ? SImode : V2SImode);
	  target = gen_lowpart (tmode, ret);
	}
      else
	target = ret = gen_reg_rtx (tmode);
    }

  if ((op0mode == V2HImode || op0mode == SImode || op0mode == VOIDmode)
      && (mode0 == V2HQmode || mode0 == HQmode || mode0 == SQmode))
    {
      op0mode = mode0;
      op0 = gen_lowpart (mode0, op0);
    }
  if ((op1mode == V2HImode || op1mode == SImode || op1mode == VOIDmode)
      && (mode1 == V2HQmode || mode1 == HQmode || mode1 == SQmode))
    {
      op1mode = mode1;
      op1 = gen_lowpart (mode1, op1);
    }
  /* In case the insn wants input operands in modes different from
     the result, abort.  */
  gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
	      && (op1mode == mode1 || op1mode == VOIDmode));

  if (! (*insn_data[icode].operand[1 + offs].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2 + offs].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  if (match_op)
    pat = GEN_FCN (icode) (target, target, op0, op1);
  else
    pat = GEN_FCN (icode) (target, op0, op1);

  if (! pat)
    return NULL_RTX;

  emit_insn (pat);

  return ret;
}
/* Subroutine of c6x_expand_builtin to take care of unop insns.  */

static rtx
c6x_expand_unop_builtin (enum insn_code icode, tree exp,
			 rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  machine_mode op0mode = GET_MODE (op0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);

  if (op0mode == SImode && mode0 == HImode)
    {
      op0mode = HImode;
      op0 = gen_lowpart (HImode, op0);
    }
  gcc_assert (op0mode == mode0 || op0mode == VOIDmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return NULL_RTX;
  emit_insn (pat);
  return target;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
c6x_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
		    rtx subtarget ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED,
		    int ignore ATTRIBUTE_UNUSED)
{
  size_t i;
  const struct builtin_description *d;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return c6x_expand_binop_builtin (d->icode, exp, target,
				       fcode == C6X_BUILTIN_CLRR);

  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return c6x_expand_unop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
/* Target unwind frame info is generated from dwarf CFI directives, so
   always output dwarf2 unwind info.  */

static enum unwind_info_type
c6x_debug_unwind_info (void)
{
  if (flag_unwind_tables || flag_exceptions)
    return UI_DWARF2;

  return default_debug_unwind_info ();
}
/* Target Structure.  */

/* Initialize the GCC target structure.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG c6x_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE c6x_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY c6x_function_arg_boundary
#undef TARGET_FUNCTION_ARG_ROUND_BOUNDARY
#define TARGET_FUNCTION_ARG_ROUND_BOUNDARY \
  c6x_function_arg_round_boundary
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P c6x_function_value_regno_p
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE c6x_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE c6x_libcall_value
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY c6x_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB c6x_return_in_msb
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE c6x_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES c6x_callee_copies
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX c6x_struct_value_rtx
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL c6x_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK c6x_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK c6x_can_output_mi_thunk

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST c6x_build_builtin_va_list

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE c6x_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT c6x_initialize_trampoline

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P c6x_legitimate_constant_p
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P c6x_legitimate_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P c6x_in_small_data_p
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION c6x_select_rtx_section
#undef TARGET_ASM_SELECT_SECTION
#define TARGET_ASM_SELECT_SECTION c6x_elf_select_section
#undef TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION c6x_elf_unique_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS c6x_section_type_flags
#undef TARGET_HAVE_SRODATA_SECTION
#define TARGET_HAVE_SRODATA_SECTION true
#undef TARGET_ASM_MERGEABLE_RODATA_PREFIX
#define TARGET_ASM_MERGEABLE_RODATA_PREFIX ".const"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE c6x_option_override
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE c6x_conditional_register_usage

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS c6x_init_libfuncs
#undef TARGET_LIBFUNC_GNU_PREFIX
#define TARGET_LIBFUNC_GNU_PREFIX true

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P c6x_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P c6x_vector_mode_supported_p
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE c6x_preferred_simd_mode

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS c6x_rtx_costs

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT c6x_sched_init
#undef TARGET_SCHED_SET_SCHED_FLAGS
#define TARGET_SCHED_SET_SCHED_FLAGS c6x_set_sched_flags
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST c6x_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE c6x_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE c6x_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER c6x_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 c6x_sched_reorder2
#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE c6x_dfa_new_cycle
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN c6x_sched_dfa_pre_cycle_insn
#undef TARGET_SCHED_EXPOSED_PIPELINE
#define TARGET_SCHED_EXPOSED_PIPELINE true

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT c6x_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT c6x_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT c6x_set_sched_context
#undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
#define TARGET_SCHED_CLEAR_SCHED_CONTEXT c6x_clear_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT c6x_free_sched_context

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE c6x_can_eliminate

#undef TARGET_PREFERRED_RENAME_CLASS
#define TARGET_PREFERRED_RENAME_CLASS c6x_preferred_rename_class

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG c6x_reorg

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START c6x_file_start

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND c6x_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS c6x_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P c6x_print_operand_punct_valid_p

/* C6x unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE c6x_output_ttype

/* The C6x ABI follows the ARM EABI exception handling rules.  */
#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true

#undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
#define TARGET_ASM_EMIT_EXCEPT_PERSONALITY c6x_asm_emit_except_personality

#undef TARGET_ASM_INIT_SECTIONS
#define TARGET_ASM_INIT_SECTIONS c6x_asm_init_sections

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO c6x_debug_unwind_info

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN c6x_dwarf_register_span

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS c6x_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN c6x_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL c6x_builtin_decl

struct gcc_target targetm = TARGET_INITIALIZER;