gcc/config/mcore/mcore.cc
1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
8 by the Free Software Foundation; either version 3, or (at your
9 option) any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "attribs.h"
34 #include "emit-rtl.h"
35 #include "diagnostic-core.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "calls.h"
39 #include "mcore.h"
40 #include "output.h"
41 #include "explow.h"
42 #include "expr.h"
43 #include "cfgrtl.h"
44 #include "builtins.h"
45 #include "regs.h"
47 /* This file should be included last. */
48 #include "target-def.h"
50 /* For dumping information about frame sizes. */
51 char * mcore_current_function_name = 0;
52 long mcore_current_compilation_timestamp = 0;
54 /* Global variables for machine-dependent things. */
56 /* Provides the class number of the smallest class containing
57 reg number. */
58 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
60 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
61 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
62 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
63 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
64 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
67 struct mcore_frame
69 int arg_size; /* Stdarg spills (bytes). */
70 int reg_size; /* Non-volatile reg saves (bytes). */
71 int reg_mask; /* Non-volatile reg saves. */
72 int local_size; /* Locals. */
73 int outbound_size; /* Arg overflow on calls out. */
74 int pad_outbound;
75 int pad_local;
76 int pad_reg;
77 /* Describe the steps we'll use to grow it. */
78 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
79 int growth[MAX_STACK_GROWS];
80 int arg_offset;
81 int reg_offset;
82 int reg_growth;
83 int local_growth;
86 typedef enum
88 COND_NO,
89 COND_MOV_INSN,
90 COND_CLR_INSN,
91 COND_INC_INSN,
92 COND_DEC_INSN,
93 COND_BRANCH_INSN
95 cond_type;
97 static void output_stack_adjust (int, int);
98 static int calc_live_regs (int *);
99 static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
100 static const char * output_inline_const (machine_mode, rtx *);
101 static void layout_mcore_frame (struct mcore_frame *);
102 static void mcore_setup_incoming_varargs (cumulative_args_t,
103 const function_arg_info &,
104 int *, int);
105 static cond_type is_cond_candidate (rtx);
106 static rtx_insn *emit_new_cond_insn (rtx_insn *, int);
107 static rtx_insn *conditionalize_block (rtx_insn *);
108 static void conditionalize_optimization (void);
109 static void mcore_reorg (void);
110 static rtx handle_structs_in_regs (machine_mode, const_tree, int);
111 static void mcore_mark_dllexport (tree);
112 static void mcore_mark_dllimport (tree);
113 static int mcore_dllexport_p (tree);
114 static int mcore_dllimport_p (tree);
115 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
116 #ifdef OBJECT_FORMAT_ELF
117 static void mcore_asm_named_section (const char *,
118 unsigned int, tree);
119 #endif
120 static void mcore_print_operand (FILE *, rtx, int);
121 static void mcore_print_operand_address (FILE *, machine_mode, rtx);
122 static bool mcore_print_operand_punct_valid_p (unsigned char code);
123 static void mcore_unique_section (tree, int);
124 static void mcore_encode_section_info (tree, rtx, int);
125 static const char *mcore_strip_name_encoding (const char *);
126 static int mcore_const_costs (rtx, RTX_CODE);
127 static int mcore_and_cost (rtx);
128 static int mcore_ior_cost (rtx);
129 static bool mcore_rtx_costs (rtx, machine_mode, int, int,
130 int *, bool);
131 static void mcore_external_libcall (rtx);
132 static bool mcore_return_in_memory (const_tree, const_tree);
133 static int mcore_arg_partial_bytes (cumulative_args_t,
134 const function_arg_info &);
135 static rtx mcore_function_arg (cumulative_args_t,
136 const function_arg_info &);
137 static void mcore_function_arg_advance (cumulative_args_t,
138 const function_arg_info &);
139 static unsigned int mcore_function_arg_boundary (machine_mode,
140 const_tree);
141 static void mcore_asm_trampoline_template (FILE *);
142 static void mcore_trampoline_init (rtx, tree, rtx);
143 static bool mcore_warn_func_return (tree);
144 static void mcore_option_override (void);
145 static bool mcore_legitimate_constant_p (machine_mode, rtx);
146 static bool mcore_legitimate_address_p (machine_mode, rtx, bool,
147 addr_space_t,
148 code_helper = ERROR_MARK);
149 static bool mcore_hard_regno_mode_ok (unsigned int, machine_mode);
150 static bool mcore_modes_tieable_p (machine_mode, machine_mode);
152 /* MCore specific attributes. */
154 static const struct attribute_spec mcore_attribute_table[] =
156 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
157 affects_type_identity, handler, exclude } */
158 { "dllexport", 0, 0, true, false, false, false, NULL, NULL },
159 { "dllimport", 0, 0, true, false, false, false, NULL, NULL },
160 { "naked", 0, 0, true, false, false, false,
161 mcore_handle_naked_attribute, NULL },
162 { NULL, 0, 0, false, false, false, false, NULL, NULL }
165 /* Initialize the GCC target structure. */
166 #undef TARGET_ASM_EXTERNAL_LIBCALL
167 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
169 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
170 #undef TARGET_MERGE_DECL_ATTRIBUTES
171 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
172 #endif
174 #ifdef OBJECT_FORMAT_ELF
175 #undef TARGET_ASM_UNALIGNED_HI_OP
176 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
177 #undef TARGET_ASM_UNALIGNED_SI_OP
178 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
179 #endif
181 #undef TARGET_PRINT_OPERAND
182 #define TARGET_PRINT_OPERAND mcore_print_operand
183 #undef TARGET_PRINT_OPERAND_ADDRESS
184 #define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
185 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
186 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
188 #undef TARGET_ATTRIBUTE_TABLE
189 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
190 #undef TARGET_ASM_UNIQUE_SECTION
191 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
192 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
193 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
194 #undef TARGET_ENCODE_SECTION_INFO
195 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
196 #undef TARGET_STRIP_NAME_ENCODING
197 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
198 #undef TARGET_RTX_COSTS
199 #define TARGET_RTX_COSTS mcore_rtx_costs
200 #undef TARGET_ADDRESS_COST
201 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
202 #undef TARGET_MACHINE_DEPENDENT_REORG
203 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
205 #undef TARGET_PROMOTE_FUNCTION_MODE
206 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
207 #undef TARGET_PROMOTE_PROTOTYPES
208 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
210 #undef TARGET_RETURN_IN_MEMORY
211 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
212 #undef TARGET_MUST_PASS_IN_STACK
213 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
214 #undef TARGET_PASS_BY_REFERENCE
215 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
216 #undef TARGET_ARG_PARTIAL_BYTES
217 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
218 #undef TARGET_FUNCTION_ARG
219 #define TARGET_FUNCTION_ARG mcore_function_arg
220 #undef TARGET_FUNCTION_ARG_ADVANCE
221 #define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
222 #undef TARGET_FUNCTION_ARG_BOUNDARY
223 #define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
225 #undef TARGET_SETUP_INCOMING_VARARGS
226 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
228 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
229 #define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
230 #undef TARGET_TRAMPOLINE_INIT
231 #define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
233 #undef TARGET_OPTION_OVERRIDE
234 #define TARGET_OPTION_OVERRIDE mcore_option_override
236 #undef TARGET_LEGITIMATE_CONSTANT_P
237 #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
238 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
239 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
241 #undef TARGET_LRA_P
242 #define TARGET_LRA_P hook_bool_void_false
244 #undef TARGET_WARN_FUNC_RETURN
245 #define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
247 #undef TARGET_HARD_REGNO_MODE_OK
248 #define TARGET_HARD_REGNO_MODE_OK mcore_hard_regno_mode_ok
250 #undef TARGET_MODES_TIEABLE_P
251 #define TARGET_MODES_TIEABLE_P mcore_modes_tieable_p
253 #undef TARGET_CONSTANT_ALIGNMENT
254 #define TARGET_CONSTANT_ALIGNMENT constant_alignment_word_strings
256 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
257 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
259 struct gcc_target targetm = TARGET_INITIALIZER;
261 /* Adjust the stack by SIZE bytes; DIRECTION < 0 grows the stack, > 0 shrinks it. */
262 static void
263 output_stack_adjust (int direction, int size)
265 /* If extending the stack a lot, we do it incrementally. */
266 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
268 rtx tmp = gen_rtx_REG (SImode, 1);
269 rtx memref;
271 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
274 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
275 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
276 MEM_VOLATILE_P (memref) = 1;
277 emit_insn (gen_movsi (memref, stack_pointer_rtx));
278 size -= mcore_stack_increment;
280 while (size > mcore_stack_increment);
282 /* SIZE is now the residual for the last adjustment,
283 which doesn't require a probe. */
286 if (size)
288 rtx insn;
289 rtx val = GEN_INT (size);
291 if (size > 32)
293 rtx nval = gen_rtx_REG (SImode, 1);
294 emit_insn (gen_movsi (nval, val));
295 val = nval;
298 if (direction > 0)
299 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
300 else
301 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
303 emit_insn (insn);
307 /* Work out the registers which need to be saved,
308 both as a mask and a count. */
310 static int
311 calc_live_regs (int * count)
313 int reg;
314 int live_regs_mask = 0;
316 * count = 0;
318 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
320 if (df_regs_ever_live_p (reg) && !call_used_or_fixed_reg_p (reg))
322 (*count)++;
323 live_regs_mask |= (1 << reg);
327 return live_regs_mask;
330 /* Print the operand address in x to the stream. */
332 static void
333 mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
335 switch (GET_CODE (x))
337 case REG:
338 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
339 break;
341 case PLUS:
343 rtx base = XEXP (x, 0);
344 rtx index = XEXP (x, 1);
346 if (GET_CODE (base) != REG)
348 /* Ensure that BASE is a register (one of them must be). */
349 rtx temp = base;
350 base = index;
351 index = temp;
354 switch (GET_CODE (index))
356 case CONST_INT:
357 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
358 reg_names[REGNO(base)], INTVAL (index));
359 break;
361 default:
362 gcc_unreachable ();
366 break;
368 default:
369 output_addr_const (stream, x);
370 break;
374 static bool
375 mcore_print_operand_punct_valid_p (unsigned char code)
377 return (code == '.' || code == '#' || code == '*' || code == '^'
378 || code == '!');
381 /* Print operand x (an rtx) in assembler syntax to file stream
382 according to modifier code.
384 'R' print the next register or memory location along, i.e. the lsw in
385 a double word value
386 'O' print a constant without the #
387 'M' print a constant as its negative
388 'P' print log2 of a power of two
389 'Q' print log2 of an inverse of a power of two
390 'U' print register for ldm/stm instruction
391 'X' print byte number for xtrbN instruction. */
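/* The switch below additionally handles 'N' (the bmaski width: 32 for a
   value of -1, otherwise log2 (value + 1)) and 'x' (the constant in hex).
   As a rough illustration of the arithmetic codes for a CONST_INT of 8:
   %P prints 3, %M prints -8, %O prints 8 and %X prints 2.  */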
393 static void
394 mcore_print_operand (FILE * stream, rtx x, int code)
396 switch (code)
398 case 'N':
399 if (INTVAL(x) == -1)
400 fprintf (asm_out_file, "32");
401 else
402 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
403 break;
404 case 'P':
405 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
406 break;
407 case 'Q':
408 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
409 break;
410 case 'O':
411 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
412 break;
413 case 'M':
414 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
415 break;
416 case 'R':
417 /* Next location along in memory or register. */
418 switch (GET_CODE (x))
420 case REG:
421 fputs (reg_names[REGNO (x) + 1], (stream));
422 break;
423 case MEM:
424 mcore_print_operand_address
425 (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
426 break;
427 default:
428 gcc_unreachable ();
430 break;
431 case 'U':
432 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
433 reg_names[REGNO (x) + 3]);
434 break;
435 case 'x':
436 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
437 break;
438 case 'X':
439 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
440 break;
442 default:
443 switch (GET_CODE (x))
445 case REG:
446 fputs (reg_names[REGNO (x)], (stream));
447 break;
448 case MEM:
449 output_address (GET_MODE (x), XEXP (x, 0));
450 break;
451 default:
452 output_addr_const (stream, x);
453 break;
455 break;
459 /* What does a constant cost ? */
461 static int
462 mcore_const_costs (rtx exp, enum rtx_code code)
464 HOST_WIDE_INT val = INTVAL (exp);
466 /* Easy constants. */
467 if ( CONST_OK_FOR_I (val)
468 || CONST_OK_FOR_M (val)
469 || CONST_OK_FOR_N (val)
470 || (code == PLUS && CONST_OK_FOR_L (val)))
471 return 1;
472 else if (code == AND
473 && ( CONST_OK_FOR_M (~val)
474 || CONST_OK_FOR_N (~val)))
475 return 2;
476 else if (code == PLUS
477 && ( CONST_OK_FOR_I (-val)
478 || CONST_OK_FOR_M (-val)
479 || CONST_OK_FOR_N (-val)))
480 return 2;
482 return 5;
485 /* What does an and instruction cost - we do this because immediates may
486 have been relaxed. We want to ensure that cse will cse relaxed immediates
487 out. Otherwise we'll get bad code (multiple reloads of the same const). */
489 static int
490 mcore_and_cost (rtx x)
492 HOST_WIDE_INT val;
494 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
495 return 2;
497 val = INTVAL (XEXP (x, 1));
499 /* Do it directly. */
500 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
501 return 2;
502 /* Takes one instruction to load. */
503 else if (const_ok_for_mcore (val))
504 return 3;
505 /* Takes two instructions to load. */
506 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
507 return 4;
509 /* Takes a lrw to load. */
510 return 5;
513 /* What does an or cost - see mcore_and_cost (). */
515 static int
516 mcore_ior_cost (rtx x)
518 HOST_WIDE_INT val;
520 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
521 return 2;
523 val = INTVAL (XEXP (x, 1));
525 /* Do it directly with bclri. */
526 if (CONST_OK_FOR_M (val))
527 return 2;
528 /* Takes one instruction to load. */
529 else if (const_ok_for_mcore (val))
530 return 3;
531 /* Takes two instructions to load. */
532 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
533 return 4;
535 /* Takes a lrw to load. */
536 return 5;
539 static bool
540 mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
541 int opno ATTRIBUTE_UNUSED,
542 int * total, bool speed ATTRIBUTE_UNUSED)
544 int code = GET_CODE (x);
546 switch (code)
548 case CONST_INT:
549 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
550 return true;
551 case CONST:
552 case LABEL_REF:
553 case SYMBOL_REF:
554 *total = 5;
555 return true;
556 case CONST_DOUBLE:
557 *total = 10;
558 return true;
560 case AND:
561 *total = COSTS_N_INSNS (mcore_and_cost (x));
562 return true;
564 case IOR:
565 *total = COSTS_N_INSNS (mcore_ior_cost (x));
566 return true;
568 case DIV:
569 case UDIV:
570 case MOD:
571 case UMOD:
572 case FLOAT:
573 case FIX:
574 *total = COSTS_N_INSNS (100);
575 return true;
577 default:
578 return false;
582 /* Prepare the operands for a comparison. Return whether the branch/setcc
583 should reverse the operands. */
585 bool
586 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
588 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
589 bool invert;
591 if (GET_CODE (op1) == CONST_INT)
593 HOST_WIDE_INT val = INTVAL (op1);
595 switch (code)
597 case GTU:
598 /* Unsigned > 0 is the same as != 0; everything else is converted
599 below to LEU (reversed cmphs). */
600 if (val == 0)
601 code = NE;
602 break;
604 /* Check whether (LE A imm) can become (LT A imm + 1),
605 or (GT A imm) can become (GE A imm + 1). */
606 case GT:
607 case LE:
608 if (CONST_OK_FOR_J (val + 1))
610 op1 = GEN_INT (val + 1);
611 code = code == LE ? LT : GE;
613 break;
615 default:
616 break;
620 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
621 op1 = force_reg (SImode, op1);
623 /* cmpnei: 0-31 (K immediate)
624 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
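/* As an example of the rewriting above and below: (le rX 10) has already
   been turned into (lt rX 11), which the LT case emits as a plain cmplti
   with no inversion, whereas (eq rX rY) becomes an inverted cmpne.  */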
625 invert = false;
626 switch (code)
628 case EQ: /* Use inverted condition, cmpne. */
629 code = NE;
630 invert = true;
631 /* FALLTHRU */
633 case NE: /* Use normal condition, cmpne. */
634 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
635 op1 = force_reg (SImode, op1);
636 break;
638 case LE: /* Use inverted condition, reversed cmplt. */
639 code = GT;
640 invert = true;
641 /* FALLTHRU */
643 case GT: /* Use normal condition, reversed cmplt. */
644 if (GET_CODE (op1) == CONST_INT)
645 op1 = force_reg (SImode, op1);
646 break;
648 case GE: /* Use inverted condition, cmplt. */
649 code = LT;
650 invert = true;
651 /* FALLTHRU */
653 case LT: /* Use normal condition, cmplt. */
654 if (GET_CODE (op1) == CONST_INT &&
655 /* covered by btsti x,31. */
656 INTVAL (op1) != 0 &&
657 ! CONST_OK_FOR_J (INTVAL (op1)))
658 op1 = force_reg (SImode, op1);
659 break;
661 case GTU: /* Use inverted condition, cmple. */
662 /* We coped with unsigned > 0 above. */
663 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
664 code = LEU;
665 invert = true;
666 /* FALLTHRU */
668 case LEU: /* Use normal condition, reversed cmphs. */
669 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
670 op1 = force_reg (SImode, op1);
671 break;
673 case LTU: /* Use inverted condition, cmphs. */
674 code = GEU;
675 invert = true;
676 /* FALLTHRU */
678 case GEU: /* Use normal condition, cmphs. */
679 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
680 op1 = force_reg (SImode, op1);
681 break;
683 default:
684 break;
687 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
688 return invert;
692 mcore_symbolic_address_p (rtx x)
694 switch (GET_CODE (x))
696 case SYMBOL_REF:
697 case LABEL_REF:
698 return 1;
699 case CONST:
700 x = XEXP (x, 0);
701 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
702 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
703 && GET_CODE (XEXP (x, 1)) == CONST_INT);
704 default:
705 return 0;
709 /* Functions to output assembly code for a function call. */
711 char *
712 mcore_output_call (rtx operands[], int index)
714 static char buffer[20];
715 rtx addr = operands [index];
717 if (REG_P (addr))
719 if (TARGET_CG_DATA)
721 gcc_assert (mcore_current_function_name);
723 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
724 "unknown", 1);
727 sprintf (buffer, "jsr\t%%%d", index);
729 else
731 if (TARGET_CG_DATA)
733 gcc_assert (mcore_current_function_name);
734 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
736 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
737 XSTR (addr, 0), 0);
740 sprintf (buffer, "jbsr\t%%%d", index);
743 return buffer;
746 /* Can we load a constant with a single instruction ? */
749 const_ok_for_mcore (HOST_WIDE_INT value)
751 if (value >= 0 && value <= 127)
752 return 1;
754 /* Try exact power of two. */
755 if (CONST_OK_FOR_M (value))
756 return 1;
758 /* Try exact power of two - 1. */
759 if (CONST_OK_FOR_N (value) && value != -1)
760 return 1;
762 return 0;
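/* By the tests above, values such as 100 (small positive), 0x1000 (an
   exact power of two) and 0xff (a power of two minus one) are all
   single-instruction constants, corresponding to the movi, bgeni and
   bmaski forms used by output_inline_const.  */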
765 /* Can we load a constant inline with up to 2 instructions ? */
768 mcore_const_ok_for_inline (HOST_WIDE_INT value)
770 HOST_WIDE_INT x, y;
772 return try_constant_tricks (value, & x, & y) > 0;
775 /* Are we loading the constant using a not ? */
778 mcore_const_trick_uses_not (HOST_WIDE_INT value)
780 HOST_WIDE_INT x, y;
782 return try_constant_tricks (value, & x, & y) == 2;
785 /* Try tricks to load a constant inline and return the trick number if
786 success (0 is non-inlinable).
788 0: not inlinable
789 1: single instruction (do the usual thing)
790 2: single insn followed by a 'not'
791 3: single insn followed by an addi
792 4: single insn followed by a subi
793 5: single insn followed by rsubi
794 6: single insn followed by bseti
795 7: single insn followed by bclri
796 8: single insn followed by rotli
797 9: single insn followed by lsli
798 10: single insn followed by ixh
799 11: single insn followed by ixw. */
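/* A worked example of the search below, using the sequences emitted by
   output_inline_const: 240 is not loadable directly, but 240 + 15 = 255
   = 2^8 - 1 is, so the routine returns trick 4 with X = 255 and Y = 15
   and the constant is materialised roughly as:

       bmaski  rN,8     // rN = 255
       subi    rN,15    // rN = 240

   where rN stands for whichever destination register the caller uses.  */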
801 static int
802 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
804 HOST_WIDE_INT i;
805 unsigned HOST_WIDE_INT bit, shf, rot;
807 if (const_ok_for_mcore (value))
808 return 1; /* Do the usual thing. */
810 if (! TARGET_HARDLIT)
811 return 0;
813 if (const_ok_for_mcore (~value))
815 *x = ~value;
816 return 2;
819 for (i = 1; i <= 32; i++)
821 if (const_ok_for_mcore (value - i))
823 *x = value - i;
824 *y = i;
826 return 3;
829 if (const_ok_for_mcore (value + i))
831 *x = value + i;
832 *y = i;
834 return 4;
838 bit = 0x80000000ULL;
840 for (i = 0; i <= 31; i++)
842 if (const_ok_for_mcore (i - value))
844 *x = i - value;
845 *y = i;
847 return 5;
850 if (const_ok_for_mcore (value & ~bit))
852 *y = bit;
853 *x = value & ~bit;
854 return 6;
857 if (const_ok_for_mcore (value | bit))
859 *y = ~bit;
860 *x = value | bit;
862 return 7;
865 bit >>= 1;
868 shf = value;
869 rot = value;
871 for (i = 1; i < 31; i++)
873 int c;
875 /* MCore has rotate left. */
876 c = rot << 31;
877 rot >>= 1;
878 rot &= 0x7FFFFFFF;
879 rot |= c; /* Simulate rotate. */
881 if (const_ok_for_mcore (rot))
883 *y = i;
884 *x = rot;
886 return 8;
889 if (shf & 1)
890 shf = 0; /* Can't use logical shift, low order bit is one. */
892 shf >>= 1;
894 if (shf != 0 && const_ok_for_mcore (shf))
896 *y = i;
897 *x = shf;
899 return 9;
903 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
905 *x = value / 3;
907 return 10;
910 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
912 *x = value / 5;
914 return 11;
917 return 0;
920 /* Check whether REG is dead at insn FIRST. This is done by searching ahead
921 for either the next use (i.e., reg is live), a death note, or a set of
922 reg. Don't just use dead_or_set_p() since reload does not always mark
923 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
924 can ignore subregs by extracting the actual register. BRC */
927 mcore_is_dead (rtx_insn *first, rtx reg)
929 rtx_insn *insn;
931 /* For mcore, subregs can't live independently of their parent regs. */
932 if (GET_CODE (reg) == SUBREG)
933 reg = SUBREG_REG (reg);
935 /* Dies immediately. */
936 if (dead_or_set_p (first, reg))
937 return 1;
939 /* Look for conclusive evidence of live/death, otherwise we have
940 to assume that it is live. */
941 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
943 if (JUMP_P (insn))
944 return 0; /* We lose track, assume it is alive. */
946 else if (CALL_P (insn))
948 /* Calls might use it for target or register parms. */
949 if (reg_referenced_p (reg, PATTERN (insn))
950 || find_reg_fusage (insn, USE, reg))
951 return 0;
952 else if (dead_or_set_p (insn, reg))
953 return 1;
955 else if (NONJUMP_INSN_P (insn))
957 if (reg_referenced_p (reg, PATTERN (insn)))
958 return 0;
959 else if (dead_or_set_p (insn, reg))
960 return 1;
964 /* No conclusive evidence either way, we cannot take the chance
965 that control flow hid the use from us -- "I'm not dead yet". */
966 return 0;
969 /* Count the number of ones in mask. */
972 mcore_num_ones (HOST_WIDE_INT mask)
974 /* A trick to count set bits recently posted on comp.compilers. */
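/* Each statement below folds adjacent bit-fields of MASK in parallel:
   first into sixteen 2-bit sums, then eight 4-bit sums, then four 8-bit
   sums; the final additions collapse those into a single count in the
   low byte, so e.g. a MASK of 0xf0f0f0f0 yields 16.  */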
975 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
976 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
977 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
978 mask = ((mask >> 8) + mask);
980 return (mask + (mask >> 16)) & 0xff;
983 /* Count the number of zeros in mask. */
986 mcore_num_zeros (HOST_WIDE_INT mask)
988 return 32 - mcore_num_ones (mask);
991 /* Determine byte being masked. */
994 mcore_byte_offset (unsigned int mask)
996 if (mask == 0x00ffffffL)
997 return 0;
998 else if (mask == 0xff00ffffL)
999 return 1;
1000 else if (mask == 0xffff00ffL)
1001 return 2;
1002 else if (mask == 0xffffff00L)
1003 return 3;
1005 return -1;
1008 /* Determine halfword being masked. */
1011 mcore_halfword_offset (unsigned int mask)
1013 if (mask == 0x0000ffffL)
1014 return 0;
1015 else if (mask == 0xffff0000L)
1016 return 1;
1018 return -1;
1021 /* Output a series of bseti's corresponding to mask. */
1023 const char *
1024 mcore_output_bseti (rtx dst, int mask)
1026 rtx out_operands[2];
1027 int bit;
1029 out_operands[0] = dst;
1031 for (bit = 0; bit < 32; bit++)
1033 if ((mask & 0x1) == 0x1)
1035 out_operands[1] = GEN_INT (bit);
1037 output_asm_insn ("bseti\t%0,%1", out_operands);
1039 mask >>= 1;
1042 return "";
1045 /* Output a series of bclri's corresponding to mask. */
1047 const char *
1048 mcore_output_bclri (rtx dst, int mask)
1050 rtx out_operands[2];
1051 int bit;
1053 out_operands[0] = dst;
1055 for (bit = 0; bit < 32; bit++)
1057 if ((mask & 0x1) == 0x0)
1059 out_operands[1] = GEN_INT (bit);
1061 output_asm_insn ("bclri\t%0,%1", out_operands);
1064 mask >>= 1;
1067 return "";
1070 /* Output a conditional move of two constants that are +/- 1 within each
1071 other. See the "movtK" patterns in mcore.md. I'm not sure this is
1072 really worth the effort. */
1074 const char *
1075 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1077 HOST_WIDE_INT load_value;
1078 HOST_WIDE_INT adjust_value;
1079 rtx out_operands[4];
1081 out_operands[0] = operands[0];
1083 /* Check to see which constant is loadable. */
1084 if (const_ok_for_mcore (INTVAL (operands[1])))
1086 out_operands[1] = operands[1];
1087 out_operands[2] = operands[2];
1089 else if (const_ok_for_mcore (INTVAL (operands[2])))
1091 out_operands[1] = operands[2];
1092 out_operands[2] = operands[1];
1094 /* Complement test since constants are swapped. */
1095 cmp_t = (cmp_t == 0);
1097 load_value = INTVAL (out_operands[1]);
1098 adjust_value = INTVAL (out_operands[2]);
1100 /* First output the test if folded into the pattern. */
1102 if (test)
1103 output_asm_insn (test, operands);
1105 /* Load the constant - for now, only support constants that can be
1106 generated with a single instruction. maybe add general inlinable
1107 constants later (this will increase the # of patterns since the
1108 instruction sequence has a different length attribute). */
1109 if (load_value >= 0 && load_value <= 127)
1110 output_asm_insn ("movi\t%0,%1", out_operands);
1111 else if (CONST_OK_FOR_M (load_value))
1112 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1113 else if (CONST_OK_FOR_N (load_value))
1114 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1116 /* Output the constant adjustment. */
1117 if (load_value > adjust_value)
1119 if (cmp_t)
1120 output_asm_insn ("decf\t%0", out_operands);
1121 else
1122 output_asm_insn ("dect\t%0", out_operands);
1124 else
1126 if (cmp_t)
1127 output_asm_insn ("incf\t%0", out_operands);
1128 else
1129 output_asm_insn ("inct\t%0", out_operands);
1132 return "";
1135 /* Outputs the peephole for moving a constant that gets not'ed followed
1136 by an and (i.e. combine the not and the and into andn). BRC */
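/* For instance, an AND with 0xffffff00, whose complement 0xff is a
   loadable bmaski constant, comes out roughly as:

       bmaski  rt,8      // rt = 0x000000ff
       andn    rX,rt     // rX &= ~rt

   with rt standing for the scratch register in operands[0] and rX for
   the value being masked in operands[2].  */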
1138 const char *
1139 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1141 HOST_WIDE_INT x, y;
1142 rtx out_operands[3];
1143 const char * load_op;
1144 char buf[256];
1145 int trick_no;
1147 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1148 gcc_assert (trick_no == 2);
1150 out_operands[0] = operands[0];
1151 out_operands[1] = GEN_INT (x);
1152 out_operands[2] = operands[2];
1154 if (x >= 0 && x <= 127)
1155 load_op = "movi\t%0,%1";
1157 /* Try exact power of two. */
1158 else if (CONST_OK_FOR_M (x))
1159 load_op = "bgeni\t%0,%P1";
1161 /* Try exact power of two - 1. */
1162 else if (CONST_OK_FOR_N (x))
1163 load_op = "bmaski\t%0,%N1";
1165 else
1167 load_op = "BADMOVI-andn\t%0, %1";
1168 gcc_unreachable ();
1171 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1172 output_asm_insn (buf, out_operands);
1174 return "";
1177 /* Output an inline constant. */
1179 static const char *
1180 output_inline_const (machine_mode mode, rtx operands[])
1182 HOST_WIDE_INT x = 0, y = 0;
1183 int trick_no;
1184 rtx out_operands[3];
1185 char buf[256];
1186 char load_op[128];
1187 const char *dst_fmt;
1188 HOST_WIDE_INT value;
1190 value = INTVAL (operands[1]);
1192 trick_no = try_constant_tricks (value, &x, &y);
1193 /* lrw's are handled separately: Large inlinable constants never get
1194 turned into lrw's. Our caller uses try_constant_tricks to back
1195 off to an lrw rather than calling this routine. */
1196 gcc_assert (trick_no != 0);
1198 if (trick_no == 1)
1199 x = value;
1201 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1202 out_operands[0] = operands[0];
1203 out_operands[1] = GEN_INT (x);
1205 if (trick_no > 2)
1206 out_operands[2] = GEN_INT (y);
1208 /* Select dst format based on mode. */
1209 if (mode == DImode && (! TARGET_LITTLE_END))
1210 dst_fmt = "%R0";
1211 else
1212 dst_fmt = "%0";
1214 if (x >= 0 && x <= 127)
1215 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1217 /* Try exact power of two. */
1218 else if (CONST_OK_FOR_M (x))
1219 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1221 /* Try exact power of two - 1. */
1222 else if (CONST_OK_FOR_N (x))
1223 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1225 else
1227 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1228 gcc_unreachable ();
1231 switch (trick_no)
1233 case 1:
1234 strcpy (buf, load_op);
1235 break;
1236 case 2: /* not */
1237 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1238 break;
1239 case 3: /* add */
1240 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1241 break;
1242 case 4: /* sub */
1243 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1244 break;
1245 case 5: /* rsub */
1246 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1247 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1248 break;
1249 case 6: /* bseti */
1250 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1251 break;
1252 case 7: /* bclr */
1253 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1254 break;
1255 case 8: /* rotl */
1256 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1257 break;
1258 case 9: /* lsl */
1259 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1260 break;
1261 case 10: /* ixh */
1262 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1263 break;
1264 case 11: /* ixw */
1265 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1266 break;
1267 default:
1268 return "";
1271 output_asm_insn (buf, out_operands);
1273 return "";
1276 /* Output a move of a word or less value. */
1278 const char *
1279 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1280 machine_mode mode ATTRIBUTE_UNUSED)
1282 rtx dst = operands[0];
1283 rtx src = operands[1];
1285 if (GET_CODE (dst) == REG)
1287 if (GET_CODE (src) == REG)
1289 if (REGNO (src) == CC_REG) /* r-c */
1290 return "mvc\t%0";
1291 else
1292 return "mov\t%0,%1"; /* r-r*/
1294 else if (GET_CODE (src) == MEM)
1296 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1297 return "lrw\t%0,[%1]"; /* a-R */
1298 else
1299 switch (GET_MODE (src)) /* r-m */
1301 case E_SImode:
1302 return "ldw\t%0,%1";
1303 case E_HImode:
1304 return "ld.h\t%0,%1";
1305 case E_QImode:
1306 return "ld.b\t%0,%1";
1307 default:
1308 gcc_unreachable ();
1311 else if (GET_CODE (src) == CONST_INT)
1313 HOST_WIDE_INT x, y;
1315 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1316 return "movi\t%0,%1";
1317 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1318 return "bgeni\t%0,%P1\t// %1 %x1";
1319 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1320 return "bmaski\t%0,%N1\t// %1 %x1";
1321 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1322 return output_inline_const (SImode, operands); /* 1-2 insns */
1323 else
1324 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1326 else
1327 return "lrw\t%0, %1"; /* Into the literal pool. */
1329 else if (GET_CODE (dst) == MEM) /* m-r */
1330 switch (GET_MODE (dst))
1332 case E_SImode:
1333 return "stw\t%1,%0";
1334 case E_HImode:
1335 return "st.h\t%1,%0";
1336 case E_QImode:
1337 return "st.b\t%1,%0";
1338 default:
1339 gcc_unreachable ();
1342 gcc_unreachable ();
1345 /* Return a sequence of instructions to perform DI or DF move.
1346 Since the MCORE cannot move a DI or DF in one instruction, we have
1347 to take care when we see overlapping source and dest registers. */
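/* For instance, copying the pair r3/r4 into r4/r5 has to emit
   mov r5,r4 before mov r4,r3, so that the overlapping register r4 is
   read before it is clobbered; that is the srcreg + 1 == dstreg case
   below.  */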
1349 const char *
1350 mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1352 rtx dst = operands[0];
1353 rtx src = operands[1];
1355 if (GET_CODE (dst) == REG)
1357 if (GET_CODE (src) == REG)
1359 int dstreg = REGNO (dst);
1360 int srcreg = REGNO (src);
1362 /* Ensure the second source is not overwritten. */
1363 if (srcreg + 1 == dstreg)
1364 return "mov %R0,%R1\n\tmov %0,%1";
1365 else
1366 return "mov %0,%1\n\tmov %R0,%R1";
1368 else if (GET_CODE (src) == MEM)
1370 rtx memexp = XEXP (src, 0);
1371 int dstreg = REGNO (dst);
1372 int basereg = -1;
1374 if (GET_CODE (memexp) == LABEL_REF)
1375 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1376 else if (GET_CODE (memexp) == REG)
1377 basereg = REGNO (memexp);
1378 else if (GET_CODE (memexp) == PLUS)
1380 if (GET_CODE (XEXP (memexp, 0)) == REG)
1381 basereg = REGNO (XEXP (memexp, 0));
1382 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1383 basereg = REGNO (XEXP (memexp, 1));
1384 else
1385 gcc_unreachable ();
1387 else
1388 gcc_unreachable ();
1390 /* ??? length attribute is wrong here. */
1391 if (dstreg == basereg)
1393 /* Just load them in reverse order. */
1394 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1396 /* XXX: alternative: move basereg to basereg+1
1397 and then fall through. */
1399 else
1400 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1402 else if (GET_CODE (src) == CONST_INT)
1404 if (TARGET_LITTLE_END)
1406 if (CONST_OK_FOR_I (INTVAL (src)))
1407 output_asm_insn ("movi %0,%1", operands);
1408 else if (CONST_OK_FOR_M (INTVAL (src)))
1409 output_asm_insn ("bgeni %0,%P1", operands);
1410 else if (CONST_OK_FOR_N (INTVAL (src)))
1411 output_asm_insn ("bmaski %0,%N1", operands);
1412 else
1413 gcc_unreachable ();
1415 if (INTVAL (src) < 0)
1416 return "bmaski %R0,32";
1417 else
1418 return "movi %R0,0";
1420 else
1422 if (CONST_OK_FOR_I (INTVAL (src)))
1423 output_asm_insn ("movi %R0,%1", operands);
1424 else if (CONST_OK_FOR_M (INTVAL (src)))
1425 output_asm_insn ("bgeni %R0,%P1", operands);
1426 else if (CONST_OK_FOR_N (INTVAL (src)))
1427 output_asm_insn ("bmaski %R0,%N1", operands);
1428 else
1429 gcc_unreachable ();
1431 if (INTVAL (src) < 0)
1432 return "bmaski %0,32";
1433 else
1434 return "movi %0,0";
1437 else
1438 gcc_unreachable ();
1440 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1441 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1442 else
1443 gcc_unreachable ();
1446 /* Predicates used by the templates. */
1449 mcore_arith_S_operand (rtx op)
1451 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1452 return 1;
1454 return 0;
1457 /* Expand insert bit field. BRC */
1460 mcore_expand_insv (rtx operands[])
1462 int width = INTVAL (operands[1]);
1463 int posn = INTVAL (operands[2]);
1464 int mask;
1465 rtx mreg, sreg, ereg;
1467 /* To get width 1 insv, the test in store_bit_field() (expmed.cc, line 191)
1468 for width==1 must be removed. Look around line 368. This is something
1469 we really want the md part to do. */
1470 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1472 /* Do directly with bseti or bclri. */
1473 /* RBE: 2/97 consider only low bit of constant. */
1474 if ((INTVAL (operands[3]) & 1) == 0)
1476 mask = ~(1 << posn);
1477 emit_insn (gen_rtx_SET (operands[0],
1478 gen_rtx_AND (SImode, operands[0],
1479 GEN_INT (mask))));
1481 else
1483 mask = 1 << posn;
1484 emit_insn (gen_rtx_SET (operands[0],
1485 gen_rtx_IOR (SImode, operands[0],
1486 GEN_INT (mask))));
1489 return 1;
1492 /* Look at some bit-field placements that we aren't interested
1493 in handling ourselves, unless specifically directed to do so. */
1494 if (! TARGET_W_FIELD)
1495 return 0; /* Generally, give up about now. */
1497 if (width == 8 && posn % 8 == 0)
1498 /* Byte sized and aligned; let caller break it up. */
1499 return 0;
1501 if (width == 16 && posn % 16 == 0)
1502 /* Short sized and aligned; let caller break it up. */
1503 return 0;
1505 /* The general case - we can do this a little bit better than what the
1506 machine independent part tries. This will get rid of all the subregs
1507 that mess up constant folding in combine when working with relaxed
1508 immediates. */
1510 /* If setting the entire field, do it directly. */
1511 if (GET_CODE (operands[3]) == CONST_INT
1512 && INTVAL (operands[3]) == ((1 << width) - 1))
1514 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1515 emit_insn (gen_rtx_SET (operands[0],
1516 gen_rtx_IOR (SImode, operands[0], mreg)));
1517 return 1;
1520 /* Generate the clear mask. */
1521 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1523 /* Clear the field, to overlay it later with the source. */
1524 emit_insn (gen_rtx_SET (operands[0],
1525 gen_rtx_AND (SImode, operands[0], mreg)));
1527 /* If the source is constant 0, we've nothing to add back. */
1528 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1529 return 1;
1531 /* XXX: Should we worry about more games with constant values?
1532 We've covered the high profile: set/clear single-bit and many-bit
1533 fields. How often do we see "arbitrary bit pattern" constants? */
1534 sreg = copy_to_mode_reg (SImode, operands[3]);
1536 /* Extract src as same width as dst (needed for signed values). We
1537 always have to do this since we widen everything to SImode.
1538 We don't have to mask if we're shifting this up against the
1539 MSB of the register (e.g., the shift will push out any hi-order
1540 bits). */
1541 if (width + posn != (int) GET_MODE_BITSIZE (SImode))
1543 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1544 emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
1547 /* Insert source value in dest. */
1548 if (posn != 0)
1549 emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
1550 GEN_INT (posn))));
1552 emit_insn (gen_rtx_SET (operands[0],
1553 gen_rtx_IOR (SImode, operands[0], sreg)));
1555 return 1;
1558 /* ??? Block move stuff stolen from m88k. This code has not been
1559 verified for correctness. */
1561 /* Emit code to perform a block move. Choose the best method.
1563 OPERANDS[0] is the destination.
1564 OPERANDS[1] is the source.
1565 OPERANDS[2] is the size.
1566 OPERANDS[3] is the alignment safe to use. */
1568 /* Emit code to perform a block move with an offset sequence of ldw/st
1569 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1570 known constants. DEST and SRC are registers. OFFSET is the known
1571 starting point for the output pattern. */
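/* Concretely, for an 8-byte word-aligned copy the loop below produces a
   software-pipelined sequence along the lines of:

       ldw  t0,(src,0)
       ldw  t1,(src,4)
       stw  t0,(dst,0)
       stw  t1,(dst,4)

   where t0/t1 stand for the two temporaries in TEMP[].  */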
1573 static const machine_mode mode_from_align[] =
1575 VOIDmode, QImode, HImode, VOIDmode, SImode,
1578 static void
1579 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1581 rtx temp[2];
1582 machine_mode mode[2];
1583 int amount[2];
1584 bool active[2];
1585 int phase = 0;
1586 int next;
1587 int offset_ld = 0;
1588 int offset_st = 0;
1589 rtx x;
1591 x = XEXP (dst_mem, 0);
1592 if (!REG_P (x))
1594 x = force_reg (Pmode, x);
1595 dst_mem = replace_equiv_address (dst_mem, x);
1598 x = XEXP (src_mem, 0);
1599 if (!REG_P (x))
1601 x = force_reg (Pmode, x);
1602 src_mem = replace_equiv_address (src_mem, x);
1605 active[0] = active[1] = false;
1609 next = phase;
1610 phase ^= 1;
1612 if (size > 0)
1614 int next_amount;
1616 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1617 next_amount = MIN (next_amount, align);
1619 amount[next] = next_amount;
1620 mode[next] = mode_from_align[next_amount];
1621 temp[next] = gen_reg_rtx (mode[next]);
1623 x = adjust_address (src_mem, mode[next], offset_ld);
1624 emit_insn (gen_rtx_SET (temp[next], x));
1626 offset_ld += next_amount;
1627 size -= next_amount;
1628 active[next] = true;
1631 if (active[phase])
1633 active[phase] = false;
1635 x = adjust_address (dst_mem, mode[phase], offset_st);
1636 emit_insn (gen_rtx_SET (x, temp[phase]));
1638 offset_st += amount[phase];
1641 while (active[next]);
1644 bool
1645 mcore_expand_block_move (rtx *operands)
1647 HOST_WIDE_INT align, bytes, max;
1649 if (GET_CODE (operands[2]) != CONST_INT)
1650 return false;
1652 bytes = INTVAL (operands[2]);
1653 align = INTVAL (operands[3]);
1655 if (bytes <= 0)
1656 return false;
1657 if (align > 4)
1658 align = 4;
1660 switch (align)
1662 case 4:
1663 if (bytes & 1)
1664 max = 4*4;
1665 else if (bytes & 3)
1666 max = 8*4;
1667 else
1668 max = 16*4;
1669 break;
1670 case 2:
1671 max = 4*2;
1672 break;
1673 case 1:
1674 max = 4*1;
1675 break;
1676 default:
1677 gcc_unreachable ();
1680 if (bytes <= max)
1682 block_move_sequence (operands[0], operands[1], bytes, align);
1683 return true;
1686 return false;
1690 /* Code to generate prologue and epilogue sequences. */
1691 static int number_of_regs_before_varargs;
1693 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1694 for a varargs function. */
1695 static int current_function_anonymous_args;
1697 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1698 #define STORE_REACH (64) /* Maximum displace of word store + 4. */
1699 #define ADDI_REACH (32) /* Maximum addi operand. */
1701 static void
1702 layout_mcore_frame (struct mcore_frame * infp)
1704 int n;
1705 unsigned int i;
1706 int nbytes;
1707 int regarg;
1708 int localregarg;
1709 int outbounds;
1710 unsigned int growths;
1711 int step;
1713 /* Might have to spill bytes to re-assemble a big argument that
1714 was passed partially in registers and partially on the stack. */
1715 nbytes = crtl->args.pretend_args_size;
1717 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1718 if (current_function_anonymous_args)
1719 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1721 infp->arg_size = nbytes;
1723 /* How much space to save non-volatile registers we stomp. */
1724 infp->reg_mask = calc_live_regs (& n);
1725 infp->reg_size = n * 4;
1727 /* And the rest of it... locals and space for overflowed outbounds. */
1728 infp->local_size = get_frame_size ();
1729 infp->outbound_size = crtl->outgoing_args_size;
1731 /* Make sure we have a whole number of words for the locals. */
1732 if (infp->local_size % STACK_BYTES)
1733 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1735 /* Only thing we know we have to pad is the outbound space, since
1736 we've aligned our locals assuming that base of locals is aligned. */
1737 infp->pad_local = 0;
1738 infp->pad_reg = 0;
1739 infp->pad_outbound = 0;
1740 if (infp->outbound_size % STACK_BYTES)
1741 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1743 /* Now we see how we want to stage the prologue so that it does
1744 the most appropriate stack growth and register saves to either:
1745 (1) run fast,
1746 (2) reduce instruction space, or
1747 (3) reduce stack space. */
1748 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1749 infp->growth[i] = 0;
1751 regarg = infp->reg_size + infp->arg_size;
1752 localregarg = infp->local_size + regarg;
1753 outbounds = infp->outbound_size + infp->pad_outbound;
1754 growths = 0;
1756 /* XXX: Consider one where we consider localregarg + outbound too! */
1758 /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
1759 use stw's with offsets and buy the frame in one shot.
1760 if (localregarg <= ADDI_REACH
1761 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1763 /* Make sure we'll be aligned. */
1764 if (localregarg % STACK_BYTES)
1765 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1767 step = localregarg + infp->pad_reg;
1768 infp->reg_offset = infp->local_size;
1770 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1772 step += outbounds;
1773 infp->reg_offset += outbounds;
1774 outbounds = 0;
1777 infp->arg_offset = step - 4;
1778 infp->growth[growths++] = step;
1779 infp->reg_growth = growths;
1780 infp->local_growth = growths;
1782 /* If we haven't already folded it in. */
1783 if (outbounds)
1784 infp->growth[growths++] = outbounds;
1786 goto finish;
1789 /* Frame can't be done with a single subi, but can be done with 2
1790 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1791 shift some of the stack purchase into the first subi, so both are
1792 single instructions. */
1793 if (localregarg <= STORE_REACH
1794 && (infp->local_size > ADDI_REACH)
1795 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1797 int all;
1799 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1800 if (localregarg % STACK_BYTES)
1801 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1803 all = localregarg + infp->pad_reg + infp->pad_local;
1804 step = ADDI_REACH; /* As much up front as we can. */
1805 if (step > all)
1806 step = all;
1808 /* XXX: Consider whether step will still be aligned; we believe so. */
1809 infp->arg_offset = step - 4;
1810 infp->growth[growths++] = step;
1811 infp->reg_growth = growths;
1812 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1813 all -= step;
1815 /* Can we fold in any space required for outbounds? */
1816 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1818 all += outbounds;
1819 outbounds = 0;
1822 /* Get the rest of the locals in place. */
1823 step = all;
1824 infp->growth[growths++] = step;
1825 infp->local_growth = growths;
1826 all -= step;
1828 gcc_assert (all == 0);
1830 /* Finish off if we need to do so. */
1831 if (outbounds)
1832 infp->growth[growths++] = outbounds;
1834 goto finish;
1837 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1838 Then we buy the rest of the frame in 1 or 2 steps depending on
1839 whether we need a frame pointer. */
1840 if ((regarg % STACK_BYTES) == 0)
1842 infp->growth[growths++] = regarg;
1843 infp->reg_growth = growths;
1844 infp->arg_offset = regarg - 4;
1845 infp->reg_offset = 0;
1847 if (infp->local_size % STACK_BYTES)
1848 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1850 step = infp->local_size + infp->pad_local;
1852 if (!frame_pointer_needed)
1854 step += outbounds;
1855 outbounds = 0;
1858 infp->growth[growths++] = step;
1859 infp->local_growth = growths;
1861 /* If there's any left to be done. */
1862 if (outbounds)
1863 infp->growth[growths++] = outbounds;
1865 goto finish;
1868 /* XXX: optimizations that we'll want to play with....
1869 -- regarg is not aligned, but it's a small number of registers;
1870 use some of localsize so that regarg is aligned and then
1871 save the registers. */
1873 /* Simple encoding; plods down the stack buying the pieces as it goes.
1874 -- does not optimize space consumption.
1875 -- does not attempt to optimize instruction counts.
1876 -- but it is safe for all alignments. */
1877 if (regarg % STACK_BYTES != 0)
1878 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1880 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1881 infp->reg_growth = growths;
1882 infp->arg_offset = infp->growth[0] - 4;
1883 infp->reg_offset = 0;
1885 if (frame_pointer_needed)
1887 if (infp->local_size % STACK_BYTES != 0)
1888 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1890 infp->growth[growths++] = infp->local_size + infp->pad_local;
1891 infp->local_growth = growths;
1893 infp->growth[growths++] = outbounds;
1895 else
1897 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1898 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1900 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1901 infp->local_growth = growths;
1904 /* Anything else that we've forgotten? Plus a few consistency checks. */
1905 finish:
1906 gcc_assert (infp->reg_offset >= 0);
1907 gcc_assert (growths <= MAX_STACK_GROWS);
1909 for (i = 0; i < growths; i++)
1910 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1913 /* Define the offset between two registers, one to be eliminated, and
1914 the other its replacement, at the start of a routine. */
1917 mcore_initial_elimination_offset (int from, int to)
1919 int above_frame;
1920 int below_frame;
1921 struct mcore_frame fi;
1923 layout_mcore_frame (& fi);
1925 /* fp to ap */
1926 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1927 /* sp to fp */
1928 below_frame = fi.outbound_size + fi.pad_outbound;
1930 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1931 return above_frame;
1933 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1934 return above_frame + below_frame;
1936 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1937 return below_frame;
1939 gcc_unreachable ();
1942 /* Keep track of some information about varargs for the prolog. */
1944 static void
1945 mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1946 const function_arg_info &arg,
1947 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1948 int second_time ATTRIBUTE_UNUSED)
1950 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1952 current_function_anonymous_args = 1;
1954 /* We need to know how many argument registers are used before
1955 the varargs start, so that we can push the remaining argument
1956 registers during the prologue. */
1957 number_of_regs_before_varargs = *args_so_far;
1958 if (!TYPE_NO_NAMED_ARGS_STDARG_P (TREE_TYPE (current_function_decl)))
1959 number_of_regs_before_varargs += mcore_num_arg_regs (arg.mode, arg.type);
1961 /* There is a bug somewhere in the arg handling code.
1962 Until I can find it, this workaround always pushes the
1963 last named argument onto the stack. */
1964 number_of_regs_before_varargs = *args_so_far;
1966 /* The last named argument may be split between argument registers
1967 and the stack. Allow for this here. */
1968 if (number_of_regs_before_varargs > NPARM_REGS)
1969 number_of_regs_before_varargs = NPARM_REGS;
1972 void
1973 mcore_expand_prolog (void)
1975 struct mcore_frame fi;
1976 int space_allocated = 0;
1977 int growth = 0;
1979 /* Find out what we're doing. */
1980 layout_mcore_frame (&fi);
1982 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1983 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1985 if (TARGET_CG_DATA)
1987 /* Emit a symbol for this routine's frame size. */
1988 rtx x;
1990 x = DECL_RTL (current_function_decl);
1992 gcc_assert (GET_CODE (x) == MEM);
1994 x = XEXP (x, 0);
1996 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1998 free (mcore_current_function_name);
2000 mcore_current_function_name = xstrdup (XSTR (x, 0));
2002 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2004 if (cfun->calls_alloca)
2005 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2007 /* 970425: RBE:
2008 We're looking at how the 8byte alignment affects stack layout
2009 and where we had to pad things. This emits information we can
2010 extract which tells us about frame sizes and the like. */
2011 fprintf (asm_out_file,
2012 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2013 mcore_current_function_name,
2014 fi.arg_size, fi.reg_size, fi.reg_mask,
2015 fi.local_size, fi.outbound_size,
2016 frame_pointer_needed);
2019 if (mcore_naked_function_p ())
2020 return;
2022 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2023 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2025 /* If we have a parameter passed partially in regs and partially in memory,
2026 the registers will have been stored to memory already in function.cc. So
2027 we only need to do something here for varargs functions. */
2028 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2030 int offset;
2031 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2032 int remaining = fi.arg_size;
2034 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2036 emit_insn (gen_movsi
2037 (gen_rtx_MEM (SImode,
2038 plus_constant (Pmode, stack_pointer_rtx,
2039 offset)),
2040 gen_rtx_REG (SImode, rn)));
2044 /* Do we need another stack adjustment before we do the register saves? */
2045 if (growth < fi.reg_growth)
2046 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2048 if (fi.reg_size != 0)
2050 int i;
2051 int offs = fi.reg_offset;
2053 for (i = 15; i >= 0; i--)
2055 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2057 int first_reg = 15;
2059 while (fi.reg_mask & (1 << first_reg))
2060 first_reg--;
2061 first_reg++;
2063 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2064 gen_rtx_REG (SImode, first_reg),
2065 GEN_INT (16 - first_reg)));
2067 i -= (15 - first_reg);
2068 offs += (16 - first_reg) * 4;
2070 else if (fi.reg_mask & (1 << i))
2072 emit_insn (gen_movsi
2073 (gen_rtx_MEM (SImode,
2074 plus_constant (Pmode, stack_pointer_rtx,
2075 offs)),
2076 gen_rtx_REG (SImode, i)));
2077 offs += 4;
2082 /* Figure the locals + outbounds. */
2083 if (frame_pointer_needed)
2085 /* If we haven't already purchased to 'fp'. */
2086 if (growth < fi.local_growth)
2087 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2089 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2091 /* ... and then go any remaining distance for outbounds, etc. */
2092 if (fi.growth[growth])
2093 output_stack_adjust (-1, fi.growth[growth++]);
2095 else
2097 if (growth < fi.local_growth)
2098 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2099 if (fi.growth[growth])
2100 output_stack_adjust (-1, fi.growth[growth++]);
2104 void
2105 mcore_expand_epilog (void)
2107 struct mcore_frame fi;
2108 int i;
2109 int offs;
2110 int growth = MAX_STACK_GROWS - 1;
2113 /* Find out what we're doing. */
2114 layout_mcore_frame(&fi);
2116 if (mcore_naked_function_p ())
2117 return;
2119 /* If we had a frame pointer, restore the sp from that. */
2120 if (frame_pointer_needed)
2122 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2123 growth = fi.local_growth - 1;
2125 else
2127 /* XXX: while loop should accumulate and do a single sell. */
2128 while (growth >= fi.local_growth)
2130 if (fi.growth[growth] != 0)
2131 output_stack_adjust (1, fi.growth[growth]);
2132 growth--;
2136 /* Make sure we've shrunk stack back to the point where the registers
2137 were laid down. This is typically 0/1 iterations. Then pull the
2138 register save information back off the stack. */
2139 while (growth >= fi.reg_growth)
2140 output_stack_adjust ( 1, fi.growth[growth--]);
2142 offs = fi.reg_offset;
2144 for (i = 15; i >= 0; i--)
2146 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2148 int first_reg;
2150 /* Find the starting register. */
2151 first_reg = 15;
2153 while (fi.reg_mask & (1 << first_reg))
2154 first_reg--;
2156 first_reg++;
2158 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2159 gen_rtx_MEM (SImode, stack_pointer_rtx),
2160 GEN_INT (16 - first_reg)));
2162 i -= (15 - first_reg);
2163 offs += (16 - first_reg) * 4;
2165 else if (fi.reg_mask & (1 << i))
2167 emit_insn (gen_movsi
2168 (gen_rtx_REG (SImode, i),
2169 gen_rtx_MEM (SImode,
2170 plus_constant (Pmode, stack_pointer_rtx,
2171 offs))));
2172 offs += 4;
2176 /* Give back anything else. */
2177 /* XXX: Should accumulate total and then give it back. */
2178 while (growth >= 0)
2179 output_stack_adjust ( 1, fi.growth[growth--]);
2182 /* This code is borrowed from the SH port. */
2184 /* The MCORE cannot load a large constant into a register; constants have to
2185 come from a pc relative load. The reference of a pc relative load
2186 instruction must be less than 1k in front of the instruction. This
2187 means that we often have to dump a constant inside a function, and
2188 generate code to branch around it.
2190 It is important to minimize this, since the branches will slow things
2191 down and increase code size.
2193 Worst case code looks like:
2195 lrw L1,r0
2196 br L2
2197 align
2198 L1: .long value
2202 lrw L3,r0
2203 br L4
2204 align
2205 L3: .long value
2209 We fix this by performing a scan before scheduling, which notices which
2210 instructions need to have their operands fetched from the constant table
2211 and builds the table.
2213 The algorithm is:
2215 scan, find an instruction which needs a pcrel move. Look forward, find the
2216 last barrier which is within MAX_COUNT bytes of the requirement.
2217 If there isn't one, make one. Process all the instructions between
2218 the find and the barrier.
2220 In the above example, we can tell that L3 is within 1k of L1, so
2221 the first move can be shrunk from the 2 insn+constant sequence into
2222 just 1 insn, and the constant moved to L3 to make:
2224 lrw L1,r0
2226 lrw L3,r0
2227 br L4
2228 align
2229 L3:.long value
2230 L4:.long value
2232 Then the second move becomes the target for the shortening process. */
2234 typedef struct
2236 rtx value; /* Value in table. */
2237 rtx label; /* Label of value. */
2238 } pool_node;
2240 /* The maximum number of constants that can fit into one pool, since
2241 the pc relative range is 0...1020 bytes and constants are at least 4
2242 bytes long. We subtract 4 from the range to allow for the case where
2243 we need to add a branch/align before the constant pool. */
2245 #define MAX_COUNT 1016
2246 #define MAX_POOL_SIZE (MAX_COUNT/4)
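/* For illustration: with the 4-byte minimum constant size noted above, this works out to at most (1020 - 4) / 4 == 254 entries per pool. */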
2247 static pool_node pool_vector[MAX_POOL_SIZE];
2248 static int pool_size;
2250 /* Dump out any constants accumulated in the final pass. These
2251 will only be labels. */
2253 const char *
2254 mcore_output_jump_label_table (void)
2256 int i;
2258 if (pool_size)
2260 fprintf (asm_out_file, "\t.align 2\n");
2262 for (i = 0; i < pool_size; i++)
2264 pool_node * p = pool_vector + i;
2266 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2268 output_asm_insn (".long %0", &p->value);
2271 pool_size = 0;
2274 return "";
2277 /* Check whether insn is a candidate for a conditional. */
2279 static cond_type
2280 is_cond_candidate (rtx insn)
2282 /* The only things we conditionalize are those that can be directly
2283 changed into a conditional. Only bother with SImode items. If
2284 we wanted to be a little more aggressive, we could also do other
2285 modes such as DImode with reg-reg move or load 0. */
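/* Summarizing the checks below: a SImode reg-to-reg move becomes COND_MOV_INSN, a load of constant 0 becomes COND_CLR_INSN, an add of +1 becomes COND_INC_INSN, a subtract of 1 (or add of -1) becomes COND_DEC_INSN, and a plain jump to a label becomes COND_BRANCH_INSN. */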
2286 if (NONJUMP_INSN_P (insn))
2288 rtx pat = PATTERN (insn);
2289 rtx src, dst;
2291 if (GET_CODE (pat) != SET)
2292 return COND_NO;
2294 dst = XEXP (pat, 0);
2296 if ((GET_CODE (dst) != REG &&
2297 GET_CODE (dst) != SUBREG) ||
2298 GET_MODE (dst) != SImode)
2299 return COND_NO;
2301 src = XEXP (pat, 1);
2303 if ((GET_CODE (src) == REG ||
2304 (GET_CODE (src) == SUBREG &&
2305 GET_CODE (SUBREG_REG (src)) == REG)) &&
2306 GET_MODE (src) == SImode)
2307 return COND_MOV_INSN;
2308 else if (GET_CODE (src) == CONST_INT &&
2309 INTVAL (src) == 0)
2310 return COND_CLR_INSN;
2311 else if (GET_CODE (src) == PLUS &&
2312 (GET_CODE (XEXP (src, 0)) == REG ||
2313 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2314 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2315 GET_MODE (XEXP (src, 0)) == SImode &&
2316 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2317 INTVAL (XEXP (src, 1)) == 1)
2318 return COND_INC_INSN;
2319 else if (((GET_CODE (src) == MINUS &&
2320 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2321 INTVAL (XEXP (src, 1)) == 1) ||
2322 (GET_CODE (src) == PLUS &&
2323 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2324 INTVAL (XEXP (src, 1)) == -1)) &&
2325 (GET_CODE (XEXP (src, 0)) == REG ||
2326 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2327 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2328 GET_MODE (XEXP (src, 0)) == SImode)
2329 return COND_DEC_INSN;
2331 /* Some insns that we don't bother with:
2332 (set (rx:DI) (ry:DI))
2333 (set (rx:DI) (const_int 0))
2337 else if (JUMP_P (insn)
2338 && GET_CODE (PATTERN (insn)) == SET
2339 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2340 return COND_BRANCH_INSN;
2342 return COND_NO;
2345 /* Emit a conditional version of insn and replace the old insn with the
2346 new one. Return the new insn if emitted. */
2348 static rtx_insn *
2349 emit_new_cond_insn (rtx_insn *insn, int cond)
2351 rtx c_insn = 0;
2352 rtx pat, dst, src;
2353 cond_type num;
2355 if ((num = is_cond_candidate (insn)) == COND_NO)
2356 return NULL;
2358 pat = PATTERN (insn);
2360 if (NONJUMP_INSN_P (insn))
2362 dst = SET_DEST (pat);
2363 src = SET_SRC (pat);
2365 else
2367 dst = JUMP_LABEL (insn);
2368 src = NULL_RTX;
2371 switch (num)
2373 case COND_MOV_INSN:
2374 case COND_CLR_INSN:
2375 if (cond)
2376 c_insn = gen_movt0 (dst, src, dst);
2377 else
2378 c_insn = gen_movt0 (dst, dst, src);
2379 break;
2381 case COND_INC_INSN:
2382 if (cond)
2383 c_insn = gen_incscc (dst, dst);
2384 else
2385 c_insn = gen_incscc_false (dst, dst);
2386 break;
2388 case COND_DEC_INSN:
2389 if (cond)
2390 c_insn = gen_decscc (dst, dst);
2391 else
2392 c_insn = gen_decscc_false (dst, dst);
2393 break;
2395 case COND_BRANCH_INSN:
2396 if (cond)
2397 c_insn = gen_branch_true (dst);
2398 else
2399 c_insn = gen_branch_false (dst);
2400 break;
2402 default:
2403 return NULL;
2406 /* Only copy the notes if they exist. */
2407 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2409 /* We really don't need to bother with the notes and links at this
2410 point, but go ahead and save the notes. This will help is_dead()
2411 when applying peepholes (links don't matter since they are not
2412 used any more beyond this point for the mcore). */
2413 REG_NOTES (c_insn) = REG_NOTES (insn);
2416 if (num == COND_BRANCH_INSN)
2418 /* For jumps, we need to be a little bit careful and emit the new jump
2419 before the old one and to update the use count for the target label.
2420 This way, the barrier following the old (uncond) jump will get
2421 deleted, but the label won't. */
2422 c_insn = emit_jump_insn_before (c_insn, insn);
2424 ++ LABEL_NUSES (dst);
2426 JUMP_LABEL (c_insn) = dst;
2428 else
2429 c_insn = emit_insn_after (c_insn, insn);
2431 delete_insn (insn);
2433 return as_a <rtx_insn *> (c_insn);
2436 /* Attempt to change a basic block into a series of conditional insns. This
2437 works by taking the branch at the end of the 1st block and scanning for the
2438 end of the 2nd block. If all instructions in the 2nd block have cond.
2439 versions and the label at the start of block 3 is the same as the target
2440 from the branch at block 1, then conditionalize all insns in block 2 using
2441 the inverse condition of the branch at block 1. (Note I'm bending the
2442 definition of basic block here.)
2444 e.g., change:
2446 bt L2 <-- end of block 1 (delete)
2447 mov r7,r8
2448 addu r7,1
2449 br L3 <-- end of block 2
2451 L2: ... <-- start of block 3 (NUSES==1)
2452 L3: ...
2456 movf r7,r8
2457 incf r7
2458 bf L3
2460 L3: ...
2462 we can delete the L2 label if NUSES==1 and re-apply the optimization
2463 starting at the last instruction of block 2. This may allow an entire
2464 if-then-else statement to be conditionalized. BRC */
2465 static rtx_insn *
2466 conditionalize_block (rtx_insn *first)
2468 rtx_insn *insn;
2469 rtx br_pat;
2470 rtx_insn *end_blk_1_br = 0;
2471 rtx_insn *end_blk_2_insn = 0;
2472 rtx_insn *start_blk_3_lab = 0;
2473 int cond;
2474 int br_lab_num;
2475 int blk_size = 0;
2478 /* Check that the first insn is a candidate conditional jump. This is
2479 the one that we'll eliminate. If not, advance to the next insn to
2480 try. */
2481 if (! JUMP_P (first)
2482 || GET_CODE (PATTERN (first)) != SET
2483 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2484 return NEXT_INSN (first);
2486 /* Extract some information we need. */
2487 end_blk_1_br = first;
2488 br_pat = PATTERN (end_blk_1_br);
2490 /* Complement the condition since we use the reverse cond. for the insns. */
2491 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2493 /* Determine what kind of branch we have. */
2494 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2496 /* A normal branch, so extract label out of first arm. */
2497 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2499 else
2501 /* An inverse branch, so extract the label out of the 2nd arm
2502 and complement the condition. */
2503 cond = (cond == 0);
2504 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2507 /* Scan forward for the start of block 2: it must start with a
2508 label and that label must be the same as the branch target
2509 label from block 1. We don't care about whether block 2 actually
2510 ends with a branch or a label (an uncond. branch is
2511 conditionalizable). */
2512 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2514 enum rtx_code code;
2516 code = GET_CODE (insn);
2518 /* Look for the label at the start of block 3. */
2519 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2520 break;
2522 /* Skip barriers, notes, and conditionalizable insns. If the
2523 insn is not conditionalizable or makes this optimization fail,
2524 just return the next insn so we can start over from that point. */
2525 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2526 return NEXT_INSN (insn);
2528 /* Remember the last real insn before the label (i.e. end of block 2). */
2529 if (code == JUMP_INSN || code == INSN)
2531 blk_size ++;
2532 end_blk_2_insn = insn;
2536 if (!insn)
2537 return insn;
2539 /* It is possible for this optimization to slow performance if the blocks
2540 are long. This really depends upon whether the branch is likely taken
2541 or not. If the branch is taken, we slow performance in many cases. But,
2542 if the branch is not taken, we always help performance (for a single
2543 block, but for a double block (i.e. when the optimization is re-applied)
2544 this is not true since the 'right thing' depends on the overall length of
2545 the collapsed block). As a compromise, don't apply this optimization on
2546 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2547 The best threshold depends on the latencies of the instructions (i.e.,
2548 the branch penalty). */
2549 if (optimize > 1 && blk_size > 2)
2550 return insn;
2552 /* At this point, we've found the start of block 3 and we know that
2553 it is the destination of the branch from block 1. Also, all
2554 instructions in the block 2 are conditionalizable. So, apply the
2555 conditionalization and delete the branch. */
2556 start_blk_3_lab = insn;
2558 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2559 insn = NEXT_INSN (insn))
2561 rtx_insn *newinsn;
2563 if (insn->deleted ())
2564 continue;
2566 /* Try to form a conditional variant of the instruction and emit it. */
2567 if ((newinsn = emit_new_cond_insn (insn, cond)))
2569 if (end_blk_2_insn == insn)
2570 end_blk_2_insn = newinsn;
2572 insn = newinsn;
2576 /* Note whether we will delete the label starting blk 3 when the jump
2577 gets deleted. If so, we want to re-apply this optimization at the
2578 last real instruction right before the label. */
2579 if (LABEL_NUSES (start_blk_3_lab) == 1)
2581 start_blk_3_lab = 0;
2584 /* ??? we probably should redistribute the death notes for this insn, esp.
2585 the death of cc, but it doesn't really matter this late in the game.
2586 The peepholes all use is_dead() which will find the correct death
2587 regardless of whether there is a note. */
2588 delete_insn (end_blk_1_br);
2590 if (! start_blk_3_lab)
2591 return end_blk_2_insn;
2593 /* Return the insn right after the label at the start of block 3. */
2594 return NEXT_INSN (start_blk_3_lab);
2597 /* Apply the conditionalization of blocks optimization. This is the
2598 outer loop that traverses through the insns scanning for a branch
2599 that signifies an opportunity to apply the optimization. Note that
2600 this optimization is applied late. If we could apply it earlier,
2601 say before cse 2, it may expose more optimization opportunities.
2602 but, the pay back probably isn't really worth the effort (we'd have
2603 to update all reg/flow/notes/links/etc to make it work - and stick it
2604 in before cse 2). */
2606 static void
2607 conditionalize_optimization (void)
2609 rtx_insn *insn;
2611 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2612 continue;
2615 /* This is to handle loads from the constant pool. */
2617 static void
2618 mcore_reorg (void)
2620 /* Reset this variable. */
2621 current_function_anonymous_args = 0;
2623 if (optimize == 0)
2624 return;
2626 /* Conditionalize blocks where we can. */
2627 conditionalize_optimization ();
2629 /* Literal pool generation is now pushed off until the assembler. */
2633 /* Return true if X is something that can be moved directly into r15. */
2635 bool
2636 mcore_r15_operand_p (rtx x)
2638 switch (GET_CODE (x))
2640 case CONST_INT:
2641 return mcore_const_ok_for_inline (INTVAL (x));
2643 case REG:
2644 case SUBREG:
2645 case MEM:
2646 return 1;
2648 default:
2649 return 0;
2653 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2654 directly move X into it, use r1-r14 as a temporary. */
2656 enum reg_class
2657 mcore_secondary_reload_class (enum reg_class rclass,
2658 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2660 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2661 && !mcore_r15_operand_p (x))
2662 return LRW_REGS;
2663 return NO_REGS;
2666 /* Return the reg_class to use when reloading the rtx X into the class
2667 RCLASS. If X is too complex to move directly into r15, prefer to
2668 use LRW_REGS instead. */
2670 enum reg_class
2671 mcore_reload_class (rtx x, enum reg_class rclass)
2673 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2674 return LRW_REGS;
2676 return rclass;
2679 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2680 register. Note that the current version doesn't worry about whether
2681 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2682 in r2, which matches an SImode in r2). Might think in the future about whether
2683 we want to be able to say something about modes. */
2686 mcore_is_same_reg (rtx x, rtx y)
2688 /* Strip any and all of the subreg wrappers. */
2689 while (GET_CODE (x) == SUBREG)
2690 x = SUBREG_REG (x);
2692 while (GET_CODE (y) == SUBREG)
2693 y = SUBREG_REG (y);
2695 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2696 return 1;
2698 return 0;
2701 static void
2702 mcore_option_override (void)
2704 /* Only the m340 supports little endian code. */
2705 if (TARGET_LITTLE_END && ! TARGET_M340)
2706 target_flags |= MASK_M340;
2710 /* Compute the number of word sized registers needed to
2711 hold a function argument of mode MODE and type TYPE. */
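/* For example (illustrative, assuming ROUND_ADVANCE rounds a byte count up to whole words): a 10-byte BLKmode structure needs ROUND_ADVANCE (10) == 3 registers. */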
2714 mcore_num_arg_regs (machine_mode mode, const_tree type)
2716 int size;
2718 function_arg_info arg (const_cast<tree> (type), mode, /*named=*/true);
2719 if (targetm.calls.must_pass_in_stack (arg))
2720 return 0;
2722 if (type && mode == BLKmode)
2723 size = int_size_in_bytes (type);
2724 else
2725 size = GET_MODE_SIZE (mode);
2727 return ROUND_ADVANCE (size);
2730 static rtx
2731 handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
2733 int size;
2735 /* The MCore ABI defines that a structure whose size is not a whole multiple
2736 of words is passed packed into registers (or spilled onto the stack if
2737 not enough registers are available) with the last few bytes of the
2738 structure being packed, left-justified, into the last register/stack slot.
2739 GCC handles this correctly if the last word is in a stack slot, but we
2740 have to generate a special, PARALLEL RTX if the last word is in an
2741 argument register. */
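/* For example (illustrative): a 6-byte structure spans two registers, with its final two bytes packed, left-justified, into the second one. */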
2742 if (type
2743 && TYPE_MODE (type) == BLKmode
2744 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2745 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2746 && (size % UNITS_PER_WORD != 0)
2747 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2749 rtx arg_regs [NPARM_REGS];
2750 int nregs;
2751 rtx result;
2752 rtvec rtvec;
2754 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2756 arg_regs [nregs] =
2757 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2758 GEN_INT (nregs * UNITS_PER_WORD));
2759 nregs ++;
2762 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2763 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2764 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2765 arg_regs[3], arg_regs[4], arg_regs[5]);
2767 result = gen_rtx_PARALLEL (mode, rtvec);
2768 return result;
2771 return gen_rtx_REG (mode, reg);
2775 mcore_function_value (const_tree valtype, const_tree func)
2777 machine_mode mode;
2778 int unsigned_p;
2780 mode = TYPE_MODE (valtype);
2782 /* Since we promote return types, we must promote the mode here too. */
2783 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2785 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2788 /* Define where to put the arguments to a function.
2789 Value is zero to push the argument on the stack,
2790 or a hard register in which to store the argument.
2792 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2793 the preceding args and about the function being called.
2794 ARG is a description of the argument.
2796 On MCore the first args are normally in registers
2797 and the rest are pushed. Any arg that starts within the first
2798 NPARM_REGS words is at least partially passed in a register unless
2799 its data type forbids. */
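/* For example (illustrative, given FIRST_PARM_REG == 2 and NPARM_REGS == 6 as noted elsewhere in this file): a call with eight word-sized arguments passes the first six in r2-r7 and pushes the last two. */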
2801 static rtx
2802 mcore_function_arg (cumulative_args_t cum, const function_arg_info &arg)
2804 int arg_reg;
2806 if (!arg.named || arg.end_marker_p ())
2807 return 0;
2809 if (targetm.calls.must_pass_in_stack (arg))
2810 return 0;
2812 arg_reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
2814 if (arg_reg < NPARM_REGS)
2815 return handle_structs_in_regs (arg.mode, arg.type,
2816 FIRST_PARM_REG + arg_reg);
2818 return 0;
2821 static void
2822 mcore_function_arg_advance (cumulative_args_t cum_v,
2823 const function_arg_info &arg)
2825 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2827 *cum = (ROUND_REG (*cum, arg.mode)
2828 + (int) arg.named * mcore_num_arg_regs (arg.mode, arg.type));
2831 static unsigned int
2832 mcore_function_arg_boundary (machine_mode mode,
2833 const_tree type ATTRIBUTE_UNUSED)
2835 /* Doubles must be aligned to an 8 byte boundary. */
2836 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2837 ? BIGGEST_ALIGNMENT
2838 : PARM_BOUNDARY);
2841 /* Returns the number of bytes of argument registers required to hold *part*
2842 of argument ARG. If the argument fits entirely in the argument registers,
2843 or entirely on the stack, then 0 is returned. CUM is the number of
2844 argument registers already used by earlier parameters to the function. */
2846 static int
2847 mcore_arg_partial_bytes (cumulative_args_t cum, const function_arg_info &arg)
2849 int reg = ROUND_REG (*get_cumulative_args (cum), arg.mode);
2851 if (!arg.named)
2852 return 0;
2854 if (targetm.calls.must_pass_in_stack (arg))
2855 return 0;
2857 /* REG is not the *hardware* register number of the register that holds
2858 the argument, it is the *argument* register number. So for example,
2859 the first argument to a function goes in argument register 0, which
2860 translates (for the MCore) into hardware register 2. The second
2861 argument goes into argument register 1, which translates into hardware
2862 register 3, and so on. NPARM_REGS is the number of argument registers
2863 supported by the target, not the maximum hardware register number of
2864 the target. */
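/* Worked example (illustrative, with NPARM_REGS == 6): an argument needing three registers that starts at argument register 4 gets the two remaining registers, so 8 bytes are returned here and the rest of the argument goes on the stack. */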
2865 if (reg >= NPARM_REGS)
2866 return 0;
2868 /* If the argument fits entirely in registers, return 0. */
2869 if (reg + mcore_num_arg_regs (arg.mode, arg.type) <= NPARM_REGS)
2870 return 0;
2872 /* The argument overflows the number of available argument registers.
2873 Compute how many argument registers have not yet been assigned to
2874 hold an argument. */
2875 reg = NPARM_REGS - reg;
2877 /* Return partially in registers and partially on the stack. */
2878 return reg * UNITS_PER_WORD;
2881 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2884 mcore_dllexport_name_p (const char * symbol)
2886 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2889 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2892 mcore_dllimport_name_p (const char * symbol)
2894 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2897 /* Mark a DECL as being dllexport'd. */
2899 static void
2900 mcore_mark_dllexport (tree decl)
2902 const char * oldname;
2903 char * newname;
2904 rtx rtlname;
2905 tree idp;
2907 rtlname = XEXP (DECL_RTL (decl), 0);
2909 if (GET_CODE (rtlname) == MEM)
2910 rtlname = XEXP (rtlname, 0);
2911 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2912 oldname = XSTR (rtlname, 0);
2914 if (mcore_dllexport_name_p (oldname))
2915 return; /* Already done. */
2917 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2918 sprintf (newname, "@e.%s", oldname);
2920 /* We pass newname through get_identifier to ensure it has a unique
2921 address. RTL processing can sometimes peek inside the symbol ref
2922 and compare the strings' addresses to see if two symbols are
2923 identical. */
2924 /* ??? At least I think that's why we do this. */
2925 idp = get_identifier (newname);
2927 XEXP (DECL_RTL (decl), 0) =
2928 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2931 /* Mark a DECL as being dllimport'd. */
2933 static void
2934 mcore_mark_dllimport (tree decl)
2936 const char * oldname;
2937 char * newname;
2938 tree idp;
2939 rtx rtlname;
2940 rtx newrtl;
2942 rtlname = XEXP (DECL_RTL (decl), 0);
2944 if (GET_CODE (rtlname) == MEM)
2945 rtlname = XEXP (rtlname, 0);
2946 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2947 oldname = XSTR (rtlname, 0);
2949 gcc_assert (!mcore_dllexport_name_p (oldname));
2950 if (mcore_dllimport_name_p (oldname))
2951 return; /* Already done. */
2953 /* ??? One can well ask why we're making these checks here,
2954 and that would be a good question. */
2956 /* Imported variables can't be initialized. */
2957 if (VAR_P (decl)
2958 && !DECL_VIRTUAL_P (decl)
2959 && DECL_INITIAL (decl))
2961 error ("initialized variable %q+D is marked dllimport", decl);
2962 return;
2965 /* `extern' needn't be specified with dllimport.
2966 Specify `extern' now and hope for the best. Sigh. */
2967 if (VAR_P (decl)
2968 /* ??? Is this test for vtables needed? */
2969 && !DECL_VIRTUAL_P (decl))
2971 DECL_EXTERNAL (decl) = 1;
2972 TREE_PUBLIC (decl) = 1;
2975 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2976 sprintf (newname, "@i.__imp_%s", oldname);
2978 /* We pass newname through get_identifier to ensure it has a unique
2979 address. RTL processing can sometimes peek inside the symbol ref
2980 and compare the strings' addresses to see if two symbols are
2981 identical. */
2982 /* ??? At least I think that's why we do this. */
2983 idp = get_identifier (newname);
2985 newrtl = gen_rtx_MEM (Pmode,
2986 gen_rtx_SYMBOL_REF (Pmode,
2987 IDENTIFIER_POINTER (idp)));
2988 XEXP (DECL_RTL (decl), 0) = newrtl;
2991 static int
2992 mcore_dllexport_p (tree decl)
2994 if ( TREE_CODE (decl) != VAR_DECL
2995 && TREE_CODE (decl) != FUNCTION_DECL)
2996 return 0;
2998 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
3001 static int
3002 mcore_dllimport_p (tree decl)
3004 if ( TREE_CODE (decl) != VAR_DECL
3005 && TREE_CODE (decl) != FUNCTION_DECL)
3006 return 0;
3008 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3011 /* We must mark dll symbols specially. Definitions of dllexport'd objects
3012 install some info in the .drectve (PE) or .exports (ELF) sections. */
3014 static void
3015 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3017 /* Mark the decl so we can tell from the rtl whether the object is
3018 dllexport'd or dllimport'd. */
3019 if (mcore_dllexport_p (decl))
3020 mcore_mark_dllexport (decl);
3021 else if (mcore_dllimport_p (decl))
3022 mcore_mark_dllimport (decl);
3024 /* It might be that DECL has already been marked as dllimport, but
3025 a subsequent definition nullified that. The attribute is gone
3026 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3027 else if ((TREE_CODE (decl) == FUNCTION_DECL
3028 || VAR_P (decl))
3029 && DECL_RTL (decl) != NULL_RTX
3030 && GET_CODE (DECL_RTL (decl)) == MEM
3031 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3032 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3033 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3035 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3036 tree idp = get_identifier (oldname + 9);
3037 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3039 XEXP (DECL_RTL (decl), 0) = newrtl;
3041 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3042 ??? We leave these alone for now. */
3046 /* Undo the effects of the above. */
3048 static const char *
3049 mcore_strip_name_encoding (const char * str)
3051 return str + (str[0] == '@' ? 3 : 0);
3054 /* MCore specific attribute support.
3055 dllexport - for exporting a function/variable that will live in a dll
3056 dllimport - for importing a function/variable from a dll
3057 naked - do not create a function prologue/epilogue. */
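/* Usage sketch (hypothetical function f): declaring void f (void) __attribute__ ((naked)); tells the compiler not to emit a prologue/epilogue for f. */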
3059 /* Handle a "naked" attribute; arguments as in
3060 struct attribute_spec.handler. */
3062 static tree
3063 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3064 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3066 if (TREE_CODE (*node) != FUNCTION_DECL)
3068 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3069 name);
3070 *no_add_attrs = true;
3073 return NULL_TREE;
3076 /* ??? It looks like this is PE specific? Oh well, this is what the
3077 old code did as well. */
3079 static void
3080 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3082 int len;
3083 const char * name;
3084 char * string;
3085 const char * prefix;
3087 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3089 /* Strip off any encoding in name. */
3090 name = (* targetm.strip_name_encoding) (name);
3092 /* The object is put in, for example, section .text$foo.
3093 The linker will then ultimately place it in .text
3094 (everything from the $ on is stripped). */
3095 if (TREE_CODE (decl) == FUNCTION_DECL)
3096 prefix = ".text$";
3097 /* For compatibility with EPOC, we ignore the fact that the
3098 section might have relocs against it. */
3099 else if (decl_readonly_section (decl, 0))
3100 prefix = ".rdata$";
3101 else
3102 prefix = ".data$";
3104 len = strlen (name) + strlen (prefix);
3105 string = XALLOCAVEC (char, len + 1);
3107 sprintf (string, "%s%s", prefix, name);
3109 set_decl_section_name (decl, string);
3113 mcore_naked_function_p (void)
3115 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3118 static bool
3119 mcore_warn_func_return (tree decl)
3121 /* Naked functions are implemented entirely in assembly, including the
3122 return sequence, so suppress warnings about this. */
3123 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3126 #ifdef OBJECT_FORMAT_ELF
3127 static void
3128 mcore_asm_named_section (const char *name,
3129 unsigned int flags ATTRIBUTE_UNUSED,
3130 tree decl ATTRIBUTE_UNUSED)
3132 fprintf (asm_out_file, "\t.section %s\n", name);
3134 #endif /* OBJECT_FORMAT_ELF */
3136 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3138 static void
3139 mcore_external_libcall (rtx fun)
3141 fprintf (asm_out_file, "\t.import\t");
3142 assemble_name (asm_out_file, XSTR (fun, 0));
3143 fprintf (asm_out_file, "\n");
3146 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3148 static bool
3149 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3151 const HOST_WIDE_INT size = int_size_in_bytes (type);
3152 return (size == -1 || size > 2 * UNITS_PER_WORD);
3155 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3156 Output assembler code for a block containing the constant parts
3157 of a trampoline, leaving space for the variable parts.
3159 On the MCore, the trampoline looks like:
3160 lrw r1, function
3161 lrw r13, area
3162 jmp r13
3163 or r0, r0
3164 .literals */
3166 static void
3167 mcore_asm_trampoline_template (FILE *f)
3169 fprintf (f, "\t.short 0x7102\n");
3170 fprintf (f, "\t.short 0x7d02\n");
3171 fprintf (f, "\t.short 0x00cd\n");
3172 fprintf (f, "\t.short 0x1e00\n");
3173 fprintf (f, "\t.long 0\n");
3174 fprintf (f, "\t.long 0\n");
3177 /* Worker function for TARGET_TRAMPOLINE_INIT. */
3179 static void
3180 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3182 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3183 rtx mem;
3185 emit_block_move (m_tramp, assemble_trampoline_template (),
3186 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
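/* Fill in the two literal slots left by the template: the static chain value goes at offset 8 and the target function's address at offset 12. */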
3188 mem = adjust_address (m_tramp, SImode, 8);
3189 emit_move_insn (mem, chain_value);
3190 mem = adjust_address (m_tramp, SImode, 12);
3191 emit_move_insn (mem, fnaddr);
3194 /* Implement TARGET_LEGITIMATE_CONSTANT_P
3196 On the MCore, allow anything but a double. */
3198 static bool
3199 mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3201 return GET_CODE (x) != CONST_DOUBLE;
3204 /* Helper function for `mcore_legitimate_address_p'. */
3206 static bool
3207 mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3209 if (strict_p)
3210 return REGNO_OK_FOR_BASE_P (REGNO (reg));
3211 else
3212 return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3215 static bool
3216 mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3218 return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
3221 /* A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3222 and for DI is 0..56 because we use two SI loads, etc. */
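/* For illustration: the checks below allow word-or-larger modes an offset of up to 64 - GET_MODE_SIZE (mode), e.g. 64 - 4 == 60 for SImode and 64 - 8 == 56 for DImode, matching the ranges quoted above. */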
3224 static bool
3225 mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3227 if (CONST_INT_P (op))
3229 if (GET_MODE_SIZE (mode) >= 4
3230 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3231 && ((unsigned HOST_WIDE_INT) INTVAL (op))
3232 <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3233 return true;
3234 if (GET_MODE_SIZE (mode) == 2
3235 && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3236 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3237 return true;
3238 if (GET_MODE_SIZE (mode) == 1
3239 && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3240 return true;
3242 return false;
3246 /* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3248 Allow REG
3249 REG + disp */
3251 static bool
3252 mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3253 addr_space_t as, code_helper)
3255 gcc_assert (ADDR_SPACE_GENERIC_P (as));
3257 if (mcore_base_register_rtx_p (x, strict_p))
3258 return true;
3259 else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3261 rtx xop0 = XEXP (x, 0);
3262 rtx xop1 = XEXP (x, 1);
3263 if (mcore_base_register_rtx_p (xop0, strict_p)
3264 && mcore_legitimate_index_p (mode, xop1))
3265 return true;
3266 if (mcore_base_register_rtx_p (xop1, strict_p)
3267 && mcore_legitimate_index_p (mode, xop0))
3268 return true;
3271 return false;
3274 /* Implement TARGET_HARD_REGNO_MODE_OK. We may keep double values in
3275 even registers. */
3277 static bool
3278 mcore_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
3280 if (TARGET_8ALIGN && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
3281 return (regno & 1) == 0;
3283 return regno < 18;
3286 /* Implement TARGET_MODES_TIEABLE_P. */
3288 static bool
3289 mcore_modes_tieable_p (machine_mode mode1, machine_mode mode2)
3291 return mode1 == mode2 || GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2);