1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
8 by the Free Software Foundation; either version 3, or (at your
9 option) any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "alias.h"
26 #include "symtab.h"
27 #include "tree.h"
28 #include "fold-const.h"
29 #include "stor-layout.h"
30 #include "varasm.h"
31 #include "stringpool.h"
32 #include "calls.h"
33 #include "tm_p.h"
34 #include "mcore.h"
35 #include "regs.h"
36 #include "hard-reg-set.h"
37 #include "insn-config.h"
38 #include "conditions.h"
39 #include "output.h"
40 #include "insn-attr.h"
41 #include "flags.h"
42 #include "obstack.h"
43 #include "function.h"
44 #include "expmed.h"
45 #include "dojump.h"
46 #include "explow.h"
47 #include "emit-rtl.h"
48 #include "stmt.h"
49 #include "expr.h"
50 #include "reload.h"
51 #include "recog.h"
52 #include "diagnostic-core.h"
53 #include "target.h"
54 #include "dominance.h"
55 #include "cfg.h"
56 #include "cfgrtl.h"
57 #include "cfganal.h"
58 #include "lcm.h"
59 #include "cfgbuild.h"
60 #include "cfgcleanup.h"
61 #include "predict.h"
62 #include "basic-block.h"
63 #include "df.h"
64 #include "builtins.h"
66 #include "target-def.h"
68 /* For dumping information about frame sizes. */
69 char * mcore_current_function_name = 0;
70 long mcore_current_compilation_timestamp = 0;
72 /* Global variables for machine-dependent things. */
74 /* Provides the class number of the smallest class containing
75 reg number. */
76 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
78 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
79 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
80 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
81 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
82 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
85 struct mcore_frame
87 int arg_size; /* Stdarg spills (bytes). */
88 int reg_size; /* Non-volatile reg saves (bytes). */
89 int reg_mask; /* Non-volatile reg saves. */
90 int local_size; /* Locals. */
91 int outbound_size; /* Arg overflow on calls out. */
92 int pad_outbound;
93 int pad_local;
94 int pad_reg;
95 /* Describe the steps we'll use to grow it. */
96 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
97 int growth[MAX_STACK_GROWS];
98 int arg_offset;
99 int reg_offset;
100 int reg_growth;
101 int local_growth;
104 typedef enum
106 COND_NO,
107 COND_MOV_INSN,
108 COND_CLR_INSN,
109 COND_INC_INSN,
110 COND_DEC_INSN,
111 COND_BRANCH_INSN
113 cond_type;
115 static void output_stack_adjust (int, int);
116 static int calc_live_regs (int *);
117 static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
118 static const char * output_inline_const (machine_mode, rtx *);
119 static void layout_mcore_frame (struct mcore_frame *);
120 static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
121 static cond_type is_cond_candidate (rtx);
122 static rtx_insn *emit_new_cond_insn (rtx, int);
123 static rtx_insn *conditionalize_block (rtx_insn *);
124 static void conditionalize_optimization (void);
125 static void mcore_reorg (void);
126 static rtx handle_structs_in_regs (machine_mode, const_tree, int);
127 static void mcore_mark_dllexport (tree);
128 static void mcore_mark_dllimport (tree);
129 static int mcore_dllexport_p (tree);
130 static int mcore_dllimport_p (tree);
131 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
132 #ifdef OBJECT_FORMAT_ELF
133 static void mcore_asm_named_section (const char *,
134 unsigned int, tree);
135 #endif
136 static void mcore_print_operand (FILE *, rtx, int);
137 static void mcore_print_operand_address (FILE *, rtx);
138 static bool mcore_print_operand_punct_valid_p (unsigned char code);
139 static void mcore_unique_section (tree, int);
140 static void mcore_encode_section_info (tree, rtx, int);
141 static const char *mcore_strip_name_encoding (const char *);
142 static int mcore_const_costs (rtx, RTX_CODE);
143 static int mcore_and_cost (rtx);
144 static int mcore_ior_cost (rtx);
145 static bool mcore_rtx_costs (rtx, int, int, int,
146 int *, bool);
147 static void mcore_external_libcall (rtx);
148 static bool mcore_return_in_memory (const_tree, const_tree);
149 static int mcore_arg_partial_bytes (cumulative_args_t,
150 machine_mode,
151 tree, bool);
152 static rtx mcore_function_arg (cumulative_args_t,
153 machine_mode,
154 const_tree, bool);
155 static void mcore_function_arg_advance (cumulative_args_t,
156 machine_mode,
157 const_tree, bool);
158 static unsigned int mcore_function_arg_boundary (machine_mode,
159 const_tree);
160 static void mcore_asm_trampoline_template (FILE *);
161 static void mcore_trampoline_init (rtx, tree, rtx);
162 static bool mcore_warn_func_return (tree);
163 static void mcore_option_override (void);
164 static bool mcore_legitimate_constant_p (machine_mode, rtx);
166 /* MCore specific attributes. */
168 static const struct attribute_spec mcore_attribute_table[] =
170 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
171 affects_type_identity } */
172 { "dllexport", 0, 0, true, false, false, NULL, false },
173 { "dllimport", 0, 0, true, false, false, NULL, false },
174 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
175 false },
176 { NULL, 0, 0, false, false, false, NULL, false }
179 /* Initialize the GCC target structure. */
180 #undef TARGET_ASM_EXTERNAL_LIBCALL
181 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
183 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
184 #undef TARGET_MERGE_DECL_ATTRIBUTES
185 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
186 #endif
188 #ifdef OBJECT_FORMAT_ELF
189 #undef TARGET_ASM_UNALIGNED_HI_OP
190 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
191 #undef TARGET_ASM_UNALIGNED_SI_OP
192 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
193 #endif
195 #undef TARGET_PRINT_OPERAND
196 #define TARGET_PRINT_OPERAND mcore_print_operand
197 #undef TARGET_PRINT_OPERAND_ADDRESS
198 #define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
199 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
200 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
202 #undef TARGET_ATTRIBUTE_TABLE
203 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
204 #undef TARGET_ASM_UNIQUE_SECTION
205 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
206 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
207 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
208 #undef TARGET_ENCODE_SECTION_INFO
209 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
210 #undef TARGET_STRIP_NAME_ENCODING
211 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
212 #undef TARGET_RTX_COSTS
213 #define TARGET_RTX_COSTS mcore_rtx_costs
214 #undef TARGET_ADDRESS_COST
215 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
216 #undef TARGET_MACHINE_DEPENDENT_REORG
217 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
219 #undef TARGET_PROMOTE_FUNCTION_MODE
220 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
221 #undef TARGET_PROMOTE_PROTOTYPES
222 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
224 #undef TARGET_RETURN_IN_MEMORY
225 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
226 #undef TARGET_MUST_PASS_IN_STACK
227 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
228 #undef TARGET_PASS_BY_REFERENCE
229 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
230 #undef TARGET_ARG_PARTIAL_BYTES
231 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
232 #undef TARGET_FUNCTION_ARG
233 #define TARGET_FUNCTION_ARG mcore_function_arg
234 #undef TARGET_FUNCTION_ARG_ADVANCE
235 #define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
236 #undef TARGET_FUNCTION_ARG_BOUNDARY
237 #define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
239 #undef TARGET_SETUP_INCOMING_VARARGS
240 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
242 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
243 #define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
244 #undef TARGET_TRAMPOLINE_INIT
245 #define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
247 #undef TARGET_OPTION_OVERRIDE
248 #define TARGET_OPTION_OVERRIDE mcore_option_override
250 #undef TARGET_LEGITIMATE_CONSTANT_P
251 #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
253 #undef TARGET_WARN_FUNC_RETURN
254 #define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
256 struct gcc_target targetm = TARGET_INITIALIZER;
 258 /* Adjust the stack by SIZE bytes in DIRECTION (a negative DIRECTION grows the stack). */
259 static void
260 output_stack_adjust (int direction, int size)
262 /* If extending stack a lot, we do it incrementally. */
263 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
265 rtx tmp = gen_rtx_REG (SImode, 1);
266 rtx memref;
268 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
271 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
272 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
273 MEM_VOLATILE_P (memref) = 1;
274 emit_insn (gen_movsi (memref, stack_pointer_rtx));
275 size -= mcore_stack_increment;
277 while (size > mcore_stack_increment);
279 /* SIZE is now the residual for the last adjustment,
280 which doesn't require a probe. */
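/* For illustration (with an assumed mcore_stack_increment of 32): growing
   the stack by 100 bytes emits three probed 32-byte steps in the loop
   above and leaves a residual SIZE of 4 for the single adjustment below.  */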
283 if (size)
285 rtx insn;
286 rtx val = GEN_INT (size);
288 if (size > 32)
290 rtx nval = gen_rtx_REG (SImode, 1);
291 emit_insn (gen_movsi (nval, val));
292 val = nval;
295 if (direction > 0)
296 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
297 else
298 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
300 emit_insn (insn);
304 /* Work out the registers which need to be saved,
305 both as a mask and a count. */
307 static int
308 calc_live_regs (int * count)
310 int reg;
311 int live_regs_mask = 0;
313 * count = 0;
315 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
317 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
319 (*count)++;
320 live_regs_mask |= (1 << reg);
324 return live_regs_mask;
327 /* Print the operand address in x to the stream. */
329 static void
330 mcore_print_operand_address (FILE * stream, rtx x)
332 switch (GET_CODE (x))
334 case REG:
335 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
336 break;
338 case PLUS:
340 rtx base = XEXP (x, 0);
341 rtx index = XEXP (x, 1);
343 if (GET_CODE (base) != REG)
345 /* Ensure that BASE is a register (one of them must be). */
346 rtx temp = base;
347 base = index;
348 index = temp;
351 switch (GET_CODE (index))
353 case CONST_INT:
354 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
355 reg_names[REGNO(base)], INTVAL (index));
356 break;
358 default:
359 gcc_unreachable ();
363 break;
365 default:
366 output_addr_const (stream, x);
367 break;
371 static bool
372 mcore_print_operand_punct_valid_p (unsigned char code)
374 return (code == '.' || code == '#' || code == '*' || code == '^'
375 || code == '!');
378 /* Print operand x (an rtx) in assembler syntax to file stream
379 according to modifier code.
381 'R' print the next register or memory location along, i.e. the lsw in
382 a double word value
383 'O' print a constant without the #
384 'M' print a constant as its negative
385 'P' print log2 of a power of two
386 'Q' print log2 of an inverse of a power of two
387 'U' print register for ldm/stm instruction
388 'X' print byte number for xtrbN instruction. */
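/* A few worked examples of the modifiers handled below: for a constant
   operand of 128, %P prints 7 (its log2); %Q applied to ~128 likewise
   prints 7; %X maps a bit offset of 0, 8, 16 or 24 onto the xtrb byte
   number 3, 2, 1 or 0.  The switch below also accepts 'N' (log2 of the
   operand plus one, or 32 for -1) and 'x' (the constant in hex), which
   the list above does not mention.  */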
390 static void
391 mcore_print_operand (FILE * stream, rtx x, int code)
393 switch (code)
395 case 'N':
396 if (INTVAL(x) == -1)
397 fprintf (asm_out_file, "32");
398 else
399 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
400 break;
401 case 'P':
402 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
403 break;
404 case 'Q':
405 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
406 break;
407 case 'O':
408 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
409 break;
410 case 'M':
411 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
412 break;
413 case 'R':
414 /* Next location along in memory or register. */
415 switch (GET_CODE (x))
417 case REG:
418 fputs (reg_names[REGNO (x) + 1], (stream));
419 break;
420 case MEM:
421 mcore_print_operand_address
422 (stream, XEXP (adjust_address (x, SImode, 4), 0));
423 break;
424 default:
425 gcc_unreachable ();
427 break;
428 case 'U':
429 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
430 reg_names[REGNO (x) + 3]);
431 break;
432 case 'x':
433 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
434 break;
435 case 'X':
436 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
437 break;
439 default:
440 switch (GET_CODE (x))
442 case REG:
443 fputs (reg_names[REGNO (x)], (stream));
444 break;
445 case MEM:
446 output_address (XEXP (x, 0));
447 break;
448 default:
449 output_addr_const (stream, x);
450 break;
452 break;
456 /* What does a constant cost ? */
458 static int
459 mcore_const_costs (rtx exp, enum rtx_code code)
461 HOST_WIDE_INT val = INTVAL (exp);
463 /* Easy constants. */
464 if ( CONST_OK_FOR_I (val)
465 || CONST_OK_FOR_M (val)
466 || CONST_OK_FOR_N (val)
467 || (code == PLUS && CONST_OK_FOR_L (val)))
468 return 1;
469 else if (code == AND
470 && ( CONST_OK_FOR_M (~val)
471 || CONST_OK_FOR_N (~val)))
472 return 2;
473 else if (code == PLUS
474 && ( CONST_OK_FOR_I (-val)
475 || CONST_OK_FOR_M (-val)
476 || CONST_OK_FOR_N (-val)))
477 return 2;
479 return 5;
 482 /* What does an AND instruction cost?  We do this because immediates may
 483    have been relaxed.  We want to ensure that CSE will common up relaxed
 484    immediates; otherwise we get bad code (multiple reloads of the same constant). */
486 static int
487 mcore_and_cost (rtx x)
489 HOST_WIDE_INT val;
491 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
492 return 2;
494 val = INTVAL (XEXP (x, 1));
496 /* Do it directly. */
497 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
498 return 2;
499 /* Takes one instruction to load. */
500 else if (const_ok_for_mcore (val))
501 return 3;
502 /* Takes two instructions to load. */
503 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
504 return 4;
506 /* Takes a lrw to load. */
507 return 5;
 510 /* What does an OR cost?  See mcore_and_cost (). */
512 static int
513 mcore_ior_cost (rtx x)
515 HOST_WIDE_INT val;
517 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
518 return 2;
520 val = INTVAL (XEXP (x, 1));
 522 /* Do it directly with bseti. */
523 if (CONST_OK_FOR_M (val))
524 return 2;
525 /* Takes one instruction to load. */
526 else if (const_ok_for_mcore (val))
527 return 3;
528 /* Takes two instructions to load. */
529 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
530 return 4;
532 /* Takes a lrw to load. */
533 return 5;
536 static bool
537 mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
538 int * total, bool speed ATTRIBUTE_UNUSED)
540 switch (code)
542 case CONST_INT:
543 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
544 return true;
545 case CONST:
546 case LABEL_REF:
547 case SYMBOL_REF:
548 *total = 5;
549 return true;
550 case CONST_DOUBLE:
551 *total = 10;
552 return true;
554 case AND:
555 *total = COSTS_N_INSNS (mcore_and_cost (x));
556 return true;
558 case IOR:
559 *total = COSTS_N_INSNS (mcore_ior_cost (x));
560 return true;
562 case DIV:
563 case UDIV:
564 case MOD:
565 case UMOD:
566 case FLOAT:
567 case FIX:
568 *total = COSTS_N_INSNS (100);
569 return true;
571 default:
572 return false;
 576 /* Prepare the operands for a comparison.  Return true if the branch/setcc
 577    should use the inverted sense of the comparison that is emitted. */
579 bool
580 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
582 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
583 bool invert;
585 if (GET_CODE (op1) == CONST_INT)
587 HOST_WIDE_INT val = INTVAL (op1);
589 switch (code)
591 case GTU:
592 /* Unsigned > 0 is the same as != 0; everything else is converted
593 below to LEU (reversed cmphs). */
594 if (val == 0)
595 code = NE;
596 break;
598 /* Check whether (LE A imm) can become (LT A imm + 1),
599 or (GT A imm) can become (GE A imm + 1). */
600 case GT:
601 case LE:
602 if (CONST_OK_FOR_J (val + 1))
604 op1 = GEN_INT (val + 1);
605 code = code == LE ? LT : GE;
607 break;
609 default:
610 break;
614 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
615 op1 = force_reg (SImode, op1);
617 /* cmpnei: 0-31 (K immediate)
618 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
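/* For example, (eq r2 10) is emitted as the cmpne pattern with the
   constant kept in place (roughly "cmpnei r2,10"), and INVERT is
   returned as true so the caller tests the opposite sense of the
   condition code.  */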
619 invert = false;
620 switch (code)
622 case EQ: /* Use inverted condition, cmpne. */
623 code = NE;
624 invert = true;
625 /* Drop through. */
627 case NE: /* Use normal condition, cmpne. */
628 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
629 op1 = force_reg (SImode, op1);
630 break;
632 case LE: /* Use inverted condition, reversed cmplt. */
633 code = GT;
634 invert = true;
635 /* Drop through. */
637 case GT: /* Use normal condition, reversed cmplt. */
638 if (GET_CODE (op1) == CONST_INT)
639 op1 = force_reg (SImode, op1);
640 break;
642 case GE: /* Use inverted condition, cmplt. */
643 code = LT;
644 invert = true;
645 /* Drop through. */
647 case LT: /* Use normal condition, cmplt. */
648 if (GET_CODE (op1) == CONST_INT &&
649 /* covered by btsti x,31. */
650 INTVAL (op1) != 0 &&
651 ! CONST_OK_FOR_J (INTVAL (op1)))
652 op1 = force_reg (SImode, op1);
653 break;
655 case GTU: /* Use inverted condition, cmple. */
656 /* We coped with unsigned > 0 above. */
657 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
658 code = LEU;
659 invert = true;
660 /* Drop through. */
662 case LEU: /* Use normal condition, reversed cmphs. */
663 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
664 op1 = force_reg (SImode, op1);
665 break;
667 case LTU: /* Use inverted condition, cmphs. */
668 code = GEU;
669 invert = true;
670 /* Drop through. */
672 case GEU: /* Use normal condition, cmphs. */
673 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
674 op1 = force_reg (SImode, op1);
675 break;
677 default:
678 break;
681 emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
682 return invert;
686 mcore_symbolic_address_p (rtx x)
688 switch (GET_CODE (x))
690 case SYMBOL_REF:
691 case LABEL_REF:
692 return 1;
693 case CONST:
694 x = XEXP (x, 0);
695 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
696 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
697 && GET_CODE (XEXP (x, 1)) == CONST_INT);
698 default:
699 return 0;
703 /* Functions to output assembly code for a function call. */
705 char *
706 mcore_output_call (rtx operands[], int index)
708 static char buffer[20];
709 rtx addr = operands [index];
711 if (REG_P (addr))
713 if (TARGET_CG_DATA)
715 gcc_assert (mcore_current_function_name);
717 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
718 "unknown", 1);
721 sprintf (buffer, "jsr\t%%%d", index);
723 else
725 if (TARGET_CG_DATA)
727 gcc_assert (mcore_current_function_name);
728 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
730 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
731 XSTR (addr, 0), 0);
734 sprintf (buffer, "jbsr\t%%%d", index);
737 return buffer;
740 /* Can we load a constant with a single instruction ? */
743 const_ok_for_mcore (HOST_WIDE_INT value)
745 if (value >= 0 && value <= 127)
746 return 1;
748 /* Try exact power of two. */
749 if (CONST_OK_FOR_M (value))
750 return 1;
752 /* Try exact power of two - 1. */
753 if (CONST_OK_FOR_N (value) && value != -1)
754 return 1;
756 return 0;
759 /* Can we load a constant inline with up to 2 instructions ? */
762 mcore_const_ok_for_inline (HOST_WIDE_INT value)
764 HOST_WIDE_INT x, y;
766 return try_constant_tricks (value, & x, & y) > 0;
769 /* Are we loading the constant using a not ? */
772 mcore_const_trick_uses_not (HOST_WIDE_INT value)
774 HOST_WIDE_INT x, y;
776 return try_constant_tricks (value, & x, & y) == 2;
 779 /* Try tricks to load a constant inline and return the trick number on
 780    success (0 means the constant is not inlinable).
782 0: not inlinable
783 1: single instruction (do the usual thing)
784 2: single insn followed by a 'not'
 785    3: single insn followed by an addi
 786    4: single insn followed by a subi
787 5: single insn followed by rsubi
788 6: single insn followed by bseti
789 7: single insn followed by bclri
790 8: single insn followed by rotli
791 9: single insn followed by lsli
792 10: single insn followed by ixh
793 11: single insn followed by ixw. */
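/* For illustration: -100 is not directly loadable, but its complement 99
   fits a movi, so the answer is trick 2 ("movi rd,99 ; not rd").  A value
   within 32 of something loadable instead becomes a load followed by an
   addi (trick 3) or a subi (trick 4).  */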
795 static int
796 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
798 HOST_WIDE_INT i;
799 unsigned HOST_WIDE_INT bit, shf, rot;
801 if (const_ok_for_mcore (value))
802 return 1; /* Do the usual thing. */
804 if (! TARGET_HARDLIT)
805 return 0;
807 if (const_ok_for_mcore (~value))
809 *x = ~value;
810 return 2;
813 for (i = 1; i <= 32; i++)
815 if (const_ok_for_mcore (value - i))
817 *x = value - i;
818 *y = i;
820 return 3;
823 if (const_ok_for_mcore (value + i))
825 *x = value + i;
826 *y = i;
828 return 4;
832 bit = 0x80000000ULL;
834 for (i = 0; i <= 31; i++)
836 if (const_ok_for_mcore (i - value))
838 *x = i - value;
839 *y = i;
841 return 5;
844 if (const_ok_for_mcore (value & ~bit))
846 *y = bit;
847 *x = value & ~bit;
848 return 6;
851 if (const_ok_for_mcore (value | bit))
853 *y = ~bit;
854 *x = value | bit;
856 return 7;
859 bit >>= 1;
862 shf = value;
863 rot = value;
865 for (i = 1; i < 31; i++)
867 int c;
869 /* MCore has rotate left. */
870 c = rot << 31;
871 rot >>= 1;
872 rot &= 0x7FFFFFFF;
873 rot |= c; /* Simulate rotate. */
875 if (const_ok_for_mcore (rot))
877 *y = i;
878 *x = rot;
880 return 8;
883 if (shf & 1)
884 shf = 0; /* Can't use logical shift, low order bit is one. */
886 shf >>= 1;
888 if (shf != 0 && const_ok_for_mcore (shf))
890 *y = i;
891 *x = shf;
893 return 9;
897 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
899 *x = value / 3;
901 return 10;
904 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
906 *x = value / 5;
908 return 11;
911 return 0;
 914 /* Check whether REG is dead at FIRST.  This is done by searching ahead
915 for either the next use (i.e., reg is live), a death note, or a set of
916 reg. Don't just use dead_or_set_p() since reload does not always mark
917 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
918 can ignore subregs by extracting the actual register. BRC */
921 mcore_is_dead (rtx_insn *first, rtx reg)
923 rtx_insn *insn;
925 /* For mcore, subregs can't live independently of their parent regs. */
926 if (GET_CODE (reg) == SUBREG)
927 reg = SUBREG_REG (reg);
929 /* Dies immediately. */
930 if (dead_or_set_p (first, reg))
931 return 1;
933 /* Look for conclusive evidence of live/death, otherwise we have
934 to assume that it is live. */
935 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
937 if (JUMP_P (insn))
938 return 0; /* We lose track, assume it is alive. */
940 else if (CALL_P (insn))
 942 /* Calls might use it for the target address or for register parms. */
943 if (reg_referenced_p (reg, PATTERN (insn))
944 || find_reg_fusage (insn, USE, reg))
945 return 0;
946 else if (dead_or_set_p (insn, reg))
947 return 1;
949 else if (NONJUMP_INSN_P (insn))
951 if (reg_referenced_p (reg, PATTERN (insn)))
952 return 0;
953 else if (dead_or_set_p (insn, reg))
954 return 1;
958 /* No conclusive evidence either way, we cannot take the chance
959 that control flow hid the use from us -- "I'm not dead yet". */
960 return 0;
963 /* Count the number of ones in mask. */
966 mcore_num_ones (HOST_WIDE_INT mask)
968 /* A trick to count set bits recently posted on comp.compilers. */
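/* Each step below adds adjacent groups of bits in parallel: bit pairs
   first, then 2-bit sums into nibbles, nibbles into bytes, bytes into
   halfwords, and finally the two halfword sums; the trailing & 0xff
   keeps just the total (at most 32).  */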
969 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
970 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
971 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
972 mask = ((mask >> 8) + mask);
974 return (mask + (mask >> 16)) & 0xff;
977 /* Count the number of zeros in mask. */
980 mcore_num_zeros (HOST_WIDE_INT mask)
982 return 32 - mcore_num_ones (mask);
985 /* Determine byte being masked. */
988 mcore_byte_offset (unsigned int mask)
990 if (mask == 0x00ffffffL)
991 return 0;
992 else if (mask == 0xff00ffffL)
993 return 1;
994 else if (mask == 0xffff00ffL)
995 return 2;
996 else if (mask == 0xffffff00L)
997 return 3;
999 return -1;
1002 /* Determine halfword being masked. */
1005 mcore_halfword_offset (unsigned int mask)
1007 if (mask == 0x0000ffffL)
1008 return 0;
1009 else if (mask == 0xffff0000L)
1010 return 1;
1012 return -1;
1015 /* Output a series of bseti's corresponding to mask. */
1017 const char *
1018 mcore_output_bseti (rtx dst, int mask)
1020 rtx out_operands[2];
1021 int bit;
1023 out_operands[0] = dst;
1025 for (bit = 0; bit < 32; bit++)
1027 if ((mask & 0x1) == 0x1)
1029 out_operands[1] = GEN_INT (bit);
1031 output_asm_insn ("bseti\t%0,%1", out_operands);
1033 mask >>= 1;
1036 return "";
1039 /* Output a series of bclri's corresponding to mask. */
1041 const char *
1042 mcore_output_bclri (rtx dst, int mask)
1044 rtx out_operands[2];
1045 int bit;
1047 out_operands[0] = dst;
1049 for (bit = 0; bit < 32; bit++)
1051 if ((mask & 0x1) == 0x0)
1053 out_operands[1] = GEN_INT (bit);
1055 output_asm_insn ("bclri\t%0,%1", out_operands);
1058 mask >>= 1;
1061 return "";
 1064 /* Output a conditional move of two constants that are within +/- 1 of
 1065    each other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
1066 really worth the effort. */
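/* For illustration: if the two constants are 5 and 6, the loadable one is
   emitted with a movi and then a conditional increment or decrement
   (inct/incf or dect/decf) nudges the register to the other value in the
   arm of the condition that needs it.  */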
1068 const char *
1069 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1071 HOST_WIDE_INT load_value;
1072 HOST_WIDE_INT adjust_value;
1073 rtx out_operands[4];
1075 out_operands[0] = operands[0];
1077 /* Check to see which constant is loadable. */
1078 if (const_ok_for_mcore (INTVAL (operands[1])))
1080 out_operands[1] = operands[1];
1081 out_operands[2] = operands[2];
1083 else if (const_ok_for_mcore (INTVAL (operands[2])))
1085 out_operands[1] = operands[2];
1086 out_operands[2] = operands[1];
1088 /* Complement test since constants are swapped. */
1089 cmp_t = (cmp_t == 0);
1091 load_value = INTVAL (out_operands[1]);
1092 adjust_value = INTVAL (out_operands[2]);
1094 /* First output the test if folded into the pattern. */
1096 if (test)
1097 output_asm_insn (test, operands);
 1099 /* Load the constant - for now, only support constants that can be
 1100    generated with a single instruction.  Maybe add general inlinable
 1101    constants later (this will increase the number of patterns since the
 1102    instruction sequence has a different length attribute). */
1103 if (load_value >= 0 && load_value <= 127)
1104 output_asm_insn ("movi\t%0,%1", out_operands);
1105 else if (CONST_OK_FOR_M (load_value))
1106 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1107 else if (CONST_OK_FOR_N (load_value))
1108 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1110 /* Output the constant adjustment. */
1111 if (load_value > adjust_value)
1113 if (cmp_t)
1114 output_asm_insn ("decf\t%0", out_operands);
1115 else
1116 output_asm_insn ("dect\t%0", out_operands);
1118 else
1120 if (cmp_t)
1121 output_asm_insn ("incf\t%0", out_operands);
1122 else
1123 output_asm_insn ("inct\t%0", out_operands);
1126 return "";
 1129 /* Output the peephole for moving a constant that gets complemented and
 1130    then ANDed (i.e. combine the not and the and into andn).  BRC */
1132 const char *
1133 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1135 HOST_WIDE_INT x, y;
1136 rtx out_operands[3];
1137 const char * load_op;
1138 char buf[256];
1139 int trick_no;
1141 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1142 gcc_assert (trick_no == 2);
1144 out_operands[0] = operands[0];
1145 out_operands[1] = GEN_INT (x);
1146 out_operands[2] = operands[2];
1148 if (x >= 0 && x <= 127)
1149 load_op = "movi\t%0,%1";
1151 /* Try exact power of two. */
1152 else if (CONST_OK_FOR_M (x))
1153 load_op = "bgeni\t%0,%P1";
1155 /* Try exact power of two - 1. */
1156 else if (CONST_OK_FOR_N (x))
1157 load_op = "bmaski\t%0,%N1";
1159 else
1161 load_op = "BADMOVI-andn\t%0, %1";
1162 gcc_unreachable ();
1165 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1166 output_asm_insn (buf, out_operands);
1168 return "";
1171 /* Output an inline constant. */
1173 static const char *
1174 output_inline_const (machine_mode mode, rtx operands[])
1176 HOST_WIDE_INT x = 0, y = 0;
1177 int trick_no;
1178 rtx out_operands[3];
1179 char buf[256];
1180 char load_op[256];
1181 const char *dst_fmt;
1182 HOST_WIDE_INT value;
1184 value = INTVAL (operands[1]);
1186 trick_no = try_constant_tricks (value, &x, &y);
1187 /* lrw's are handled separately: Large inlinable constants never get
1188 turned into lrw's. Our caller uses try_constant_tricks to back
1189 off to an lrw rather than calling this routine. */
1190 gcc_assert (trick_no != 0);
1192 if (trick_no == 1)
1193 x = value;
1195 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1196 out_operands[0] = operands[0];
1197 out_operands[1] = GEN_INT (x);
1199 if (trick_no > 2)
1200 out_operands[2] = GEN_INT (y);
1202 /* Select dst format based on mode. */
1203 if (mode == DImode && (! TARGET_LITTLE_END))
1204 dst_fmt = "%R0";
1205 else
1206 dst_fmt = "%0";
1208 if (x >= 0 && x <= 127)
1209 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1211 /* Try exact power of two. */
1212 else if (CONST_OK_FOR_M (x))
1213 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1215 /* Try exact power of two - 1. */
1216 else if (CONST_OK_FOR_N (x))
1217 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1219 else
1221 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1222 gcc_unreachable ();
1225 switch (trick_no)
1227 case 1:
1228 strcpy (buf, load_op);
1229 break;
1230 case 2: /* not */
1231 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1232 break;
1233 case 3: /* add */
1234 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1235 break;
1236 case 4: /* sub */
1237 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1238 break;
1239 case 5: /* rsub */
1240 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1241 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1242 break;
1243 case 6: /* bseti */
1244 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1245 break;
1246 case 7: /* bclr */
1247 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1248 break;
1249 case 8: /* rotl */
1250 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1251 break;
1252 case 9: /* lsl */
1253 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1254 break;
1255 case 10: /* ixh */
1256 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1257 break;
1258 case 11: /* ixw */
1259 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1260 break;
1261 default:
1262 return "";
1265 output_asm_insn (buf, out_operands);
1267 return "";
 1270 /* Output a move of a value that is one word or smaller. */
1272 const char *
1273 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1274 machine_mode mode ATTRIBUTE_UNUSED)
1276 rtx dst = operands[0];
1277 rtx src = operands[1];
1279 if (GET_CODE (dst) == REG)
1281 if (GET_CODE (src) == REG)
1283 if (REGNO (src) == CC_REG) /* r-c */
1284 return "mvc\t%0";
1285 else
1286 return "mov\t%0,%1"; /* r-r*/
1288 else if (GET_CODE (src) == MEM)
1290 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1291 return "lrw\t%0,[%1]"; /* a-R */
1292 else
1293 switch (GET_MODE (src)) /* r-m */
1295 case SImode:
1296 return "ldw\t%0,%1";
1297 case HImode:
1298 return "ld.h\t%0,%1";
1299 case QImode:
1300 return "ld.b\t%0,%1";
1301 default:
1302 gcc_unreachable ();
1305 else if (GET_CODE (src) == CONST_INT)
1307 HOST_WIDE_INT x, y;
1309 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1310 return "movi\t%0,%1";
1311 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1312 return "bgeni\t%0,%P1\t// %1 %x1";
1313 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1314 return "bmaski\t%0,%N1\t// %1 %x1";
1315 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1316 return output_inline_const (SImode, operands); /* 1-2 insns */
1317 else
1318 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1320 else
1321 return "lrw\t%0, %1"; /* Into the literal pool. */
1323 else if (GET_CODE (dst) == MEM) /* m-r */
1324 switch (GET_MODE (dst))
1326 case SImode:
1327 return "stw\t%1,%0";
1328 case HImode:
1329 return "st.h\t%1,%0";
1330 case QImode:
1331 return "st.b\t%1,%0";
1332 default:
1333 gcc_unreachable ();
1336 gcc_unreachable ();
1339 /* Return a sequence of instructions to perform DI or DF move.
1340 Since the MCORE cannot move a DI or DF in one instruction, we have
1341 to take care when we see overlapping source and dest registers. */
1343 const char *
1344 mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1346 rtx dst = operands[0];
1347 rtx src = operands[1];
1349 if (GET_CODE (dst) == REG)
1351 if (GET_CODE (src) == REG)
1353 int dstreg = REGNO (dst);
1354 int srcreg = REGNO (src);
 1356 /* Ensure the second source is not overwritten. */
1357 if (srcreg + 1 == dstreg)
1358 return "mov %R0,%R1\n\tmov %0,%1";
1359 else
1360 return "mov %0,%1\n\tmov %R0,%R1";
1362 else if (GET_CODE (src) == MEM)
1364 rtx memexp = XEXP (src, 0);
1365 int dstreg = REGNO (dst);
1366 int basereg = -1;
1368 if (GET_CODE (memexp) == LABEL_REF)
1369 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1370 else if (GET_CODE (memexp) == REG)
1371 basereg = REGNO (memexp);
1372 else if (GET_CODE (memexp) == PLUS)
1374 if (GET_CODE (XEXP (memexp, 0)) == REG)
1375 basereg = REGNO (XEXP (memexp, 0));
1376 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1377 basereg = REGNO (XEXP (memexp, 1));
1378 else
1379 gcc_unreachable ();
1381 else
1382 gcc_unreachable ();
1384 /* ??? length attribute is wrong here. */
1385 if (dstreg == basereg)
1387 /* Just load them in reverse order. */
1388 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1390 /* XXX: alternative: move basereg to basereg+1
1391 and then fall through. */
1393 else
1394 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1396 else if (GET_CODE (src) == CONST_INT)
1398 if (TARGET_LITTLE_END)
1400 if (CONST_OK_FOR_I (INTVAL (src)))
1401 output_asm_insn ("movi %0,%1", operands);
1402 else if (CONST_OK_FOR_M (INTVAL (src)))
1403 output_asm_insn ("bgeni %0,%P1", operands);
1404 else if (CONST_OK_FOR_N (INTVAL (src)))
1405 output_asm_insn ("bmaski %0,%N1", operands);
1406 else
1407 gcc_unreachable ();
1409 if (INTVAL (src) < 0)
1410 return "bmaski %R0,32";
1411 else
1412 return "movi %R0,0";
1414 else
1416 if (CONST_OK_FOR_I (INTVAL (src)))
1417 output_asm_insn ("movi %R0,%1", operands);
1418 else if (CONST_OK_FOR_M (INTVAL (src)))
1419 output_asm_insn ("bgeni %R0,%P1", operands);
1420 else if (CONST_OK_FOR_N (INTVAL (src)))
1421 output_asm_insn ("bmaski %R0,%N1", operands);
1422 else
1423 gcc_unreachable ();
1425 if (INTVAL (src) < 0)
1426 return "bmaski %0,32";
1427 else
1428 return "movi %0,0";
1431 else
1432 gcc_unreachable ();
1434 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1435 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1436 else
1437 gcc_unreachable ();
1440 /* Predicates used by the templates. */
1443 mcore_arith_S_operand (rtx op)
1445 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1446 return 1;
1448 return 0;
1451 /* Expand insert bit field. BRC */
1454 mcore_expand_insv (rtx operands[])
1456 int width = INTVAL (operands[1]);
1457 int posn = INTVAL (operands[2]);
1458 int mask;
1459 rtx mreg, sreg, ereg;
1461 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1462 for width==1 must be removed. Look around line 368. This is something
1463 we really want the md part to do. */
1464 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1466 /* Do directly with bseti or bclri. */
1467 /* RBE: 2/97 consider only low bit of constant. */
1468 if ((INTVAL (operands[3]) & 1) == 0)
1470 mask = ~(1 << posn);
1471 emit_insn (gen_rtx_SET (operands[0],
1472 gen_rtx_AND (SImode, operands[0],
1473 GEN_INT (mask))));
1475 else
1477 mask = 1 << posn;
1478 emit_insn (gen_rtx_SET (operands[0],
1479 gen_rtx_IOR (SImode, operands[0],
1480 GEN_INT (mask))));
1483 return 1;
1486 /* Look at some bit-field placements that we aren't interested
1487 in handling ourselves, unless specifically directed to do so. */
1488 if (! TARGET_W_FIELD)
1489 return 0; /* Generally, give up about now. */
1491 if (width == 8 && posn % 8 == 0)
1492 /* Byte sized and aligned; let caller break it up. */
1493 return 0;
1495 if (width == 16 && posn % 16 == 0)
1496 /* Short sized and aligned; let caller break it up. */
1497 return 0;
1499 /* The general case - we can do this a little bit better than what the
1500 machine independent part tries. This will get rid of all the subregs
1501 that mess up constant folding in combine when working with relaxed
1502 immediates. */
1504 /* If setting the entire field, do it directly. */
1505 if (GET_CODE (operands[3]) == CONST_INT
1506 && INTVAL (operands[3]) == ((1 << width) - 1))
1508 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1509 emit_insn (gen_rtx_SET (operands[0],
1510 gen_rtx_IOR (SImode, operands[0], mreg)));
1511 return 1;
1514 /* Generate the clear mask. */
1515 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1517 /* Clear the field, to overlay it later with the source. */
1518 emit_insn (gen_rtx_SET (operands[0],
1519 gen_rtx_AND (SImode, operands[0], mreg)));
1521 /* If the source is constant 0, we've nothing to add back. */
1522 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1523 return 1;
1525 /* XXX: Should we worry about more games with constant values?
1526 We've covered the high profile: set/clear single-bit and many-bit
1527 fields. How often do we see "arbitrary bit pattern" constants? */
1528 sreg = copy_to_mode_reg (SImode, operands[3]);
1530 /* Extract src as same width as dst (needed for signed values). We
1531 always have to do this since we widen everything to SImode.
1532 We don't have to mask if we're shifting this up against the
 1533    MSB of the register (i.e. the shift will push out any hi-order
 1534    bits). */
1535 if (width + posn != (int) GET_MODE_SIZE (SImode))
1537 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1538 emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
1541 /* Insert source value in dest. */
1542 if (posn != 0)
1543 emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
1544 GEN_INT (posn))));
1546 emit_insn (gen_rtx_SET (operands[0],
1547 gen_rtx_IOR (SImode, operands[0], sreg)));
1549 return 1;
1552 /* ??? Block move stuff stolen from m88k. This code has not been
1553 verified for correctness. */
1555 /* Emit code to perform a block move. Choose the best method.
1557 OPERANDS[0] is the destination.
1558 OPERANDS[1] is the source.
1559 OPERANDS[2] is the size.
1560 OPERANDS[3] is the alignment safe to use. */
1562 /* Emit code to perform a block move with an offset sequence of ldw/st
1563 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1564 known constants. DEST and SRC are registers. OFFSET is the known
1565 starting point for the output pattern. */
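/* For example, a 6-byte copy with 4-byte alignment comes out roughly as
   "ldw at 0, ld.h at 4, stw at 0, st.h at 4": the two-entry TEMP/ACTIVE
   arrays below keep one load in flight ahead of each store.  */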
1567 static const machine_mode mode_from_align[] =
1569 VOIDmode, QImode, HImode, VOIDmode, SImode,
1572 static void
1573 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1575 rtx temp[2];
1576 machine_mode mode[2];
1577 int amount[2];
1578 bool active[2];
1579 int phase = 0;
1580 int next;
1581 int offset_ld = 0;
1582 int offset_st = 0;
1583 rtx x;
1585 x = XEXP (dst_mem, 0);
1586 if (!REG_P (x))
1588 x = force_reg (Pmode, x);
1589 dst_mem = replace_equiv_address (dst_mem, x);
1592 x = XEXP (src_mem, 0);
1593 if (!REG_P (x))
1595 x = force_reg (Pmode, x);
1596 src_mem = replace_equiv_address (src_mem, x);
1599 active[0] = active[1] = false;
1603 next = phase;
1604 phase ^= 1;
1606 if (size > 0)
1608 int next_amount;
1610 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1611 next_amount = MIN (next_amount, align);
1613 amount[next] = next_amount;
1614 mode[next] = mode_from_align[next_amount];
1615 temp[next] = gen_reg_rtx (mode[next]);
1617 x = adjust_address (src_mem, mode[next], offset_ld);
1618 emit_insn (gen_rtx_SET (temp[next], x));
1620 offset_ld += next_amount;
1621 size -= next_amount;
1622 active[next] = true;
1625 if (active[phase])
1627 active[phase] = false;
1629 x = adjust_address (dst_mem, mode[phase], offset_st);
1630 emit_insn (gen_rtx_SET (x, temp[phase]));
1632 offset_st += amount[phase];
1635 while (active[next]);
1638 bool
1639 mcore_expand_block_move (rtx *operands)
1641 HOST_WIDE_INT align, bytes, max;
1643 if (GET_CODE (operands[2]) != CONST_INT)
1644 return false;
1646 bytes = INTVAL (operands[2]);
1647 align = INTVAL (operands[3]);
1649 if (bytes <= 0)
1650 return false;
1651 if (align > 4)
1652 align = 4;
1654 switch (align)
1656 case 4:
1657 if (bytes & 1)
1658 max = 4*4;
1659 else if (bytes & 3)
1660 max = 8*4;
1661 else
1662 max = 16*4;
1663 break;
1664 case 2:
1665 max = 4*2;
1666 break;
1667 case 1:
1668 max = 4*1;
1669 break;
1670 default:
1671 gcc_unreachable ();
1674 if (bytes <= max)
1676 block_move_sequence (operands[0], operands[1], bytes, align);
1677 return true;
1680 return false;
1684 /* Code to generate prologue and epilogue sequences. */
1685 static int number_of_regs_before_varargs;
1687 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1688 for a varargs function. */
1689 static int current_function_anonymous_args;
1691 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1692 #define STORE_REACH (64) /* Maximum displace of word store + 4. */
1693 #define ADDI_REACH (32) /* Maximum addi operand. */
1695 static void
1696 layout_mcore_frame (struct mcore_frame * infp)
1698 int n;
1699 unsigned int i;
1700 int nbytes;
1701 int regarg;
1702 int localregarg;
1703 int outbounds;
1704 unsigned int growths;
1705 int step;
1707 /* Might have to spill bytes to re-assemble a big argument that
1708 was passed partially in registers and partially on the stack. */
1709 nbytes = crtl->args.pretend_args_size;
 1711 /* Determine how much space is needed for spilled anonymous args (e.g., stdarg). */
1712 if (current_function_anonymous_args)
1713 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1715 infp->arg_size = nbytes;
 1717 /* How much space we need to save the non-volatile registers we stomp on. */
1718 infp->reg_mask = calc_live_regs (& n);
1719 infp->reg_size = n * 4;
1721 /* And the rest of it... locals and space for overflowed outbounds. */
1722 infp->local_size = get_frame_size ();
1723 infp->outbound_size = crtl->outgoing_args_size;
1725 /* Make sure we have a whole number of words for the locals. */
1726 if (infp->local_size % STACK_BYTES)
1727 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
 1729 /* The only thing we know we have to pad is the outbound space, since
 1730    we've aligned our locals assuming that the base of the locals is aligned. */
1731 infp->pad_local = 0;
1732 infp->pad_reg = 0;
1733 infp->pad_outbound = 0;
1734 if (infp->outbound_size % STACK_BYTES)
1735 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1737 /* Now we see how we want to stage the prologue so that it does
1738 the most appropriate stack growth and register saves to either:
1739 (1) run fast,
1740 (2) reduce instruction space, or
1741 (3) reduce stack space. */
1742 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1743 infp->growth[i] = 0;
1745 regarg = infp->reg_size + infp->arg_size;
1746 localregarg = infp->local_size + regarg;
1747 outbounds = infp->outbound_size + infp->pad_outbound;
1748 growths = 0;
 1750 /* XXX: Consider a variant that takes localregarg + outbound into account too! */
 1752 /* Frame of <= 32 bytes, and an stm would save at most 2 registers:
 1753    use stw's with offsets and buy the frame in one shot. */
1754 if (localregarg <= ADDI_REACH
1755 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1757 /* Make sure we'll be aligned. */
1758 if (localregarg % STACK_BYTES)
1759 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1761 step = localregarg + infp->pad_reg;
1762 infp->reg_offset = infp->local_size;
1764 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1766 step += outbounds;
1767 infp->reg_offset += outbounds;
1768 outbounds = 0;
1771 infp->arg_offset = step - 4;
1772 infp->growth[growths++] = step;
1773 infp->reg_growth = growths;
1774 infp->local_growth = growths;
1776 /* If we haven't already folded it in. */
1777 if (outbounds)
1778 infp->growth[growths++] = outbounds;
1780 goto finish;
1783 /* Frame can't be done with a single subi, but can be done with 2
1784 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1785 shift some of the stack purchase into the first subi, so both are
1786 single instructions. */
1787 if (localregarg <= STORE_REACH
1788 && (infp->local_size > ADDI_REACH)
1789 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1791 int all;
1793 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1794 if (localregarg % STACK_BYTES)
1795 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1797 all = localregarg + infp->pad_reg + infp->pad_local;
1798 step = ADDI_REACH; /* As much up front as we can. */
1799 if (step > all)
1800 step = all;
1802 /* XXX: Consider whether step will still be aligned; we believe so. */
1803 infp->arg_offset = step - 4;
1804 infp->growth[growths++] = step;
1805 infp->reg_growth = growths;
1806 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1807 all -= step;
1809 /* Can we fold in any space required for outbounds? */
1810 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1812 all += outbounds;
1813 outbounds = 0;
1816 /* Get the rest of the locals in place. */
1817 step = all;
1818 infp->growth[growths++] = step;
1819 infp->local_growth = growths;
1820 all -= step;
1822 gcc_assert (all == 0);
1824 /* Finish off if we need to do so. */
1825 if (outbounds)
1826 infp->growth[growths++] = outbounds;
1828 goto finish;
1831 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1832 Then we buy the rest of the frame in 1 or 2 steps depending on
1833 whether we need a frame pointer. */
1834 if ((regarg % STACK_BYTES) == 0)
1836 infp->growth[growths++] = regarg;
1837 infp->reg_growth = growths;
1838 infp->arg_offset = regarg - 4;
1839 infp->reg_offset = 0;
1841 if (infp->local_size % STACK_BYTES)
1842 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1844 step = infp->local_size + infp->pad_local;
1846 if (!frame_pointer_needed)
1848 step += outbounds;
1849 outbounds = 0;
1852 infp->growth[growths++] = step;
1853 infp->local_growth = growths;
1855 /* If there's any left to be done. */
1856 if (outbounds)
1857 infp->growth[growths++] = outbounds;
1859 goto finish;
1862 /* XXX: optimizations that we'll want to play with....
1863 -- regarg is not aligned, but it's a small number of registers;
1864 use some of localsize so that regarg is aligned and then
1865 save the registers. */
1867 /* Simple encoding; plods down the stack buying the pieces as it goes.
1868 -- does not optimize space consumption.
1869 -- does not attempt to optimize instruction counts.
1870 -- but it is safe for all alignments. */
1871 if (regarg % STACK_BYTES != 0)
1872 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1874 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1875 infp->reg_growth = growths;
1876 infp->arg_offset = infp->growth[0] - 4;
1877 infp->reg_offset = 0;
1879 if (frame_pointer_needed)
1881 if (infp->local_size % STACK_BYTES != 0)
1882 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1884 infp->growth[growths++] = infp->local_size + infp->pad_local;
1885 infp->local_growth = growths;
1887 infp->growth[growths++] = outbounds;
1889 else
1891 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1892 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1894 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1895 infp->local_growth = growths;
 1898 /* Anything else that we've forgotten?  Plus a few consistency checks. */
1899 finish:
1900 gcc_assert (infp->reg_offset >= 0);
1901 gcc_assert (growths <= MAX_STACK_GROWS);
1903 for (i = 0; i < growths; i++)
1904 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1907 /* Define the offset between two registers, one to be eliminated, and
1908 the other its replacement, at the start of a routine. */
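/* Roughly, with the stack growing downwards, the offsets computed below
   say: the saved non-volatile registers and the locals (plus padding)
   lie between the arg pointer and the frame pointer, and the outgoing
   argument overflow area (plus padding) lies between the frame pointer
   and the stack pointer.  */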
1911 mcore_initial_elimination_offset (int from, int to)
1913 int above_frame;
1914 int below_frame;
1915 struct mcore_frame fi;
1917 layout_mcore_frame (& fi);
1919 /* fp to ap */
1920 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1921 /* sp to fp */
1922 below_frame = fi.outbound_size + fi.pad_outbound;
1924 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1925 return above_frame;
1927 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1928 return above_frame + below_frame;
1930 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1931 return below_frame;
1933 gcc_unreachable ();
1936 /* Keep track of some information about varargs for the prolog. */
1938 static void
1939 mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1940 machine_mode mode, tree type,
1941 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1942 int second_time ATTRIBUTE_UNUSED)
1944 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1946 current_function_anonymous_args = 1;
1948 /* We need to know how many argument registers are used before
1949 the varargs start, so that we can push the remaining argument
1950 registers during the prologue. */
1951 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1953 /* There is a bug somewhere in the arg handling code.
 1954    Until I can find it, this workaround always pushes the
1955 last named argument onto the stack. */
1956 number_of_regs_before_varargs = *args_so_far;
1958 /* The last named argument may be split between argument registers
1959 and the stack. Allow for this here. */
1960 if (number_of_regs_before_varargs > NPARM_REGS)
1961 number_of_regs_before_varargs = NPARM_REGS;
1964 void
1965 mcore_expand_prolog (void)
1967 struct mcore_frame fi;
1968 int space_allocated = 0;
1969 int growth = 0;
1971 /* Find out what we're doing. */
1972 layout_mcore_frame (&fi);
1974 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1975 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1977 if (TARGET_CG_DATA)
1979 /* Emit a symbol for this routine's frame size. */
1980 rtx x;
1982 x = DECL_RTL (current_function_decl);
1984 gcc_assert (GET_CODE (x) == MEM);
1986 x = XEXP (x, 0);
1988 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1990 free (mcore_current_function_name);
1992 mcore_current_function_name = xstrdup (XSTR (x, 0));
1994 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1996 if (cfun->calls_alloca)
1997 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1999 /* 970425: RBE:
 2000    We're looking at how the 8-byte alignment affects stack layout
2001 and where we had to pad things. This emits information we can
2002 extract which tells us about frame sizes and the like. */
2003 fprintf (asm_out_file,
2004 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2005 mcore_current_function_name,
2006 fi.arg_size, fi.reg_size, fi.reg_mask,
2007 fi.local_size, fi.outbound_size,
2008 frame_pointer_needed);
2011 if (mcore_naked_function_p ())
2012 return;
2014 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2015 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2017 /* If we have a parameter passed partially in regs and partially in memory,
2018 the registers will have been stored to memory already in function.c. So
2019 we only need to do something here for varargs functions. */
2020 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2022 int offset;
2023 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2024 int remaining = fi.arg_size;
2026 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2028 emit_insn (gen_movsi
2029 (gen_rtx_MEM (SImode,
2030 plus_constant (Pmode, stack_pointer_rtx,
2031 offset)),
2032 gen_rtx_REG (SImode, rn)));
2036 /* Do we need another stack adjustment before we do the register saves? */
2037 if (growth < fi.reg_growth)
2038 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2040 if (fi.reg_size != 0)
2042 int i;
2043 int offs = fi.reg_offset;
2045 for (i = 15; i >= 0; i--)
2047 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2049 int first_reg = 15;
2051 while (fi.reg_mask & (1 << first_reg))
2052 first_reg--;
2053 first_reg++;
2055 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2056 gen_rtx_REG (SImode, first_reg),
2057 GEN_INT (16 - first_reg)));
2059 i -= (15 - first_reg);
2060 offs += (16 - first_reg) * 4;
2062 else if (fi.reg_mask & (1 << i))
2064 emit_insn (gen_movsi
2065 (gen_rtx_MEM (SImode,
2066 plus_constant (Pmode, stack_pointer_rtx,
2067 offs)),
2068 gen_rtx_REG (SImode, i)));
2069 offs += 4;
2074 /* Figure the locals + outbounds. */
2075 if (frame_pointer_needed)
2077 /* If we haven't already purchased to 'fp'. */
2078 if (growth < fi.local_growth)
2079 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2081 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2083 /* ... and then go any remaining distance for outbounds, etc. */
2084 if (fi.growth[growth])
2085 output_stack_adjust (-1, fi.growth[growth++]);
2087 else
2089 if (growth < fi.local_growth)
2090 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2091 if (fi.growth[growth])
2092 output_stack_adjust (-1, fi.growth[growth++]);
2096 void
2097 mcore_expand_epilog (void)
2099 struct mcore_frame fi;
2100 int i;
2101 int offs;
2102 int growth = MAX_STACK_GROWS - 1 ;
2105 /* Find out what we're doing. */
2106 layout_mcore_frame(&fi);
2108 if (mcore_naked_function_p ())
2109 return;
2111 /* If we had a frame pointer, restore the sp from that. */
2112 if (frame_pointer_needed)
2114 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2115 growth = fi.local_growth - 1;
2117 else
2119 /* XXX: while loop should accumulate and do a single sell. */
2120 while (growth >= fi.local_growth)
2122 if (fi.growth[growth] != 0)
2123 output_stack_adjust (1, fi.growth[growth]);
2124 growth--;
 2128 /* Make sure we've shrunk the stack back to the point where the registers
2129 were laid down. This is typically 0/1 iterations. Then pull the
2130 register save information back off the stack. */
2131 while (growth >= fi.reg_growth)
2132 output_stack_adjust ( 1, fi.growth[growth--]);
2134 offs = fi.reg_offset;
2136 for (i = 15; i >= 0; i--)
2138 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2140 int first_reg;
2142 /* Find the starting register. */
2143 first_reg = 15;
2145 while (fi.reg_mask & (1 << first_reg))
2146 first_reg--;
2148 first_reg++;
2150 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2151 gen_rtx_MEM (SImode, stack_pointer_rtx),
2152 GEN_INT (16 - first_reg)));
2154 i -= (15 - first_reg);
2155 offs += (16 - first_reg) * 4;
2157 else if (fi.reg_mask & (1 << i))
2159 emit_insn (gen_movsi
2160 (gen_rtx_REG (SImode, i),
2161 gen_rtx_MEM (SImode,
2162 plus_constant (Pmode, stack_pointer_rtx,
2163 offs))));
2164 offs += 4;
2168 /* Give back anything else. */
2169 /* XXX: Should accumulate total and then give it back. */
2170 while (growth >= 0)
2171 output_stack_adjust ( 1, fi.growth[growth--]);
2174 /* This code is borrowed from the SH port. */
2176 /* The MCORE cannot load a large constant into a register, constants have to
2177 come from a pc relative load. The reference of a pc relative load
2178 instruction must be less than 1k in front of the instruction. This
2179 means that we often have to dump a constant inside a function, and
2180 generate code to branch around it.
2182 It is important to minimize this, since the branches will slow things
2183 down and make things bigger.
2185 Worst case code looks like:
2187 lrw L1,r0
2188 br L2
2189 align
2190 L1: .long value
2191 L2:
2194 lrw L3,r0
2195 br L4
2196 align
2197 L3: .long value
2198 L4:
2201 We fix this by performing a scan before scheduling, which notices which
2202 instructions need to have their operands fetched from the constant table
2203 and builds the table.
2205 The algorithm is:
2207 scan, find an instruction which needs a pcrel move. Look forward, find the
2208 last barrier which is within MAX_COUNT bytes of the requirement.
2209 If there isn't one, make one. Process all the instructions between
2210 the find and the barrier.
2212 In the above example, we can tell that L3 is within 1k of L1, so
2213 the first move can be shrunk from the 2 insn+constant sequence into
2214 just 1 insn, and the constant moved to L3 to make:
2216 lrw L1,r0
2218 lrw L3,r0
2219 bra L4
2220 align
2221 L3:.long value
2222 L4:.long value
2224 Then the second move becomes the target for the shortening process. */
2226 typedef struct
2228 rtx value; /* Value in table. */
2229 rtx label; /* Label of value. */
2230 } pool_node;
2232 /* The maximum number of constants that can fit into one pool, since
2233 the pc relative range is 0...1020 bytes and constants are at least 4
2234 bytes long. We subtract 4 from the range to allow for the case where
2235 we need to add a branch/align before the constant pool. */
2237 #define MAX_COUNT 1016
2238 #define MAX_POOL_SIZE (MAX_COUNT/4)
2239 static pool_node pool_vector[MAX_POOL_SIZE];
2240 static int pool_size;
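/* With MAX_COUNT == 1016 and 4-byte constants, the table holds at most
   MAX_COUNT/4 == 254 entries.  Below is a minimal sketch (not part of the
   build) of the scan described above, using the pool_vector/pool_size
   table.  The helpers insn_needs_pool_load, insn_constant,
   insn_length_in_bytes and dump_pool_before are hypothetical stand-ins
   for the real bookkeeping; this port now leaves literal pool generation
   to the assembler (see mcore_reorg below).  */
#if 0
static void
sketch_build_constant_pools (void)
{
  rtx_insn *insn;
  rtx_insn *last_barrier = NULL;
  int bytes_since_pool = 0;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	last_barrier = insn;

      if (insn_needs_pool_load (insn))		/* Hypothetical test.  */
	{
	  pool_vector[pool_size].value = insn_constant (insn);
	  pool_vector[pool_size].label = gen_label_rtx ();
	  pool_size++;
	}

      bytes_since_pool += insn_length_in_bytes (insn);	/* Hypothetical.  */

      /* Once the pending constants risk drifting out of pc-relative range,
	 dump the pool at the last barrier seen, creating a branch-around
	 barrier if none is close enough.  */
      if (pool_size > 0 && bytes_since_pool >= MAX_COUNT)
	{
	  dump_pool_before (last_barrier ? last_barrier : insn);
	  pool_size = 0;
	  bytes_since_pool = 0;
	}
    }
}
#endif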
2242 /* Dump out any constants accumulated in the final pass. These
2243 will only be labels. */
2245 const char *
2246 mcore_output_jump_label_table (void)
2248 int i;
2250 if (pool_size)
2252 fprintf (asm_out_file, "\t.align 2\n");
2254 for (i = 0; i < pool_size; i++)
2256 pool_node * p = pool_vector + i;
2258 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2260 output_asm_insn (".long %0", &p->value);
2263 pool_size = 0;
2266 return "";
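/* For example, a pool holding two label constants would be dumped as
   something like the following (label numbers are arbitrary and the exact
   label spelling comes from targetm.asm_out.internal_label):

	.align 2
   L47:
	.long	L12
   L48:
	.long	L31
*/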
2269 /* Check whether insn is a candidate for a conditional. */
2271 static cond_type
2272 is_cond_candidate (rtx insn)
2274 /* The only things we conditionalize are those that can be directly
2275 changed into a conditional. Only bother with SImode items. If
2276 we wanted to be a little more aggressive, we could also do other
2277 modes such as DImode with reg-reg move or load 0. */
2278 if (NONJUMP_INSN_P (insn))
2280 rtx pat = PATTERN (insn);
2281 rtx src, dst;
2283 if (GET_CODE (pat) != SET)
2284 return COND_NO;
2286 dst = XEXP (pat, 0);
2288 if ((GET_CODE (dst) != REG &&
2289 GET_CODE (dst) != SUBREG) ||
2290 GET_MODE (dst) != SImode)
2291 return COND_NO;
2293 src = XEXP (pat, 1);
2295 if ((GET_CODE (src) == REG ||
2296 (GET_CODE (src) == SUBREG &&
2297 GET_CODE (SUBREG_REG (src)) == REG)) &&
2298 GET_MODE (src) == SImode)
2299 return COND_MOV_INSN;
2300 else if (GET_CODE (src) == CONST_INT &&
2301 INTVAL (src) == 0)
2302 return COND_CLR_INSN;
2303 else if (GET_CODE (src) == PLUS &&
2304 (GET_CODE (XEXP (src, 0)) == REG ||
2305 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2306 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2307 GET_MODE (XEXP (src, 0)) == SImode &&
2308 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2309 INTVAL (XEXP (src, 1)) == 1)
2310 return COND_INC_INSN;
2311 else if (((GET_CODE (src) == MINUS &&
2312 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2313 INTVAL( XEXP (src, 1)) == 1) ||
2314 (GET_CODE (src) == PLUS &&
2315 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2316 INTVAL (XEXP (src, 1)) == -1)) &&
2317 (GET_CODE (XEXP (src, 0)) == REG ||
2318 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2319 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2320 GET_MODE (XEXP (src, 0)) == SImode)
2321 return COND_DEC_INSN;
2323 /* Some insns that we don't bother with:
2324 (set (rx:DI) (ry:DI))
2325 (set (rx:DI) (const_int 0))
2326 */
2329 else if (JUMP_P (insn)
2330 && GET_CODE (PATTERN (insn)) == SET
2331 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2332 return COND_BRANCH_INSN;
2334 return COND_NO;
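/* Some concrete examples of the classification above (illustrative RTL
   only; register and label numbers are arbitrary):

     (set (reg:SI 2) (reg:SI 3))                           -> COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                        -> COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   -> COND_INC_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1)))  -> COND_DEC_INSN
     (set (pc) (label_ref L99))                            -> COND_BRANCH_INSN

   Anything else, including DImode moves, yields COND_NO.  */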
2337 /* Emit a conditional version of insn and replace the old insn with the
2338 new one. Return the new insn if emitted. */
2340 static rtx_insn *
2341 emit_new_cond_insn (rtx insn, int cond)
2343 rtx c_insn = 0;
2344 rtx pat, dst, src;
2345 cond_type num;
2347 if ((num = is_cond_candidate (insn)) == COND_NO)
2348 return NULL;
2350 pat = PATTERN (insn);
2352 if (NONJUMP_INSN_P (insn))
2354 dst = SET_DEST (pat);
2355 src = SET_SRC (pat);
2357 else
2359 dst = JUMP_LABEL (insn);
2360 src = NULL_RTX;
2363 switch (num)
2365 case COND_MOV_INSN:
2366 case COND_CLR_INSN:
2367 if (cond)
2368 c_insn = gen_movt0 (dst, src, dst);
2369 else
2370 c_insn = gen_movt0 (dst, dst, src);
2371 break;
2373 case COND_INC_INSN:
2374 if (cond)
2375 c_insn = gen_incscc (dst, dst);
2376 else
2377 c_insn = gen_incscc_false (dst, dst);
2378 break;
2380 case COND_DEC_INSN:
2381 if (cond)
2382 c_insn = gen_decscc (dst, dst);
2383 else
2384 c_insn = gen_decscc_false (dst, dst);
2385 break;
2387 case COND_BRANCH_INSN:
2388 if (cond)
2389 c_insn = gen_branch_true (dst);
2390 else
2391 c_insn = gen_branch_false (dst);
2392 break;
2394 default:
2395 return NULL;
2398 /* Only copy the notes if they exist. */
2399 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2401 /* We really don't need to bother with the notes and links at this
2402 point, but go ahead and save the notes. This will help is_dead()
2403 when applying peepholes (links don't matter since they are not
2404 used any more beyond this point for the mcore). */
2405 REG_NOTES (c_insn) = REG_NOTES (insn);
2408 if (num == COND_BRANCH_INSN)
2410 /* For jumps, we need to be a little bit careful and emit the new jump
2411 before the old one and to update the use count for the target label.
2412 This way, the barrier following the old (uncond) jump will get
2413 deleted, but the label won't. */
2414 c_insn = emit_jump_insn_before (c_insn, insn);
2416 ++ LABEL_NUSES (dst);
2418 JUMP_LABEL (c_insn) = dst;
2420 else
2421 c_insn = emit_insn_after (c_insn, insn);
2423 delete_insn (insn);
2425 return as_a <rtx_insn *> (c_insn);
2428 /* Attempt to change a basic block into a series of conditional insns. This
2429 works by taking the branch at the end of the 1st block and scanning for the
2430 end of the 2nd block. If all instructions in the 2nd block have cond.
2431 versions and the label at the start of block 3 is the same as the target
2432 from the branch at block 1, then conditionalize all insns in block 2 using
2433 the inverse condition of the branch at block 1. (Note I'm bending the
2434 definition of basic block here.)
2436 e.g., change:
2438 bt L2 <-- end of block 1 (delete)
2439 mov r7,r8
2440 addu r7,1
2441 br L3 <-- end of block 2
2443 L2: ... <-- start of block 3 (NUSES==1)
2444 L3: ...
2446 to:
2448 movf r7,r8
2449 incf r7
2450 bf L3
2452 L3: ...
2454 we can delete the L2 label if NUSES==1 and re-apply the optimization
2455 starting at the last instruction of block 2. This may allow an entire
2456 if-then-else statement to be conditionalized. BRC */
2457 static rtx_insn *
2458 conditionalize_block (rtx_insn *first)
2460 rtx_insn *insn;
2461 rtx br_pat;
2462 rtx_insn *end_blk_1_br = 0;
2463 rtx_insn *end_blk_2_insn = 0;
2464 rtx_insn *start_blk_3_lab = 0;
2465 int cond;
2466 int br_lab_num;
2467 int blk_size = 0;
2470 /* Check that the first insn is a candidate conditional jump. This is
2471 the one that we'll eliminate. If not, advance to the next insn to
2472 try. */
2473 if (! JUMP_P (first)
2474 || GET_CODE (PATTERN (first)) != SET
2475 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2476 return NEXT_INSN (first);
2478 /* Extract some information we need. */
2479 end_blk_1_br = first;
2480 br_pat = PATTERN (end_blk_1_br);
2482 /* Complement the condition since we use the reverse cond. for the insns. */
2483 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2485 /* Determine what kind of branch we have. */
2486 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2488 /* A normal branch, so extract label out of first arm. */
2489 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2491 else
2493 /* An inverse branch, so extract the label out of the 2nd arm
2494 and complement the condition. */
2495 cond = (cond == 0);
2496 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2499 /* Scan forward for the start of block 2: it must start with a
2500 label and that label must be the same as the branch target
2501 label from block 1. We don't care about whether block 2 actually
2502 ends with a branch or a label (an uncond. branch is
2503 conditionalizable). */
2504 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2506 enum rtx_code code;
2508 code = GET_CODE (insn);
2510 /* Look for the label at the start of block 3. */
2511 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2512 break;
2514 /* Skip barriers, notes, and conditionalizable insns. If the
2515 insn is not conditionalizable or makes this optimization fail,
2516 just return the next insn so we can start over from that point. */
2517 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2518 return NEXT_INSN (insn);
2520 /* Remember the last real insn before the label (i.e. end of block 2). */
2521 if (code == JUMP_INSN || code == INSN)
2523 blk_size ++;
2524 end_blk_2_insn = insn;
2528 if (!insn)
2529 return insn;
2531 /* It is possible for this optimization to slow performance if the blocks
2532 are long. This really depends upon whether the branch is likely taken
2533 or not. If the branch is taken, we slow performance in many cases. But,
2534 if the branch is not taken, we always help performance (for a single
2535 block, but for a double block (i.e. when the optimization is re-applied)
2536 this is not true since the 'right thing' depends on the overall length of
2537 the collapsed block). As a compromise, don't apply this optimization on
2538 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2539 The best threshold depends on the latencies of the instructions (i.e.,
2540 the branch penalty). */
2541 if (optimize > 1 && blk_size > 2)
2542 return insn;
2544 /* At this point, we've found the start of block 3 and we know that
2545 it is the destination of the branch from block 1. Also, all
2546 instructions in block 2 are conditionalizable. So, apply the
2547 conditionalization and delete the branch. */
2548 start_blk_3_lab = insn;
2550 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2551 insn = NEXT_INSN (insn))
2553 rtx_insn *newinsn;
2555 if (insn->deleted ())
2556 continue;
2558 /* Try to form a conditional variant of the instruction and emit it. */
2559 if ((newinsn = emit_new_cond_insn (insn, cond)))
2561 if (end_blk_2_insn == insn)
2562 end_blk_2_insn = newinsn;
2564 insn = newinsn;
2568 /* Note whether we will delete the label starting blk 3 when the jump
2569 gets deleted. If so, we want to re-apply this optimization at the
2570 last real instruction right before the label. */
2571 if (LABEL_NUSES (start_blk_3_lab) == 1)
2573 start_blk_3_lab = 0;
2576 /* ??? We probably should redistribute the death notes for this insn, esp.
2577 the death of cc, but it doesn't really matter this late in the game.
2578 The peepholes all use is_dead() which will find the correct death
2579 regardless of whether there is a note. */
2580 delete_insn (end_blk_1_br);
2582 if (! start_blk_3_lab)
2583 return end_blk_2_insn;
2585 /* Return the insn right after the label at the start of block 3. */
2586 return NEXT_INSN (start_blk_3_lab);
2589 /* Apply the conditionalization of blocks optimization. This is the
2590 outer loop that traverses through the insns scanning for a branch
2591 that signifies an opportunity to apply the optimization. Note that
2592 this optimization is applied late. If we could apply it earlier,
2593 say before cse 2, it may expose more optimization opportunities.
2594 But the payback probably isn't really worth the effort (we'd have
2595 to update all reg/flow/notes/links/etc to make it work - and stick it
2596 in before cse 2). */
2598 static void
2599 conditionalize_optimization (void)
2601 rtx_insn *insn;
2603 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2604 continue;
2607 /* This is to handle loads from the constant pool. */
2609 static void
2610 mcore_reorg (void)
2612 /* Reset this variable. */
2613 current_function_anonymous_args = 0;
2615 if (optimize == 0)
2616 return;
2618 /* Conditionalize blocks where we can. */
2619 conditionalize_optimization ();
2621 /* Literal pool generation is now pushed off until the assembler. */
2625 /* Return true if X is something that can be moved directly into r15. */
2627 bool
2628 mcore_r15_operand_p (rtx x)
2630 switch (GET_CODE (x))
2632 case CONST_INT:
2633 return mcore_const_ok_for_inline (INTVAL (x));
2635 case REG:
2636 case SUBREG:
2637 case MEM:
2638 return 1;
2640 default:
2641 return 0;
2645 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2646 directly move X into it, use r1-r14 as a temporary. */
2648 enum reg_class
2649 mcore_secondary_reload_class (enum reg_class rclass,
2650 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2652 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2653 && !mcore_r15_operand_p (x))
2654 return LRW_REGS;
2655 return NO_REGS;
2658 /* Return the reg_class to use when reloading the rtx X into the class
2659 RCLASS. If X is too complex to move directly into r15, prefer to
2660 use LRW_REGS instead. */
2662 enum reg_class
2663 mcore_reload_class (rtx x, enum reg_class rclass)
2665 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2666 return LRW_REGS;
2668 return rclass;
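/* For example, reloading a constant that mcore_const_ok_for_inline rejects
   (say 0x12345, assuming that value is indeed not inlinable) into a class
   containing r15 yields LRW_REGS, so the constant is first loaded with an
   lrw into one of r1-r14 and only then copied into r15.  */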
2671 /* Return nonzero if a pair of reg/subreg rtx's actually refer to the same
2672 register. Note that the current version doesn't worry about whether
2673 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2674 in r2, which matches an SImode in r2). Might think in the future about whether
2675 we want to be able to say something about modes. */
2677 int
2678 mcore_is_same_reg (rtx x, rtx y)
2680 /* Strip any and all of the subreg wrappers. */
2681 while (GET_CODE (x) == SUBREG)
2682 x = SUBREG_REG (x);
2684 while (GET_CODE (y) == SUBREG)
2685 y = SUBREG_REG (y);
2687 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2688 return 1;
2690 return 0;
2693 static void
2694 mcore_option_override (void)
2696 /* Only the m340 supports little endian code. */
2697 if (TARGET_LITTLE_END && ! TARGET_M340)
2698 target_flags |= MASK_M340;
2702 /* Compute the number of word sized registers needed to
2703 hold a function argument of mode MODE and type TYPE. */
2705 int
2706 mcore_num_arg_regs (machine_mode mode, const_tree type)
2708 int size;
2710 if (targetm.calls.must_pass_in_stack (mode, type))
2711 return 0;
2713 if (type && mode == BLKmode)
2714 size = int_size_in_bytes (type);
2715 else
2716 size = GET_MODE_SIZE (mode);
2718 return ROUND_ADVANCE (size);
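/* Worked examples, assuming UNITS_PER_WORD == 4 and that ROUND_ADVANCE
   rounds a byte count up to whole words: an SImode argument needs 1
   register, a 6-byte BLKmode structure needs ROUND_ADVANCE (6) == 2, and
   anything that must_pass_in_stack (e.g. a variable-sized type) needs 0.  */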
2721 static rtx
2722 handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
2724 int size;
2726 /* The MCore ABI defines that a structure whose size is not a whole multiple
2727 of words is passed packed into registers (or spilled onto the stack if
2728 not enough registers are available) with the last few bytes of the
2729 structure being packed, left-justified, into the last register/stack slot.
2730 GCC handles this correctly if the last word is in a stack slot, but we
2731 have to generate a special, PARALLEL RTX if the last word is in an
2732 argument register. */
2733 if (type
2734 && TYPE_MODE (type) == BLKmode
2735 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2736 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2737 && (size % UNITS_PER_WORD != 0)
2738 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2740 rtx arg_regs [NPARM_REGS];
2741 int nregs;
2742 rtx result;
2743 rtvec rtvec;
2745 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2747 arg_regs [nregs] =
2748 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2749 GEN_INT (nregs * UNITS_PER_WORD));
2750 nregs ++;
2753 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2754 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2755 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2756 arg_regs[3], arg_regs[4], arg_regs[5]);
2758 result = gen_rtx_PARALLEL (mode, rtvec);
2759 return result;
2762 return gen_rtx_REG (mode, reg);
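/* For instance (a sketch, assuming FIRST_PARM_REG == 2 and
   UNITS_PER_WORD == 4), a 6-byte BLKmode structure starting in the first
   argument register comes back as roughly:

     (parallel [(expr_list (reg:SI 2) (const_int 0))
		(expr_list (reg:SI 3) (const_int 4))])

   i.e. the first word in r2 and the trailing, left-justified two bytes
   packed into r3.  */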
2765 rtx
2766 mcore_function_value (const_tree valtype, const_tree func)
2768 machine_mode mode;
2769 int unsigned_p;
2771 mode = TYPE_MODE (valtype);
2773 /* Since we promote return types, we must promote the mode here too. */
2774 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2776 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2779 /* Define where to put the arguments to a function.
2780 Value is zero to push the argument on the stack,
2781 or a hard register in which to store the argument.
2783 MODE is the argument's machine mode.
2784 TYPE is the data type of the argument (as a tree).
2785 This is null for libcalls where that information may
2786 not be available.
2787 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2788 the preceding args and about the function being called.
2789 NAMED is nonzero if this argument is a named parameter
2790 (otherwise it is an extra parameter matching an ellipsis).
2792 On MCore the first args are normally in registers
2793 and the rest are pushed. Any arg that starts within the first
2794 NPARM_REGS words is at least partially passed in a register unless
2795 its data type forbids it. */
2797 static rtx
2798 mcore_function_arg (cumulative_args_t cum, machine_mode mode,
2799 const_tree type, bool named)
2801 int arg_reg;
2803 if (! named || mode == VOIDmode)
2804 return 0;
2806 if (targetm.calls.must_pass_in_stack (mode, type))
2807 return 0;
2809 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
2811 if (arg_reg < NPARM_REGS)
2812 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2814 return 0;
2817 static void
2818 mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2819 const_tree type, bool named ATTRIBUTE_UNUSED)
2821 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2823 *cum = (ROUND_REG (*cum, mode)
2824 + (int)named * mcore_num_arg_regs (mode, type));
2827 static unsigned int
2828 mcore_function_arg_boundary (machine_mode mode,
2829 const_tree type ATTRIBUTE_UNUSED)
2831 /* Doubles must be aligned to an 8 byte boundary. */
2832 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2833 ? BIGGEST_ALIGNMENT
2834 : PARM_BOUNDARY);
2837 /* Returns the number of bytes of argument registers required to hold *part*
2838 of a parameter of machine mode MODE and type TYPE (which may be NULL if
2839 the type is not known). If the argument fits entirely in the argument
2840 registers, or entirely on the stack, then 0 is returned. CUM is the
2841 number of argument registers already used by earlier parameters to
2842 the function. */
2844 static int
2845 mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
2846 tree type, bool named)
2848 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
2850 if (named == 0)
2851 return 0;
2853 if (targetm.calls.must_pass_in_stack (mode, type))
2854 return 0;
2856 /* REG is not the *hardware* register number of the register that holds
2857 the argument, it is the *argument* register number. So for example,
2858 the first argument to a function goes in argument register 0, which
2859 translates (for the MCore) into hardware register 2. The second
2860 argument goes into argument register 1, which translates into hardware
2861 register 3, and so on. NPARM_REGS is the number of argument registers
2862 supported by the target, not the maximum hardware register number of
2863 the target. */
2864 if (reg >= NPARM_REGS)
2865 return 0;
2867 /* If the argument fits entirely in registers, return 0. */
2868 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2869 return 0;
2871 /* The argument overflows the number of available argument registers.
2872 Compute how many argument registers have not yet been assigned to
2873 hold an argument. */
2874 reg = NPARM_REGS - reg;
2876 /* Return partially in registers and partially on the stack. */
2877 return reg * UNITS_PER_WORD;
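/* Worked example, assuming NPARM_REGS == 6 and UNITS_PER_WORD == 4: if
   five argument words are already in use and the next named parameter is
   an 8-byte structure, then reg == 5 and mcore_num_arg_regs returns 2, so
   5 + 2 > 6 and we return (6 - 5) * 4 == 4 -- the first 4 bytes travel in
   the last argument register and the remaining 4 bytes go on the stack.  */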
2880 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2882 int
2883 mcore_dllexport_name_p (const char * symbol)
2885 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2888 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2890 int
2891 mcore_dllimport_name_p (const char * symbol)
2893 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2896 /* Mark a DECL as being dllexport'd. */
2898 static void
2899 mcore_mark_dllexport (tree decl)
2901 const char * oldname;
2902 char * newname;
2903 rtx rtlname;
2904 tree idp;
2906 rtlname = XEXP (DECL_RTL (decl), 0);
2908 if (GET_CODE (rtlname) == MEM)
2909 rtlname = XEXP (rtlname, 0);
2910 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2911 oldname = XSTR (rtlname, 0);
2913 if (mcore_dllexport_name_p (oldname))
2914 return; /* Already done. */
2916 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2917 sprintf (newname, "@e.%s", oldname);
2919 /* We pass newname through get_identifier to ensure it has a unique
2920 address. RTL processing can sometimes peek inside the symbol ref
2921 and compare the string's addresses to see if two symbols are
2922 identical. */
2923 /* ??? At least I think that's why we do this. */
2924 idp = get_identifier (newname);
2926 XEXP (DECL_RTL (decl), 0) =
2927 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2930 /* Mark a DECL as being dllimport'd. */
2932 static void
2933 mcore_mark_dllimport (tree decl)
2935 const char * oldname;
2936 char * newname;
2937 tree idp;
2938 rtx rtlname;
2939 rtx newrtl;
2941 rtlname = XEXP (DECL_RTL (decl), 0);
2943 if (GET_CODE (rtlname) == MEM)
2944 rtlname = XEXP (rtlname, 0);
2945 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2946 oldname = XSTR (rtlname, 0);
2948 gcc_assert (!mcore_dllexport_name_p (oldname));
2949 if (mcore_dllimport_name_p (oldname))
2950 return; /* Already done. */
2952 /* ??? One can well ask why we're making these checks here,
2953 and that would be a good question. */
2955 /* Imported variables can't be initialized. */
2956 if (TREE_CODE (decl) == VAR_DECL
2957 && !DECL_VIRTUAL_P (decl)
2958 && DECL_INITIAL (decl))
2960 error ("initialized variable %q+D is marked dllimport", decl);
2961 return;
2964 /* `extern' needn't be specified with dllimport.
2965 Specify `extern' now and hope for the best. Sigh. */
2966 if (TREE_CODE (decl) == VAR_DECL
2967 /* ??? Is this test for vtables needed? */
2968 && !DECL_VIRTUAL_P (decl))
2970 DECL_EXTERNAL (decl) = 1;
2971 TREE_PUBLIC (decl) = 1;
2974 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2975 sprintf (newname, "@i.__imp_%s", oldname);
2977 /* We pass newname through get_identifier to ensure it has a unique
2978 address. RTL processing can sometimes peek inside the symbol ref
2979 and compare the string's addresses to see if two symbols are
2980 identical. */
2981 /* ??? At least I think that's why we do this. */
2982 idp = get_identifier (newname);
2984 newrtl = gen_rtx_MEM (Pmode,
2985 gen_rtx_SYMBOL_REF (Pmode,
2986 IDENTIFIER_POINTER (idp)));
2987 XEXP (DECL_RTL (decl), 0) = newrtl;
2990 static int
2991 mcore_dllexport_p (tree decl)
2993 if ( TREE_CODE (decl) != VAR_DECL
2994 && TREE_CODE (decl) != FUNCTION_DECL)
2995 return 0;
2997 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
3000 static int
3001 mcore_dllimport_p (tree decl)
3003 if ( TREE_CODE (decl) != VAR_DECL
3004 && TREE_CODE (decl) != FUNCTION_DECL)
3005 return 0;
3007 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3010 /* We must mark dll symbols specially. Definitions of dllexport'd objects
3011 install some info in the .drectve (PE) or .exports (ELF) sections. */
3013 static void
3014 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3016 /* Mark the decl so we can tell from the rtl whether the object is
3017 dllexport'd or dllimport'd. */
3018 if (mcore_dllexport_p (decl))
3019 mcore_mark_dllexport (decl);
3020 else if (mcore_dllimport_p (decl))
3021 mcore_mark_dllimport (decl);
3023 /* It might be that DECL has already been marked as dllimport, but
3024 a subsequent definition nullified that. The attribute is gone
3025 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3026 else if ((TREE_CODE (decl) == FUNCTION_DECL
3027 || TREE_CODE (decl) == VAR_DECL)
3028 && DECL_RTL (decl) != NULL_RTX
3029 && GET_CODE (DECL_RTL (decl)) == MEM
3030 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3031 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3032 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3034 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3035 tree idp = get_identifier (oldname + 9);
3036 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3038 XEXP (DECL_RTL (decl), 0) = newrtl;
3040 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3041 ??? We leave these alone for now. */
3045 /* Undo the effects of the above. */
3047 static const char *
3048 mcore_strip_name_encoding (const char * str)
3050 return str + (str[0] == '@' ? 3 : 0);
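/* Putting the routines above together: a symbol "foo" becomes "@e.foo"
   when dllexport'd and "@i.__imp_foo" when dllimport'd, and
   mcore_strip_name_encoding simply drops the three-character "@e." or
   "@i." prefix again (leaving "__imp_foo" in the import case).  */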
3053 /* MCore specific attribute support.
3054 dllexport - for exporting a function/variable that will live in a dll
3055 dllimport - for importing a function/variable from a dll
3056 naked - do not create a function prologue/epilogue. */
3058 /* Handle a "naked" attribute; arguments as in
3059 struct attribute_spec.handler. */
3061 static tree
3062 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3063 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3065 if (TREE_CODE (*node) != FUNCTION_DECL)
3067 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3068 name);
3069 *no_add_attrs = true;
3072 return NULL_TREE;
3075 /* ??? It looks like this is PE specific? Oh well, this is what the
3076 old code did as well. */
3078 static void
3079 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3081 int len;
3082 const char * name;
3083 char * string;
3084 const char * prefix;
3086 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3088 /* Strip off any encoding in name. */
3089 name = (* targetm.strip_name_encoding) (name);
3091 /* The object is put in, for example, section .text$foo.
3092 The linker will then ultimately place it in .text
3093 (everything from the $ on is stripped). */
3094 if (TREE_CODE (decl) == FUNCTION_DECL)
3095 prefix = ".text$";
3096 /* For compatibility with EPOC, we ignore the fact that the
3097 section might have relocs against it. */
3098 else if (decl_readonly_section (decl, 0))
3099 prefix = ".rdata$";
3100 else
3101 prefix = ".data$";
3103 len = strlen (name) + strlen (prefix);
3104 string = XALLOCAVEC (char, len + 1);
3106 sprintf (string, "%s%s", prefix, name);
3108 set_decl_section_name (decl, string);
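/* So, for example, a function "bar" lands in section ".text$bar" and a
   writable variable "baz" in ".data$baz"; the linker folds these back into
   .text and .data by stripping everything from the '$' on.  */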
3111 int
3112 mcore_naked_function_p (void)
3114 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3117 static bool
3118 mcore_warn_func_return (tree decl)
3120 /* Naked functions are implemented entirely in assembly, including the
3121 return sequence, so suppress warnings about this. */
3122 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3125 #ifdef OBJECT_FORMAT_ELF
3126 static void
3127 mcore_asm_named_section (const char *name,
3128 unsigned int flags ATTRIBUTE_UNUSED,
3129 tree decl ATTRIBUTE_UNUSED)
3131 fprintf (asm_out_file, "\t.section %s\n", name);
3133 #endif /* OBJECT_FORMAT_ELF */
3135 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3137 static void
3138 mcore_external_libcall (rtx fun)
3140 fprintf (asm_out_file, "\t.import\t");
3141 assemble_name (asm_out_file, XSTR (fun, 0));
3142 fprintf (asm_out_file, "\n");
3145 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3147 static bool
3148 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3150 const HOST_WIDE_INT size = int_size_in_bytes (type);
3151 return (size == -1 || size > 2 * UNITS_PER_WORD);
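/* With UNITS_PER_WORD == 4 this means aggregates of up to 8 bytes come
   back in registers, while anything larger -- or of variable size
   (int_size_in_bytes returns -1) -- is returned in memory.  */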
3154 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3155 Output assembler code for a block containing the constant parts
3156 of a trampoline, leaving space for the variable parts.
3158 On the MCore, the trampoline looks like:
3159 lrw r1, function
3160 lrw r13, area
3161 jmp r13
3162 or r0, r0
3163 .literals */
3165 static void
3166 mcore_asm_trampoline_template (FILE *f)
3168 fprintf (f, "\t.short 0x7102\n");
3169 fprintf (f, "\t.short 0x7d02\n");
3170 fprintf (f, "\t.short 0x00cd\n");
3171 fprintf (f, "\t.short 0x1e00\n");
3172 fprintf (f, "\t.long 0\n");
3173 fprintf (f, "\t.long 0\n");
3176 /* Worker function for TARGET_TRAMPOLINE_INIT. */
3178 static void
3179 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3181 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3182 rtx mem;
3184 emit_block_move (m_tramp, assemble_trampoline_template (),
3185 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3187 mem = adjust_address (m_tramp, SImode, 8);
3188 emit_move_insn (mem, chain_value);
3189 mem = adjust_address (m_tramp, SImode, 12);
3190 emit_move_insn (mem, fnaddr);
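/* The initialized trampoline therefore looks like (a sketch, assuming
   UNITS_PER_WORD == 4):

     offset  0: the four fixed instructions copied from the template
		(2 * UNITS_PER_WORD == 8 bytes)
     offset  8: the static chain value
     offset 12: the target function's address

   with each of the two literal words picked up by one of the template's
   pc-relative lrw instructions.  */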
3193 /* Implement TARGET_LEGITIMATE_CONSTANT_P
3195 On the MCore, allow anything but a double. */
3197 static bool
3198 mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3200 return GET_CODE (x) != CONST_DOUBLE;