gcc/config/mcore/mcore.c
1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published
8 by the Free Software Foundation; either version 3, or (at your
9 option) any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "stor-layout.h"
27 #include "varasm.h"
28 #include "stringpool.h"
29 #include "calls.h"
30 #include "tm_p.h"
31 #include "mcore.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "obstack.h"
40 #include "expr.h"
41 #include "reload.h"
42 #include "recog.h"
43 #include "hashtab.h"
44 #include "hash-set.h"
45 #include "vec.h"
46 #include "machmode.h"
47 #include "input.h"
48 #include "function.h"
49 #include "ggc.h"
50 #include "diagnostic-core.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "dominance.h"
54 #include "cfg.h"
55 #include "cfgrtl.h"
56 #include "cfganal.h"
57 #include "lcm.h"
58 #include "cfgbuild.h"
59 #include "cfgcleanup.h"
60 #include "predict.h"
61 #include "basic-block.h"
62 #include "df.h"
63 #include "builtins.h"
65 /* For dumping information about frame sizes. */
66 char * mcore_current_function_name = 0;
67 long mcore_current_compilation_timestamp = 0;
69 /* Global variables for machine-dependent things. */
71 /* Provides the class number of the smallest class containing
72 reg number. */
73 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
75 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
76 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
77 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
78 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
79 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
82 struct mcore_frame
84 int arg_size; /* Stdarg spills (bytes). */
85 int reg_size; /* Non-volatile reg saves (bytes). */
86 int reg_mask; /* Non-volatile reg saves. */
87 int local_size; /* Locals. */
88 int outbound_size; /* Arg overflow on calls out. */
89 int pad_outbound;
90 int pad_local;
91 int pad_reg;
92 /* Describe the steps we'll use to grow it. */
93 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
94 int growth[MAX_STACK_GROWS];
95 int arg_offset;
96 int reg_offset;
97 int reg_growth;
98 int local_growth;
101 typedef enum
103 COND_NO,
104 COND_MOV_INSN,
105 COND_CLR_INSN,
106 COND_INC_INSN,
107 COND_DEC_INSN,
108 COND_BRANCH_INSN
110 cond_type;
112 static void output_stack_adjust (int, int);
113 static int calc_live_regs (int *);
114 static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
115 static const char * output_inline_const (machine_mode, rtx *);
116 static void layout_mcore_frame (struct mcore_frame *);
117 static void mcore_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int);
118 static cond_type is_cond_candidate (rtx);
119 static rtx_insn *emit_new_cond_insn (rtx, int);
120 static rtx_insn *conditionalize_block (rtx_insn *);
121 static void conditionalize_optimization (void);
122 static void mcore_reorg (void);
123 static rtx handle_structs_in_regs (machine_mode, const_tree, int);
124 static void mcore_mark_dllexport (tree);
125 static void mcore_mark_dllimport (tree);
126 static int mcore_dllexport_p (tree);
127 static int mcore_dllimport_p (tree);
128 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
129 #ifdef OBJECT_FORMAT_ELF
130 static void mcore_asm_named_section (const char *,
131 unsigned int, tree);
132 #endif
133 static void mcore_print_operand (FILE *, rtx, int);
134 static void mcore_print_operand_address (FILE *, rtx);
135 static bool mcore_print_operand_punct_valid_p (unsigned char code);
136 static void mcore_unique_section (tree, int);
137 static void mcore_encode_section_info (tree, rtx, int);
138 static const char *mcore_strip_name_encoding (const char *);
139 static int mcore_const_costs (rtx, RTX_CODE);
140 static int mcore_and_cost (rtx);
141 static int mcore_ior_cost (rtx);
142 static bool mcore_rtx_costs (rtx, int, int, int,
143 int *, bool);
144 static void mcore_external_libcall (rtx);
145 static bool mcore_return_in_memory (const_tree, const_tree);
146 static int mcore_arg_partial_bytes (cumulative_args_t,
147 machine_mode,
148 tree, bool);
149 static rtx mcore_function_arg (cumulative_args_t,
150 machine_mode,
151 const_tree, bool);
152 static void mcore_function_arg_advance (cumulative_args_t,
153 machine_mode,
154 const_tree, bool);
155 static unsigned int mcore_function_arg_boundary (machine_mode,
156 const_tree);
157 static void mcore_asm_trampoline_template (FILE *);
158 static void mcore_trampoline_init (rtx, tree, rtx);
159 static bool mcore_warn_func_return (tree);
160 static void mcore_option_override (void);
161 static bool mcore_legitimate_constant_p (machine_mode, rtx);
163 /* MCore specific attributes. */
165 static const struct attribute_spec mcore_attribute_table[] =
167 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
168 affects_type_identity } */
169 { "dllexport", 0, 0, true, false, false, NULL, NULL, false },
170 { "dllimport", 0, 0, true, false, false, NULL, NULL, false },
171 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute, NULL,
172 false },
173 { NULL, 0, 0, false, false, false, NULL, NULL, false }
176 /* Initialize the GCC target structure. */
177 #undef TARGET_ASM_EXTERNAL_LIBCALL
178 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
180 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
181 #undef TARGET_MERGE_DECL_ATTRIBUTES
182 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
183 #endif
185 #ifdef OBJECT_FORMAT_ELF
186 #undef TARGET_ASM_UNALIGNED_HI_OP
187 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
188 #undef TARGET_ASM_UNALIGNED_SI_OP
189 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
190 #endif
192 #undef TARGET_PRINT_OPERAND
193 #define TARGET_PRINT_OPERAND mcore_print_operand
194 #undef TARGET_PRINT_OPERAND_ADDRESS
195 #define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
196 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
197 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
199 #undef TARGET_ATTRIBUTE_TABLE
200 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
201 #undef TARGET_ASM_UNIQUE_SECTION
202 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
203 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
204 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
205 #undef TARGET_ENCODE_SECTION_INFO
206 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
207 #undef TARGET_STRIP_NAME_ENCODING
208 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
209 #undef TARGET_RTX_COSTS
210 #define TARGET_RTX_COSTS mcore_rtx_costs
211 #undef TARGET_ADDRESS_COST
212 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
213 #undef TARGET_MACHINE_DEPENDENT_REORG
214 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
216 #undef TARGET_PROMOTE_FUNCTION_MODE
217 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
218 #undef TARGET_PROMOTE_PROTOTYPES
219 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
221 #undef TARGET_RETURN_IN_MEMORY
222 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
223 #undef TARGET_MUST_PASS_IN_STACK
224 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
225 #undef TARGET_PASS_BY_REFERENCE
226 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
227 #undef TARGET_ARG_PARTIAL_BYTES
228 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
229 #undef TARGET_FUNCTION_ARG
230 #define TARGET_FUNCTION_ARG mcore_function_arg
231 #undef TARGET_FUNCTION_ARG_ADVANCE
232 #define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
233 #undef TARGET_FUNCTION_ARG_BOUNDARY
234 #define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
236 #undef TARGET_SETUP_INCOMING_VARARGS
237 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
239 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
240 #define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
241 #undef TARGET_TRAMPOLINE_INIT
242 #define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
244 #undef TARGET_OPTION_OVERRIDE
245 #define TARGET_OPTION_OVERRIDE mcore_option_override
247 #undef TARGET_LEGITIMATE_CONSTANT_P
248 #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
250 #undef TARGET_WARN_FUNC_RETURN
251 #define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
253 struct gcc_target targetm = TARGET_INITIALIZER;
 255 /* Adjust the stack by SIZE bytes; a negative DIRECTION grows (allocates) it, a positive one shrinks it.  */
256 static void
257 output_stack_adjust (int direction, int size)
259 /* If extending stack a lot, we do it incrementally. */
260 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
262 rtx tmp = gen_rtx_REG (SImode, 1);
263 rtx memref;
265 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
268 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
269 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
270 MEM_VOLATILE_P (memref) = 1;
271 emit_insn (gen_movsi (memref, stack_pointer_rtx));
272 size -= mcore_stack_increment;
274 while (size > mcore_stack_increment);
276 /* SIZE is now the residual for the last adjustment,
277 which doesn't require a probe. */
280 if (size)
282 rtx insn;
283 rtx val = GEN_INT (size);
285 if (size > 32)
287 rtx nval = gen_rtx_REG (SImode, 1);
288 emit_insn (gen_movsi (nval, val));
289 val = nval;
292 if (direction > 0)
293 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
294 else
295 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
297 emit_insn (insn);
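/* For illustration only (64 is just an example value for
   mcore_stack_increment): growing the stack by 200 bytes would run the
   loop three times, each time loading the increment into r1, subtracting
   it from the stack pointer and doing a volatile store through it to
   probe the new space, leaving a residual of 8 bytes; since 8 is not
   greater than 32 that residual is subtracted as an immediate rather
   than being loaded into r1 first.  */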
301 /* Work out the registers which need to be saved,
302 both as a mask and a count. */
304 static int
305 calc_live_regs (int * count)
307 int reg;
308 int live_regs_mask = 0;
310 * count = 0;
312 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
314 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
316 (*count)++;
317 live_regs_mask |= (1 << reg);
321 return live_regs_mask;
324 /* Print the operand address in x to the stream. */
326 static void
327 mcore_print_operand_address (FILE * stream, rtx x)
329 switch (GET_CODE (x))
331 case REG:
332 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
333 break;
335 case PLUS:
337 rtx base = XEXP (x, 0);
338 rtx index = XEXP (x, 1);
340 if (GET_CODE (base) != REG)
342 /* Ensure that BASE is a register (one of them must be). */
343 rtx temp = base;
344 base = index;
345 index = temp;
348 switch (GET_CODE (index))
350 case CONST_INT:
351 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
352 reg_names[REGNO(base)], INTVAL (index));
353 break;
355 default:
356 gcc_unreachable ();
360 break;
362 default:
363 output_addr_const (stream, x);
364 break;
368 static bool
369 mcore_print_operand_punct_valid_p (unsigned char code)
371 return (code == '.' || code == '#' || code == '*' || code == '^'
372 || code == '!');
375 /* Print operand x (an rtx) in assembler syntax to file stream
376 according to modifier code.
378 'R' print the next register or memory location along, i.e. the lsw in
379 a double word value
380 'O' print a constant without the #
381 'M' print a constant as its negative
382 'P' print log2 of a power of two
383 'Q' print log2 of an inverse of a power of two
384 'U' print register for ldm/stm instruction
 385    'X' print byte number for xtrbN instruction
        'N' print log2 of (a constant + 1), or 32 when the constant is -1
        'x' print a constant in hexadecimal.  */
387 static void
388 mcore_print_operand (FILE * stream, rtx x, int code)
390 switch (code)
392 case 'N':
393 if (INTVAL(x) == -1)
394 fprintf (asm_out_file, "32");
395 else
396 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
397 break;
398 case 'P':
399 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
400 break;
401 case 'Q':
402 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
403 break;
404 case 'O':
405 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
406 break;
407 case 'M':
408 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
409 break;
410 case 'R':
411 /* Next location along in memory or register. */
412 switch (GET_CODE (x))
414 case REG:
415 fputs (reg_names[REGNO (x) + 1], (stream));
416 break;
417 case MEM:
418 mcore_print_operand_address
419 (stream, XEXP (adjust_address (x, SImode, 4), 0));
420 break;
421 default:
422 gcc_unreachable ();
424 break;
425 case 'U':
426 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
427 reg_names[REGNO (x) + 3]);
428 break;
429 case 'x':
430 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
431 break;
432 case 'X':
433 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
434 break;
436 default:
437 switch (GET_CODE (x))
439 case REG:
440 fputs (reg_names[REGNO (x)], (stream));
441 break;
442 case MEM:
443 output_address (XEXP (x, 0));
444 break;
445 default:
446 output_addr_const (stream, x);
447 break;
449 break;
453 /* What does a constant cost ? */
455 static int
456 mcore_const_costs (rtx exp, enum rtx_code code)
458 HOST_WIDE_INT val = INTVAL (exp);
460 /* Easy constants. */
461 if ( CONST_OK_FOR_I (val)
462 || CONST_OK_FOR_M (val)
463 || CONST_OK_FOR_N (val)
464 || (code == PLUS && CONST_OK_FOR_L (val)))
465 return 1;
466 else if (code == AND
467 && ( CONST_OK_FOR_M (~val)
468 || CONST_OK_FOR_N (~val)))
469 return 2;
470 else if (code == PLUS
471 && ( CONST_OK_FOR_I (-val)
472 || CONST_OK_FOR_M (-val)
473 || CONST_OK_FOR_N (-val)))
474 return 2;
476 return 5;
 479 /* What does an AND instruction cost?  We do this because immediates may
 480    have been relaxed.  We want CSE to commonize uses of relaxed immediates;
 481    otherwise we get bad code (multiple reloads of the same constant).  */
483 static int
484 mcore_and_cost (rtx x)
486 HOST_WIDE_INT val;
488 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
489 return 2;
491 val = INTVAL (XEXP (x, 1));
493 /* Do it directly. */
494 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
495 return 2;
496 /* Takes one instruction to load. */
497 else if (const_ok_for_mcore (val))
498 return 3;
499 /* Takes two instructions to load. */
500 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
501 return 4;
503 /* Takes a lrw to load. */
504 return 5;
507 /* What does an or cost - see and_cost(). */
509 static int
510 mcore_ior_cost (rtx x)
512 HOST_WIDE_INT val;
514 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
515 return 2;
517 val = INTVAL (XEXP (x, 1));
519 /* Do it directly with bclri. */
520 if (CONST_OK_FOR_M (val))
521 return 2;
522 /* Takes one instruction to load. */
523 else if (const_ok_for_mcore (val))
524 return 3;
525 /* Takes two instructions to load. */
526 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
527 return 4;
529 /* Takes a lrw to load. */
530 return 5;
533 static bool
534 mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
535 int * total, bool speed ATTRIBUTE_UNUSED)
537 switch (code)
539 case CONST_INT:
540 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
541 return true;
542 case CONST:
543 case LABEL_REF:
544 case SYMBOL_REF:
545 *total = 5;
546 return true;
547 case CONST_DOUBLE:
548 *total = 10;
549 return true;
551 case AND:
552 *total = COSTS_N_INSNS (mcore_and_cost (x));
553 return true;
555 case IOR:
556 *total = COSTS_N_INSNS (mcore_ior_cost (x));
557 return true;
559 case DIV:
560 case UDIV:
561 case MOD:
562 case UMOD:
563 case FLOAT:
564 case FIX:
565 *total = COSTS_N_INSNS (100);
566 return true;
568 default:
569 return false;
573 /* Prepare the operands for a comparison. Return whether the branch/setcc
574 should reverse the operands. */
576 bool
577 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
579 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
580 bool invert;
582 if (GET_CODE (op1) == CONST_INT)
584 HOST_WIDE_INT val = INTVAL (op1);
586 switch (code)
588 case GTU:
589 /* Unsigned > 0 is the same as != 0; everything else is converted
590 below to LEU (reversed cmphs). */
591 if (val == 0)
592 code = NE;
593 break;
595 /* Check whether (LE A imm) can become (LT A imm + 1),
596 or (GT A imm) can become (GE A imm + 1). */
597 case GT:
598 case LE:
599 if (CONST_OK_FOR_J (val + 1))
601 op1 = GEN_INT (val + 1);
602 code = code == LE ? LT : GE;
604 break;
606 default:
607 break;
611 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
612 op1 = force_reg (SImode, op1);
614 /* cmpnei: 0-31 (K immediate)
615 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
616 invert = false;
617 switch (code)
619 case EQ: /* Use inverted condition, cmpne. */
620 code = NE;
621 invert = true;
622 /* Drop through. */
624 case NE: /* Use normal condition, cmpne. */
625 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
626 op1 = force_reg (SImode, op1);
627 break;
629 case LE: /* Use inverted condition, reversed cmplt. */
630 code = GT;
631 invert = true;
632 /* Drop through. */
634 case GT: /* Use normal condition, reversed cmplt. */
635 if (GET_CODE (op1) == CONST_INT)
636 op1 = force_reg (SImode, op1);
637 break;
639 case GE: /* Use inverted condition, cmplt. */
640 code = LT;
641 invert = true;
642 /* Drop through. */
644 case LT: /* Use normal condition, cmplt. */
645 if (GET_CODE (op1) == CONST_INT &&
646 /* covered by btsti x,31. */
647 INTVAL (op1) != 0 &&
648 ! CONST_OK_FOR_J (INTVAL (op1)))
649 op1 = force_reg (SImode, op1);
650 break;
652 case GTU: /* Use inverted condition, cmple. */
653 /* We coped with unsigned > 0 above. */
654 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
655 code = LEU;
656 invert = true;
657 /* Drop through. */
659 case LEU: /* Use normal condition, reversed cmphs. */
660 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
661 op1 = force_reg (SImode, op1);
662 break;
664 case LTU: /* Use inverted condition, cmphs. */
665 code = GEU;
666 invert = true;
667 /* Drop through. */
669 case GEU: /* Use normal condition, cmphs. */
670 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
671 op1 = force_reg (SImode, op1);
672 break;
674 default:
675 break;
678 emit_insn (gen_rtx_SET (VOIDmode,
679 cc_reg,
680 gen_rtx_fmt_ee (code, CCmode, op0, op1)));
681 return invert;
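/* Two illustrative cases (register numbers invented): (le r2,10) is first
   rewritten by the CONST_INT pre-pass to (lt r2,11), and 11 fits the J
   immediate, so a cmplti is emitted and INVERT comes back false; (eq r2,r3)
   is flipped to NE, a cmpne is emitted and INVERT comes back true, telling
   the caller to branch or set on the complemented condition.  */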
685 mcore_symbolic_address_p (rtx x)
687 switch (GET_CODE (x))
689 case SYMBOL_REF:
690 case LABEL_REF:
691 return 1;
692 case CONST:
693 x = XEXP (x, 0);
694 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
695 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
696 && GET_CODE (XEXP (x, 1)) == CONST_INT);
697 default:
698 return 0;
702 /* Functions to output assembly code for a function call. */
704 char *
705 mcore_output_call (rtx operands[], int index)
707 static char buffer[20];
708 rtx addr = operands [index];
710 if (REG_P (addr))
712 if (TARGET_CG_DATA)
714 gcc_assert (mcore_current_function_name);
716 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
717 "unknown", 1);
720 sprintf (buffer, "jsr\t%%%d", index);
722 else
724 if (TARGET_CG_DATA)
726 gcc_assert (mcore_current_function_name);
727 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
729 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
730 XSTR (addr, 0), 0);
733 sprintf (buffer, "jbsr\t%%%d", index);
736 return buffer;
739 /* Can we load a constant with a single instruction ? */
742 const_ok_for_mcore (HOST_WIDE_INT value)
744 if (value >= 0 && value <= 127)
745 return 1;
747 /* Try exact power of two. */
748 if (CONST_OK_FOR_M (value))
749 return 1;
751 /* Try exact power of two - 1. */
752 if (CONST_OK_FOR_N (value) && value != -1)
753 return 1;
755 return 0;
758 /* Can we load a constant inline with up to 2 instructions ? */
761 mcore_const_ok_for_inline (HOST_WIDE_INT value)
763 HOST_WIDE_INT x, y;
765 return try_constant_tricks (value, & x, & y) > 0;
768 /* Are we loading the constant using a not ? */
771 mcore_const_trick_uses_not (HOST_WIDE_INT value)
773 HOST_WIDE_INT x, y;
775 return try_constant_tricks (value, & x, & y) == 2;
778 /* Try tricks to load a constant inline and return the trick number if
779 success (0 is non-inlinable).
781 0: not inlinable
782 1: single instruction (do the usual thing)
783 2: single insn followed by a 'not'
 784    3: single insn followed by an addi
 785    4: single insn followed by a subi
786 5: single insn followed by rsubi
787 6: single insn followed by bseti
788 7: single insn followed by bclri
789 8: single insn followed by rotli
790 9: single insn followed by lsli
791 10: single insn followed by ixh
792 11: single insn followed by ixw. */
794 static int
795 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
797 HOST_WIDE_INT i;
798 unsigned HOST_WIDE_INT bit, shf, rot;
800 if (const_ok_for_mcore (value))
801 return 1; /* Do the usual thing. */
803 if (! TARGET_HARDLIT)
804 return 0;
806 if (const_ok_for_mcore (~value))
808 *x = ~value;
809 return 2;
812 for (i = 1; i <= 32; i++)
814 if (const_ok_for_mcore (value - i))
816 *x = value - i;
817 *y = i;
819 return 3;
822 if (const_ok_for_mcore (value + i))
824 *x = value + i;
825 *y = i;
827 return 4;
831 bit = 0x80000000ULL;
833 for (i = 0; i <= 31; i++)
835 if (const_ok_for_mcore (i - value))
837 *x = i - value;
838 *y = i;
840 return 5;
843 if (const_ok_for_mcore (value & ~bit))
845 *y = bit;
846 *x = value & ~bit;
847 return 6;
850 if (const_ok_for_mcore (value | bit))
852 *y = ~bit;
853 *x = value | bit;
855 return 7;
858 bit >>= 1;
861 shf = value;
862 rot = value;
864 for (i = 1; i < 31; i++)
866 int c;
868 /* MCore has rotate left. */
869 c = rot << 31;
870 rot >>= 1;
871 rot &= 0x7FFFFFFF;
872 rot |= c; /* Simulate rotate. */
874 if (const_ok_for_mcore (rot))
876 *y = i;
877 *x = rot;
879 return 8;
882 if (shf & 1)
883 shf = 0; /* Can't use logical shift, low order bit is one. */
885 shf >>= 1;
887 if (shf != 0 && const_ok_for_mcore (shf))
889 *y = i;
890 *x = shf;
892 return 9;
896 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
898 *x = value / 3;
900 return 10;
903 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
905 *x = value / 5;
907 return 11;
910 return 0;
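/* Worked examples (illustrative, assuming TARGET_HARDLIT): VALUE == 130
   is not directly loadable, but 130 - 2 == 128 is an exact power of two,
   so trick 3 fires with *X = 128 and *Y = 2, and output_inline_const will
   emit "bgeni rd,7" followed by "addi rd,2"; VALUE == -5 hits trick 2
   because ~-5 == 4 is loadable, giving "movi rd,4" then "not rd" (rd
   stands for whatever destination register operand 0 names).  */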
 913 /* Check whether REG is dead at insn FIRST.  This is done by searching ahead
914 for either the next use (i.e., reg is live), a death note, or a set of
915 reg. Don't just use dead_or_set_p() since reload does not always mark
916 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
917 can ignore subregs by extracting the actual register. BRC */
920 mcore_is_dead (rtx_insn *first, rtx reg)
922 rtx_insn *insn;
924 /* For mcore, subregs can't live independently of their parent regs. */
925 if (GET_CODE (reg) == SUBREG)
926 reg = SUBREG_REG (reg);
928 /* Dies immediately. */
929 if (dead_or_set_p (first, reg))
930 return 1;
932 /* Look for conclusive evidence of live/death, otherwise we have
933 to assume that it is live. */
934 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
936 if (JUMP_P (insn))
937 return 0; /* We lose track, assume it is alive. */
939 else if (CALL_P (insn))
 941 	  /* Calls might use it for the target or for register parms.  */
942 if (reg_referenced_p (reg, PATTERN (insn))
943 || find_reg_fusage (insn, USE, reg))
944 return 0;
945 else if (dead_or_set_p (insn, reg))
946 return 1;
948 else if (NONJUMP_INSN_P (insn))
950 if (reg_referenced_p (reg, PATTERN (insn)))
951 return 0;
952 else if (dead_or_set_p (insn, reg))
953 return 1;
957 /* No conclusive evidence either way, we cannot take the chance
958 that control flow hid the use from us -- "I'm not dead yet". */
959 return 0;
962 /* Count the number of ones in mask. */
965 mcore_num_ones (HOST_WIDE_INT mask)
967 /* A trick to count set bits recently posted on comp.compilers. */
968 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
969 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
970 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
971 mask = ((mask >> 8) + mask);
973 return (mask + (mask >> 16)) & 0xff;
976 /* Count the number of zeros in mask. */
979 mcore_num_zeros (HOST_WIDE_INT mask)
981 return 32 - mcore_num_ones (mask);
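/* Tracing the trick on MASK == 0xf0 (purely illustrative): the pair step
   yields 0xa0 (two 2-bit fields each holding a count of 2), the nibble
   step yields 0x40, the byte step 0x04, and the final fold of the two
   halfwords returns 4; mcore_num_zeros would then report 28.  */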
984 /* Determine byte being masked. */
987 mcore_byte_offset (unsigned int mask)
989 if (mask == 0x00ffffffL)
990 return 0;
991 else if (mask == 0xff00ffffL)
992 return 1;
993 else if (mask == 0xffff00ffL)
994 return 2;
995 else if (mask == 0xffffff00L)
996 return 3;
998 return -1;
1001 /* Determine halfword being masked. */
1004 mcore_halfword_offset (unsigned int mask)
1006 if (mask == 0x0000ffffL)
1007 return 0;
1008 else if (mask == 0xffff0000L)
1009 return 1;
1011 return -1;
1014 /* Output a series of bseti's corresponding to mask. */
1016 const char *
1017 mcore_output_bseti (rtx dst, int mask)
1019 rtx out_operands[2];
1020 int bit;
1022 out_operands[0] = dst;
1024 for (bit = 0; bit < 32; bit++)
1026 if ((mask & 0x1) == 0x1)
1028 out_operands[1] = GEN_INT (bit);
1030 output_asm_insn ("bseti\t%0,%1", out_operands);
1032 mask >>= 1;
1035 return "";
1038 /* Output a series of bclri's corresponding to mask. */
1040 const char *
1041 mcore_output_bclri (rtx dst, int mask)
1043 rtx out_operands[2];
1044 int bit;
1046 out_operands[0] = dst;
1048 for (bit = 0; bit < 32; bit++)
1050 if ((mask & 0x1) == 0x0)
1052 out_operands[1] = GEN_INT (bit);
1054 output_asm_insn ("bclri\t%0,%1", out_operands);
1057 mask >>= 1;
1060 return "";
1063 /* Output a conditional move of two constants that are +/- 1 within each
1064 other. See the "movtK" patterns in mcore.md. I'm not sure this is
1065 really worth the effort. */
1067 const char *
1068 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1070 HOST_WIDE_INT load_value;
1071 HOST_WIDE_INT adjust_value;
1072 rtx out_operands[4];
1074 out_operands[0] = operands[0];
1076 /* Check to see which constant is loadable. */
1077 if (const_ok_for_mcore (INTVAL (operands[1])))
1079 out_operands[1] = operands[1];
1080 out_operands[2] = operands[2];
1082 else if (const_ok_for_mcore (INTVAL (operands[2])))
1084 out_operands[1] = operands[2];
1085 out_operands[2] = operands[1];
1087 /* Complement test since constants are swapped. */
1088 cmp_t = (cmp_t == 0);
1090 load_value = INTVAL (out_operands[1]);
1091 adjust_value = INTVAL (out_operands[2]);
1093 /* First output the test if folded into the pattern. */
1095 if (test)
1096 output_asm_insn (test, operands);
1098 /* Load the constant - for now, only support constants that can be
 1099      generated with a single instruction.  Maybe add general inlinable
1100 constants later (this will increase the # of patterns since the
1101 instruction sequence has a different length attribute). */
1102 if (load_value >= 0 && load_value <= 127)
1103 output_asm_insn ("movi\t%0,%1", out_operands);
1104 else if (CONST_OK_FOR_M (load_value))
1105 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1106 else if (CONST_OK_FOR_N (load_value))
1107 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1109 /* Output the constant adjustment. */
1110 if (load_value > adjust_value)
1112 if (cmp_t)
1113 output_asm_insn ("decf\t%0", out_operands);
1114 else
1115 output_asm_insn ("dect\t%0", out_operands);
1117 else
1119 if (cmp_t)
1120 output_asm_insn ("incf\t%0", out_operands);
1121 else
1122 output_asm_insn ("inct\t%0", out_operands);
1125 return "";
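/* For instance (operand values invented): with constants 5 and 6 the
   loadable 5 is emitted first as "movi %0,5"; because the loaded value is
   the smaller of the two, the adjustment is "incf %0" when CMP_T is set
   and "inct %0" otherwise, so the conditionally-adjusted register ends up
   holding 6 in the other case.  */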
1128 /* Outputs the peephole for moving a constant that gets not'ed followed
1129 by an and (i.e. combine the not and the and into andn). BRC */
1131 const char *
1132 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1134 HOST_WIDE_INT x, y;
1135 rtx out_operands[3];
1136 const char * load_op;
1137 char buf[256];
1138 int trick_no;
1140 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1141 gcc_assert (trick_no == 2);
1143 out_operands[0] = operands[0];
1144 out_operands[1] = GEN_INT (x);
1145 out_operands[2] = operands[2];
1147 if (x >= 0 && x <= 127)
1148 load_op = "movi\t%0,%1";
1150 /* Try exact power of two. */
1151 else if (CONST_OK_FOR_M (x))
1152 load_op = "bgeni\t%0,%P1";
1154 /* Try exact power of two - 1. */
1155 else if (CONST_OK_FOR_N (x))
1156 load_op = "bmaski\t%0,%N1";
1158 else
1160 load_op = "BADMOVI-andn\t%0, %1";
1161 gcc_unreachable ();
1164 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1165 output_asm_insn (buf, out_operands);
1167 return "";
1170 /* Output an inline constant. */
1172 static const char *
1173 output_inline_const (machine_mode mode, rtx operands[])
1175 HOST_WIDE_INT x = 0, y = 0;
1176 int trick_no;
1177 rtx out_operands[3];
1178 char buf[256];
1179 char load_op[256];
1180 const char *dst_fmt;
1181 HOST_WIDE_INT value;
1183 value = INTVAL (operands[1]);
1185 trick_no = try_constant_tricks (value, &x, &y);
1186 /* lrw's are handled separately: Large inlinable constants never get
1187 turned into lrw's. Our caller uses try_constant_tricks to back
1188 off to an lrw rather than calling this routine. */
1189 gcc_assert (trick_no != 0);
1191 if (trick_no == 1)
1192 x = value;
1194 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1195 out_operands[0] = operands[0];
1196 out_operands[1] = GEN_INT (x);
1198 if (trick_no > 2)
1199 out_operands[2] = GEN_INT (y);
1201 /* Select dst format based on mode. */
1202 if (mode == DImode && (! TARGET_LITTLE_END))
1203 dst_fmt = "%R0";
1204 else
1205 dst_fmt = "%0";
1207 if (x >= 0 && x <= 127)
1208 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1210 /* Try exact power of two. */
1211 else if (CONST_OK_FOR_M (x))
1212 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1214 /* Try exact power of two - 1. */
1215 else if (CONST_OK_FOR_N (x))
1216 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1218 else
1220 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1221 gcc_unreachable ();
1224 switch (trick_no)
1226 case 1:
1227 strcpy (buf, load_op);
1228 break;
1229 case 2: /* not */
1230 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1231 break;
1232 case 3: /* add */
1233 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1234 break;
1235 case 4: /* sub */
1236 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1237 break;
1238 case 5: /* rsub */
1239 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1240 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1241 break;
1242 case 6: /* bseti */
1243 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1244 break;
1245 case 7: /* bclr */
1246 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1247 break;
1248 case 8: /* rotl */
1249 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1250 break;
1251 case 9: /* lsl */
1252 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1253 break;
1254 case 10: /* ixh */
1255 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1256 break;
1257 case 11: /* ixw */
1258 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1259 break;
1260 default:
1261 return "";
1264 output_asm_insn (buf, out_operands);
1266 return "";
1269 /* Output a move of a word or less value. */
1271 const char *
1272 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1273 machine_mode mode ATTRIBUTE_UNUSED)
1275 rtx dst = operands[0];
1276 rtx src = operands[1];
1278 if (GET_CODE (dst) == REG)
1280 if (GET_CODE (src) == REG)
1282 if (REGNO (src) == CC_REG) /* r-c */
1283 return "mvc\t%0";
1284 else
1285 return "mov\t%0,%1"; /* r-r*/
1287 else if (GET_CODE (src) == MEM)
1289 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1290 return "lrw\t%0,[%1]"; /* a-R */
1291 else
1292 switch (GET_MODE (src)) /* r-m */
1294 case SImode:
1295 return "ldw\t%0,%1";
1296 case HImode:
1297 return "ld.h\t%0,%1";
1298 case QImode:
1299 return "ld.b\t%0,%1";
1300 default:
1301 gcc_unreachable ();
1304 else if (GET_CODE (src) == CONST_INT)
1306 HOST_WIDE_INT x, y;
1308 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1309 return "movi\t%0,%1";
1310 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1311 return "bgeni\t%0,%P1\t// %1 %x1";
1312 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1313 return "bmaski\t%0,%N1\t// %1 %x1";
1314 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1315 return output_inline_const (SImode, operands); /* 1-2 insns */
1316 else
1317 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1319 else
1320 return "lrw\t%0, %1"; /* Into the literal pool. */
1322 else if (GET_CODE (dst) == MEM) /* m-r */
1323 switch (GET_MODE (dst))
1325 case SImode:
1326 return "stw\t%1,%0";
1327 case HImode:
1328 return "st.h\t%1,%0";
1329 case QImode:
1330 return "st.b\t%1,%0";
1331 default:
1332 gcc_unreachable ();
1335 gcc_unreachable ();
1338 /* Return a sequence of instructions to perform DI or DF move.
1339 Since the MCORE cannot move a DI or DF in one instruction, we have
1340 to take care when we see overlapping source and dest registers. */
1342 const char *
1343 mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1345 rtx dst = operands[0];
1346 rtx src = operands[1];
1348 if (GET_CODE (dst) == REG)
1350 if (GET_CODE (src) == REG)
1352 int dstreg = REGNO (dst);
1353 int srcreg = REGNO (src);
 1355 	  /* Ensure the second source register is not overwritten.  */
1356 if (srcreg + 1 == dstreg)
1357 return "mov %R0,%R1\n\tmov %0,%1";
1358 else
1359 return "mov %0,%1\n\tmov %R0,%R1";
1361 else if (GET_CODE (src) == MEM)
1363 rtx memexp = XEXP (src, 0);
1364 int dstreg = REGNO (dst);
1365 int basereg = -1;
1367 if (GET_CODE (memexp) == LABEL_REF)
1368 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1369 else if (GET_CODE (memexp) == REG)
1370 basereg = REGNO (memexp);
1371 else if (GET_CODE (memexp) == PLUS)
1373 if (GET_CODE (XEXP (memexp, 0)) == REG)
1374 basereg = REGNO (XEXP (memexp, 0));
1375 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1376 basereg = REGNO (XEXP (memexp, 1));
1377 else
1378 gcc_unreachable ();
1380 else
1381 gcc_unreachable ();
1383 /* ??? length attribute is wrong here. */
1384 if (dstreg == basereg)
1386 /* Just load them in reverse order. */
1387 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1389 /* XXX: alternative: move basereg to basereg+1
1390 and then fall through. */
1392 else
1393 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1395 else if (GET_CODE (src) == CONST_INT)
1397 if (TARGET_LITTLE_END)
1399 if (CONST_OK_FOR_I (INTVAL (src)))
1400 output_asm_insn ("movi %0,%1", operands);
1401 else if (CONST_OK_FOR_M (INTVAL (src)))
1402 output_asm_insn ("bgeni %0,%P1", operands);
1403 else if (CONST_OK_FOR_N (INTVAL (src)))
1404 output_asm_insn ("bmaski %0,%N1", operands);
1405 else
1406 gcc_unreachable ();
1408 if (INTVAL (src) < 0)
1409 return "bmaski %R0,32";
1410 else
1411 return "movi %R0,0";
1413 else
1415 if (CONST_OK_FOR_I (INTVAL (src)))
1416 output_asm_insn ("movi %R0,%1", operands);
1417 else if (CONST_OK_FOR_M (INTVAL (src)))
1418 output_asm_insn ("bgeni %R0,%P1", operands);
1419 else if (CONST_OK_FOR_N (INTVAL (src)))
1420 output_asm_insn ("bmaski %R0,%N1", operands);
1421 else
1422 gcc_unreachable ();
1424 if (INTVAL (src) < 0)
1425 return "bmaski %0,32";
1426 else
1427 return "movi %0,0";
1430 else
1431 gcc_unreachable ();
1433 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1434 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1435 else
1436 gcc_unreachable ();
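/* Overlap examples (register numbers invented): copying the pair r2/r3
   into r3/r4 has REGNO (src) + 1 == REGNO (dst), so the high word moves
   first ("mov r4,r3" then "mov r3,r2"); for a memory load whose base
   register is also the first destination register, the two ldw's are
   simply emitted in reverse order so the base survives until the second
   load.  */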
1439 /* Predicates used by the templates. */
1442 mcore_arith_S_operand (rtx op)
1444 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1445 return 1;
1447 return 0;
1450 /* Expand insert bit field. BRC */
1453 mcore_expand_insv (rtx operands[])
1455 int width = INTVAL (operands[1]);
1456 int posn = INTVAL (operands[2]);
1457 int mask;
1458 rtx mreg, sreg, ereg;
1460 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1461 for width==1 must be removed. Look around line 368. This is something
1462 we really want the md part to do. */
1463 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1465 /* Do directly with bseti or bclri. */
1466 /* RBE: 2/97 consider only low bit of constant. */
1467 if ((INTVAL (operands[3]) & 1) == 0)
1469 mask = ~(1 << posn);
1470 emit_insn (gen_rtx_SET (SImode, operands[0],
1471 gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
1473 else
1475 mask = 1 << posn;
1476 emit_insn (gen_rtx_SET (SImode, operands[0],
1477 gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
1480 return 1;
1483 /* Look at some bit-field placements that we aren't interested
1484 in handling ourselves, unless specifically directed to do so. */
1485 if (! TARGET_W_FIELD)
1486 return 0; /* Generally, give up about now. */
1488 if (width == 8 && posn % 8 == 0)
1489 /* Byte sized and aligned; let caller break it up. */
1490 return 0;
1492 if (width == 16 && posn % 16 == 0)
1493 /* Short sized and aligned; let caller break it up. */
1494 return 0;
1496 /* The general case - we can do this a little bit better than what the
1497 machine independent part tries. This will get rid of all the subregs
1498 that mess up constant folding in combine when working with relaxed
1499 immediates. */
1501 /* If setting the entire field, do it directly. */
1502 if (GET_CODE (operands[3]) == CONST_INT
1503 && INTVAL (operands[3]) == ((1 << width) - 1))
1505 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1506 emit_insn (gen_rtx_SET (SImode, operands[0],
1507 gen_rtx_IOR (SImode, operands[0], mreg)));
1508 return 1;
1511 /* Generate the clear mask. */
1512 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1514 /* Clear the field, to overlay it later with the source. */
1515 emit_insn (gen_rtx_SET (SImode, operands[0],
1516 gen_rtx_AND (SImode, operands[0], mreg)));
1518 /* If the source is constant 0, we've nothing to add back. */
1519 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1520 return 1;
1522 /* XXX: Should we worry about more games with constant values?
1523 We've covered the high profile: set/clear single-bit and many-bit
1524 fields. How often do we see "arbitrary bit pattern" constants? */
1525 sreg = copy_to_mode_reg (SImode, operands[3]);
1527 /* Extract src as same width as dst (needed for signed values). We
1528 always have to do this since we widen everything to SImode.
1529 We don't have to mask if we're shifting this up against the
1530 MSB of the register (e.g., the shift will push out any hi-order
 1531      bits).  */
1532 if (width + posn != (int) GET_MODE_SIZE (SImode))
1534 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1535 emit_insn (gen_rtx_SET (SImode, sreg,
1536 gen_rtx_AND (SImode, sreg, ereg)));
1539 /* Insert source value in dest. */
1540 if (posn != 0)
1541 emit_insn (gen_rtx_SET (SImode, sreg,
1542 gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
1544 emit_insn (gen_rtx_SET (SImode, operands[0],
1545 gen_rtx_IOR (SImode, operands[0], sreg)));
1547 return 1;
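/* Two quick examples (field positions invented): storing constant 0 into
   a 1-bit field at bit 4 takes the single-bit path and emits an AND with
   ~0x10 (effectively a bclri); with TARGET_W_FIELD, storing the constant
   7 into a 3-bit field at bit 4 takes the "entire field" path and emits
   a single IOR with 0x70.  */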
1550 /* ??? Block move stuff stolen from m88k. This code has not been
1551 verified for correctness. */
1553 /* Emit code to perform a block move. Choose the best method.
1555 OPERANDS[0] is the destination.
1556 OPERANDS[1] is the source.
1557 OPERANDS[2] is the size.
1558 OPERANDS[3] is the alignment safe to use. */
1560 /* Emit code to perform a block move with an offset sequence of ldw/st
1561 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1562 known constants. DEST and SRC are registers. OFFSET is the known
1563 starting point for the output pattern. */
1565 static const machine_mode mode_from_align[] =
1567 VOIDmode, QImode, HImode, VOIDmode, SImode,
1570 static void
1571 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1573 rtx temp[2];
1574 machine_mode mode[2];
1575 int amount[2];
1576 bool active[2];
1577 int phase = 0;
1578 int next;
1579 int offset_ld = 0;
1580 int offset_st = 0;
1581 rtx x;
1583 x = XEXP (dst_mem, 0);
1584 if (!REG_P (x))
1586 x = force_reg (Pmode, x);
1587 dst_mem = replace_equiv_address (dst_mem, x);
1590 x = XEXP (src_mem, 0);
1591 if (!REG_P (x))
1593 x = force_reg (Pmode, x);
1594 src_mem = replace_equiv_address (src_mem, x);
1597 active[0] = active[1] = false;
1601 next = phase;
1602 phase ^= 1;
1604 if (size > 0)
1606 int next_amount;
1608 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1609 next_amount = MIN (next_amount, align);
1611 amount[next] = next_amount;
1612 mode[next] = mode_from_align[next_amount];
1613 temp[next] = gen_reg_rtx (mode[next]);
1615 x = adjust_address (src_mem, mode[next], offset_ld);
1616 emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
1618 offset_ld += next_amount;
1619 size -= next_amount;
1620 active[next] = true;
1623 if (active[phase])
1625 active[phase] = false;
1627 x = adjust_address (dst_mem, mode[phase], offset_st);
1628 emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
1630 offset_st += amount[phase];
1633 while (active[next]);
1636 bool
1637 mcore_expand_block_move (rtx *operands)
1639 HOST_WIDE_INT align, bytes, max;
1641 if (GET_CODE (operands[2]) != CONST_INT)
1642 return false;
1644 bytes = INTVAL (operands[2]);
1645 align = INTVAL (operands[3]);
1647 if (bytes <= 0)
1648 return false;
1649 if (align > 4)
1650 align = 4;
1652 switch (align)
1654 case 4:
1655 if (bytes & 1)
1656 max = 4*4;
1657 else if (bytes & 3)
1658 max = 8*4;
1659 else
1660 max = 16*4;
1661 break;
1662 case 2:
1663 max = 4*2;
1664 break;
1665 case 1:
1666 max = 4*1;
1667 break;
1668 default:
1669 gcc_unreachable ();
1672 if (bytes <= max)
1674 block_move_sequence (operands[0], operands[1], bytes, align);
1675 return true;
1678 return false;
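/* By way of example (sizes invented): a 6-byte copy known to be 2-byte
   aligned is within the 4*2 limit and is expanded inline as three HImode
   load/store pairs, software-pipelined so each store overlaps the
   following load; the same 6 bytes with only byte alignment exceed the
   4*1 limit, so the function returns false and the caller falls back to
   the generic block-move code.  */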
1682 /* Code to generate prologue and epilogue sequences. */
1683 static int number_of_regs_before_varargs;
1685 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1686 for a varargs function. */
1687 static int current_function_anonymous_args;
1689 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1690 #define STORE_REACH (64) /* Maximum displace of word store + 4. */
1691 #define ADDI_REACH (32) /* Maximum addi operand. */
1693 static void
1694 layout_mcore_frame (struct mcore_frame * infp)
1696 int n;
1697 unsigned int i;
1698 int nbytes;
1699 int regarg;
1700 int localregarg;
1701 int outbounds;
1702 unsigned int growths;
1703 int step;
1705 /* Might have to spill bytes to re-assemble a big argument that
1706 was passed partially in registers and partially on the stack. */
1707 nbytes = crtl->args.pretend_args_size;
1709 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1710 if (current_function_anonymous_args)
1711 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1713 infp->arg_size = nbytes;
1715 /* How much space to save non-volatile registers we stomp. */
1716 infp->reg_mask = calc_live_regs (& n);
1717 infp->reg_size = n * 4;
1719 /* And the rest of it... locals and space for overflowed outbounds. */
1720 infp->local_size = get_frame_size ();
1721 infp->outbound_size = crtl->outgoing_args_size;
1723 /* Make sure we have a whole number of words for the locals. */
1724 if (infp->local_size % STACK_BYTES)
1725 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1727 /* Only thing we know we have to pad is the outbound space, since
1728 we've aligned our locals assuming that base of locals is aligned. */
1729 infp->pad_local = 0;
1730 infp->pad_reg = 0;
1731 infp->pad_outbound = 0;
1732 if (infp->outbound_size % STACK_BYTES)
1733 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1735 /* Now we see how we want to stage the prologue so that it does
1736 the most appropriate stack growth and register saves to either:
1737 (1) run fast,
1738 (2) reduce instruction space, or
1739 (3) reduce stack space. */
1740 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1741 infp->growth[i] = 0;
1743 regarg = infp->reg_size + infp->arg_size;
1744 localregarg = infp->local_size + regarg;
1745 outbounds = infp->outbound_size + infp->pad_outbound;
1746 growths = 0;
1748 /* XXX: Consider one where we consider localregarg + outbound too! */
1750 /* Frame of <= 32 bytes and using stm would get <= 2 registers.
1751 use stw's with offsets and buy the frame in one shot. */
1752 if (localregarg <= ADDI_REACH
1753 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1755 /* Make sure we'll be aligned. */
1756 if (localregarg % STACK_BYTES)
1757 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1759 step = localregarg + infp->pad_reg;
1760 infp->reg_offset = infp->local_size;
1762 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1764 step += outbounds;
1765 infp->reg_offset += outbounds;
1766 outbounds = 0;
1769 infp->arg_offset = step - 4;
1770 infp->growth[growths++] = step;
1771 infp->reg_growth = growths;
1772 infp->local_growth = growths;
1774 /* If we haven't already folded it in. */
1775 if (outbounds)
1776 infp->growth[growths++] = outbounds;
1778 goto finish;
1781 /* Frame can't be done with a single subi, but can be done with 2
1782 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1783 shift some of the stack purchase into the first subi, so both are
1784 single instructions. */
1785 if (localregarg <= STORE_REACH
1786 && (infp->local_size > ADDI_REACH)
1787 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1789 int all;
1791 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1792 if (localregarg % STACK_BYTES)
1793 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1795 all = localregarg + infp->pad_reg + infp->pad_local;
1796 step = ADDI_REACH; /* As much up front as we can. */
1797 if (step > all)
1798 step = all;
1800 /* XXX: Consider whether step will still be aligned; we believe so. */
1801 infp->arg_offset = step - 4;
1802 infp->growth[growths++] = step;
1803 infp->reg_growth = growths;
1804 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1805 all -= step;
1807 /* Can we fold in any space required for outbounds? */
1808 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1810 all += outbounds;
1811 outbounds = 0;
1814 /* Get the rest of the locals in place. */
1815 step = all;
1816 infp->growth[growths++] = step;
1817 infp->local_growth = growths;
1818 all -= step;
1820 gcc_assert (all == 0);
1822 /* Finish off if we need to do so. */
1823 if (outbounds)
1824 infp->growth[growths++] = outbounds;
1826 goto finish;
1829 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1830 Then we buy the rest of the frame in 1 or 2 steps depending on
1831 whether we need a frame pointer. */
1832 if ((regarg % STACK_BYTES) == 0)
1834 infp->growth[growths++] = regarg;
1835 infp->reg_growth = growths;
1836 infp->arg_offset = regarg - 4;
1837 infp->reg_offset = 0;
1839 if (infp->local_size % STACK_BYTES)
1840 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1842 step = infp->local_size + infp->pad_local;
1844 if (!frame_pointer_needed)
1846 step += outbounds;
1847 outbounds = 0;
1850 infp->growth[growths++] = step;
1851 infp->local_growth = growths;
1853 /* If there's any left to be done. */
1854 if (outbounds)
1855 infp->growth[growths++] = outbounds;
1857 goto finish;
1860 /* XXX: optimizations that we'll want to play with....
1861 -- regarg is not aligned, but it's a small number of registers;
1862 use some of localsize so that regarg is aligned and then
1863 save the registers. */
1865 /* Simple encoding; plods down the stack buying the pieces as it goes.
1866 -- does not optimize space consumption.
1867 -- does not attempt to optimize instruction counts.
1868 -- but it is safe for all alignments. */
1869 if (regarg % STACK_BYTES != 0)
1870 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1872 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1873 infp->reg_growth = growths;
1874 infp->arg_offset = infp->growth[0] - 4;
1875 infp->reg_offset = 0;
1877 if (frame_pointer_needed)
1879 if (infp->local_size % STACK_BYTES != 0)
1880 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1882 infp->growth[growths++] = infp->local_size + infp->pad_local;
1883 infp->local_growth = growths;
1885 infp->growth[growths++] = outbounds;
1887 else
1889 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1890 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1892 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1893 infp->local_growth = growths;
1896 /* Anything else that we've forgotten?, plus a few consistency checks. */
1897 finish:
1898 gcc_assert (infp->reg_offset >= 0);
1899 gcc_assert (growths <= MAX_STACK_GROWS);
1901 for (i = 0; i < growths; i++)
1902 gcc_assert (!(infp->growth[i] % STACK_BYTES));
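/* A concrete (made-up) layout: 8 bytes of locals, two saved registers,
   no outgoing overflow and no frame pointer gives localregarg == 16,
   which is within ADDI_REACH and saves few enough registers for the
   first strategy, so the whole frame is bought with a single 16-byte
   adjustment, the register save area lands at offset 8 just above the
   locals, and reg_growth and local_growth both refer to that one growth
   step.  */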
1905 /* Define the offset between two registers, one to be eliminated, and
1906 the other its replacement, at the start of a routine. */
1909 mcore_initial_elimination_offset (int from, int to)
1911 int above_frame;
1912 int below_frame;
1913 struct mcore_frame fi;
1915 layout_mcore_frame (& fi);
1917 /* fp to ap */
1918 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1919 /* sp to fp */
1920 below_frame = fi.outbound_size + fi.pad_outbound;
1922 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1923 return above_frame;
1925 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1926 return above_frame + below_frame;
1928 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1929 return below_frame;
1931 gcc_unreachable ();
1934 /* Keep track of some information about varargs for the prolog. */
1936 static void
1937 mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1938 machine_mode mode, tree type,
1939 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1940 int second_time ATTRIBUTE_UNUSED)
1942 CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1944 current_function_anonymous_args = 1;
1946 /* We need to know how many argument registers are used before
1947 the varargs start, so that we can push the remaining argument
1948 registers during the prologue. */
1949 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1951 /* There is a bug somewhere in the arg handling code.
1952 Until I can find it this workaround always pushes the
1953 last named argument onto the stack. */
1954 number_of_regs_before_varargs = *args_so_far;
1956 /* The last named argument may be split between argument registers
1957 and the stack. Allow for this here. */
1958 if (number_of_regs_before_varargs > NPARM_REGS)
1959 number_of_regs_before_varargs = NPARM_REGS;
1962 void
1963 mcore_expand_prolog (void)
1965 struct mcore_frame fi;
1966 int space_allocated = 0;
1967 int growth = 0;
1969 /* Find out what we're doing. */
1970 layout_mcore_frame (&fi);
1972 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1973 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1975 if (TARGET_CG_DATA)
1977 /* Emit a symbol for this routine's frame size. */
1978 rtx x;
1980 x = DECL_RTL (current_function_decl);
1982 gcc_assert (GET_CODE (x) == MEM);
1984 x = XEXP (x, 0);
1986 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1988 free (mcore_current_function_name);
1990 mcore_current_function_name = xstrdup (XSTR (x, 0));
1992 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1994 if (cfun->calls_alloca)
1995 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1997 /* 970425: RBE:
 1998 	 We're looking at how the 8-byte alignment affects stack layout
1999 and where we had to pad things. This emits information we can
2000 extract which tells us about frame sizes and the like. */
2001 fprintf (asm_out_file,
2002 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2003 mcore_current_function_name,
2004 fi.arg_size, fi.reg_size, fi.reg_mask,
2005 fi.local_size, fi.outbound_size,
2006 frame_pointer_needed);
2009 if (mcore_naked_function_p ())
2010 return;
2012 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2013 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2015 /* If we have a parameter passed partially in regs and partially in memory,
2016 the registers will have been stored to memory already in function.c. So
2017 we only need to do something here for varargs functions. */
2018 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2020 int offset;
2021 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2022 int remaining = fi.arg_size;
2024 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2026 emit_insn (gen_movsi
2027 (gen_rtx_MEM (SImode,
2028 plus_constant (Pmode, stack_pointer_rtx,
2029 offset)),
2030 gen_rtx_REG (SImode, rn)));
2034 /* Do we need another stack adjustment before we do the register saves? */
2035 if (growth < fi.reg_growth)
2036 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2038 if (fi.reg_size != 0)
2040 int i;
2041 int offs = fi.reg_offset;
2043 for (i = 15; i >= 0; i--)
2045 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2047 int first_reg = 15;
2049 while (fi.reg_mask & (1 << first_reg))
2050 first_reg--;
2051 first_reg++;
2053 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2054 gen_rtx_REG (SImode, first_reg),
2055 GEN_INT (16 - first_reg)));
2057 i -= (15 - first_reg);
2058 offs += (16 - first_reg) * 4;
2060 else if (fi.reg_mask & (1 << i))
2062 emit_insn (gen_movsi
2063 (gen_rtx_MEM (SImode,
2064 plus_constant (Pmode, stack_pointer_rtx,
2065 offs)),
2066 gen_rtx_REG (SImode, i)));
2067 offs += 4;
2072 /* Figure the locals + outbounds. */
2073 if (frame_pointer_needed)
2075 /* If we haven't already purchased to 'fp'. */
2076 if (growth < fi.local_growth)
2077 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2079 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2081 /* ... and then go any remaining distance for outbounds, etc. */
2082 if (fi.growth[growth])
2083 output_stack_adjust (-1, fi.growth[growth++]);
2085 else
2087 if (growth < fi.local_growth)
2088 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2089 if (fi.growth[growth])
2090 output_stack_adjust (-1, fi.growth[growth++]);
2094 void
2095 mcore_expand_epilog (void)
2097 struct mcore_frame fi;
2098 int i;
2099 int offs;
2100 int growth = MAX_STACK_GROWS - 1 ;
2103 /* Find out what we're doing. */
2104 layout_mcore_frame(&fi);
2106 if (mcore_naked_function_p ())
2107 return;
2109 /* If we had a frame pointer, restore the sp from that. */
2110 if (frame_pointer_needed)
2112 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2113 growth = fi.local_growth - 1;
2115 else
2117 /* XXX: while loop should accumulate and do a single sell. */
2118 while (growth >= fi.local_growth)
2120 if (fi.growth[growth] != 0)
2121 output_stack_adjust (1, fi.growth[growth]);
2122 growth--;
2126 /* Make sure we've shrunk stack back to the point where the registers
2127 were laid down. This is typically 0/1 iterations. Then pull the
2128 register save information back off the stack. */
2129 while (growth >= fi.reg_growth)
2130 output_stack_adjust ( 1, fi.growth[growth--]);
2132 offs = fi.reg_offset;
2134 for (i = 15; i >= 0; i--)
2136 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2138 int first_reg;
2140 /* Find the starting register. */
2141 first_reg = 15;
2143 while (fi.reg_mask & (1 << first_reg))
2144 first_reg--;
2146 first_reg++;
2148 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2149 gen_rtx_MEM (SImode, stack_pointer_rtx),
2150 GEN_INT (16 - first_reg)));
2152 i -= (15 - first_reg);
2153 offs += (16 - first_reg) * 4;
2155 else if (fi.reg_mask & (1 << i))
2157 emit_insn (gen_movsi
2158 (gen_rtx_REG (SImode, i),
2159 gen_rtx_MEM (SImode,
2160 plus_constant (Pmode, stack_pointer_rtx,
2161 offs))));
2162 offs += 4;
2166 /* Give back anything else. */
2167 /* XXX: Should accumulate total and then give it back. */
2168 while (growth >= 0)
2169 output_stack_adjust ( 1, fi.growth[growth--]);
2172 /* This code is borrowed from the SH port. */
2174 /* The MCORE cannot load a large constant into a register, constants have to
2175 come from a pc relative load. The reference of a pc relative load
2176 instruction must be less than 1k in front of the instruction. This
2177 means that we often have to dump a constant inside a function, and
2178 generate code to branch around it.
2180 It is important to minimize this, since the branches will slow things
2181 down and make things bigger.
2183 Worst case code looks like:
2185 lrw L1,r0
2186 br L2
2187 align
2188 L1: .long value
2192 lrw L3,r0
2193 br L4
2194 align
2195 L3: .long value
2199 We fix this by performing a scan before scheduling, which notices which
2200 instructions need to have their operands fetched from the constant table
2201 and builds the table.
2203 The algorithm is:
2205 Scan: find an instruction which needs a pcrel move. Look forward, find the
2206 last barrier which is within MAX_COUNT bytes of the requirement.
2207 If there isn't one, make one. Process all the instructions between
2208 the find and the barrier.
2210 In the above example, we can tell that L3 is within 1k of L1, so
2211 the first move can be shrunk from the 2 insn+constant sequence into
2212 just 1 insn, and the constant moved to L3 to make:
2214 lrw L1,r0
2216 lrw L3,r0
2217 br L4
2218 align
2219 L3:.long value
2220 L4:.long value
2222 Then the second move becomes the target for the shortening process. */
2224 typedef struct
2226 rtx value; /* Value in table. */
2227 rtx label; /* Label of value. */
2228 } pool_node;
2230 /* The maximum number of constants that can fit into one pool, since
2231 the pc relative range is 0...1020 bytes and constants are at least 4
2232 bytes long. We subtract 4 from the range to allow for the case where
2233 we need to add a branch/align before the constant pool. */
2235 #define MAX_COUNT 1016
2236 #define MAX_POOL_SIZE (MAX_COUNT/4)
2237 static pool_node pool_vector[MAX_POOL_SIZE];
2238 static int pool_size;
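/* Worked out: the 1020 byte reach minus the 4 bytes reserved for a
   possible branch/align gives MAX_COUNT == 1016, and since each entry
   is at least 4 bytes, at most 1016 / 4 == 254 constants fit in one
   pool.  */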
2240 /* Dump out any constants accumulated in the final pass. These
2241 will only be labels. */
2243 const char *
2244 mcore_output_jump_label_table (void)
2246 int i;
2248 if (pool_size)
2250 fprintf (asm_out_file, "\t.align 2\n");
2252 for (i = 0; i < pool_size; i++)
2254 pool_node * p = pool_vector + i;
2256 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2258 output_asm_insn (".long %0", &p->value);
2261 pool_size = 0;
2264 return "";
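/* With two labels accumulated in the pool, the output would look
   roughly like this (label numbers are arbitrary and the exact label
   spelling depends on the target's label syntax):

	.align 2
   L24:
	.long	L57
   L25:
	.long	L58
*/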
2267 /* Check whether insn is a candidate for a conditional. */
2269 static cond_type
2270 is_cond_candidate (rtx insn)
2272 /* The only things we conditionalize are those that can be directly
2273 changed into a conditional. Only bother with SImode items. If
2274 we wanted to be a little more aggressive, we could also do other
2275 modes such as DImode with reg-reg move or load 0. */
2276 if (NONJUMP_INSN_P (insn))
2278 rtx pat = PATTERN (insn);
2279 rtx src, dst;
2281 if (GET_CODE (pat) != SET)
2282 return COND_NO;
2284 dst = XEXP (pat, 0);
2286 if ((GET_CODE (dst) != REG &&
2287 GET_CODE (dst) != SUBREG) ||
2288 GET_MODE (dst) != SImode)
2289 return COND_NO;
2291 src = XEXP (pat, 1);
2293 if ((GET_CODE (src) == REG ||
2294 (GET_CODE (src) == SUBREG &&
2295 GET_CODE (SUBREG_REG (src)) == REG)) &&
2296 GET_MODE (src) == SImode)
2297 return COND_MOV_INSN;
2298 else if (GET_CODE (src) == CONST_INT &&
2299 INTVAL (src) == 0)
2300 return COND_CLR_INSN;
2301 else if (GET_CODE (src) == PLUS &&
2302 (GET_CODE (XEXP (src, 0)) == REG ||
2303 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2304 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2305 GET_MODE (XEXP (src, 0)) == SImode &&
2306 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2307 INTVAL (XEXP (src, 1)) == 1)
2308 return COND_INC_INSN;
2309 else if (((GET_CODE (src) == MINUS &&
2310 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2311 INTVAL( XEXP (src, 1)) == 1) ||
2312 (GET_CODE (src) == PLUS &&
2313 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2314 INTVAL (XEXP (src, 1)) == -1)) &&
2315 (GET_CODE (XEXP (src, 0)) == REG ||
2316 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2317 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2318 GET_MODE (XEXP (src, 0)) == SImode)
2319 return COND_DEC_INSN;
2321 /* Some insns that we don't bother with:
2322 (set (rx:DI) (ry:DI))
2323 (set (rx:DI) (const_int 0))
2327 else if (JUMP_P (insn)
2328 && GET_CODE (PATTERN (insn)) == SET
2329 && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2330 return COND_BRANCH_INSN;
2332 return COND_NO;
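/* A few illustrative patterns and the classification they would get
   from the code above (the register numbers are arbitrary):

     (set (reg:SI 2) (reg:SI 3))                          -> COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                       -> COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))  -> COND_INC_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1))) -> COND_DEC_INSN
     a jump_insn whose pattern is (set (pc) (label_ref ...))
                                                          -> COND_BRANCH_INSN  */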
2335 /* Emit a conditional version of insn and replace the old insn with the
2336 new one. Return the new insn if emitted. */
2338 static rtx_insn *
2339 emit_new_cond_insn (rtx insn, int cond)
2341 rtx c_insn = 0;
2342 rtx pat, dst, src;
2343 cond_type num;
2345 if ((num = is_cond_candidate (insn)) == COND_NO)
2346 return NULL;
2348 pat = PATTERN (insn);
2350 if (NONJUMP_INSN_P (insn))
2352 dst = SET_DEST (pat);
2353 src = SET_SRC (pat);
2355 else
2357 dst = JUMP_LABEL (insn);
2358 src = NULL_RTX;
2361 switch (num)
2363 case COND_MOV_INSN:
2364 case COND_CLR_INSN:
2365 if (cond)
2366 c_insn = gen_movt0 (dst, src, dst);
2367 else
2368 c_insn = gen_movt0 (dst, dst, src);
2369 break;
2371 case COND_INC_INSN:
2372 if (cond)
2373 c_insn = gen_incscc (dst, dst);
2374 else
2375 c_insn = gen_incscc_false (dst, dst);
2376 break;
2378 case COND_DEC_INSN:
2379 if (cond)
2380 c_insn = gen_decscc (dst, dst);
2381 else
2382 c_insn = gen_decscc_false (dst, dst);
2383 break;
2385 case COND_BRANCH_INSN:
2386 if (cond)
2387 c_insn = gen_branch_true (dst);
2388 else
2389 c_insn = gen_branch_false (dst);
2390 break;
2392 default:
2393 return NULL;
2396 /* Only copy the notes if they exist. */
2397 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2399 /* We really don't need to bother with the notes and links at this
2400 point, but go ahead and save the notes. This will help is_dead()
2401 when applying peepholes (links don't matter since they are not
2402 used any more beyond this point for the mcore). */
2403 REG_NOTES (c_insn) = REG_NOTES (insn);
2406 if (num == COND_BRANCH_INSN)
2408 /* For jumps, we need to be a little bit careful and emit the new jump
2409 before the old one and to update the use count for the target label.
2410 This way, the barrier following the old (uncond) jump will get
2411 deleted, but the label won't. */
2412 c_insn = emit_jump_insn_before (c_insn, insn);
2414 ++ LABEL_NUSES (dst);
2416 JUMP_LABEL (c_insn) = dst;
2418 else
2419 c_insn = emit_insn_after (c_insn, insn);
2421 delete_insn (insn);
2423 return as_a <rtx_insn *> (c_insn);
2426 /* Attempt to change a basic block into a series of conditional insns. This
2427 works by taking the branch at the end of the 1st block and scanning for the
2428 end of the 2nd block. If all instructions in the 2nd block have cond.
2429 versions and the label at the start of block 3 is the same as the target
2430 from the branch at block 1, then conditionalize all insn in block 2 using
2431 the inverse condition of the branch at block 1. (Note I'm bending the
2432 definition of basic block here.)
2434 e.g., change:
2436 bt L2 <-- end of block 1 (delete)
2437 mov r7,r8
2438 addu r7,1
2439 br L3 <-- end of block 2
2441 L2: ... <-- start of block 3 (NUSES==1)
2442 L3: ...
to:
2446 movf r7,r8
2447 incf r7
2448 bf L3
2450 L3: ...
2452 we can delete the L2 label if NUSES==1 and re-apply the optimization
2453 starting at the last instruction of block 2. This may allow an entire
2454 if-then-else statement to be conditionalized. BRC */
2455 static rtx_insn *
2456 conditionalize_block (rtx_insn *first)
2458 rtx_insn *insn;
2459 rtx br_pat;
2460 rtx_insn *end_blk_1_br = 0;
2461 rtx_insn *end_blk_2_insn = 0;
2462 rtx_insn *start_blk_3_lab = 0;
2463 int cond;
2464 int br_lab_num;
2465 int blk_size = 0;
2468 /* Check that the first insn is a candidate conditional jump. This is
2469 the one that we'll eliminate. If not, advance to the next insn to
2470 try. */
2471 if (! JUMP_P (first)
2472 || GET_CODE (PATTERN (first)) != SET
2473 || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2474 return NEXT_INSN (first);
2476 /* Extract some information we need. */
2477 end_blk_1_br = first;
2478 br_pat = PATTERN (end_blk_1_br);
2480 /* Complement the condition since we use the reverse cond. for the insns. */
2481 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2483 /* Determine what kind of branch we have. */
2484 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2486 /* A normal branch, so extract label out of first arm. */
2487 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2489 else
2491 /* An inverse branch, so extract the label out of the 2nd arm
2492 and complement the condition. */
2493 cond = (cond == 0);
2494 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2497 /* Scan forward for the start of block 2: it must start with a
2498 label and that label must be the same as the branch target
2499 label from block 1. We don't care about whether block 2 actually
2500 ends with a branch or a label (an uncond. branch is
2501 conditionalizable). */
2502 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2504 enum rtx_code code;
2506 code = GET_CODE (insn);
2508 /* Look for the label at the start of block 3. */
2509 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2510 break;
2512 /* Skip barriers, notes, and conditionalizable insns. If the
2513 insn is not conditionalizable or makes this optimization fail,
2514 just return the next insn so we can start over from that point. */
2515 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2516 return NEXT_INSN (insn);
2518 /* Remember the last real insn before the label (i.e. end of block 2). */
2519 if (code == JUMP_INSN || code == INSN)
2521 blk_size ++;
2522 end_blk_2_insn = insn;
2526 if (!insn)
2527 return insn;
2529 /* It is possible for this optimization to slow performance if the blocks
2530 are long. This really depends upon whether the branch is likely taken
2531 or not. If the branch is taken, we slow performance in many cases. But,
2532 if the branch is not taken, we always help performance (for a single
2533 block, but for a double block (i.e. when the optimization is re-applied)
2534 this is not true since the 'right thing' depends on the overall length of
2535 the collapsed block). As a compromise, don't apply this optimization on
2536 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2537 The best threshold depends on the latencies of the instructions (i.e.,
2538 the branch penalty). */
2539 if (optimize > 1 && blk_size > 2)
2540 return insn;
2542 /* At this point, we've found the start of block 3 and we know that
2543 it is the destination of the branch from block 1. Also, all
2544 instructions in the block 2 are conditionalizable. So, apply the
2545 conditionalization and delete the branch. */
2546 start_blk_3_lab = insn;
2548 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2549 insn = NEXT_INSN (insn))
2551 rtx_insn *newinsn;
2553 if (insn->deleted ())
2554 continue;
2556 /* Try to form a conditional variant of the instruction and emit it. */
2557 if ((newinsn = emit_new_cond_insn (insn, cond)))
2559 if (end_blk_2_insn == insn)
2560 end_blk_2_insn = newinsn;
2562 insn = newinsn;
2566 /* Note whether we will delete the label starting blk 3 when the jump
2567 gets deleted. If so, we want to re-apply this optimization at the
2568 last real instruction right before the label. */
2569 if (LABEL_NUSES (start_blk_3_lab) == 1)
2571 start_blk_3_lab = 0;
2574 /* ??? We probably should redistribute the death notes for this insn, esp.
2575 the death of cc, but it doesn't really matter this late in the game.
2576 The peepholes all use is_dead() which will find the correct death
2577 regardless of whether there is a note. */
2578 delete_insn (end_blk_1_br);
2580 if (! start_blk_3_lab)
2581 return end_blk_2_insn;
2583 /* Return the insn right after the label at the start of block 3. */
2584 return NEXT_INSN (start_blk_3_lab);
2587 /* Apply the conditionalization of blocks optimization. This is the
2588 outer loop that traverses through the insns scanning for a branch
2589 that signifies an opportunity to apply the optimization. Note that
2590 this optimization is applied late. If we could apply it earlier,
2591 say before cse 2, it may expose more optimization opportunities.
2592 But the payback probably isn't really worth the effort (we'd have
2593 to update all reg/flow/notes/links/etc to make it work - and stick it
2594 in before cse 2). */
2596 static void
2597 conditionalize_optimization (void)
2599 rtx_insn *insn;
2601 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2602 continue;
2605 /* This is to handle loads from the constant pool. */
2607 static void
2608 mcore_reorg (void)
2610 /* Reset this variable. */
2611 current_function_anonymous_args = 0;
2613 if (optimize == 0)
2614 return;
2616 /* Conditionalize blocks where we can. */
2617 conditionalize_optimization ();
2619 /* Literal pool generation is now pushed off until the assembler. */
2623 /* Return true if X is something that can be moved directly into r15. */
2625 bool
2626 mcore_r15_operand_p (rtx x)
2628 switch (GET_CODE (x))
2630 case CONST_INT:
2631 return mcore_const_ok_for_inline (INTVAL (x));
2633 case REG:
2634 case SUBREG:
2635 case MEM:
2636 return 1;
2638 default:
2639 return 0;
2643 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2644 directly move X into it, use r1-r14 as a temporary. */
2646 enum reg_class
2647 mcore_secondary_reload_class (enum reg_class rclass,
2648 machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2650 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2651 && !mcore_r15_operand_p (x))
2652 return LRW_REGS;
2653 return NO_REGS;
2656 /* Return the reg_class to use when reloading the rtx X into the class
2657 RCLASS. If X is too complex to move directly into r15, prefer to
2658 use LRW_REGS instead. */
2660 enum reg_class
2661 mcore_reload_class (rtx x, enum reg_class rclass)
2663 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2664 return LRW_REGS;
2666 return rclass;
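/* For example, when reload wants to put a constant that
   mcore_const_ok_for_inline rejects into a class containing r15,
   mcore_secondary_reload_class above asks for an LRW_REGS (r1-r14)
   scratch so the value can be loaded there first, while
   mcore_reload_class steers the reload towards LRW_REGS from the
   start.  */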
2669 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2670 register. Note that the current version doesn't worry about whether
2671 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2672 in r2, which matches an SImode in r2). We might think in the future about
2673 whether we want to be able to say something about modes. */
2676 mcore_is_same_reg (rtx x, rtx y)
2678 /* Strip any and all of the subreg wrappers. */
2679 while (GET_CODE (x) == SUBREG)
2680 x = SUBREG_REG (x);
2682 while (GET_CODE (y) == SUBREG)
2683 y = SUBREG_REG (y);
2685 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2686 return 1;
2688 return 0;
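/* So, for instance, (subreg:HI (reg:SI 2) 0) and (reg:SI 2) are treated
   as the same register, as are (reg:QI 2) and (reg:SI 2); the mode
   difference is deliberately ignored.  */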
2691 static void
2692 mcore_option_override (void)
2694 /* Only the m340 supports little endian code. */
2695 if (TARGET_LITTLE_END && ! TARGET_M340)
2696 target_flags |= MASK_M340;
2700 /* Compute the number of word sized registers needed to
2701 hold a function argument of mode MODE and type TYPE. */
2704 mcore_num_arg_regs (machine_mode mode, const_tree type)
2706 int size;
2708 if (targetm.calls.must_pass_in_stack (mode, type))
2709 return 0;
2711 if (type && mode == BLKmode)
2712 size = int_size_in_bytes (type);
2713 else
2714 size = GET_MODE_SIZE (mode);
2716 return ROUND_ADVANCE (size);
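/* As an example (assuming ROUND_ADVANCE rounds a byte count up to whole
   words and UNITS_PER_WORD == 4): an SImode argument needs 1 register,
   while a 6-byte BLKmode structure needs 2.  */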
2719 static rtx
2720 handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
2722 int size;
2724 /* The MCore ABI defines that a structure whose size is not a whole multiple
2725 of words is passed packed into registers (or spilled onto the stack if
2726 not enough registers are available) with the last few bytes of the
2727 structure being packed, left-justified, into the last register/stack slot.
2728 GCC handles this correctly if the last word is in a stack slot, but we
2729 have to generate a special PARALLEL RTX if the last word is in an
2730 argument register. */
2731 if (type
2732 && TYPE_MODE (type) == BLKmode
2733 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2734 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2735 && (size % UNITS_PER_WORD != 0)
2736 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2738 rtx arg_regs [NPARM_REGS];
2739 int nregs;
2740 rtx result;
2741 rtvec rtvec;
2743 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2745 arg_regs [nregs] =
2746 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2747 GEN_INT (nregs * UNITS_PER_WORD));
2748 nregs ++;
2751 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2752 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2753 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2754 arg_regs[3], arg_regs[4], arg_regs[5]);
2756 result = gen_rtx_PARALLEL (mode, rtvec);
2757 return result;
2760 return gen_rtx_REG (mode, reg);
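/* As a sketch, a 6-byte BLKmode structure starting in the first argument
   register (r2 on the MCore) would come back as roughly

     (parallel:BLK [(expr_list (reg:SI 2) (const_int 0))
                    (expr_list (reg:SI 3) (const_int 4))])

   so that the trailing two bytes are packed, left-justified, into r3
   rather than spilling to the stack.  */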
2764 mcore_function_value (const_tree valtype, const_tree func)
2766 machine_mode mode;
2767 int unsigned_p;
2769 mode = TYPE_MODE (valtype);
2771 /* Since we promote return types, we must promote the mode here too. */
2772 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2774 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2777 /* Define where to put the arguments to a function.
2778 Value is zero to push the argument on the stack,
2779 or a hard register in which to store the argument.
2781 MODE is the argument's machine mode.
2782 TYPE is the data type of the argument (as a tree).
2783 This is null for libcalls where that information may
2784 not be available.
2785 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2786 the preceding args and about the function being called.
2787 NAMED is nonzero if this argument is a named parameter
2788 (otherwise it is an extra parameter matching an ellipsis).
2790 On MCore the first args are normally in registers
2791 and the rest are pushed. Any arg that starts within the first
2792 NPARM_REGS words is at least partially passed in a register unless
2793 its data type forbids. */
2795 static rtx
2796 mcore_function_arg (cumulative_args_t cum, machine_mode mode,
2797 const_tree type, bool named)
2799 int arg_reg;
2801 if (! named || mode == VOIDmode)
2802 return 0;
2804 if (targetm.calls.must_pass_in_stack (mode, type))
2805 return 0;
2807 arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
2809 if (arg_reg < NPARM_REGS)
2810 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2812 return 0;
2815 static void
2816 mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2817 const_tree type, bool named ATTRIBUTE_UNUSED)
2819 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2821 *cum = (ROUND_REG (*cum, mode)
2822 + (int)named * mcore_num_arg_regs (mode, type));
2825 static unsigned int
2826 mcore_function_arg_boundary (machine_mode mode,
2827 const_tree type ATTRIBUTE_UNUSED)
2829 /* Doubles must be aligned to an 8 byte boundary. */
2830 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2831 ? BIGGEST_ALIGNMENT
2832 : PARM_BOUNDARY);
2835 /* Returns the number of bytes of argument registers required to hold *part*
2836 of a parameter of machine mode MODE and type TYPE (which may be NULL if
2837 the type is not known). If the argument fits entirely in the argument
2838 registers, or entirely on the stack, then 0 is returned. CUM is the
2839 number of argument registers already used by earlier parameters to
2840 the function. */
2842 static int
2843 mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
2844 tree type, bool named)
2846 int reg = ROUND_REG (*get_cumulative_args (cum), mode);
2848 if (named == 0)
2849 return 0;
2851 if (targetm.calls.must_pass_in_stack (mode, type))
2852 return 0;
2854 /* REG is not the *hardware* register number of the register that holds
2855 the argument, it is the *argument* register number. So for example,
2856 the first argument to a function goes in argument register 0, which
2857 translates (for the MCore) into hardware register 2. The second
2858 argument goes into argument register 1, which translates into hardware
2859 register 3, and so on. NPARM_REGS is the number of argument registers
2860 supported by the target, not the maximum hardware register number of
2861 the target. */
2862 if (reg >= NPARM_REGS)
2863 return 0;
2865 /* If the argument fits entirely in registers, return 0. */
2866 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2867 return 0;
2869 /* The argument overflows the number of available argument registers.
2870 Compute how many argument registers have not yet been assigned to
2871 hold an argument. */
2872 reg = NPARM_REGS - reg;
2874 /* Return partially in registers and partially on the stack. */
2875 return reg * UNITS_PER_WORD;
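/* A worked example with NPARM_REGS == 6: if four argument registers are
   already occupied (reg == 4) and the next parameter needs three words,
   then 4 + 3 > 6, so the first 6 - 4 == 2 words (8 bytes) go in
   registers and the remainder is passed on the stack.  */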
2878 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2881 mcore_dllexport_name_p (const char * symbol)
2883 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2886 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2889 mcore_dllimport_name_p (const char * symbol)
2891 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2894 /* Mark a DECL as being dllexport'd. */
2896 static void
2897 mcore_mark_dllexport (tree decl)
2899 const char * oldname;
2900 char * newname;
2901 rtx rtlname;
2902 tree idp;
2904 rtlname = XEXP (DECL_RTL (decl), 0);
2906 if (GET_CODE (rtlname) == MEM)
2907 rtlname = XEXP (rtlname, 0);
2908 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2909 oldname = XSTR (rtlname, 0);
2911 if (mcore_dllexport_name_p (oldname))
2912 return; /* Already done. */
2914 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2915 sprintf (newname, "@e.%s", oldname);
2917 /* We pass newname through get_identifier to ensure it has a unique
2918 address. RTL processing can sometimes peek inside the symbol ref
2919 and compare the string's addresses to see if two symbols are
2920 identical. */
2921 /* ??? At least I think that's why we do this. */
2922 idp = get_identifier (newname);
2924 XEXP (DECL_RTL (decl), 0) =
2925 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2928 /* Mark a DECL as being dllimport'd. */
2930 static void
2931 mcore_mark_dllimport (tree decl)
2933 const char * oldname;
2934 char * newname;
2935 tree idp;
2936 rtx rtlname;
2937 rtx newrtl;
2939 rtlname = XEXP (DECL_RTL (decl), 0);
2941 if (GET_CODE (rtlname) == MEM)
2942 rtlname = XEXP (rtlname, 0);
2943 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2944 oldname = XSTR (rtlname, 0);
2946 gcc_assert (!mcore_dllexport_name_p (oldname));
2947 if (mcore_dllimport_name_p (oldname))
2948 return; /* Already done. */
2950 /* ??? One can well ask why we're making these checks here,
2951 and that would be a good question. */
2953 /* Imported variables can't be initialized. */
2954 if (TREE_CODE (decl) == VAR_DECL
2955 && !DECL_VIRTUAL_P (decl)
2956 && DECL_INITIAL (decl))
2958 error ("initialized variable %q+D is marked dllimport", decl);
2959 return;
2962 /* `extern' needn't be specified with dllimport.
2963 Specify `extern' now and hope for the best. Sigh. */
2964 if (TREE_CODE (decl) == VAR_DECL
2965 /* ??? Is this test for vtables needed? */
2966 && !DECL_VIRTUAL_P (decl))
2968 DECL_EXTERNAL (decl) = 1;
2969 TREE_PUBLIC (decl) = 1;
2972 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2973 sprintf (newname, "@i.__imp_%s", oldname);
2975 /* We pass newname through get_identifier to ensure it has a unique
2976 address. RTL processing can sometimes peek inside the symbol ref
2977 and compare the string's addresses to see if two symbols are
2978 identical. */
2979 /* ??? At least I think that's why we do this. */
2980 idp = get_identifier (newname);
2982 newrtl = gen_rtx_MEM (Pmode,
2983 gen_rtx_SYMBOL_REF (Pmode,
2984 IDENTIFIER_POINTER (idp)));
2985 XEXP (DECL_RTL (decl), 0) = newrtl;
2988 static int
2989 mcore_dllexport_p (tree decl)
2991 if ( TREE_CODE (decl) != VAR_DECL
2992 && TREE_CODE (decl) != FUNCTION_DECL)
2993 return 0;
2995 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2998 static int
2999 mcore_dllimport_p (tree decl)
3001 if ( TREE_CODE (decl) != VAR_DECL
3002 && TREE_CODE (decl) != FUNCTION_DECL)
3003 return 0;
3005 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3008 /* We must mark dll symbols specially. Definitions of dllexport'd objects
3009 install some info in the .drectve (PE) or .exports (ELF) sections. */
3011 static void
3012 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3014 /* Mark the decl so we can tell from the rtl whether the object is
3015 dllexport'd or dllimport'd. */
3016 if (mcore_dllexport_p (decl))
3017 mcore_mark_dllexport (decl);
3018 else if (mcore_dllimport_p (decl))
3019 mcore_mark_dllimport (decl);
3021 /* It might be that DECL has already been marked as dllimport, but
3022 a subsequent definition nullified that. The attribute is gone
3023 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3024 else if ((TREE_CODE (decl) == FUNCTION_DECL
3025 || TREE_CODE (decl) == VAR_DECL)
3026 && DECL_RTL (decl) != NULL_RTX
3027 && GET_CODE (DECL_RTL (decl)) == MEM
3028 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3029 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3030 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3032 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3033 tree idp = get_identifier (oldname + 9);
3034 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3036 XEXP (DECL_RTL (decl), 0) = newrtl;
3038 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3039 ??? We leave these alone for now. */
3043 /* Undo the effects of the above. */
3045 static const char *
3046 mcore_strip_name_encoding (const char * str)
3048 return str + (str[0] == '@' ? 3 : 0);
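/* To illustrate the encoding (the names are made up): a dllexport'd
   "foo" becomes the symbol "@e.foo", while a dllimport'd "bar" is
   referenced through "@i.__imp_bar".  mcore_strip_name_encoding then
   drops the three character "@e."/"@i." prefix, yielding "foo" and
   "__imp_bar" respectively.  */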
3051 /* MCore specific attribute support.
3052 dllexport - for exporting a function/variable that will live in a dll
3053 dllimport - for importing a function/variable from a dll
3054 naked - do not create a function prologue/epilogue. */
3056 /* Handle a "naked" attribute; arguments as in
3057 struct attribute_spec.handler. */
3059 static tree
3060 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3061 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3063 if (TREE_CODE (*node) != FUNCTION_DECL)
3065 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3066 name);
3067 *no_add_attrs = true;
3070 return NULL_TREE;
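/* In user code the attribute might be used like this (illustrative
   declaration only):

     void handler (void) __attribute__ ((naked));

   Such a function gets no prologue or epilogue (see
   mcore_naked_function_p below), so its body must be written entirely
   in assembly, including the return.  */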
3073 /* ??? It looks like this is PE specific? Oh well, this is what the
3074 old code did as well. */
3076 static void
3077 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3079 int len;
3080 const char * name;
3081 char * string;
3082 const char * prefix;
3084 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3086 /* Strip off any encoding in name. */
3087 name = (* targetm.strip_name_encoding) (name);
3089 /* The object is put in, for example, section .text$foo.
3090 The linker will then ultimately place it in .text
3091 (everything from the $ on is stripped). */
3092 if (TREE_CODE (decl) == FUNCTION_DECL)
3093 prefix = ".text$";
3094 /* For compatibility with EPOC, we ignore the fact that the
3095 section might have relocs against it. */
3096 else if (decl_readonly_section (decl, 0))
3097 prefix = ".rdata$";
3098 else
3099 prefix = ".data$";
3101 len = strlen (name) + strlen (prefix);
3102 string = XALLOCAVEC (char, len + 1);
3104 sprintf (string, "%s%s", prefix, name);
3106 set_decl_section_name (decl, string);
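/* For instance (names hypothetical), a function "foo" ends up in section
   ".text$foo", a read-only object "bar" in ".rdata$bar", and a writable
   object "baz" in ".data$baz"; the linker later folds these back into
   .text, .rdata and .data by stripping everything from the '$' on.  */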
3110 mcore_naked_function_p (void)
3112 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3115 static bool
3116 mcore_warn_func_return (tree decl)
3118 /* Naked functions are implemented entirely in assembly, including the
3119 return sequence, so suppress warnings about this. */
3120 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3123 #ifdef OBJECT_FORMAT_ELF
3124 static void
3125 mcore_asm_named_section (const char *name,
3126 unsigned int flags ATTRIBUTE_UNUSED,
3127 tree decl ATTRIBUTE_UNUSED)
3129 fprintf (asm_out_file, "\t.section %s\n", name);
3131 #endif /* OBJECT_FORMAT_ELF */
3133 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3135 static void
3136 mcore_external_libcall (rtx fun)
3138 fprintf (asm_out_file, "\t.import\t");
3139 assemble_name (asm_out_file, XSTR (fun, 0));
3140 fprintf (asm_out_file, "\n");
3143 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3145 static bool
3146 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3148 const HOST_WIDE_INT size = int_size_in_bytes (type);
3149 return (size == -1 || size > 2 * UNITS_PER_WORD);
3152 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3153 Output assembler code for a block containing the constant parts
3154 of a trampoline, leaving space for the variable parts.
3156 On the MCore, the trampoline looks like:
3157 lrw r1, function
3158 lrw r13, area
3159 jmp r13
3160 or r0, r0
3161 .literals */
3163 static void
3164 mcore_asm_trampoline_template (FILE *f)
3166 fprintf (f, "\t.short 0x7102\n");
3167 fprintf (f, "\t.short 0x7d02\n");
3168 fprintf (f, "\t.short 0x00cd\n");
3169 fprintf (f, "\t.short 0x1e00\n");
3170 fprintf (f, "\t.long 0\n");
3171 fprintf (f, "\t.long 0\n");
3174 /* Worker function for TARGET_TRAMPOLINE_INIT. */
3176 static void
3177 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3179 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3180 rtx mem;
3182 emit_block_move (m_tramp, assemble_trampoline_template (),
3183 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3185 mem = adjust_address (m_tramp, SImode, 8);
3186 emit_move_insn (mem, chain_value);
3187 mem = adjust_address (m_tramp, SImode, 12);
3188 emit_move_insn (mem, fnaddr);
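/* A sketch of the trampoline once mcore_trampoline_init has run
   (offsets in bytes from the start of the trampoline block):

     0 -  7 : the four .short opcodes copied from the template above
              (lrw r1,...; lrw r13,...; jmp r13; or r0,r0)
     8 - 11 : the static chain value
    12 - 15 : the address of the target function

   The two literal slots are the ".literals" the template comment refers
   to; the lrw instructions pick them up pc-relatively at run time.  */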
3191 /* Implement TARGET_LEGITIMATE_CONSTANT_P
3193 On the MCore, allow anything but a double. */
3195 static bool
3196 mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3198 return GET_CODE (x) != CONST_DOUBLE;