/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2009, 2010 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,  NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS,  /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,      /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,      /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,      /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,      /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;            /* Stdarg spills (bytes).  */
  int reg_size;            /* Non-volatile reg saves (bytes).  */
  int reg_mask;            /* Non-volatile reg saves.  */
  int local_size;          /* Locals.  */
  int outbound_size;       /* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4  /* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void        output_stack_adjust (int, int);
static int         calc_live_regs (int *);
static int         try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char *output_inline_const (enum machine_mode, rtx *);
static void        layout_mcore_frame (struct mcore_frame *);
static void        mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static cond_type   is_cond_candidate (rtx);
static rtx         emit_new_cond_insn (rtx, int);
static rtx         conditionalize_block (rtx);
static void        conditionalize_optimization (void);
static void        mcore_reorg (void);
static rtx         handle_structs_in_regs (enum machine_mode, const_tree, int);
static void        mcore_mark_dllexport (tree);
static void        mcore_mark_dllimport (tree);
static int         mcore_dllexport_p (tree);
static int         mcore_dllimport_p (tree);
static tree        mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void        mcore_asm_named_section (const char *,
                                            unsigned int, tree);
#endif
static void        mcore_print_operand (FILE *, rtx, int);
static void        mcore_print_operand_address (FILE *, rtx);
static bool        mcore_print_operand_punct_valid_p (unsigned char code);
static void        mcore_unique_section (tree, int);
static void        mcore_encode_section_info (tree, rtx, int);
static const char *mcore_strip_name_encoding (const char *);
static int         mcore_const_costs (rtx, RTX_CODE);
static int         mcore_and_cost (rtx);
static int         mcore_ior_cost (rtx);
static bool        mcore_rtx_costs (rtx, int, int, int *, bool);
static void        mcore_external_libcall (rtx);
static bool        mcore_return_in_memory (const_tree, const_tree);
static int         mcore_arg_partial_bytes (CUMULATIVE_ARGS *,
                                            enum machine_mode,
                                            tree, bool);
static void        mcore_asm_trampoline_template (FILE *);
static void        mcore_trampoline_init (rtx, tree, rtx);
static void        mcore_option_override (void);

/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "dllexport", 0, 0, true,  false, false, NULL },
  { "dllimport", 0, 0, true,  false, false, NULL },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute },
  { NULL,        0, 0, false, false, false, NULL }
};

/* Options we default to specific settings when -O* is used; the user
   can subsequently override any of these settings.

   Omitting the frame pointer is a very good idea on the MCore.
   Scheduling isn't worth anything on the current MCore implementation.  */

static const struct default_options mcore_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_ffunction_cse, NULL, 0 },
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_ALL, OPT_fcaller_saves, NULL, 0 },
    { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 },
    { OPT_LEVELS_ALL, OPT_fschedule_insns2, NULL, 0 },
    { OPT_LEVELS_SIZE, OPT_mhardlit, NULL, 0 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };

/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL  mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES  merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND  mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS  mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P  mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE  mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION  mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION  default_no_function_rodata_section
#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS  TARGET_DEFAULT
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO  mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING  mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS  mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST  hook_int_rtx_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG  mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE  default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES  hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY  mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK  must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE  hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES  mcore_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS  mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE  mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT  mcore_trampoline_init

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE  mcore_option_override
#undef  TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE  mcore_option_optimization_table

#undef  TARGET_EXCEPT_UNWIND_INFO
#define TARGET_EXCEPT_UNWIND_INFO  sjlj_except_unwind_info

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack by DIRECTION * SIZE bytes.  When extending the stack
   by a large amount, do it incrementally and probe each increment.  */

static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
        {
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
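
/* For example, with mcore_stack_increment set to 512, growing the stack
   by 1200 bytes emits a single "movi r1,512", then two rounds of
   subtracting r1 from the stack pointer each followed by a volatile
   store of sp at (sp) -- the probe -- and finally a plain 176 byte
   adjustment, which needs no probe.  */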

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in X to the stream.  */

static void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO (base)], INTVAL (index));
            break;

          default:
            gcc_unreachable ();
          }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
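
/* For example, a (plus (reg r2) (const_int 8)) address is printed as
   "(r2,8)", and a plain register address as "(r2)".  */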

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
          || code == '!');
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'N'  print the log2 of one plus a constant, or 32 when the constant
        is -1 (the bmaski width operand)
   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'x'  print a constant in hex
   'X'  print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          gcc_unreachable ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}

/* What does a constant cost?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an AND instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that CSE will CSE relaxed
   immediates out.  Otherwise we'll get bad code (multiple reloads of the
   same constant).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an IOR cost - see mcore_and_cost ().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int * total,
                 bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
        {
        case GTU:
          /* Unsigned > 0 is the same as != 0; everything else is converted
             below to LEU (reversed cmphs).  */
          if (val == 0)
            code = NE;
          break;

        /* Check whether (LE A imm) can become (LT A imm + 1),
           or (GT A imm) can become (GE A imm + 1).  */
        case GT:
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              op1 = GEN_INT (val + 1);
              code = code == LE ? LT : GE;
            }
          break;

        default:
          break;
        }
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:    /* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* Drop through.  */

    case NE:    /* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:    /* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* Drop through.  */

    case GT:    /* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:    /* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* Drop through.  */

    case LT:    /* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT
          /* covered by btsti x,31.  */
          && INTVAL (op1) != 0
          && ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:   /* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Drop through.  */

    case LEU:   /* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:   /* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* Drop through.  */

    case GEU:   /* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode,
                          cc_reg,
                          gen_rtx_fmt_ee (code, CCmode, op0, op1)));

  return invert;
}
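
/* For example, (eq rx ry) is rewritten as NE with INVERT returned true,
   so the emitted comparison matches the cmpne pattern and the caller
   branches on the false condition; (le rx 10) first becomes (lt rx 11)
   and matches cmplti, with INVERT false.  */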

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);
          gcc_assert (GET_CODE (addr) == SYMBOL_REF);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
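
/* For example, an indirect call whose address register is operand 1
   returns "jsr\t%1", while a direct call to a symbol at the same
   operand index returns "jbsr\t%1".  */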

/* Can we load a constant with a single instruction?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;   /* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
        {
          *x = value - i;
          *y = i;

          return 3;
        }

      if (const_ok_for_mcore (value + i))
        {
          *x = value + i;
          *y = i;

          return 4;
        }
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
        {
          *x = i - value;
          *y = i;

          return 5;
        }

      if (const_ok_for_mcore (value & ~bit))
        {
          *y = bit;
          *x = value & ~bit;
          return 6;
        }

      if (const_ok_for_mcore (value | bit))
        {
          *y = ~bit;
          *x = value | bit;

          return 7;
        }

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;   /* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
        {
          *y = i;
          *x = rot;

          return 8;
        }

      if (shf & 1)
        shf = 0;        /* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
        {
          *y = i;
          *x = shf;

          return 9;
        }
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
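
/* For example, 129 is found by trick 3: 128 is a power of two, so the
   load is "bgeni rx,7" followed by "addi rx,1".  Likewise -256 is found
   by trick 2, since ~(-256) == 255 == 2^8 - 1: "bmaski rx,8" followed
   by "not rx".  */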

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;       /* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
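
/* For example, 0xF0F0F0F0: after the first three steps each byte holds
   its own population count (0x04040404), and the final two additions
   fold the byte counts together, giving 16.  */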

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}
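
/* For example, a mask of 0xFFFFFFFA makes mcore_output_bclri emit a
   bclri for bit 0 and for bit 2 -- one for each zero bit in the mask.  */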

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }

  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);

  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
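
/* For example, 0x30000 is trick 6 (0x10000 plus bit 17), so this emits
   "bgeni %0,16" followed by "bseti %0,17", with the value echoed in the
   comment trailer.  */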

/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)            /* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";                /* r-r */
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";              /* a-R */
          else
            switch (GET_MODE (src))             /* r-m */
              {
              case SImode:
                return "ldw\t%0,%1";
              case HImode:
                return "ld.h\t%0,%1";
              case QImode:
                return "ld.b\t%0,%1";
              default:
                gcc_unreachable ();
              }
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          HOST_WIDE_INT x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))            /* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))       /* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))       /* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))  /* R-P */
            return output_inline_const (SImode, operands);      /* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";        /* Get it from literal pool.  */
        }
      else
        return "lrw\t%0, %1";                   /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
        return "stw\t%1,%0";
      case HImode:
        return "st.h\t%1,%0";
      case QImode:
        return "st.b\t%1,%0";
      default:
        gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.

   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov  %0,%1";
          else
            return "mov %0,%1\n\tmov    %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                gcc_unreachable ();
            }
          else
            gcc_unreachable ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi  %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski        %0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski  %R0,32";
              else
                return "movi    %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi  %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski        %R0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski  %0,32";
              else
                return "movi    %0,0";
            }
        }
      else
        gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_AND (SImode, operands[0],
                                               GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_IOR (SImode, operands[0],
                                               GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;           /* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
                              gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
                              gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
                            gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
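
/* For example, storing the constant 1 into a one-bit field at bit
   position 5 is emitted as an IOR with 0x20, which matches the bseti
   pattern; storing 0 there becomes an AND with ~0x20, matching bclri.  */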

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int next_amount;

          next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

          offset_ld += next_amount;
          size -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);
}
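
/* For example, a 7 byte copy with word-aligned operands emits the
   overlapped sequence: ldw at 0; ld.h at 4, stw at 0; ld.b at 6,
   st.h at 4; st.b at 6 -- each load runs one phase ahead of the
   matching store, double-buffered through TEMP[0] and TEMP[1].  */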

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
        max = 4*4;
      else if (bytes & 3)
        max = 8*4;
      else
        max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}

/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)        /* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)         /* Maximum addi operand.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;        /* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
 finish:
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
}
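
/* For example, with 8 byte stack alignment, a function with 4 bytes of
   locals and two registers to save (localregarg == 12) takes the first
   strategy: a single 16 byte purchase (12 padded to alignment), with
   the registers stored by stw at offsets 4 and 8.  */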

/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  gcc_unreachable ();
}

/* Keep track of some information about varargs for the prolog.  */

static void
mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
                              enum machine_mode mode, tree type,
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
                              int second_time ATTRIBUTE_UNUSED)
{
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}

void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      if (mcore_current_function_name)
        free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (cfun->calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);        /* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offset)),
                      gen_rtx_REG (SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);      /* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                       plus_constant (stack_pointer_rtx, offs)),
                          gen_rtx_REG (SImode, i)));
              offs += 4;
            }
        }
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);  /* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);  /* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}
2089 void
2090 mcore_expand_epilog (void)
2092 struct mcore_frame fi;
2093 int i;
2094 int offs;
2095 int growth = MAX_STACK_GROWS - 1 ;
2098 /* Find out what we're doing. */
2099 layout_mcore_frame(&fi);
2101 if (mcore_naked_function_p ())
2102 return;
2104 /* If we had a frame pointer, restore the sp from that. */
2105 if (frame_pointer_needed)
2107 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2108 growth = fi.local_growth - 1;
2110 else
2112 /* XXX: while loop should accumulate and do a single sell. */
2113 while (growth >= fi.local_growth)
2115 if (fi.growth[growth] != 0)
2116 output_stack_adjust (1, fi.growth[growth]);
2117 growth--;
2121 /* Make sure we've shrunk stack back to the point where the registers
2122 were laid down. This is typically 0/1 iterations. Then pull the
2123 register save information back off the stack. */
2124 while (growth >= fi.reg_growth)
2125 output_stack_adjust ( 1, fi.growth[growth--]);
2127 offs = fi.reg_offset;
2129 for (i = 15; i >= 0; i--)
2131 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2133 int first_reg;
2135 /* Find the starting register. */
2136 first_reg = 15;
2138 while (fi.reg_mask & (1 << first_reg))
2139 first_reg--;
2141 first_reg++;
2143 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2144 gen_rtx_MEM (SImode, stack_pointer_rtx),
2145 GEN_INT (16 - first_reg)));
2147 i -= (15 - first_reg);
2148 offs += (16 - first_reg) * 4;
2150 else if (fi.reg_mask & (1 << i))
2152 emit_insn (gen_movsi
2153 (gen_rtx_REG (SImode, i),
2154 gen_rtx_MEM (SImode,
2155 plus_constant (stack_pointer_rtx, offs))));
2156 offs += 4;
2160 /* Give back anything else. */
2161 /* XXX: Should accumulate total and then give it back. */
2162 while (growth >= 0)
2163 output_stack_adjust (1, fi.growth[growth--]);
2166 /* This code is borrowed from the SH port. */
2168 /* The MCORE cannot load a large constant into a register; constants have to
2169 come from a pc relative load. The reference of a pc relative load
2170 instruction must be less than 1k in front of the instruction. This
2171 means that we often have to dump a constant inside a function, and
2172 generate code to branch around it.
2174 It is important to minimize this, since the branches will slow things
2175 down and make things bigger.
2177 Worst case code looks like:
2179 lrw L1,r0
2180 br L2
2181 align
2182 L1: .long value
2183 L2:
2184 ..
2186 lrw L3,r0
2187 br L4
2188 align
2189 L3: .long value
2190 L4:
2191 ..
2193 We fix this by performing a scan before scheduling, which notices which
2194 instructions need to have their operands fetched from the constant table
2195 and builds the table.
2197 The algorithm is:
2199 Scan to find an instruction which needs a pcrel move. Look forward to find the
2200 last barrier which is within MAX_COUNT bytes of the requirement.
2201 If there isn't one, make one. Process all the instructions between
2202 the find and the barrier.
2204 In the above example, we can tell that L3 is within 1k of L1, so
2205 the first move can be shrunk from the 2 insn+constant sequence into
2206 just 1 insn, and the constant moved to L3 to make:
2208 lrw L1,r0
2209 ..
2210 lrw L3,r0
2211 br L4
2212 align
2213 L3:.long value
2214 L4:.long value
2216 Then the second move becomes the target for the shortening process. */
2218 typedef struct
2220 rtx value; /* Value in table. */
2221 rtx label; /* Label of value. */
2222 } pool_node;
2224 /* The maximum number of constants that can fit into one pool, since
2225 the pc relative range is 0...1020 bytes and constants are at least 4
2226 bytes long. We subtract 4 from the range to allow for the case where
2227 we need to add a branch/align before the constant pool. */
2229 #define MAX_COUNT 1016
2230 #define MAX_POOL_SIZE (MAX_COUNT/4)
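/* A derived figure, for orientation: with a 1016-byte window and 4-byte
   entries, one pool holds at most 254 constants.  */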
2231 static pool_node pool_vector[MAX_POOL_SIZE];
2232 static int pool_size;
2234 /* Dump out any constants accumulated in the final pass. These
2235 will only be labels. */
2237 const char *
2238 mcore_output_jump_label_table (void)
2240 int i;
2242 if (pool_size)
2244 fprintf (asm_out_file, "\t.align 2\n");
2246 for (i = 0; i < pool_size; i++)
2248 pool_node * p = pool_vector + i;
2250 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2252 output_asm_insn (".long %0", &p->value);
2255 pool_size = 0;
2258 return "";
2261 /* Check whether insn is a candidate for a conditional. */
2263 static cond_type
2264 is_cond_candidate (rtx insn)
2266 /* The only things we conditionalize are those that can be directly
2267 changed into a conditional. Only bother with SImode items. If
2268 we wanted to be a little more aggressive, we could also do other
2269 modes such as DImode with reg-reg move or load 0. */
2270 if (GET_CODE (insn) == INSN)
2272 rtx pat = PATTERN (insn);
2273 rtx src, dst;
2275 if (GET_CODE (pat) != SET)
2276 return COND_NO;
2278 dst = XEXP (pat, 0);
2280 if ((GET_CODE (dst) != REG &&
2281 GET_CODE (dst) != SUBREG) ||
2282 GET_MODE (dst) != SImode)
2283 return COND_NO;
2285 src = XEXP (pat, 1);
2287 if ((GET_CODE (src) == REG ||
2288 (GET_CODE (src) == SUBREG &&
2289 GET_CODE (SUBREG_REG (src)) == REG)) &&
2290 GET_MODE (src) == SImode)
2291 return COND_MOV_INSN;
2292 else if (GET_CODE (src) == CONST_INT &&
2293 INTVAL (src) == 0)
2294 return COND_CLR_INSN;
2295 else if (GET_CODE (src) == PLUS &&
2296 (GET_CODE (XEXP (src, 0)) == REG ||
2297 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2298 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2299 GET_MODE (XEXP (src, 0)) == SImode &&
2300 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2301 INTVAL (XEXP (src, 1)) == 1)
2302 return COND_INC_INSN;
2303 else if (((GET_CODE (src) == MINUS &&
2304 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2305 INTVAL (XEXP (src, 1)) == 1) ||
2306 (GET_CODE (src) == PLUS &&
2307 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2308 INTVAL (XEXP (src, 1)) == -1)) &&
2309 (GET_CODE (XEXP (src, 0)) == REG ||
2310 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2311 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2312 GET_MODE (XEXP (src, 0)) == SImode)
2313 return COND_DEC_INSN;
2315 /* Some insns that we don't bother with:
2316 (set (rx:DI) (ry:DI))
2317 (set (rx:DI) (const_int 0))
2318 */
2321 else if (GET_CODE (insn) == JUMP_INSN &&
2322 GET_CODE (PATTERN (insn)) == SET &&
2323 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2324 return COND_BRANCH_INSN;
2326 return COND_NO;
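/* For illustration, how the candidate classes map onto the generators
   used by emit_new_cond_insn below:

     COND_MOV_INSN / COND_CLR_INSN  -> gen_movt0
     COND_INC_INSN                  -> gen_incscc / gen_incscc_false
     COND_DEC_INSN                  -> gen_decscc / gen_decscc_false
     COND_BRANCH_INSN               -> gen_branch_true / gen_branch_false  */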
2329 /* Emit a conditional version of insn and replace the old insn with the
2330 new one. Return the new insn if emitted. */
2332 static rtx
2333 emit_new_cond_insn (rtx insn, int cond)
2335 rtx c_insn = 0;
2336 rtx pat, dst, src;
2337 cond_type num;
2339 if ((num = is_cond_candidate (insn)) == COND_NO)
2340 return NULL;
2342 pat = PATTERN (insn);
2344 if (GET_CODE (insn) == INSN)
2346 dst = SET_DEST (pat);
2347 src = SET_SRC (pat);
2349 else
2351 dst = JUMP_LABEL (insn);
2352 src = NULL_RTX;
2355 switch (num)
2357 case COND_MOV_INSN:
2358 case COND_CLR_INSN:
2359 if (cond)
2360 c_insn = gen_movt0 (dst, src, dst);
2361 else
2362 c_insn = gen_movt0 (dst, dst, src);
2363 break;
2365 case COND_INC_INSN:
2366 if (cond)
2367 c_insn = gen_incscc (dst, dst);
2368 else
2369 c_insn = gen_incscc_false (dst, dst);
2370 break;
2372 case COND_DEC_INSN:
2373 if (cond)
2374 c_insn = gen_decscc (dst, dst);
2375 else
2376 c_insn = gen_decscc_false (dst, dst);
2377 break;
2379 case COND_BRANCH_INSN:
2380 if (cond)
2381 c_insn = gen_branch_true (dst);
2382 else
2383 c_insn = gen_branch_false (dst);
2384 break;
2386 default:
2387 return NULL;
2390 /* Only copy the notes if they exist. */
2391 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2393 /* We really don't need to bother with the notes and links at this
2394 point, but go ahead and save the notes. This will help is_dead()
2395 when applying peepholes (links don't matter since they are not
2396 used any more beyond this point for the mcore). */
2397 REG_NOTES (c_insn) = REG_NOTES (insn);
2400 if (num == COND_BRANCH_INSN)
2402 /* For jumps, we need to be a little bit careful and emit the new jump
2403 before the old one and to update the use count for the target label.
2404 This way, the barrier following the old (uncond) jump will get
2405 deleted, but the label won't. */
2406 c_insn = emit_jump_insn_before (c_insn, insn);
2408 ++ LABEL_NUSES (dst);
2410 JUMP_LABEL (c_insn) = dst;
2412 else
2413 c_insn = emit_insn_after (c_insn, insn);
2415 delete_insn (insn);
2417 return c_insn;
2420 /* Attempt to change a basic block into a series of conditional insns. This
2421 works by taking the branch at the end of the 1st block and scanning for the
2422 end of the 2nd block. If all instructions in the 2nd block have cond.
2423 versions and the label at the start of block 3 is the same as the target
2424 from the branch at block 1, then conditionalize all insns in block 2 using
2425 the inverse condition of the branch at block 1. (Note I'm bending the
2426 definition of basic block here.)
2428 e.g., change:
2430 bt L2 <-- end of block 1 (delete)
2431 mov r7,r8
2432 addu r7,1
2433 br L3 <-- end of block 2
2435 L2: ... <-- start of block 3 (NUSES==1)
2436 L3: ...
2438 to:
2440 movf r7,r8
2441 incf r7
2442 bf L3
2444 L3: ...
2446 we can delete the L2 label if NUSES==1 and re-apply the optimization
2447 starting at the last instruction of block 2. This may allow an entire
2448 if-then-else statement to be conditionalized. BRC */
2449 static rtx
2450 conditionalize_block (rtx first)
2452 rtx insn;
2453 rtx br_pat;
2454 rtx end_blk_1_br = 0;
2455 rtx end_blk_2_insn = 0;
2456 rtx start_blk_3_lab = 0;
2457 int cond;
2458 int br_lab_num;
2459 int blk_size = 0;
2462 /* Check that the first insn is a candidate conditional jump. This is
2463 the one that we'll eliminate. If not, advance to the next insn to
2464 try. */
2465 if (GET_CODE (first) != JUMP_INSN ||
2466 GET_CODE (PATTERN (first)) != SET ||
2467 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2468 return NEXT_INSN (first);
2470 /* Extract some information we need. */
2471 end_blk_1_br = first;
2472 br_pat = PATTERN (end_blk_1_br);
2474 /* Complement the condition since we use the reverse cond. for the insns. */
2475 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2477 /* Determine what kind of branch we have. */
2478 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2480 /* A normal branch, so extract label out of first arm. */
2481 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2483 else
2485 /* An inverse branch, so extract the label out of the 2nd arm
2486 and complement the condition. */
2487 cond = (cond == 0);
2488 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2491 /* Scan forward for the start of block 2: it must start with a
2492 label and that label must be the same as the branch target
2493 label from block 1. We don't care about whether block 2 actually
2494 ends with a branch or a label (an uncond. branch is
2495 conditionalizable). */
2496 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2498 enum rtx_code code;
2500 code = GET_CODE (insn);
2502 /* Look for the label at the start of block 3. */
2503 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2504 break;
2506 /* Skip barriers, notes, and conditionalizable insns. If the
2507 insn is not conditionalizable or makes this optimization fail,
2508 just return the next insn so we can start over from that point. */
2509 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2510 return NEXT_INSN (insn);
2512 /* Remember the last real insn before the label (i.e. end of block 2). */
2513 if (code == JUMP_INSN || code == INSN)
2515 blk_size ++;
2516 end_blk_2_insn = insn;
2520 if (!insn)
2521 return insn;
2523 /* It is possible for this optimization to slow performance if the blocks
2524 are long. This really depends upon whether the branch is likely taken
2525 or not. If the branch is taken, we slow performance in many cases. But,
2526 if the branch is not taken, we always help performance for a single
2527 block; for a double block (i.e. when the optimization is re-applied)
2528 this is no longer true, since the 'right thing' depends on the overall
2529 length of the collapsed block. As a compromise, don't apply this
2530 optimization to blocks larger than size 2 (unlikely for the mcore) when
2531 speed is important. The best threshold depends on the latencies of the
2532 instructions (i.e., the branch penalty). */
2533 if (optimize > 1 && blk_size > 2)
2534 return insn;
2536 /* At this point, we've found the start of block 3 and we know that
2537 it is the destination of the branch from block 1. Also, all
2538 instructions in the block 2 are conditionalizable. So, apply the
2539 conditionalization and delete the branch. */
2540 start_blk_3_lab = insn;
2542 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2543 insn = NEXT_INSN (insn))
2545 rtx newinsn;
2547 if (INSN_DELETED_P (insn))
2548 continue;
2550 /* Try to form a conditional variant of the instruction and emit it. */
2551 if ((newinsn = emit_new_cond_insn (insn, cond)))
2553 if (end_blk_2_insn == insn)
2554 end_blk_2_insn = newinsn;
2556 insn = newinsn;
2560 /* Note whether we will delete the label starting blk 3 when the jump
2561 gets deleted. If so, we want to re-apply this optimization at the
2562 last real instruction right before the label. */
2563 if (LABEL_NUSES (start_blk_3_lab) == 1)
2565 start_blk_3_lab = 0;
2568 /* ??? We probably should redistribute the death notes for this insn, esp.
2569 the death of cc, but it doesn't really matter this late in the game.
2570 The peepholes all use is_dead() which will find the correct death
2571 regardless of whether there is a note. */
2572 delete_insn (end_blk_1_br);
2574 if (! start_blk_3_lab)
2575 return end_blk_2_insn;
2577 /* Return the insn right after the label at the start of block 3. */
2578 return NEXT_INSN (start_blk_3_lab);
2581 /* Apply the conditionalization of blocks optimization. This is the
2582 outer loop that traverses through the insns scanning for a branch
2583 that signifies an opportunity to apply the optimization. Note that
2584 this optimization is applied late. If we could apply it earlier,
2585 say before cse 2, it might expose more optimization opportunities,
2586 but the payback probably isn't worth the effort (we'd have
2587 to update all reg/flow/notes/links/etc. to make it work, and stick it
2588 in before cse 2). */
2590 static void
2591 conditionalize_optimization (void)
2593 rtx insn;
2595 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2596 continue;
2599 static int saved_warn_return_type = -1;
2600 static int saved_warn_return_type_count = 0;
2602 /* This is to handle loads from the constant pool. */
2604 static void
2605 mcore_reorg (void)
2607 /* Reset this variable. */
2608 current_function_anonymous_args = 0;
2610 /* Restore the warn_return_type if it has been altered. */
2611 if (saved_warn_return_type != -1)
2613 /* Only restore the value if we have reached another function.
2614 The test of warn_return_type occurs in finish_function () in
2615 c-decl.c a long time after the code for the function is generated,
2616 so we need a counter to tell us when we have finished parsing that
2617 function and can restore the flag. */
2618 if (--saved_warn_return_type_count == 0)
2620 warn_return_type = saved_warn_return_type;
2621 saved_warn_return_type = -1;
2625 if (optimize == 0)
2626 return;
2628 /* Conditionalize blocks where we can. */
2629 conditionalize_optimization ();
2631 /* Literal pool generation is now pushed off until the assembler. */
2635 /* Return true if X is something that can be moved directly into r15. */
2637 bool
2638 mcore_r15_operand_p (rtx x)
2640 switch (GET_CODE (x))
2642 case CONST_INT:
2643 return mcore_const_ok_for_inline (INTVAL (x));
2645 case REG:
2646 case SUBREG:
2647 case MEM:
2648 return 1;
2650 default:
2651 return 0;
2655 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2656 directly move X into it, use r1-r14 as a temporary. */
2658 enum reg_class
2659 mcore_secondary_reload_class (enum reg_class rclass,
2660 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2662 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2663 && !mcore_r15_operand_p (x))
2664 return LRW_REGS;
2665 return NO_REGS;
2668 /* Return the reg_class to use when reloading the rtx X into the class
2669 RCLASS. If X is too complex to move directly into r15, prefer to
2670 use LRW_REGS instead. */
2672 enum reg_class
2673 mcore_reload_class (rtx x, enum reg_class rclass)
2675 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2676 return LRW_REGS;
2678 return rclass;
2681 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2682 register. Note that the current version doesn't worry about whether
2683 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2684 in r2, which matches an SImode in r2). We might think in the future about
2685 whether we want to be able to say something about modes. */
2687 int
2688 mcore_is_same_reg (rtx x, rtx y)
2690 /* Strip any and all of the subreg wrappers. */
2691 while (GET_CODE (x) == SUBREG)
2692 x = SUBREG_REG (x);
2694 while (GET_CODE (y) == SUBREG)
2695 y = SUBREG_REG (y);
2697 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2698 return 1;
2700 return 0;
2703 static void
2704 mcore_option_override (void)
2706 /* Only the m340 supports little endian code. */
2707 if (TARGET_LITTLE_END && ! TARGET_M340)
2708 target_flags |= MASK_M340;
2712 /* Compute the number of word sized registers needed to
2713 hold a function argument of mode MODE and type TYPE. */
2715 int
2716 mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2718 int size;
2720 if (targetm.calls.must_pass_in_stack (mode, type))
2721 return 0;
2723 if (type && mode == BLKmode)
2724 size = int_size_in_bytes (type);
2725 else
2726 size = GET_MODE_SIZE (mode);
2728 return ROUND_ADVANCE (size);
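/* For example (assuming 4-byte words): a 6-byte BLKmode struct needs
   ROUND_ADVANCE (6) == 2 argument words, while an SImode scalar needs
   just one.  */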
2731 static rtx
2732 handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2734 int size;
2736 /* The MCore ABI defines that a structure whose size is not a whole multiple
2737 of bytes is passed packed into registers (or spilled onto the stack if
2738 not enough registers are available) with the last few bytes of the
2739 structure being packed, left-justified, into the last register/stack slot.
2740 GCC handles this correctly if the last word is in a stack slot, but we
2741 have to generate a special, PARALLEL RTX if the last word is in an
2742 argument register. */
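/* A worked example, assuming FIRST_PARM_REG is r2 and 4-byte words:
   a 10-byte struct starting in the first argument register needs three
   words, so the code below would build
     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))
                (expr_list (reg:SI 4) (const_int 8))])
   with the struct's final two bytes left-justified in the last
   register.  */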
2743 if (type
2744 && TYPE_MODE (type) == BLKmode
2745 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2746 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2747 && (size % UNITS_PER_WORD != 0)
2748 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2750 rtx arg_regs [NPARM_REGS];
2751 int nregs;
2752 rtx result;
2753 rtvec rtvec;
2755 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2757 arg_regs [nregs] =
2758 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2759 GEN_INT (nregs * UNITS_PER_WORD));
2760 nregs ++;
2763 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2764 assert (ARRAY_SIZE (arg_regs) == 6);
2765 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2766 arg_regs[3], arg_regs[4], arg_regs[5]);
2768 result = gen_rtx_PARALLEL (mode, rtvec);
2769 return result;
2772 return gen_rtx_REG (mode, reg);
2775 rtx
2776 mcore_function_value (const_tree valtype, const_tree func)
2778 enum machine_mode mode;
2779 int unsigned_p;
2781 mode = TYPE_MODE (valtype);
2783 /* Since we promote return types, we must promote the mode here too. */
2784 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2786 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2789 /* Define where to put the arguments to a function.
2790 Value is zero to push the argument on the stack,
2791 or a hard register in which to store the argument.
2793 MODE is the argument's machine mode.
2794 TYPE is the data type of the argument (as a tree).
2795 This is null for libcalls where that information may
2796 not be available.
2797 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2798 the preceding args and about the function being called.
2799 NAMED is nonzero if this argument is a named parameter
2800 (otherwise it is an extra parameter matching an ellipsis).
2802 On MCore the first args are normally in registers
2803 and the rest are pushed. Any arg that starts within the first
2804 NPARM_REGS words is at least partially passed in a register unless
2805 its data type forbids. */
2807 rtx
2808 mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
2809 tree type, int named)
2811 int arg_reg;
2813 if (! named || mode == VOIDmode)
2814 return 0;
2816 if (targetm.calls.must_pass_in_stack (mode, type))
2817 return 0;
2819 arg_reg = ROUND_REG (cum, mode);
2821 if (arg_reg < NPARM_REGS)
2822 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2824 return 0;
2827 /* Returns the number of bytes of argument registers required to hold *part*
2828 of a parameter of machine mode MODE and type TYPE (which may be NULL if
2829 the type is not known). If the argument fits entirely in the argument
2830 registers, or entirely on the stack, then 0 is returned. CUM is the
2831 number of argument registers already used by earlier parameters to
2832 the function. */
2834 static int
2835 mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2836 tree type, bool named)
2838 int reg = ROUND_REG (*cum, mode);
2840 if (named == 0)
2841 return 0;
2843 if (targetm.calls.must_pass_in_stack (mode, type))
2844 return 0;
2846 /* REG is not the *hardware* register number of the register that holds
2847 the argument, it is the *argument* register number. So for example,
2848 the first argument to a function goes in argument register 0, which
2849 translates (for the MCore) into hardware register 2. The second
2850 argument goes into argument register 1, which translates into hardware
2851 register 3, and so on. NPARM_REGS is the number of argument registers
2852 supported by the target, not the maximum hardware register number of
2853 the target. */
2854 if (reg >= NPARM_REGS)
2855 return 0;
2857 /* If the argument fits entirely in registers, return 0. */
2858 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2859 return 0;
2861 /* The argument overflows the number of available argument registers.
2862 Compute how many argument registers have not yet been assigned to
2863 hold an argument. */
2864 reg = NPARM_REGS - reg;
2866 /* Return partially in registers and partially on the stack. */
2867 return reg * UNITS_PER_WORD;
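/* A worked example, assuming 4-byte words: with NPARM_REGS == 6, an
   8-byte argument whose first word lands in argument register 5 has
   only one register left, so we return 1 * UNITS_PER_WORD == 4 bytes
   passed in registers; the remaining 4 bytes go on the stack.  */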
2870 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2872 int
2873 mcore_dllexport_name_p (const char * symbol)
2875 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2878 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2880 int
2881 mcore_dllimport_name_p (const char * symbol)
2883 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2886 /* Mark a DECL as being dllexport'd. */
2888 static void
2889 mcore_mark_dllexport (tree decl)
2891 const char * oldname;
2892 char * newname;
2893 rtx rtlname;
2894 tree idp;
2896 rtlname = XEXP (DECL_RTL (decl), 0);
2898 if (GET_CODE (rtlname) == MEM)
2899 rtlname = XEXP (rtlname, 0);
2900 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2901 oldname = XSTR (rtlname, 0);
2903 if (mcore_dllexport_name_p (oldname))
2904 return; /* Already done. */
2906 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2907 sprintf (newname, "@e.%s", oldname);
2909 /* We pass newname through get_identifier to ensure it has a unique
2910 address. RTL processing can sometimes peek inside the symbol ref
2911 and compare the string's addresses to see if two symbols are
2912 identical. */
2913 /* ??? At least I think that's why we do this. */
2914 idp = get_identifier (newname);
2916 XEXP (DECL_RTL (decl), 0) =
2917 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2920 /* Mark a DECL as being dllimport'd. */
2922 static void
2923 mcore_mark_dllimport (tree decl)
2925 const char * oldname;
2926 char * newname;
2927 tree idp;
2928 rtx rtlname;
2929 rtx newrtl;
2931 rtlname = XEXP (DECL_RTL (decl), 0);
2933 if (GET_CODE (rtlname) == MEM)
2934 rtlname = XEXP (rtlname, 0);
2935 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2936 oldname = XSTR (rtlname, 0);
2938 gcc_assert (!mcore_dllexport_name_p (oldname));
2939 if (mcore_dllimport_name_p (oldname))
2940 return; /* Already done. */
2942 /* ??? One can well ask why we're making these checks here,
2943 and that would be a good question. */
2945 /* Imported variables can't be initialized. */
2946 if (TREE_CODE (decl) == VAR_DECL
2947 && !DECL_VIRTUAL_P (decl)
2948 && DECL_INITIAL (decl))
2950 error ("initialized variable %q+D is marked dllimport", decl);
2951 return;
2954 /* `extern' needn't be specified with dllimport.
2955 Specify `extern' now and hope for the best. Sigh. */
2956 if (TREE_CODE (decl) == VAR_DECL
2957 /* ??? Is this test for vtables needed? */
2958 && !DECL_VIRTUAL_P (decl))
2960 DECL_EXTERNAL (decl) = 1;
2961 TREE_PUBLIC (decl) = 1;
2964 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2965 sprintf (newname, "@i.__imp_%s", oldname);
2967 /* We pass newname through get_identifier to ensure it has a unique
2968 address. RTL processing can sometimes peek inside the symbol ref
2969 and compare the string's addresses to see if two symbols are
2970 identical. */
2971 /* ??? At least I think that's why we do this. */
2972 idp = get_identifier (newname);
2974 newrtl = gen_rtx_MEM (Pmode,
2975 gen_rtx_SYMBOL_REF (Pmode,
2976 IDENTIFIER_POINTER (idp)));
2977 XEXP (DECL_RTL (decl), 0) = newrtl;
2980 static int
2981 mcore_dllexport_p (tree decl)
2983 if (TREE_CODE (decl) != VAR_DECL
2984 && TREE_CODE (decl) != FUNCTION_DECL)
2985 return 0;
2987 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2990 static int
2991 mcore_dllimport_p (tree decl)
2993 if (TREE_CODE (decl) != VAR_DECL
2994 && TREE_CODE (decl) != FUNCTION_DECL)
2995 return 0;
2997 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3000 /* We must mark dll symbols specially. Definitions of dllexport'd objects
3001 install some info in the .drectve (PE) or .exports (ELF) sections. */
3003 static void
3004 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3006 /* Mark the decl so we can tell from the rtl whether the object is
3007 dllexport'd or dllimport'd. */
3008 if (mcore_dllexport_p (decl))
3009 mcore_mark_dllexport (decl);
3010 else if (mcore_dllimport_p (decl))
3011 mcore_mark_dllimport (decl);
3013 /* It might be that DECL has already been marked as dllimport, but
3014 a subsequent definition nullified that. The attribute is gone
3015 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3016 else if ((TREE_CODE (decl) == FUNCTION_DECL
3017 || TREE_CODE (decl) == VAR_DECL)
3018 && DECL_RTL (decl) != NULL_RTX
3019 && GET_CODE (DECL_RTL (decl)) == MEM
3020 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3021 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3022 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3024 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
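/* Skipping 9 characters drops the "@i.__imp_" prefix that
   mcore_mark_dllimport added, recovering the plain symbol name.  */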
3025 tree idp = get_identifier (oldname + 9);
3026 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3028 XEXP (DECL_RTL (decl), 0) = newrtl;
3030 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3031 ??? We leave these alone for now. */
3035 /* Undo the effects of the above. */
3037 static const char *
3038 mcore_strip_name_encoding (const char * str)
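/* Both encodings use a 3-character prefix: "@e.name" becomes "name"
   and "@i.__imp_name" becomes "__imp_name".  */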
3040 return str + (str[0] == '@' ? 3 : 0);
3043 /* MCore specific attribute support.
3044 dllexport - for exporting a function/variable that will live in a dll
3045 dllimport - for importing a function/variable from a dll
3046 naked - do not create a function prologue/epilogue. */
3048 /* Handle a "naked" attribute; arguments as in
3049 struct attribute_spec.handler. */
3051 static tree
3052 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3053 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3055 if (TREE_CODE (*node) == FUNCTION_DECL)
3057 /* PR14310 - don't complain about lack of return statement
3058 in naked functions. The solution here is a gross hack
3059 but this is the only way to solve the problem without
3060 adding a new feature to GCC. I did try submitting a patch
3061 that would add such a new feature, but it was (rightfully)
3062 rejected on the grounds that it was creeping featurism;
3063 hence this code. */
3064 if (warn_return_type)
3066 saved_warn_return_type = warn_return_type;
3067 warn_return_type = 0;
3068 saved_warn_return_type_count = 2;
3070 else if (saved_warn_return_type_count)
3071 saved_warn_return_type_count = 2;
3073 else
3075 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3076 name);
3077 *no_add_attrs = true;
3080 return NULL_TREE;
3083 /* ??? It looks like this is PE specific? Oh well, this is what the
3084 old code did as well. */
3086 static void
3087 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3089 int len;
3090 const char * name;
3091 char * string;
3092 const char * prefix;
3094 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3096 /* Strip off any encoding in name. */
3097 name = (* targetm.strip_name_encoding) (name);
3099 /* The object is put in, for example, section .text$foo.
3100 The linker will then ultimately place them in .text
3101 (everything from the $ on is stripped). */
3102 if (TREE_CODE (decl) == FUNCTION_DECL)
3103 prefix = ".text$";
3104 /* For compatibility with EPOC, we ignore the fact that the
3105 section might have relocs against it. */
3106 else if (decl_readonly_section (decl, 0))
3107 prefix = ".rdata$";
3108 else
3109 prefix = ".data$";
3111 len = strlen (name) + strlen (prefix);
3112 string = XALLOCAVEC (char, len + 1);
3114 sprintf (string, "%s%s", prefix, name);
3116 DECL_SECTION_NAME (decl) = build_string (len, string);
3119 int
3120 mcore_naked_function_p (void)
3122 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3125 #ifdef OBJECT_FORMAT_ELF
3126 static void
3127 mcore_asm_named_section (const char *name,
3128 unsigned int flags ATTRIBUTE_UNUSED,
3129 tree decl ATTRIBUTE_UNUSED)
3131 fprintf (asm_out_file, "\t.section %s\n", name);
3133 #endif /* OBJECT_FORMAT_ELF */
3135 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3137 static void
3138 mcore_external_libcall (rtx fun)
3140 fprintf (asm_out_file, "\t.import\t");
3141 assemble_name (asm_out_file, XSTR (fun, 0));
3142 fprintf (asm_out_file, "\n");
3145 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3147 static bool
3148 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3150 const HOST_WIDE_INT size = int_size_in_bytes (type);
3151 return (size == -1 || size > 2 * UNITS_PER_WORD);
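/* That is, aggregates of variable size (int_size_in_bytes returns -1)
   or wider than two words (8 bytes, assuming 4-byte words) are
   returned in memory.  */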
3154 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3155 Output assembler code for a block containing the constant parts
3156 of a trampoline, leaving space for the variable parts.
3158 On the MCore, the trampoline looks like:
3159 lrw r1, function
3160 lrw r13, area
3161 jmp r13
3162 or r0, r0
3163 .literals */
3165 static void
3166 mcore_asm_trampoline_template (FILE *f)
3168 fprintf (f, "\t.short 0x7102\n");
3169 fprintf (f, "\t.short 0x7d02\n");
3170 fprintf (f, "\t.short 0x00cd\n");
3171 fprintf (f, "\t.short 0x1e00\n");
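/* The two literal slots that follow are placeholders only:
   mcore_trampoline_init below stores the static chain value at
   offset 8 and the target function's address at offset 12.  */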
3172 fprintf (f, "\t.long 0\n");
3173 fprintf (f, "\t.long 0\n");
3176 /* Worker function for TARGET_TRAMPOLINE_INIT. */
3178 static void
3179 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3181 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3182 rtx mem;
3184 emit_block_move (m_tramp, assemble_trampoline_template (),
3185 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3187 mem = adjust_address (m_tramp, SImode, 8);
3188 emit_move_insn (mem, chain_value);
3189 mem = adjust_address (m_tramp, SImode, 12);
3190 emit_move_insn (mem, fnaddr);