1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2009 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "assert.h"
29 #include "mcore.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "obstack.h"
39 #include "expr.h"
40 #include "reload.h"
41 #include "recog.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "toplev.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "df.h"
49 /* Maximum size we are allowed to grow the stack in a single operation.
50 If we want more, we must do it in increments of at most this size.
51 If this value is 0, we don't check at all. */
52 int mcore_stack_increment = STACK_UNITS_MAXSTEP;
54 /* For dumping information about frame sizes. */
55 char * mcore_current_function_name = 0;
56 long mcore_current_compilation_timestamp = 0;
58 /* Global variables for machine-dependent things. */
60 /* Provides the class number of the smallest class containing
61 reg number. */
62 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
64 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
65 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
66 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
67 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
68 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
71 /* Provide reg_class from a letter such as appears in the machine
72 description. */
73 const enum reg_class reg_class_from_letter[] =
75 /* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS, /* d */ NO_REGS,
76 /* e */ NO_REGS, /* f */ NO_REGS, /* g */ NO_REGS, /* h */ NO_REGS,
77 /* i */ NO_REGS, /* j */ NO_REGS, /* k */ NO_REGS, /* l */ NO_REGS,
78 /* m */ NO_REGS, /* n */ NO_REGS, /* o */ NO_REGS, /* p */ NO_REGS,
79 /* q */ NO_REGS, /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
80 /* u */ NO_REGS, /* v */ NO_REGS, /* w */ NO_REGS, /* x */ ALL_REGS,
81 /* y */ NO_REGS, /* z */ NO_REGS
84 struct mcore_frame
86 int arg_size; /* Stdarg spills (bytes). */
87 int reg_size; /* Non-volatile reg saves (bytes). */
88 int reg_mask; /* Non-volatile reg saves. */
89 int local_size; /* Locals. */
90 int outbound_size; /* Arg overflow on calls out. */
91 int pad_outbound;
92 int pad_local;
93 int pad_reg;
94 /* Describe the steps we'll use to grow it. */
95 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
96 int growth[MAX_STACK_GROWS];
97 int arg_offset;
98 int reg_offset;
99 int reg_growth;
100 int local_growth;
103 typedef enum
105 COND_NO,
106 COND_MOV_INSN,
107 COND_CLR_INSN,
108 COND_INC_INSN,
109 COND_DEC_INSN,
110 COND_BRANCH_INSN
112 cond_type;
114 static void output_stack_adjust (int, int);
115 static int calc_live_regs (int *);
116 static int try_constant_tricks (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
117 static const char * output_inline_const (enum machine_mode, rtx *);
118 static void layout_mcore_frame (struct mcore_frame *);
119 static void mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
120 static cond_type is_cond_candidate (rtx);
121 static rtx emit_new_cond_insn (rtx, int);
122 static rtx conditionalize_block (rtx);
123 static void conditionalize_optimization (void);
124 static void mcore_reorg (void);
125 static rtx handle_structs_in_regs (enum machine_mode, const_tree, int);
126 static void mcore_mark_dllexport (tree);
127 static void mcore_mark_dllimport (tree);
128 static int mcore_dllexport_p (tree);
129 static int mcore_dllimport_p (tree);
130 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
131 #ifdef OBJECT_FORMAT_ELF
132 static void mcore_asm_named_section (const char *,
133 unsigned int, tree);
134 #endif
135 static void mcore_unique_section (tree, int);
136 static void mcore_encode_section_info (tree, rtx, int);
137 static const char *mcore_strip_name_encoding (const char *);
138 static int mcore_const_costs (rtx, RTX_CODE);
139 static int mcore_and_cost (rtx);
140 static int mcore_ior_cost (rtx);
141 static bool mcore_rtx_costs (rtx, int, int, int *, bool);
142 static void mcore_external_libcall (rtx);
143 static bool mcore_return_in_memory (const_tree, const_tree);
144 static int mcore_arg_partial_bytes (CUMULATIVE_ARGS *,
145 enum machine_mode,
146 tree, bool);
149 /* MCore specific attributes. */
151 static const struct attribute_spec mcore_attribute_table[] =
153 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
154 { "dllexport", 0, 0, true, false, false, NULL },
155 { "dllimport", 0, 0, true, false, false, NULL },
156 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute },
157 { NULL, 0, 0, false, false, false, NULL }
160 /* Initialize the GCC target structure. */
161 #undef TARGET_ASM_EXTERNAL_LIBCALL
162 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
164 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
165 #undef TARGET_MERGE_DECL_ATTRIBUTES
166 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
167 #endif
169 #ifdef OBJECT_FORMAT_ELF
170 #undef TARGET_ASM_UNALIGNED_HI_OP
171 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
172 #undef TARGET_ASM_UNALIGNED_SI_OP
173 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
174 #endif
176 #undef TARGET_ATTRIBUTE_TABLE
177 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
178 #undef TARGET_ASM_UNIQUE_SECTION
179 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
180 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
181 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
182 #undef TARGET_DEFAULT_TARGET_FLAGS
183 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
184 #undef TARGET_ENCODE_SECTION_INFO
185 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
186 #undef TARGET_STRIP_NAME_ENCODING
187 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
188 #undef TARGET_RTX_COSTS
189 #define TARGET_RTX_COSTS mcore_rtx_costs
190 #undef TARGET_ADDRESS_COST
191 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
192 #undef TARGET_MACHINE_DEPENDENT_REORG
193 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
195 #undef TARGET_PROMOTE_FUNCTION_MODE
196 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
197 #undef TARGET_PROMOTE_PROTOTYPES
198 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
200 #undef TARGET_RETURN_IN_MEMORY
201 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
202 #undef TARGET_MUST_PASS_IN_STACK
203 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
204 #undef TARGET_PASS_BY_REFERENCE
205 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
206 #undef TARGET_ARG_PARTIAL_BYTES
207 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
209 #undef TARGET_SETUP_INCOMING_VARARGS
210 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
212 struct gcc_target targetm = TARGET_INITIALIZER;
214 /* Adjust the stack and emit the instructions needed to do it. */
215 static void
216 output_stack_adjust (int direction, int size)
218 /* If extending stack a lot, we do it incrementally. */
219 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
221 rtx tmp = gen_rtx_REG (SImode, 1);
222 rtx memref;
224 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
227 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
228 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
229 MEM_VOLATILE_P (memref) = 1;
230 emit_insn (gen_movsi (memref, stack_pointer_rtx));
231 size -= mcore_stack_increment;
233 while (size > mcore_stack_increment);
235 /* SIZE is now the residual for the last adjustment,
236 which doesn't require a probe. */
239 if (size)
241 rtx insn;
242 rtx val = GEN_INT (size);
244 if (size > 32)
246 rtx nval = gen_rtx_REG (SImode, 1);
247 emit_insn (gen_movsi (nval, val));
248 val = nval;
251 if (direction > 0)
252 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
253 else
254 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
256 emit_insn (insn);
260 /* Work out the registers which need to be saved,
261 both as a mask and a count. */
263 static int
264 calc_live_regs (int * count)
266 int reg;
267 int live_regs_mask = 0;
269 * count = 0;
271 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
273 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
275 (*count)++;
276 live_regs_mask |= (1 << reg);
280 return live_regs_mask;
283 /* Print the operand address in x to the stream. */
285 void
286 mcore_print_operand_address (FILE * stream, rtx x)
288 switch (GET_CODE (x))
290 case REG:
291 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
292 break;
294 case PLUS:
296 rtx base = XEXP (x, 0);
297 rtx index = XEXP (x, 1);
299 if (GET_CODE (base) != REG)
301 /* Ensure that BASE is a register (one of them must be). */
302 rtx temp = base;
303 base = index;
304 index = temp;
307 switch (GET_CODE (index))
309 case CONST_INT:
310 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
311 reg_names[REGNO(base)], INTVAL (index));
312 break;
314 default:
315 gcc_unreachable ();
319 break;
321 default:
322 output_addr_const (stream, x);
323 break;
327 /* Print operand x (an rtx) in assembler syntax to file stream
328 according to modifier code.
330 'R' print the next register or memory location along, i.e. the lsw in
331 a double word value
332 'O' print a constant without the #
333 'M' print a constant as its negative
334 'P' print log2 of a power of two
335 'Q' print log2 of an inverse of a power of two
336 'U' print register for ldm/stm instruction
337 'X' print byte number for xtrbN instruction
    'N' print the bmaski width of a constant, i.e. log2 (value + 1), or 32 for -1
    'x' print a constant in hexadecimal. */
339 void
340 mcore_print_operand (FILE * stream, rtx x, int code)
342 switch (code)
344 case 'N':
345 if (INTVAL(x) == -1)
346 fprintf (asm_out_file, "32");
347 else
348 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
349 break;
350 case 'P':
351 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
352 break;
353 case 'Q':
354 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
355 break;
356 case 'O':
357 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
358 break;
359 case 'M':
360 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
361 break;
362 case 'R':
363 /* Next location along in memory or register. */
364 switch (GET_CODE (x))
366 case REG:
367 fputs (reg_names[REGNO (x) + 1], (stream));
368 break;
369 case MEM:
370 mcore_print_operand_address
371 (stream, XEXP (adjust_address (x, SImode, 4), 0));
372 break;
373 default:
374 gcc_unreachable ();
376 break;
377 case 'U':
378 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
379 reg_names[REGNO (x) + 3]);
380 break;
381 case 'x':
382 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
383 break;
384 case 'X':
385 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
386 break;
388 default:
389 switch (GET_CODE (x))
391 case REG:
392 fputs (reg_names[REGNO (x)], (stream));
393 break;
394 case MEM:
395 output_address (XEXP (x, 0));
396 break;
397 default:
398 output_addr_const (stream, x);
399 break;
401 break;
405 /* What does a constant cost ? */
407 static int
408 mcore_const_costs (rtx exp, enum rtx_code code)
410 HOST_WIDE_INT val = INTVAL (exp);
412 /* Easy constants. */
413 if ( CONST_OK_FOR_I (val)
414 || CONST_OK_FOR_M (val)
415 || CONST_OK_FOR_N (val)
416 || (code == PLUS && CONST_OK_FOR_L (val)))
417 return 1;
418 else if (code == AND
419 && ( CONST_OK_FOR_M (~val)
420 || CONST_OK_FOR_N (~val)))
421 return 2;
422 else if (code == PLUS
423 && ( CONST_OK_FOR_I (-val)
424 || CONST_OK_FOR_M (-val)
425 || CONST_OK_FOR_N (-val)))
426 return 2;
428 return 5;
431 /* What does an and instruction cost?  We compute this because immediates
432 may have been relaxed.  We want CSE to eliminate the relaxed immediates;
433 otherwise we'll get bad code (multiple reloads of the same const). */
435 static int
436 mcore_and_cost (rtx x)
438 HOST_WIDE_INT val;
440 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
441 return 2;
443 val = INTVAL (XEXP (x, 1));
445 /* Do it directly. */
446 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
447 return 2;
448 /* Takes one instruction to load. */
449 else if (const_ok_for_mcore (val))
450 return 3;
451 /* Takes two instructions to load. */
452 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
453 return 4;
455 /* Takes a lrw to load. */
456 return 5;
459 /* What does an or cost - see mcore_and_cost (). */
461 static int
462 mcore_ior_cost (rtx x)
464 HOST_WIDE_INT val;
466 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
467 return 2;
469 val = INTVAL (XEXP (x, 1));
471 /* Do it directly with bclri. */
472 if (CONST_OK_FOR_M (val))
473 return 2;
474 /* Takes one instruction to load. */
475 else if (const_ok_for_mcore (val))
476 return 3;
477 /* Takes two instructions to load. */
478 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
479 return 4;
481 /* Takes a lrw to load. */
482 return 5;
485 static bool
486 mcore_rtx_costs (rtx x, int code, int outer_code, int * total,
487 bool speed ATTRIBUTE_UNUSED)
489 switch (code)
491 case CONST_INT:
492 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
493 return true;
494 case CONST:
495 case LABEL_REF:
496 case SYMBOL_REF:
497 *total = 5;
498 return true;
499 case CONST_DOUBLE:
500 *total = 10;
501 return true;
503 case AND:
504 *total = COSTS_N_INSNS (mcore_and_cost (x));
505 return true;
507 case IOR:
508 *total = COSTS_N_INSNS (mcore_ior_cost (x));
509 return true;
511 case DIV:
512 case UDIV:
513 case MOD:
514 case UMOD:
515 case FLOAT:
516 case FIX:
517 *total = COSTS_N_INSNS (100);
518 return true;
520 default:
521 return false;
525 /* Prepare the operands for a comparison.  Return whether the branch/setcc
526 should invert the sense of the comparison that was emitted. */
528 bool
529 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
531 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
532 bool invert;
534 if (GET_CODE (op1) == CONST_INT)
536 HOST_WIDE_INT val = INTVAL (op1);
538 switch (code)
540 case GTU:
541 /* Unsigned > 0 is the same as != 0; everything else is converted
542 below to LEU (reversed cmphs). */
543 if (val == 0)
544 code = NE;
545 break;
547 /* Check whether (LE A imm) can become (LT A imm + 1),
548 or (GT A imm) can become (GE A imm + 1). */
549 case GT:
550 case LE:
551 if (CONST_OK_FOR_J (val + 1))
553 op1 = GEN_INT (val + 1);
554 code = code == LE ? LT : GE;
556 break;
558 default:
559 break;
563 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
564 op1 = force_reg (SImode, op1);
566 /* cmpnei: 0-31 (K immediate)
567 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
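/* For example (illustrative, register numbers arbitrary): (le r2 5) is
   first rewritten to (lt r2 6) and emitted as "cmplti r2,6" with the
   return value false, while (eq r2 r3) is emitted as "cmpne r2,r3" with
   the return value true, so the consumer tests the opposite sense of the
   condition bit.  */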
568 invert = false;
569 switch (code)
571 case EQ: /* Use inverted condition, cmpne. */
572 code = NE;
573 invert = true;
574 /* Drop through. */
576 case NE: /* Use normal condition, cmpne. */
577 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
578 op1 = force_reg (SImode, op1);
579 break;
581 case LE: /* Use inverted condition, reversed cmplt. */
582 code = GT;
583 invert = true;
584 /* Drop through. */
586 case GT: /* Use normal condition, reversed cmplt. */
587 if (GET_CODE (op1) == CONST_INT)
588 op1 = force_reg (SImode, op1);
589 break;
591 case GE: /* Use inverted condition, cmplt. */
592 code = LT;
593 invert = true;
594 /* Drop through. */
596 case LT: /* Use normal condition, cmplt. */
597 if (GET_CODE (op1) == CONST_INT &&
598 /* covered by btsti x,31. */
599 INTVAL (op1) != 0 &&
600 ! CONST_OK_FOR_J (INTVAL (op1)))
601 op1 = force_reg (SImode, op1);
602 break;
604 case GTU: /* Use inverted condition, cmple. */
605 /* We coped with unsigned > 0 above. */
606 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
607 code = LEU;
608 invert = true;
609 /* Drop through. */
611 case LEU: /* Use normal condition, reversed cmphs. */
612 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
613 op1 = force_reg (SImode, op1);
614 break;
616 case LTU: /* Use inverted condition, cmphs. */
617 code = GEU;
618 invert = true;
619 /* Drop through. */
621 case GEU: /* Use normal condition, cmphs. */
622 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
623 op1 = force_reg (SImode, op1);
624 break;
626 default:
627 break;
630 emit_insn (gen_rtx_SET (VOIDmode,
631 cc_reg,
632 gen_rtx_fmt_ee (code, CCmode, op0, op1)));
633 return invert;
637 mcore_symbolic_address_p (rtx x)
639 switch (GET_CODE (x))
641 case SYMBOL_REF:
642 case LABEL_REF:
643 return 1;
644 case CONST:
645 x = XEXP (x, 0);
646 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
647 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
648 && GET_CODE (XEXP (x, 1)) == CONST_INT);
649 default:
650 return 0;
654 /* Functions to output assembly code for a function call. */
656 char *
657 mcore_output_call (rtx operands[], int index)
659 static char buffer[20];
660 rtx addr = operands [index];
662 if (REG_P (addr))
664 if (TARGET_CG_DATA)
666 gcc_assert (mcore_current_function_name);
668 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
669 "unknown", 1);
672 sprintf (buffer, "jsr\t%%%d", index);
674 else
676 if (TARGET_CG_DATA)
678 gcc_assert (mcore_current_function_name);
679 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
681 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
682 XSTR (addr, 0), 0);
685 sprintf (buffer, "jbsr\t%%%d", index);
688 return buffer;
691 /* Can we load a constant with a single instruction ? */
694 const_ok_for_mcore (HOST_WIDE_INT value)
696 if (value >= 0 && value <= 127)
697 return 1;
699 /* Try exact power of two. */
700 if (CONST_OK_FOR_M (value))
701 return 1;
703 /* Try exact power of two - 1. */
704 if (CONST_OK_FOR_N (value) && value != -1)
705 return 1;
707 return 0;
710 /* Can we load a constant inline with up to 2 instructions ? */
713 mcore_const_ok_for_inline (HOST_WIDE_INT value)
715 HOST_WIDE_INT x, y;
717 return try_constant_tricks (value, & x, & y) > 0;
720 /* Are we loading the constant using a not ? */
723 mcore_const_trick_uses_not (HOST_WIDE_INT value)
725 HOST_WIDE_INT x, y;
727 return try_constant_tricks (value, & x, & y) == 2;
730 /* Try tricks to load a constant inline and return the trick number if
731 success (0 is non-inlinable).
733 0: not inlinable
734 1: single instruction (do the usual thing)
735 2: single insn followed by a 'not'
736 3: single insn followed by an addi
737 4: single insn followed by a subi
738 5: single insn followed by rsubi
739 6: single insn followed by bseti
740 7: single insn followed by bclri
741 8: single insn followed by rotli
742 9: single insn followed by lsli
743 10: single insn followed by ixh
744 11: single insn followed by ixw. */
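/* For example (illustrative): 0xffffff00 cannot be loaded directly, but
   its complement 0xff can (bmaski), so the function returns 2 and the
   constant is emitted as "bmaski rX,8; not rX".  Likewise 129 = 128 + 1
   yields trick 3 and is emitted as "bgeni rX,7; addi rX,1".  */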
746 static int
747 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
749 HOST_WIDE_INT i;
750 unsigned HOST_WIDE_INT bit, shf, rot;
752 if (const_ok_for_mcore (value))
753 return 1; /* Do the usual thing. */
755 if (! TARGET_HARDLIT)
756 return 0;
758 if (const_ok_for_mcore (~value))
760 *x = ~value;
761 return 2;
764 for (i = 1; i <= 32; i++)
766 if (const_ok_for_mcore (value - i))
768 *x = value - i;
769 *y = i;
771 return 3;
774 if (const_ok_for_mcore (value + i))
776 *x = value + i;
777 *y = i;
779 return 4;
783 bit = 0x80000000ULL;
785 for (i = 0; i <= 31; i++)
787 if (const_ok_for_mcore (i - value))
789 *x = i - value;
790 *y = i;
792 return 5;
795 if (const_ok_for_mcore (value & ~bit))
797 *y = bit;
798 *x = value & ~bit;
799 return 6;
802 if (const_ok_for_mcore (value | bit))
804 *y = ~bit;
805 *x = value | bit;
807 return 7;
810 bit >>= 1;
813 shf = value;
814 rot = value;
816 for (i = 1; i < 31; i++)
818 int c;
820 /* MCore has rotate left. */
821 c = rot << 31;
822 rot >>= 1;
823 rot &= 0x7FFFFFFF;
824 rot |= c; /* Simulate rotate. */
826 if (const_ok_for_mcore (rot))
828 *y = i;
829 *x = rot;
831 return 8;
834 if (shf & 1)
835 shf = 0; /* Can't use logical shift, low order bit is one. */
837 shf >>= 1;
839 if (shf != 0 && const_ok_for_mcore (shf))
841 *y = i;
842 *x = shf;
844 return 9;
848 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
850 *x = value / 3;
852 return 10;
855 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
857 *x = value / 5;
859 return 11;
862 return 0;
865 /* Check whether REG is dead at insn FIRST.  This is done by searching ahead
866 for either the next use (i.e., REG is live), a death note, or a set of
867 REG.  Don't just use dead_or_set_p() since reload does not always mark
868 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
869 can ignore subregs by extracting the actual register. BRC */
872 mcore_is_dead (rtx first, rtx reg)
874 rtx insn;
876 /* For mcore, subregs can't live independently of their parent regs. */
877 if (GET_CODE (reg) == SUBREG)
878 reg = SUBREG_REG (reg);
880 /* Dies immediately. */
881 if (dead_or_set_p (first, reg))
882 return 1;
884 /* Look for conclusive evidence of live/death, otherwise we have
885 to assume that it is live. */
886 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
888 if (GET_CODE (insn) == JUMP_INSN)
889 return 0; /* We lose track, assume it is alive. */
891 else if (GET_CODE(insn) == CALL_INSN)
893 /* Calls might use it for target or register parms. */
894 if (reg_referenced_p (reg, PATTERN (insn))
895 || find_reg_fusage (insn, USE, reg))
896 return 0;
897 else if (dead_or_set_p (insn, reg))
898 return 1;
900 else if (GET_CODE (insn) == INSN)
902 if (reg_referenced_p (reg, PATTERN (insn)))
903 return 0;
904 else if (dead_or_set_p (insn, reg))
905 return 1;
909 /* No conclusive evidence either way, we cannot take the chance
910 that control flow hid the use from us -- "I'm not dead yet". */
911 return 0;
914 /* Count the number of ones in mask. */
917 mcore_num_ones (HOST_WIDE_INT mask)
919 /* A trick to count set bits recently posted on comp.compilers. */
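/* Each step adds adjacent bit-fields in parallel: 1-bit fields into 2-bit
   sums, then 2-bit into 4-bit, 4-bit into 8-bit, and so on.  The final
   "& 0xff" is enough because the count can never exceed 32.  */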
920 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
921 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
922 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
923 mask = ((mask >> 8) + mask);
925 return (mask + (mask >> 16)) & 0xff;
928 /* Count the number of zeros in mask. */
931 mcore_num_zeros (HOST_WIDE_INT mask)
933 return 32 - mcore_num_ones (mask);
936 /* Determine byte being masked. */
939 mcore_byte_offset (unsigned int mask)
941 if (mask == 0x00ffffffL)
942 return 0;
943 else if (mask == 0xff00ffffL)
944 return 1;
945 else if (mask == 0xffff00ffL)
946 return 2;
947 else if (mask == 0xffffff00L)
948 return 3;
950 return -1;
953 /* Determine halfword being masked. */
956 mcore_halfword_offset (unsigned int mask)
958 if (mask == 0x0000ffffL)
959 return 0;
960 else if (mask == 0xffff0000L)
961 return 1;
963 return -1;
966 /* Output a series of bseti's corresponding to mask. */
968 const char *
969 mcore_output_bseti (rtx dst, int mask)
971 rtx out_operands[2];
972 int bit;
974 out_operands[0] = dst;
976 for (bit = 0; bit < 32; bit++)
978 if ((mask & 0x1) == 0x1)
980 out_operands[1] = GEN_INT (bit);
982 output_asm_insn ("bseti\t%0,%1", out_operands);
984 mask >>= 1;
987 return "";
990 /* Output a series of bclri's corresponding to mask. */
992 const char *
993 mcore_output_bclri (rtx dst, int mask)
995 rtx out_operands[2];
996 int bit;
998 out_operands[0] = dst;
1000 for (bit = 0; bit < 32; bit++)
1002 if ((mask & 0x1) == 0x0)
1004 out_operands[1] = GEN_INT (bit);
1006 output_asm_insn ("bclri\t%0,%1", out_operands);
1009 mask >>= 1;
1012 return "";
1015 /* Output a conditional move of two constants that are +/- 1 within each
1016 other. See the "movtK" patterns in mcore.md. I'm not sure this is
1017 really worth the effort. */
1019 const char *
1020 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1022 HOST_WIDE_INT load_value;
1023 HOST_WIDE_INT adjust_value;
1024 rtx out_operands[4];
1026 out_operands[0] = operands[0];
1028 /* Check to see which constant is loadable. */
1029 if (const_ok_for_mcore (INTVAL (operands[1])))
1031 out_operands[1] = operands[1];
1032 out_operands[2] = operands[2];
1034 else if (const_ok_for_mcore (INTVAL (operands[2])))
1036 out_operands[1] = operands[2];
1037 out_operands[2] = operands[1];
1039 /* Complement test since constants are swapped. */
1040 cmp_t = (cmp_t == 0);
1042 load_value = INTVAL (out_operands[1]);
1043 adjust_value = INTVAL (out_operands[2]);
1045 /* First output the test if folded into the pattern. */
1047 if (test)
1048 output_asm_insn (test, operands);
1050 /* Load the constant - for now, only support constants that can be
1051 generated with a single instruction.  Maybe add general inlinable
1052 constants later (this will increase the # of patterns since the
1053 instruction sequence has a different length attribute). */
1054 if (load_value >= 0 && load_value <= 127)
1055 output_asm_insn ("movi\t%0,%1", out_operands);
1056 else if (CONST_OK_FOR_M (load_value))
1057 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1058 else if (CONST_OK_FOR_N (load_value))
1059 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1061 /* Output the constant adjustment. */
1062 if (load_value > adjust_value)
1064 if (cmp_t)
1065 output_asm_insn ("decf\t%0", out_operands);
1066 else
1067 output_asm_insn ("dect\t%0", out_operands);
1069 else
1071 if (cmp_t)
1072 output_asm_insn ("incf\t%0", out_operands);
1073 else
1074 output_asm_insn ("inct\t%0", out_operands);
1077 return "";
1080 /* Outputs the peephole for moving a constant that gets not'ed followed
1081 by an and (i.e. combine the not and the and into andn). BRC */
1083 const char *
1084 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1086 HOST_WIDE_INT x, y;
1087 rtx out_operands[3];
1088 const char * load_op;
1089 char buf[256];
1090 int trick_no;
1092 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1093 gcc_assert (trick_no == 2);
1095 out_operands[0] = operands[0];
1096 out_operands[1] = GEN_INT (x);
1097 out_operands[2] = operands[2];
1099 if (x >= 0 && x <= 127)
1100 load_op = "movi\t%0,%1";
1102 /* Try exact power of two. */
1103 else if (CONST_OK_FOR_M (x))
1104 load_op = "bgeni\t%0,%P1";
1106 /* Try exact power of two - 1. */
1107 else if (CONST_OK_FOR_N (x))
1108 load_op = "bmaski\t%0,%N1";
1110 else
1112 load_op = "BADMOVI-andn\t%0, %1";
1113 gcc_unreachable ();
1116 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1117 output_asm_insn (buf, out_operands);
1119 return "";
1122 /* Output an inline constant. */
1124 static const char *
1125 output_inline_const (enum machine_mode mode, rtx operands[])
1127 HOST_WIDE_INT x = 0, y = 0;
1128 int trick_no;
1129 rtx out_operands[3];
1130 char buf[256];
1131 char load_op[256];
1132 const char *dst_fmt;
1133 HOST_WIDE_INT value;
1135 value = INTVAL (operands[1]);
1137 trick_no = try_constant_tricks (value, &x, &y);
1138 /* lrw's are handled separately: Large inlinable constants never get
1139 turned into lrw's. Our caller uses try_constant_tricks to back
1140 off to an lrw rather than calling this routine. */
1141 gcc_assert (trick_no != 0);
1143 if (trick_no == 1)
1144 x = value;
1146 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1147 out_operands[0] = operands[0];
1148 out_operands[1] = GEN_INT (x);
1150 if (trick_no > 2)
1151 out_operands[2] = GEN_INT (y);
1153 /* Select dst format based on mode. */
1154 if (mode == DImode && (! TARGET_LITTLE_END))
1155 dst_fmt = "%R0";
1156 else
1157 dst_fmt = "%0";
1159 if (x >= 0 && x <= 127)
1160 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1162 /* Try exact power of two. */
1163 else if (CONST_OK_FOR_M (x))
1164 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1166 /* Try exact power of two - 1. */
1167 else if (CONST_OK_FOR_N (x))
1168 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1170 else
1172 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1173 gcc_unreachable ();
1176 switch (trick_no)
1178 case 1:
1179 strcpy (buf, load_op);
1180 break;
1181 case 2: /* not */
1182 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1183 break;
1184 case 3: /* add */
1185 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1186 break;
1187 case 4: /* sub */
1188 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1189 break;
1190 case 5: /* rsub */
1191 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1192 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1193 break;
1194 case 6: /* bseti */
1195 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1196 break;
1197 case 7: /* bclr */
1198 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1199 break;
1200 case 8: /* rotl */
1201 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1202 break;
1203 case 9: /* lsl */
1204 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1205 break;
1206 case 10: /* ixh */
1207 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1208 break;
1209 case 11: /* ixw */
1210 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1211 break;
1212 default:
1213 return "";
1216 output_asm_insn (buf, out_operands);
1218 return "";
1221 /* Output a move of a word or less value. */
1223 const char *
1224 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1225 enum machine_mode mode ATTRIBUTE_UNUSED)
1227 rtx dst = operands[0];
1228 rtx src = operands[1];
1230 if (GET_CODE (dst) == REG)
1232 if (GET_CODE (src) == REG)
1234 if (REGNO (src) == CC_REG) /* r-c */
1235 return "mvc\t%0";
1236 else
1237 return "mov\t%0,%1"; /* r-r*/
1239 else if (GET_CODE (src) == MEM)
1241 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1242 return "lrw\t%0,[%1]"; /* a-R */
1243 else
1244 switch (GET_MODE (src)) /* r-m */
1246 case SImode:
1247 return "ldw\t%0,%1";
1248 case HImode:
1249 return "ld.h\t%0,%1";
1250 case QImode:
1251 return "ld.b\t%0,%1";
1252 default:
1253 gcc_unreachable ();
1256 else if (GET_CODE (src) == CONST_INT)
1258 HOST_WIDE_INT x, y;
1260 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1261 return "movi\t%0,%1";
1262 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1263 return "bgeni\t%0,%P1\t// %1 %x1";
1264 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1265 return "bmaski\t%0,%N1\t// %1 %x1";
1266 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1267 return output_inline_const (SImode, operands); /* 1-2 insns */
1268 else
1269 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1271 else
1272 return "lrw\t%0, %1"; /* Into the literal pool. */
1274 else if (GET_CODE (dst) == MEM) /* m-r */
1275 switch (GET_MODE (dst))
1277 case SImode:
1278 return "stw\t%1,%0";
1279 case HImode:
1280 return "st.h\t%1,%0";
1281 case QImode:
1282 return "st.b\t%1,%0";
1283 default:
1284 gcc_unreachable ();
1287 gcc_unreachable ();
1290 /* Return a sequence of instructions to perform DI or DF move.
1291 Since the MCORE cannot move a DI or DF in one instruction, we have
1292 to take care when we see overlapping source and dest registers. */
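/* E.g. (illustrative) moving the pair r2/r3 into r3/r4: the overlapping
   word r3 must be copied into r4 before it is overwritten, hence the two
   orderings chosen below.  */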
1294 const char *
1295 mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
1297 rtx dst = operands[0];
1298 rtx src = operands[1];
1300 if (GET_CODE (dst) == REG)
1302 if (GET_CODE (src) == REG)
1304 int dstreg = REGNO (dst);
1305 int srcreg = REGNO (src);
1307 /* Ensure the second source is not overwritten. */
1308 if (srcreg + 1 == dstreg)
1309 return "mov %R0,%R1\n\tmov %0,%1";
1310 else
1311 return "mov %0,%1\n\tmov %R0,%R1";
1313 else if (GET_CODE (src) == MEM)
1315 rtx memexp = XEXP (src, 0);
1316 int dstreg = REGNO (dst);
1317 int basereg = -1;
1319 if (GET_CODE (memexp) == LABEL_REF)
1320 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1321 else if (GET_CODE (memexp) == REG)
1322 basereg = REGNO (memexp);
1323 else if (GET_CODE (memexp) == PLUS)
1325 if (GET_CODE (XEXP (memexp, 0)) == REG)
1326 basereg = REGNO (XEXP (memexp, 0));
1327 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1328 basereg = REGNO (XEXP (memexp, 1));
1329 else
1330 gcc_unreachable ();
1332 else
1333 gcc_unreachable ();
1335 /* ??? length attribute is wrong here. */
1336 if (dstreg == basereg)
1338 /* Just load them in reverse order. */
1339 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1341 /* XXX: alternative: move basereg to basereg+1
1342 and then fall through. */
1344 else
1345 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1347 else if (GET_CODE (src) == CONST_INT)
1349 if (TARGET_LITTLE_END)
1351 if (CONST_OK_FOR_I (INTVAL (src)))
1352 output_asm_insn ("movi %0,%1", operands);
1353 else if (CONST_OK_FOR_M (INTVAL (src)))
1354 output_asm_insn ("bgeni %0,%P1", operands);
1355 else if (CONST_OK_FOR_N (INTVAL (src)))
1356 output_asm_insn ("bmaski %0,%N1", operands);
1357 else
1358 gcc_unreachable ();
1360 if (INTVAL (src) < 0)
1361 return "bmaski %R0,32";
1362 else
1363 return "movi %R0,0";
1365 else
1367 if (CONST_OK_FOR_I (INTVAL (src)))
1368 output_asm_insn ("movi %R0,%1", operands);
1369 else if (CONST_OK_FOR_M (INTVAL (src)))
1370 output_asm_insn ("bgeni %R0,%P1", operands);
1371 else if (CONST_OK_FOR_N (INTVAL (src)))
1372 output_asm_insn ("bmaski %R0,%N1", operands);
1373 else
1374 gcc_unreachable ();
1376 if (INTVAL (src) < 0)
1377 return "bmaski %0,32";
1378 else
1379 return "movi %0,0";
1382 else
1383 gcc_unreachable ();
1385 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1386 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1387 else
1388 gcc_unreachable ();
1391 /* Predicates used by the templates. */
1394 mcore_arith_S_operand (rtx op)
1396 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1397 return 1;
1399 return 0;
1402 /* Expand insert bit field. BRC */
1405 mcore_expand_insv (rtx operands[])
1407 int width = INTVAL (operands[1]);
1408 int posn = INTVAL (operands[2]);
1409 int mask;
1410 rtx mreg, sreg, ereg;
1412 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1413 for width==1 must be removed. Look around line 368. This is something
1414 we really want the md part to do. */
1415 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1417 /* Do directly with bseti or bclri. */
1418 /* RBE: 2/97 consider only low bit of constant. */
1419 if ((INTVAL (operands[3]) & 1) == 0)
1421 mask = ~(1 << posn);
1422 emit_insn (gen_rtx_SET (SImode, operands[0],
1423 gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
1425 else
1427 mask = 1 << posn;
1428 emit_insn (gen_rtx_SET (SImode, operands[0],
1429 gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
1432 return 1;
1435 /* Look at some bit-field placements that we aren't interested
1436 in handling ourselves, unless specifically directed to do so. */
1437 if (! TARGET_W_FIELD)
1438 return 0; /* Generally, give up about now. */
1440 if (width == 8 && posn % 8 == 0)
1441 /* Byte sized and aligned; let caller break it up. */
1442 return 0;
1444 if (width == 16 && posn % 16 == 0)
1445 /* Short sized and aligned; let caller break it up. */
1446 return 0;
1448 /* The general case - we can do this a little bit better than what the
1449 machine independent part tries. This will get rid of all the subregs
1450 that mess up constant folding in combine when working with relaxed
1451 immediates. */
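/* For example (illustrative): inserting r3 into a 4-bit field at bit
   position 8 of r2 expands to, in effect:
       r2 = r2 & ~(0xf << 8);    clear the field
       r3 = r3 & 0xf;            truncate the source to the field width
       r3 = r3 << 8;             shift it into position
       r2 = r2 | r3;             merge
   with the constants forced into registers as below.  */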
1453 /* If setting the entire field, do it directly. */
1454 if (GET_CODE (operands[3]) == CONST_INT
1455 && INTVAL (operands[3]) == ((1 << width) - 1))
1457 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1458 emit_insn (gen_rtx_SET (SImode, operands[0],
1459 gen_rtx_IOR (SImode, operands[0], mreg)));
1460 return 1;
1463 /* Generate the clear mask. */
1464 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1466 /* Clear the field, to overlay it later with the source. */
1467 emit_insn (gen_rtx_SET (SImode, operands[0],
1468 gen_rtx_AND (SImode, operands[0], mreg)));
1470 /* If the source is constant 0, we've nothing to add back. */
1471 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1472 return 1;
1474 /* XXX: Should we worry about more games with constant values?
1475 We've covered the high profile: set/clear single-bit and many-bit
1476 fields. How often do we see "arbitrary bit pattern" constants? */
1477 sreg = copy_to_mode_reg (SImode, operands[3]);
1479 /* Extract src as same width as dst (needed for signed values). We
1480 always have to do this since we widen everything to SImode.
1481 We don't have to mask if we're shifting this up against the
1482 MSB of the register (i.e., the shift will push out any hi-order
1483 bits). */
1484 if (width + posn != (int) GET_MODE_SIZE (SImode))
1486 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1487 emit_insn (gen_rtx_SET (SImode, sreg,
1488 gen_rtx_AND (SImode, sreg, ereg)));
1491 /* Insert source value in dest. */
1492 if (posn != 0)
1493 emit_insn (gen_rtx_SET (SImode, sreg,
1494 gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
1496 emit_insn (gen_rtx_SET (SImode, operands[0],
1497 gen_rtx_IOR (SImode, operands[0], sreg)));
1499 return 1;
1502 /* ??? Block move stuff stolen from m88k. This code has not been
1503 verified for correctness. */
1505 /* Emit code to perform a block move. Choose the best method.
1507 OPERANDS[0] is the destination.
1508 OPERANDS[1] is the source.
1509 OPERANDS[2] is the size.
1510 OPERANDS[3] is the alignment safe to use. */
1512 /* Emit code to perform a block move with an offset sequence of ldw/st
1513 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1514 known constants. DEST and SRC are registers. OFFSET is the known
1515 starting point for the output pattern. */
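/* For example (illustrative): a 12-byte copy with 4-byte alignment comes
   out as  ldw 0; ldw 4; stw 0; ldw 8; stw 4; stw 8  -- each store uses
   the word loaded on the previous iteration, keeping one load and one
   store in flight at a time.  */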
1517 static const enum machine_mode mode_from_align[] =
1519 VOIDmode, QImode, HImode, VOIDmode, SImode,
1522 static void
1523 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1525 rtx temp[2];
1526 enum machine_mode mode[2];
1527 int amount[2];
1528 bool active[2];
1529 int phase = 0;
1530 int next;
1531 int offset_ld = 0;
1532 int offset_st = 0;
1533 rtx x;
1535 x = XEXP (dst_mem, 0);
1536 if (!REG_P (x))
1538 x = force_reg (Pmode, x);
1539 dst_mem = replace_equiv_address (dst_mem, x);
1542 x = XEXP (src_mem, 0);
1543 if (!REG_P (x))
1545 x = force_reg (Pmode, x);
1546 src_mem = replace_equiv_address (src_mem, x);
1549 active[0] = active[1] = false;
1553 next = phase;
1554 phase ^= 1;
1556 if (size > 0)
1558 int next_amount;
1560 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1561 next_amount = MIN (next_amount, align);
1563 amount[next] = next_amount;
1564 mode[next] = mode_from_align[next_amount];
1565 temp[next] = gen_reg_rtx (mode[next]);
1567 x = adjust_address (src_mem, mode[next], offset_ld);
1568 emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
1570 offset_ld += next_amount;
1571 size -= next_amount;
1572 active[next] = true;
1575 if (active[phase])
1577 active[phase] = false;
1579 x = adjust_address (dst_mem, mode[phase], offset_st);
1580 emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
1582 offset_st += amount[phase];
1585 while (active[next]);
1588 bool
1589 mcore_expand_block_move (rtx *operands)
1591 HOST_WIDE_INT align, bytes, max;
1593 if (GET_CODE (operands[2]) != CONST_INT)
1594 return false;
1596 bytes = INTVAL (operands[2]);
1597 align = INTVAL (operands[3]);
1599 if (bytes <= 0)
1600 return false;
1601 if (align > 4)
1602 align = 4;
1604 switch (align)
1606 case 4:
1607 if (bytes & 1)
1608 max = 4*4;
1609 else if (bytes & 3)
1610 max = 8*4;
1611 else
1612 max = 16*4;
1613 break;
1614 case 2:
1615 max = 4*2;
1616 break;
1617 case 1:
1618 max = 4*1;
1619 break;
1620 default:
1621 gcc_unreachable ();
1624 if (bytes <= max)
1626 block_move_sequence (operands[0], operands[1], bytes, align);
1627 return true;
1630 return false;
1634 /* Code to generate prologue and epilogue sequences. */
1635 static int number_of_regs_before_varargs;
1637 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1638 for a varargs function. */
1639 static int current_function_anonymous_args;
1641 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1642 #define STORE_REACH (64) /* Maximum displacement of word store + 4. */
1643 #define ADDI_REACH (32) /* Maximum addi operand. */
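/* The frame laid out by layout_mcore_frame looks roughly like this
   (illustrative; higher addresses first, SP shown after the prologue):

       argument spill area              arg_size      (arg_offset)
       saved non-volatile registers     reg_size      (reg_offset, + pad_reg)
       locals                           local_size    (+ pad_local)
       outgoing argument overflow       outbound_size (+ pad_outbound)  <- SP

   growth[] records how this total is bought in at most MAX_STACK_GROWS
   separate stack adjustments.  */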
1645 static void
1646 layout_mcore_frame (struct mcore_frame * infp)
1648 int n;
1649 unsigned int i;
1650 int nbytes;
1651 int regarg;
1652 int localregarg;
1653 int localreg;
1654 int outbounds;
1655 unsigned int growths;
1656 int step;
1658 /* Might have to spill bytes to re-assemble a big argument that
1659 was passed partially in registers and partially on the stack. */
1660 nbytes = crtl->args.pretend_args_size;
1662 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1663 if (current_function_anonymous_args)
1664 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1666 infp->arg_size = nbytes;
1668 /* How much space to save non-volatile registers we stomp. */
1669 infp->reg_mask = calc_live_regs (& n);
1670 infp->reg_size = n * 4;
1672 /* And the rest of it... locals and space for overflowed outbounds. */
1673 infp->local_size = get_frame_size ();
1674 infp->outbound_size = crtl->outgoing_args_size;
1676 /* Make sure we have a whole number of words for the locals. */
1677 if (infp->local_size % STACK_BYTES)
1678 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1680 /* Only thing we know we have to pad is the outbound space, since
1681 we've aligned our locals assuming that base of locals is aligned. */
1682 infp->pad_local = 0;
1683 infp->pad_reg = 0;
1684 infp->pad_outbound = 0;
1685 if (infp->outbound_size % STACK_BYTES)
1686 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1688 /* Now we see how we want to stage the prologue so that it does
1689 the most appropriate stack growth and register saves to either:
1690 (1) run fast,
1691 (2) reduce instruction space, or
1692 (3) reduce stack space. */
1693 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1694 infp->growth[i] = 0;
1696 regarg = infp->reg_size + infp->arg_size;
1697 localregarg = infp->local_size + regarg;
1698 localreg = infp->local_size + infp->reg_size;
1699 outbounds = infp->outbound_size + infp->pad_outbound;
1700 growths = 0;
1702 /* XXX: Consider one where we consider localregarg + outbound too! */
1704 /* Frame of <= 32 bytes and using stm would get <= 2 registers:
1705 use stw's with offsets and buy the frame in one shot. */
1706 if (localregarg <= ADDI_REACH
1707 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1709 /* Make sure we'll be aligned. */
1710 if (localregarg % STACK_BYTES)
1711 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1713 step = localregarg + infp->pad_reg;
1714 infp->reg_offset = infp->local_size;
1716 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1718 step += outbounds;
1719 infp->reg_offset += outbounds;
1720 outbounds = 0;
1723 infp->arg_offset = step - 4;
1724 infp->growth[growths++] = step;
1725 infp->reg_growth = growths;
1726 infp->local_growth = growths;
1728 /* If we haven't already folded it in. */
1729 if (outbounds)
1730 infp->growth[growths++] = outbounds;
1732 goto finish;
1735 /* Frame can't be done with a single subi, but can be done with 2
1736 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1737 shift some of the stack purchase into the first subi, so both are
1738 single instructions. */
1739 if (localregarg <= STORE_REACH
1740 && (infp->local_size > ADDI_REACH)
1741 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1743 int all;
1745 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1746 if (localregarg % STACK_BYTES)
1747 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1749 all = localregarg + infp->pad_reg + infp->pad_local;
1750 step = ADDI_REACH; /* As much up front as we can. */
1751 if (step > all)
1752 step = all;
1754 /* XXX: Consider whether step will still be aligned; we believe so. */
1755 infp->arg_offset = step - 4;
1756 infp->growth[growths++] = step;
1757 infp->reg_growth = growths;
1758 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1759 all -= step;
1761 /* Can we fold in any space required for outbounds? */
1762 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1764 all += outbounds;
1765 outbounds = 0;
1768 /* Get the rest of the locals in place. */
1769 step = all;
1770 infp->growth[growths++] = step;
1771 infp->local_growth = growths;
1772 all -= step;
1774 assert (all == 0);
1776 /* Finish off if we need to do so. */
1777 if (outbounds)
1778 infp->growth[growths++] = outbounds;
1780 goto finish;
1783 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1784 Then we buy the rest of the frame in 1 or 2 steps depending on
1785 whether we need a frame pointer. */
1786 if ((regarg % STACK_BYTES) == 0)
1788 infp->growth[growths++] = regarg;
1789 infp->reg_growth = growths;
1790 infp->arg_offset = regarg - 4;
1791 infp->reg_offset = 0;
1793 if (infp->local_size % STACK_BYTES)
1794 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1796 step = infp->local_size + infp->pad_local;
1798 if (!frame_pointer_needed)
1800 step += outbounds;
1801 outbounds = 0;
1804 infp->growth[growths++] = step;
1805 infp->local_growth = growths;
1807 /* If there's any left to be done. */
1808 if (outbounds)
1809 infp->growth[growths++] = outbounds;
1811 goto finish;
1814 /* XXX: optimizations that we'll want to play with....
1815 -- regarg is not aligned, but it's a small number of registers;
1816 use some of localsize so that regarg is aligned and then
1817 save the registers. */
1819 /* Simple encoding; plods down the stack buying the pieces as it goes.
1820 -- does not optimize space consumption.
1821 -- does not attempt to optimize instruction counts.
1822 -- but it is safe for all alignments. */
1823 if (regarg % STACK_BYTES != 0)
1824 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1826 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1827 infp->reg_growth = growths;
1828 infp->arg_offset = infp->growth[0] - 4;
1829 infp->reg_offset = 0;
1831 if (frame_pointer_needed)
1833 if (infp->local_size % STACK_BYTES != 0)
1834 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1836 infp->growth[growths++] = infp->local_size + infp->pad_local;
1837 infp->local_growth = growths;
1839 infp->growth[growths++] = outbounds;
1841 else
1843 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1844 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1846 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1847 infp->local_growth = growths;
1850 /* Anything else that we've forgotten, plus a few consistency checks. */
1851 finish:
1852 assert (infp->reg_offset >= 0);
1853 assert (growths <= MAX_STACK_GROWS);
1855 for (i = 0; i < growths; i++)
1856 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1859 /* Define the offset between two registers, one to be eliminated, and
1860 the other its replacement, at the start of a routine. */
1863 mcore_initial_elimination_offset (int from, int to)
1865 int above_frame;
1866 int below_frame;
1867 struct mcore_frame fi;
1869 layout_mcore_frame (& fi);
1871 /* fp to ap */
1872 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1873 /* sp to fp */
1874 below_frame = fi.outbound_size + fi.pad_outbound;
1876 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1877 return above_frame;
1879 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1880 return above_frame + below_frame;
1882 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1883 return below_frame;
1885 gcc_unreachable ();
1888 /* Keep track of some information about varargs for the prolog. */
1890 static void
1891 mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
1892 enum machine_mode mode, tree type,
1893 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1894 int second_time ATTRIBUTE_UNUSED)
1896 current_function_anonymous_args = 1;
1898 /* We need to know how many argument registers are used before
1899 the varargs start, so that we can push the remaining argument
1900 registers during the prologue. */
1901 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1903 /* There is a bug somewhere in the arg handling code.
1904 Until I can find it this workaround always pushes the
1905 last named argument onto the stack. */
1906 number_of_regs_before_varargs = *args_so_far;
1908 /* The last named argument may be split between argument registers
1909 and the stack. Allow for this here. */
1910 if (number_of_regs_before_varargs > NPARM_REGS)
1911 number_of_regs_before_varargs = NPARM_REGS;
1914 void
1915 mcore_expand_prolog (void)
1917 struct mcore_frame fi;
1918 int space_allocated = 0;
1919 int growth = 0;
1921 /* Find out what we're doing. */
1922 layout_mcore_frame (&fi);
1924 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1925 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1927 if (TARGET_CG_DATA)
1929 /* Emit a symbol for this routine's frame size. */
1930 rtx x;
1932 x = DECL_RTL (current_function_decl);
1934 gcc_assert (GET_CODE (x) == MEM);
1936 x = XEXP (x, 0);
1938 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1940 if (mcore_current_function_name)
1941 free (mcore_current_function_name);
1943 mcore_current_function_name = xstrdup (XSTR (x, 0));
1945 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1947 if (cfun->calls_alloca)
1948 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1950 /* 970425: RBE:
1951 We're looking at how the 8byte alignment affects stack layout
1952 and where we had to pad things. This emits information we can
1953 extract which tells us about frame sizes and the like. */
1954 fprintf (asm_out_file,
1955 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1956 mcore_current_function_name,
1957 fi.arg_size, fi.reg_size, fi.reg_mask,
1958 fi.local_size, fi.outbound_size,
1959 frame_pointer_needed);
1962 if (mcore_naked_function_p ())
1963 return;
1965 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
1966 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1968 /* If we have a parameter passed partially in regs and partially in memory,
1969 the registers will have been stored to memory already in function.c. So
1970 we only need to do something here for varargs functions. */
1971 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1973 int offset;
1974 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
1975 int remaining = fi.arg_size;
1977 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
1979 emit_insn (gen_movsi
1980 (gen_rtx_MEM (SImode,
1981 plus_constant (stack_pointer_rtx, offset)),
1982 gen_rtx_REG (SImode, rn)));
1986 /* Do we need another stack adjustment before we do the register saves? */
1987 if (growth < fi.reg_growth)
1988 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1990 if (fi.reg_size != 0)
1992 int i;
1993 int offs = fi.reg_offset;
1995 for (i = 15; i >= 0; i--)
1997 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
1999 int first_reg = 15;
2001 while (fi.reg_mask & (1 << first_reg))
2002 first_reg--;
2003 first_reg++;
2005 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2006 gen_rtx_REG (SImode, first_reg),
2007 GEN_INT (16 - first_reg)));
2009 i -= (15 - first_reg);
2010 offs += (16 - first_reg) * 4;
2012 else if (fi.reg_mask & (1 << i))
2014 emit_insn (gen_movsi
2015 (gen_rtx_MEM (SImode,
2016 plus_constant (stack_pointer_rtx, offs)),
2017 gen_rtx_REG (SImode, i)));
2018 offs += 4;
2023 /* Figure the locals + outbounds. */
2024 if (frame_pointer_needed)
2026 /* If we haven't already purchased to 'fp'. */
2027 if (growth < fi.local_growth)
2028 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2030 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2032 /* ... and then go any remaining distance for outbounds, etc. */
2033 if (fi.growth[growth])
2034 output_stack_adjust (-1, fi.growth[growth++]);
2036 else
2038 if (growth < fi.local_growth)
2039 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2040 if (fi.growth[growth])
2041 output_stack_adjust (-1, fi.growth[growth++]);
2045 void
2046 mcore_expand_epilog (void)
2048 struct mcore_frame fi;
2049 int i;
2050 int offs;
2051 int growth = MAX_STACK_GROWS - 1;
2054 /* Find out what we're doing. */
2055 layout_mcore_frame (&fi);
2057 if (mcore_naked_function_p ())
2058 return;
2060 /* If we had a frame pointer, restore the sp from that. */
2061 if (frame_pointer_needed)
2063 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2064 growth = fi.local_growth - 1;
2066 else
2068 /* XXX: while loop should accumulate and do a single sell. */
2069 while (growth >= fi.local_growth)
2071 if (fi.growth[growth] != 0)
2072 output_stack_adjust (1, fi.growth[growth]);
2073 growth--;
2077 /* Make sure we've shrunk stack back to the point where the registers
2078 were laid down. This is typically 0/1 iterations. Then pull the
2079 register save information back off the stack. */
2080 while (growth >= fi.reg_growth)
2081 output_stack_adjust ( 1, fi.growth[growth--]);
2083 offs = fi.reg_offset;
2085 for (i = 15; i >= 0; i--)
2087 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2089 int first_reg;
2091 /* Find the starting register. */
2092 first_reg = 15;
2094 while (fi.reg_mask & (1 << first_reg))
2095 first_reg--;
2097 first_reg++;
2099 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2100 gen_rtx_MEM (SImode, stack_pointer_rtx),
2101 GEN_INT (16 - first_reg)));
2103 i -= (15 - first_reg);
2104 offs += (16 - first_reg) * 4;
2106 else if (fi.reg_mask & (1 << i))
2108 emit_insn (gen_movsi
2109 (gen_rtx_REG (SImode, i),
2110 gen_rtx_MEM (SImode,
2111 plus_constant (stack_pointer_rtx, offs))));
2112 offs += 4;
2116 /* Give back anything else. */
2117 /* XXX: Should accumulate total and then give it back. */
2118 while (growth >= 0)
2119 output_stack_adjust ( 1, fi.growth[growth--]);
2122 /* This code is borrowed from the SH port. */
2124 /* The MCORE cannot load a large constant into a register, constants have to
2125 come from a pc relative load. The reference of a pc relative load
2126 instruction must be less than 1k in front of the instruction. This
2127 means that we often have to dump a constant inside a function, and
2128 generate code to branch around it.
2130 It is important to minimize this, since the branches will slow things
2131 down and make things bigger.
2133 Worst case code looks like:
2135 lrw L1,r0
2136 br L2
2137 align
2138 L1: .long value
2142 lrw L3,r0
2143 br L4
2144 align
2145 L3: .long value
2149 We fix this by performing a scan before scheduling, which notices which
2150 instructions need to have their operands fetched from the constant table
2151 and builds the table.
2153 The algorithm is:
2155 scan, find an instruction which needs a pcrel move. Look forward, find the
2156 last barrier which is within MAX_COUNT bytes of the requirement.
2157 If there isn't one, make one. Process all the instructions between
2158 the find and the barrier.
2160 In the above example, we can tell that L3 is within 1k of L1, so
2161 the first move can be shrunk from the 2 insn+constant sequence into
2162 just 1 insn, and the constant moved to L3 to make:
2164 lrw L1,r0
2166 lrw L3,r0
2167 br L4
2168 align
2169 L3:.long value
2170 L4:.long value
2172 Then the second move becomes the target for the shortening process. */
2174 typedef struct
2176 rtx value; /* Value in table. */
2177 rtx label; /* Label of value. */
2178 } pool_node;
2180 /* The maximum number of constants that can fit into one pool, since
2181 the pc relative range is 0...1020 bytes and constants are at least 4
2182 bytes long. We subtract 4 from the range to allow for the case where
2183 we need to add a branch/align before the constant pool. */
2185 #define MAX_COUNT 1016
2186 #define MAX_POOL_SIZE (MAX_COUNT/4)
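/* Spelled out: the usable range is 1020 - 4 == 1016 bytes, so with
   4-byte entries the pool can hold at most 1016 / 4 == 254 constants.  */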
2187 static pool_node pool_vector[MAX_POOL_SIZE];
2188 static int pool_size;
2190 /* Dump out any constants accumulated in the final pass. These
2191 will only be labels. */
2193 const char *
2194 mcore_output_jump_label_table (void)
2196 int i;
2198 if (pool_size)
2200 fprintf (asm_out_file, "\t.align 2\n");
2202 for (i = 0; i < pool_size; i++)
2204 pool_node * p = pool_vector + i;
2206 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2208 output_asm_insn (".long %0", &p->value);
2211 pool_size = 0;
2214 return "";
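/* For illustration only (label numbers are made up), a two-entry pool
   would come out of the function above roughly as:

       .align 2
   .L42:
       .long .L17
   .L43:
       .long .L29

   i.e. one alignment directive followed by an internal label and a
   .long directive per pool entry.  */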
2217 /* Check whether insn is a candidate for a conditional. */
2219 static cond_type
2220 is_cond_candidate (rtx insn)
2222 /* The only things we conditionalize are those that can be directly
2223 changed into a conditional. Only bother with SImode items. If
2224 we wanted to be a little more aggressive, we could also do other
2225 modes such as DImode with reg-reg move or load 0. */
2226 if (GET_CODE (insn) == INSN)
2228 rtx pat = PATTERN (insn);
2229 rtx src, dst;
2231 if (GET_CODE (pat) != SET)
2232 return COND_NO;
2234 dst = XEXP (pat, 0);
2236 if ((GET_CODE (dst) != REG &&
2237 GET_CODE (dst) != SUBREG) ||
2238 GET_MODE (dst) != SImode)
2239 return COND_NO;
2241 src = XEXP (pat, 1);
2243 if ((GET_CODE (src) == REG ||
2244 (GET_CODE (src) == SUBREG &&
2245 GET_CODE (SUBREG_REG (src)) == REG)) &&
2246 GET_MODE (src) == SImode)
2247 return COND_MOV_INSN;
2248 else if (GET_CODE (src) == CONST_INT &&
2249 INTVAL (src) == 0)
2250 return COND_CLR_INSN;
2251 else if (GET_CODE (src) == PLUS &&
2252 (GET_CODE (XEXP (src, 0)) == REG ||
2253 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2254 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2255 GET_MODE (XEXP (src, 0)) == SImode &&
2256 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2257 INTVAL (XEXP (src, 1)) == 1)
2258 return COND_INC_INSN;
2259 else if (((GET_CODE (src) == MINUS &&
2260 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2261 INTVAL( XEXP (src, 1)) == 1) ||
2262 (GET_CODE (src) == PLUS &&
2263 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2264 INTVAL (XEXP (src, 1)) == -1)) &&
2265 (GET_CODE (XEXP (src, 0)) == REG ||
2266 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2267 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2268 GET_MODE (XEXP (src, 0)) == SImode)
2269 return COND_DEC_INSN;
2271 /* Some insns that we don't bother with:
2272 (set (rx:DI) (ry:DI))
2273 (set (rx:DI) (const_int 0))
2277 else if (GET_CODE (insn) == JUMP_INSN &&
2278 GET_CODE (PATTERN (insn)) == SET &&
2279 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2280 return COND_BRANCH_INSN;
2282 return COND_NO;
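/* A sketch of the classification above (register numbers are purely
   illustrative):

     (set (reg:SI 2) (reg:SI 3))                           -> COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                        -> COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   -> COND_INC_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1)))  -> COND_DEC_INSN
     (set (pc) (label_ref ...))                            -> COND_BRANCH_INSN

   Anything else, including the DImode variants mentioned above,
   yields COND_NO.  */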
2285 /* Emit a conditional version of insn and replace the old insn with the
2286 new one. Return the new insn if emitted. */
2288 static rtx
2289 emit_new_cond_insn (rtx insn, int cond)
2291 rtx c_insn = 0;
2292 rtx pat, dst, src;
2293 cond_type num;
2295 if ((num = is_cond_candidate (insn)) == COND_NO)
2296 return NULL;
2298 pat = PATTERN (insn);
2300 if (GET_CODE (insn) == INSN)
2302 dst = SET_DEST (pat);
2303 src = SET_SRC (pat);
2305 else
2307 dst = JUMP_LABEL (insn);
2308 src = NULL_RTX;
2311 switch (num)
2313 case COND_MOV_INSN:
2314 case COND_CLR_INSN:
2315 if (cond)
2316 c_insn = gen_movt0 (dst, src, dst);
2317 else
2318 c_insn = gen_movt0 (dst, dst, src);
2319 break;
2321 case COND_INC_INSN:
2322 if (cond)
2323 c_insn = gen_incscc (dst, dst);
2324 else
2325 c_insn = gen_incscc_false (dst, dst);
2326 break;
2328 case COND_DEC_INSN:
2329 if (cond)
2330 c_insn = gen_decscc (dst, dst);
2331 else
2332 c_insn = gen_decscc_false (dst, dst);
2333 break;
2335 case COND_BRANCH_INSN:
2336 if (cond)
2337 c_insn = gen_branch_true (dst);
2338 else
2339 c_insn = gen_branch_false (dst);
2340 break;
2342 default:
2343 return NULL;
2346 /* Only copy the notes if they exist. */
2347 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2349 /* We really don't need to bother with the notes and links at this
2350 point, but go ahead and save the notes. This will help is_dead()
2351 when applying peepholes (links don't matter since they are not
2352 used any more beyond this point for the mcore). */
2353 REG_NOTES (c_insn) = REG_NOTES (insn);
2356 if (num == COND_BRANCH_INSN)
2358 /* For jumps, we need to be a little bit careful and emit the new jump
2359 before the old one and to update the use count for the target label.
2360 This way, the barrier following the old (uncond) jump will get
2361 deleted, but the label won't. */
2362 c_insn = emit_jump_insn_before (c_insn, insn);
2364 ++ LABEL_NUSES (dst);
2366 JUMP_LABEL (c_insn) = dst;
2368 else
2369 c_insn = emit_insn_after (c_insn, insn);
2371 delete_insn (insn);
2373 return c_insn;
2376 /* Attempt to change a basic block into a series of conditional insns. This
2377 works by taking the branch at the end of the 1st block and scanning for the
2378 end of the 2nd block. If all instructions in the 2nd block have cond.
2379 versions and the label at the start of block 3 is the same as the target
2380 from the branch at block 1, then conditionalize all insns in block 2 using
2381 the inverse condition of the branch at block 1. (Note I'm bending the
2382 definition of basic block here.)
2384 e.g., change:
2386 bt L2 <-- end of block 1 (delete)
2387 mov r7,r8
2388 addu r7,1
2389 br L3 <-- end of block 2
2391 L2: ... <-- start of block 3 (NUSES==1)
2392 L3: ...
to:
2396 movf r7,r8
2397 incf r7
2398 bf L3
2400 L3: ...
2402 we can delete the L2 label if NUSES==1 and re-apply the optimization
2403 starting at the last instruction of block 2. This may allow an entire
2404 if-then-else statement to be conditionalized. BRC */
2405 static rtx
2406 conditionalize_block (rtx first)
2408 rtx insn;
2409 rtx br_pat;
2410 rtx end_blk_1_br = 0;
2411 rtx end_blk_2_insn = 0;
2412 rtx start_blk_3_lab = 0;
2413 int cond;
2414 int br_lab_num;
2415 int blk_size = 0;
2418 /* Check that the first insn is a candidate conditional jump. This is
2419 the one that we'll eliminate. If not, advance to the next insn to
2420 try. */
2421 if (GET_CODE (first) != JUMP_INSN ||
2422 GET_CODE (PATTERN (first)) != SET ||
2423 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2424 return NEXT_INSN (first);
2426 /* Extract some information we need. */
2427 end_blk_1_br = first;
2428 br_pat = PATTERN (end_blk_1_br);
2430 /* Complement the condition since we use the reverse cond. for the insns. */
2431 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2433 /* Determine what kind of branch we have. */
2434 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2436 /* A normal branch, so extract label out of first arm. */
2437 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2439 else
2441 /* An inverse branch, so extract the label out of the 2nd arm
2442 and complement the condition. */
2443 cond = (cond == 0);
2444 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2447 /* Scan forward for the start of block 3: it must start with a
2448 label and that label must be the same as the branch target
2449 label from block 1. We don't care about whether block 2 actually
2450 ends with a branch or a label (an uncond. branch is
2451 conditionalizable). */
2452 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2454 enum rtx_code code;
2456 code = GET_CODE (insn);
2458 /* Look for the label at the start of block 3. */
2459 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2460 break;
2462 /* Skip barriers, notes, and conditionalizable insns. If the
2463 insn is not conditionalizable or makes this optimization fail,
2464 just return the next insn so we can start over from that point. */
2465 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2466 return NEXT_INSN (insn);
2468 /* Remember the last real insn before the label (i.e. end of block 2). */
2469 if (code == JUMP_INSN || code == INSN)
2471 blk_size ++;
2472 end_blk_2_insn = insn;
2476 if (!insn)
2477 return insn;
2479 /* It is possible for this optimization to slow performance if the blocks
2480 are long. This really depends upon whether the branch is likely taken
2481 or not. If the branch is taken, we slow performance in many cases. But,
2482 if the branch is not taken, we always help performance (for a single
2483 block, but for a double block (i.e. when the optimization is re-applied)
2484 this is not true since the 'right thing' depends on the overall length of
2485 the collapsed block). As a compromise, don't apply this optimization on
2486 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2487 The best threshold depends on the latencies of the instructions (i.e.,
2488 the branch penalty). */
2489 if (optimize > 1 && blk_size > 2)
2490 return insn;
2492 /* At this point, we've found the start of block 3 and we know that
2493 it is the destination of the branch from block 1. Also, all
2494 instructions in the block 2 are conditionalizable. So, apply the
2495 conditionalization and delete the branch. */
2496 start_blk_3_lab = insn;
2498 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2499 insn = NEXT_INSN (insn))
2501 rtx newinsn;
2503 if (INSN_DELETED_P (insn))
2504 continue;
2506 /* Try to form a conditional variant of the instruction and emit it. */
2507 if ((newinsn = emit_new_cond_insn (insn, cond)))
2509 if (end_blk_2_insn == insn)
2510 end_blk_2_insn = newinsn;
2512 insn = newinsn;
2516 /* Note whether we will delete the label starting blk 3 when the jump
2517 gets deleted. If so, we want to re-apply this optimization at the
2518 last real instruction right before the label. */
2519 if (LABEL_NUSES (start_blk_3_lab) == 1)
2521 start_blk_3_lab = 0;
2524 /* ??? we probably should redistribute the death notes for this insn, esp.
2525 the death of cc, but it doesn't really matter this late in the game.
2526 The peepholes all use is_dead() which will find the correct death
2527 regardless of whether there is a note. */
2528 delete_insn (end_blk_1_br);
2530 if (! start_blk_3_lab)
2531 return end_blk_2_insn;
2533 /* Return the insn right after the label at the start of block 3. */
2534 return NEXT_INSN (start_blk_3_lab);
2537 /* Apply the conditionalization of blocks optimization. This is the
2538 outer loop that traverses through the insns scanning for a branch
2539 that signifies an opportunity to apply the optimization. Note that
2540 this optimization is applied late. If we could apply it earlier,
2541 say before cse 2, it might expose more optimization opportunities,
2542 but the payback probably isn't really worth the effort (we'd have
2543 to update all reg/flow/notes/links/etc to make it work - and stick it
2544 in before cse 2). */
2546 static void
2547 conditionalize_optimization (void)
2549 rtx insn;
2551 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2552 continue;
2555 static int saved_warn_return_type = -1;
2556 static int saved_warn_return_type_count = 0;
2558 /* This is to handle loads from the constant pool. */
2560 static void
2561 mcore_reorg (void)
2563 /* Reset this variable. */
2564 current_function_anonymous_args = 0;
2566 /* Restore the warn_return_type if it has been altered. */
2567 if (saved_warn_return_type != -1)
2569 /* Only restore the value if we have reached another function.
2570 The test of warn_return_type occurs in final_function () in
2571 c-decl.c a long time after the code for the function is generated,
2572 so we need a counter to tell us when we have finished parsing that
2573 function and can restore the flag. */
2574 if (--saved_warn_return_type_count == 0)
2576 warn_return_type = saved_warn_return_type;
2577 saved_warn_return_type = -1;
2581 if (optimize == 0)
2582 return;
2584 /* Conditionalize blocks where we can. */
2585 conditionalize_optimization ();
2587 /* Literal pool generation is now pushed off until the assembler. */
2591 /* Return true if X is something that can be moved directly into r15. */
2593 bool
2594 mcore_r15_operand_p (rtx x)
2596 switch (GET_CODE (x))
2598 case CONST_INT:
2599 return mcore_const_ok_for_inline (INTVAL (x));
2601 case REG:
2602 case SUBREG:
2603 case MEM:
2604 return 1;
2606 default:
2607 return 0;
2611 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2612 directly move X into it, use r1-r14 as a temporary. */
2614 enum reg_class
2615 mcore_secondary_reload_class (enum reg_class rclass,
2616 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2618 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2619 && !mcore_r15_operand_p (x))
2620 return LRW_REGS;
2621 return NO_REGS;
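/* For instance, reloading a constant that mcore_const_ok_for_inline
   rejects into a class containing r15 cannot be done directly, so an
   LRW_REGS register is requested as the intermediate.  */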
2624 /* Return the reg_class to use when reloading the rtx X into the class
2625 RCLASS. If X is too complex to move directly into r15, prefer to
2626 use LRW_REGS instead. */
2628 enum reg_class
2629 mcore_reload_class (rtx x, enum reg_class rclass)
2631 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2632 return LRW_REGS;
2634 return rclass;
2637 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2638 register. Note that the current version doesn't worry about whether
2639 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2640 in r2, which matches an SImode in r2). We might think in the future about
2641 whether we want to be able to say something about modes. */
2644 mcore_is_same_reg (rtx x, rtx y)
2646 /* Strip any and all of the subreg wrappers. */
2647 while (GET_CODE (x) == SUBREG)
2648 x = SUBREG_REG (x);
2650 while (GET_CODE (y) == SUBREG)
2651 y = SUBREG_REG (y);
2653 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2654 return 1;
2656 return 0;
2659 void
2660 mcore_override_options (void)
2662 /* Only the m340 supports little endian code. */
2663 if (TARGET_LITTLE_END && ! TARGET_M340)
2664 target_flags |= MASK_M340;
2667 /* Compute the number of word sized registers needed to
2668 hold a function argument of mode MODE and type TYPE. */
2671 mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2673 int size;
2675 if (targetm.calls.must_pass_in_stack (mode, type))
2676 return 0;
2678 if (type && mode == BLKmode)
2679 size = int_size_in_bytes (type);
2680 else
2681 size = GET_MODE_SIZE (mode);
2683 return ROUND_ADVANCE (size);
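/* For example (assuming ROUND_ADVANCE rounds a byte count up to whole
   words and UNITS_PER_WORD is 4): a 10-byte BLKmode structure needs
   ROUND_ADVANCE (10) == 3 argument words, while an SImode scalar needs
   just one.  */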
2686 static rtx
2687 handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2689 int size;
2691 /* The MCore ABI defines that a structure whose size is not a whole multiple
2692 of words is passed packed into registers (or spilled onto the stack if
2693 not enough registers are available) with the last few bytes of the
2694 structure being packed, left-justified, into the last register/stack slot.
2695 GCC handles this correctly if the last word is in a stack slot, but we
2696 have to generate a special, PARALLEL RTX if the last word is in an
2697 argument register. */
2698 if (type
2699 && TYPE_MODE (type) == BLKmode
2700 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2701 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2702 && (size % UNITS_PER_WORD != 0)
2703 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2705 rtx arg_regs [NPARM_REGS];
2706 int nregs;
2707 rtx result;
2708 rtvec rtvec;
2710 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2712 arg_regs [nregs] =
2713 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2714 GEN_INT (nregs * UNITS_PER_WORD));
2715 nregs ++;
2718 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2719 assert (ARRAY_SIZE (arg_regs) == 6);
2720 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2721 arg_regs[3], arg_regs[4], arg_regs[5]);
2723 result = gen_rtx_PARALLEL (mode, rtvec);
2724 return result;
2727 return gen_rtx_REG (mode, reg);
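/* By way of illustration (assuming FIRST_PARM_REG == 2, see the comment
   in mcore_arg_partial_bytes below): a 6-byte BLKmode structure starting
   in the first argument register takes the PARALLEL path above and is
   described roughly as

       (parallel [(expr_list (reg:SI 2) (const_int 0))
                  (expr_list (reg:SI 3) (const_int 4))])

   so the trailing 2 bytes travel left-justified in the second register,
   matching the packing rule described above.  */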
2731 mcore_function_value (const_tree valtype, const_tree func)
2733 enum machine_mode mode;
2734 int unsigned_p;
2736 mode = TYPE_MODE (valtype);
2738 /* Since we promote return types, we must promote the mode here too. */
2739 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2741 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2744 /* Define where to put the arguments to a function.
2745 Value is zero to push the argument on the stack,
2746 or a hard register in which to store the argument.
2748 MODE is the argument's machine mode.
2749 TYPE is the data type of the argument (as a tree).
2750 This is null for libcalls where that information may
2751 not be available.
2752 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2753 the preceding args and about the function being called.
2754 NAMED is nonzero if this argument is a named parameter
2755 (otherwise it is an extra parameter matching an ellipsis).
2757 On MCore the first args are normally in registers
2758 and the rest are pushed. Any arg that starts within the first
2759 NPARM_REGS words is at least partially passed in a register unless
2760 its data type forbids. */
2763 mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
2764 tree type, int named)
2766 int arg_reg;
2768 if (! named || mode == VOIDmode)
2769 return 0;
2771 if (targetm.calls.must_pass_in_stack (mode, type))
2772 return 0;
2774 arg_reg = ROUND_REG (cum, mode);
2776 if (arg_reg < NPARM_REGS)
2777 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2779 return 0;
2782 /* Returns the number of bytes of argument registers required to hold *part*
2783 of a parameter of machine mode MODE and type TYPE (which may be NULL if
2784 the type is not known). If the argument fits entirely in the argument
2785 registers, or entirely on the stack, then 0 is returned. CUM is the
2786 number of argument registers already used by earlier parameters to
2787 the function. */
2789 static int
2790 mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2791 tree type, bool named)
2793 int reg = ROUND_REG (*cum, mode);
2795 if (named == 0)
2796 return 0;
2798 if (targetm.calls.must_pass_in_stack (mode, type))
2799 return 0;
2801 /* REG is not the *hardware* register number of the register that holds
2802 the argument, it is the *argument* register number. So for example,
2803 the first argument to a function goes in argument register 0, which
2804 translates (for the MCore) into hardware register 2. The second
2805 argument goes into argument register 1, which translates into hardware
2806 register 3, and so on. NPARM_REGS is the number of argument registers
2807 supported by the target, not the maximum hardware register number of
2808 the target. */
2809 if (reg >= NPARM_REGS)
2810 return 0;
2812 /* If the argument fits entirely in registers, return 0. */
2813 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2814 return 0;
2816 /* The argument overflows the number of available argument registers.
2817 Compute how many argument registers have not yet been assigned to
2818 hold an argument. */
2819 reg = NPARM_REGS - reg;
2821 /* Return partially in registers and partially on the stack. */
2822 return reg * UNITS_PER_WORD;
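/* A worked example (assuming NPARM_REGS == 6 and UNITS_PER_WORD == 4):
   a 16-byte structure whose first word falls in argument register 5
   needs 4 words but only one register remains, so we return
   (6 - 5) * 4 == 4 bytes passed in registers; the remaining 12 bytes
   go on the stack.  */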
2825 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2828 mcore_dllexport_name_p (const char * symbol)
2830 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2833 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2836 mcore_dllimport_name_p (const char * symbol)
2838 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2841 /* Mark a DECL as being dllexport'd. */
2843 static void
2844 mcore_mark_dllexport (tree decl)
2846 const char * oldname;
2847 char * newname;
2848 rtx rtlname;
2849 tree idp;
2851 rtlname = XEXP (DECL_RTL (decl), 0);
2853 if (GET_CODE (rtlname) == MEM)
2854 rtlname = XEXP (rtlname, 0);
2855 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2856 oldname = XSTR (rtlname, 0);
2858 if (mcore_dllexport_name_p (oldname))
2859 return; /* Already done. */
2861 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2862 sprintf (newname, "@e.%s", oldname);
2864 /* We pass newname through get_identifier to ensure it has a unique
2865 address. RTL processing can sometimes peek inside the symbol ref
2866 and compare the string's addresses to see if two symbols are
2867 identical. */
2868 /* ??? At least I think that's why we do this. */
2869 idp = get_identifier (newname);
2871 XEXP (DECL_RTL (decl), 0) =
2872 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
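/* So, for example, a dllexport'd symbol "foo" is renamed "@e.foo";
   mcore_dllexport_name_p above and mcore_strip_name_encoding below
   key off that prefix.  */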
2875 /* Mark a DECL as being dllimport'd. */
2877 static void
2878 mcore_mark_dllimport (tree decl)
2880 const char * oldname;
2881 char * newname;
2882 tree idp;
2883 rtx rtlname;
2884 rtx newrtl;
2886 rtlname = XEXP (DECL_RTL (decl), 0);
2888 if (GET_CODE (rtlname) == MEM)
2889 rtlname = XEXP (rtlname, 0);
2890 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2891 oldname = XSTR (rtlname, 0);
2893 gcc_assert (!mcore_dllexport_name_p (oldname));
2894 if (mcore_dllimport_name_p (oldname))
2895 return; /* Already done. */
2897 /* ??? One can well ask why we're making these checks here,
2898 and that would be a good question. */
2900 /* Imported variables can't be initialized. */
2901 if (TREE_CODE (decl) == VAR_DECL
2902 && !DECL_VIRTUAL_P (decl)
2903 && DECL_INITIAL (decl))
2905 error ("initialized variable %q+D is marked dllimport", decl);
2906 return;
2909 /* `extern' needn't be specified with dllimport.
2910 Specify `extern' now and hope for the best. Sigh. */
2911 if (TREE_CODE (decl) == VAR_DECL
2912 /* ??? Is this test for vtables needed? */
2913 && !DECL_VIRTUAL_P (decl))
2915 DECL_EXTERNAL (decl) = 1;
2916 TREE_PUBLIC (decl) = 1;
2919 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2920 sprintf (newname, "@i.__imp_%s", oldname);
2922 /* We pass newname through get_identifier to ensure it has a unique
2923 address. RTL processing can sometimes peek inside the symbol ref
2924 and compare the string's addresses to see if two symbols are
2925 identical. */
2926 /* ??? At least I think that's why we do this. */
2927 idp = get_identifier (newname);
2929 newrtl = gen_rtx_MEM (Pmode,
2930 gen_rtx_SYMBOL_REF (Pmode,
2931 IDENTIFIER_POINTER (idp)));
2932 XEXP (DECL_RTL (decl), 0) = newrtl;
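/* Likewise, a dllimport'd symbol "foo" becomes a MEM of the symbol
   "@i.__imp_foo"; the nine-character "@i.__imp_" prefix is what
   mcore_encode_section_info strips again (oldname + 9) when a later
   definition nullifies the import attribute.  */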
2935 static int
2936 mcore_dllexport_p (tree decl)
2938 if ( TREE_CODE (decl) != VAR_DECL
2939 && TREE_CODE (decl) != FUNCTION_DECL)
2940 return 0;
2942 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2945 static int
2946 mcore_dllimport_p (tree decl)
2948 if ( TREE_CODE (decl) != VAR_DECL
2949 && TREE_CODE (decl) != FUNCTION_DECL)
2950 return 0;
2952 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2955 /* We must mark dll symbols specially. Definitions of dllexport'd objects
2956 install some info in the .drectve (PE) or .exports (ELF) sections. */
2958 static void
2959 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
2961 /* Mark the decl so we can tell from the rtl whether the object is
2962 dllexport'd or dllimport'd. */
2963 if (mcore_dllexport_p (decl))
2964 mcore_mark_dllexport (decl);
2965 else if (mcore_dllimport_p (decl))
2966 mcore_mark_dllimport (decl);
2968 /* It might be that DECL has already been marked as dllimport, but
2969 a subsequent definition nullified that. The attribute is gone
2970 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
2971 else if ((TREE_CODE (decl) == FUNCTION_DECL
2972 || TREE_CODE (decl) == VAR_DECL)
2973 && DECL_RTL (decl) != NULL_RTX
2974 && GET_CODE (DECL_RTL (decl)) == MEM
2975 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
2976 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
2977 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
2979 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
2980 tree idp = get_identifier (oldname + 9);
2981 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2983 XEXP (DECL_RTL (decl), 0) = newrtl;
2985 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
2986 ??? We leave these alone for now. */
2990 /* Undo the effects of the above. */
2992 static const char *
2993 mcore_strip_name_encoding (const char * str)
2995 return str + (str[0] == '@' ? 3 : 0);
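/* E.g. "@e.foo" and "@i.__imp_foo" come back as "foo" and "__imp_foo"
   respectively; names without the '@' marker are returned unchanged.  */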
2998 /* MCore specific attribute support.
2999 dllexport - for exporting a function/variable that will live in a dll
3000 dllimport - for importing a function/variable from a dll
3001 naked - do not create a function prologue/epilogue. */
3003 /* Handle a "naked" attribute; arguments as in
3004 struct attribute_spec.handler. */
3006 static tree
3007 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3008 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3010 if (TREE_CODE (*node) == FUNCTION_DECL)
3012 /* PR14310 - don't complain about lack of return statement
3013 in naked functions. The solution here is a gross hack
3014 but this is the only way to solve the problem without
3015 adding a new feature to GCC. I did try submitting a patch
3016 that would add such a new feature, but it was (rightfully)
3017 rejected on the grounds that it was creeping featurism,
3018 so hence this code. */
3019 if (warn_return_type)
3021 saved_warn_return_type = warn_return_type;
3022 warn_return_type = 0;
3023 saved_warn_return_type_count = 2;
3025 else if (saved_warn_return_type_count)
3026 saved_warn_return_type_count = 2;
3028 else
3030 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3031 name);
3032 *no_add_attrs = true;
3035 return NULL_TREE;
3038 /* ??? It looks like this is PE specific? Oh well, this is what the
3039 old code did as well. */
3041 static void
3042 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3044 int len;
3045 const char * name;
3046 char * string;
3047 const char * prefix;
3049 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3051 /* Strip off any encoding in name. */
3052 name = (* targetm.strip_name_encoding) (name);
3054 /* The object is put in, for example, section .text$foo.
3055 The linker will then ultimately place it in .text
3056 (everything from the $ on is stripped). */
3057 if (TREE_CODE (decl) == FUNCTION_DECL)
3058 prefix = ".text$";
3059 /* For compatibility with EPOC, we ignore the fact that the
3060 section might have relocs against it. */
3061 else if (decl_readonly_section (decl, 0))
3062 prefix = ".rdata$";
3063 else
3064 prefix = ".data$";
3066 len = strlen (name) + strlen (prefix);
3067 string = XALLOCAVEC (char, len + 1);
3069 sprintf (string, "%s%s", prefix, name);
3071 DECL_SECTION_NAME (decl) = build_string (len, string);
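/* As an illustration of the naming scheme above, a function "bar" ends
   up in section ".text$bar" and a read-only object "baz" in
   ".rdata$baz"; the linker later folds everything after the '$' back
   into ".text" / ".rdata".  */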
3075 mcore_naked_function_p (void)
3077 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3080 #ifdef OBJECT_FORMAT_ELF
3081 static void
3082 mcore_asm_named_section (const char *name,
3083 unsigned int flags ATTRIBUTE_UNUSED,
3084 tree decl ATTRIBUTE_UNUSED)
3086 fprintf (asm_out_file, "\t.section %s\n", name);
3088 #endif /* OBJECT_FORMAT_ELF */
3090 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3092 static void
3093 mcore_external_libcall (rtx fun)
3095 fprintf (asm_out_file, "\t.import\t");
3096 assemble_name (asm_out_file, XSTR (fun, 0));
3097 fprintf (asm_out_file, "\n");
3100 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3102 static bool
3103 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3105 const HOST_WIDE_INT size = int_size_in_bytes (type);
3106 return (size == -1 || size > 2 * UNITS_PER_WORD);
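/* In other words (with UNITS_PER_WORD == 4): aggregates of up to 8
   bytes are returned in registers, while anything larger, or anything
   of variable size (int_size_in_bytes returns -1), is returned in
   memory.  */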