1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2009 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "assert.h"
29 #include "mcore.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "obstack.h"
39 #include "expr.h"
40 #include "reload.h"
41 #include "recog.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "toplev.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "df.h"
49 /* Maximum size we are allowed to grow the stack in a single operation.
50 If we want more, we must do it in increments of at most this size.
51 If this value is 0, we don't check at all. */
52 int mcore_stack_increment = STACK_UNITS_MAXSTEP;
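/* Illustrative example (not from the original source), assuming a step of
   4096 bytes: output_stack_adjust () below would grow a 10000 byte frame
   as two probed 4096 byte decrements of the stack pointer followed by a
   final, unprobed 1808 byte adjustment.  */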
54 /* For dumping information about frame sizes. */
55 char * mcore_current_function_name = 0;
56 long mcore_current_compilation_timestamp = 0;
58 /* Global variables for machine-dependent things. */
60 /* Provides the class number of the smallest class containing
61 reg number. */
62 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
64 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
65 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
66 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
67 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
68 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
71 /* Provide reg_class from a letter such as appears in the machine
72 description. */
73 const enum reg_class reg_class_from_letter[] =
75 /* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS, /* d */ NO_REGS,
76 /* e */ NO_REGS, /* f */ NO_REGS, /* g */ NO_REGS, /* h */ NO_REGS,
77 /* i */ NO_REGS, /* j */ NO_REGS, /* k */ NO_REGS, /* l */ NO_REGS,
78 /* m */ NO_REGS, /* n */ NO_REGS, /* o */ NO_REGS, /* p */ NO_REGS,
79 /* q */ NO_REGS, /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
80 /* u */ NO_REGS, /* v */ NO_REGS, /* w */ NO_REGS, /* x */ ALL_REGS,
81 /* y */ NO_REGS, /* z */ NO_REGS
84 struct mcore_frame
86 int arg_size; /* Stdarg spills (bytes). */
87 int reg_size; /* Non-volatile reg saves (bytes). */
88 int reg_mask; /* Non-volatile reg saves. */
89 int local_size; /* Locals. */
90 int outbound_size; /* Arg overflow on calls out. */
91 int pad_outbound;
92 int pad_local;
93 int pad_reg;
94 /* Describe the steps we'll use to grow it. */
95 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
96 int growth[MAX_STACK_GROWS];
97 int arg_offset;
98 int reg_offset;
99 int reg_growth;
100 int local_growth;
103 typedef enum
105 COND_NO,
106 COND_MOV_INSN,
107 COND_CLR_INSN,
108 COND_INC_INSN,
109 COND_DEC_INSN,
110 COND_BRANCH_INSN
112 cond_type;
114 static void output_stack_adjust (int, int);
115 static int calc_live_regs (int *);
116 static int try_constant_tricks (long, HOST_WIDE_INT *, HOST_WIDE_INT *);
117 static const char * output_inline_const (enum machine_mode, rtx *);
118 static void layout_mcore_frame (struct mcore_frame *);
119 static void mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
120 static cond_type is_cond_candidate (rtx);
121 static rtx emit_new_cond_insn (rtx, int);
122 static rtx conditionalize_block (rtx);
123 static void conditionalize_optimization (void);
124 static void mcore_reorg (void);
125 static rtx handle_structs_in_regs (enum machine_mode, const_tree, int);
126 static void mcore_mark_dllexport (tree);
127 static void mcore_mark_dllimport (tree);
128 static int mcore_dllexport_p (tree);
129 static int mcore_dllimport_p (tree);
130 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
131 #ifdef OBJECT_FORMAT_ELF
132 static void mcore_asm_named_section (const char *,
133 unsigned int, tree);
134 #endif
135 static void mcore_unique_section (tree, int);
136 static void mcore_encode_section_info (tree, rtx, int);
137 static const char *mcore_strip_name_encoding (const char *);
138 static int mcore_const_costs (rtx, RTX_CODE);
139 static int mcore_and_cost (rtx);
140 static int mcore_ior_cost (rtx);
141 static bool mcore_rtx_costs (rtx, int, int, int *, bool);
142 static void mcore_external_libcall (rtx);
143 static bool mcore_return_in_memory (const_tree, const_tree);
144 static int mcore_arg_partial_bytes (CUMULATIVE_ARGS *,
145 enum machine_mode,
146 tree, bool);
149 /* MCore specific attributes. */
151 static const struct attribute_spec mcore_attribute_table[] =
153 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
154 { "dllexport", 0, 0, true, false, false, NULL },
155 { "dllimport", 0, 0, true, false, false, NULL },
156 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute },
157 { NULL, 0, 0, false, false, false, NULL }
160 /* Initialize the GCC target structure. */
161 #undef TARGET_ASM_EXTERNAL_LIBCALL
162 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
164 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
165 #undef TARGET_MERGE_DECL_ATTRIBUTES
166 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
167 #endif
169 #ifdef OBJECT_FORMAT_ELF
170 #undef TARGET_ASM_UNALIGNED_HI_OP
171 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
172 #undef TARGET_ASM_UNALIGNED_SI_OP
173 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
174 #endif
176 #undef TARGET_ATTRIBUTE_TABLE
177 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
178 #undef TARGET_ASM_UNIQUE_SECTION
179 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
180 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
181 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
182 #undef TARGET_DEFAULT_TARGET_FLAGS
183 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
184 #undef TARGET_ENCODE_SECTION_INFO
185 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
186 #undef TARGET_STRIP_NAME_ENCODING
187 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
188 #undef TARGET_RTX_COSTS
189 #define TARGET_RTX_COSTS mcore_rtx_costs
190 #undef TARGET_ADDRESS_COST
191 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
192 #undef TARGET_MACHINE_DEPENDENT_REORG
193 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
195 #undef TARGET_PROMOTE_FUNCTION_ARGS
196 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
197 #undef TARGET_PROMOTE_FUNCTION_RETURN
198 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
199 #undef TARGET_PROMOTE_PROTOTYPES
200 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
202 #undef TARGET_RETURN_IN_MEMORY
203 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
204 #undef TARGET_MUST_PASS_IN_STACK
205 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
206 #undef TARGET_PASS_BY_REFERENCE
207 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
208 #undef TARGET_ARG_PARTIAL_BYTES
209 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
211 #undef TARGET_SETUP_INCOMING_VARARGS
212 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
214 struct gcc_target targetm = TARGET_INITIALIZER;
216 /* Adjust the stack by SIZE bytes in the given DIRECTION; a negative
DIRECTION grows the stack, probing each increment when the adjustment is large. */
217 static void
218 output_stack_adjust (int direction, int size)
220 /* If extending stack a lot, we do it incrementally. */
221 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
223 rtx tmp = gen_rtx_REG (SImode, 1);
224 rtx memref;
226 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
229 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
230 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
231 MEM_VOLATILE_P (memref) = 1;
232 emit_insn (gen_movsi (memref, stack_pointer_rtx));
233 size -= mcore_stack_increment;
235 while (size > mcore_stack_increment);
237 /* SIZE is now the residual for the last adjustment,
238 which doesn't require a probe. */
241 if (size)
243 rtx insn;
244 rtx val = GEN_INT (size);
246 if (size > 32)
248 rtx nval = gen_rtx_REG (SImode, 1);
249 emit_insn (gen_movsi (nval, val));
250 val = nval;
253 if (direction > 0)
254 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
255 else
256 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
258 emit_insn (insn);
262 /* Work out the registers which need to be saved,
263 both as a mask and a count. */
265 static int
266 calc_live_regs (int * count)
268 int reg;
269 int live_regs_mask = 0;
271 * count = 0;
273 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
275 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
277 (*count)++;
278 live_regs_mask |= (1 << reg);
282 return live_regs_mask;
285 /* Print the operand address in x to the stream. */
287 void
288 mcore_print_operand_address (FILE * stream, rtx x)
290 switch (GET_CODE (x))
292 case REG:
293 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
294 break;
296 case PLUS:
298 rtx base = XEXP (x, 0);
299 rtx index = XEXP (x, 1);
301 if (GET_CODE (base) != REG)
303 /* Ensure that BASE is a register (one of them must be). */
304 rtx temp = base;
305 base = index;
306 index = temp;
309 switch (GET_CODE (index))
311 case CONST_INT:
312 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
313 reg_names[REGNO(base)], INTVAL (index));
314 break;
316 default:
317 gcc_unreachable ();
321 break;
323 default:
324 output_addr_const (stream, x);
325 break;
329 /* Print operand x (an rtx) in assembler syntax to file stream
330 according to modifier code.
332 'R' print the next register or memory location along, i.e. the lsw in
333 a double word value
334 'O' print a constant without the #
335 'M' print a constant as its negative
336 'P' print log2 of a power of two
337 'Q' print log2 of an inverse of a power of two
338 'U' print register for ldm/stm instruction
339 'X' print byte number for xtrbN instruction
    'N' print the bmaski width of a constant, i.e. log2 (constant + 1), or 32 for -1
    'x' print a constant in hexadecimal. */
341 void
342 mcore_print_operand (FILE * stream, rtx x, int code)
344 switch (code)
346 case 'N':
347 if (INTVAL(x) == -1)
348 fprintf (asm_out_file, "32");
349 else
350 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
351 break;
352 case 'P':
353 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
354 break;
355 case 'Q':
356 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
357 break;
358 case 'O':
359 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
360 break;
361 case 'M':
362 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
363 break;
364 case 'R':
365 /* Next location along in memory or register. */
366 switch (GET_CODE (x))
368 case REG:
369 fputs (reg_names[REGNO (x) + 1], (stream));
370 break;
371 case MEM:
372 mcore_print_operand_address
373 (stream, XEXP (adjust_address (x, SImode, 4), 0));
374 break;
375 default:
376 gcc_unreachable ();
378 break;
379 case 'U':
380 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
381 reg_names[REGNO (x) + 3]);
382 break;
383 case 'x':
384 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
385 break;
386 case 'X':
387 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
388 break;
390 default:
391 switch (GET_CODE (x))
393 case REG:
394 fputs (reg_names[REGNO (x)], (stream));
395 break;
396 case MEM:
397 output_address (XEXP (x, 0));
398 break;
399 default:
400 output_addr_const (stream, x);
401 break;
403 break;
407 /* What does a constant cost ? */
409 static int
410 mcore_const_costs (rtx exp, enum rtx_code code)
412 HOST_WIDE_INT val = INTVAL (exp);
414 /* Easy constants. */
415 if ( CONST_OK_FOR_I (val)
416 || CONST_OK_FOR_M (val)
417 || CONST_OK_FOR_N (val)
418 || (code == PLUS && CONST_OK_FOR_L (val)))
419 return 1;
420 else if (code == AND
421 && ( CONST_OK_FOR_M (~val)
422 || CONST_OK_FOR_N (~val)))
423 return 2;
424 else if (code == PLUS
425 && ( CONST_OK_FOR_I (-val)
426 || CONST_OK_FOR_M (-val)
427 || CONST_OK_FOR_N (-val)))
428 return 2;
430 return 5;
433 /* What does an AND instruction cost?  We do this because immediates may
434 have been relaxed.  We want to ensure that CSE will CSE relaxed immediates
435 out; otherwise we will get bad code (multiple reloads of the same constant). */
437 static int
438 mcore_and_cost (rtx x)
440 HOST_WIDE_INT val;
442 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
443 return 2;
445 val = INTVAL (XEXP (x, 1));
447 /* Do it directly. */
448 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
449 return 2;
450 /* Takes one instruction to load. */
451 else if (const_ok_for_mcore (val))
452 return 3;
453 /* Takes two instructions to load. */
454 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
455 return 4;
457 /* Takes a lrw to load. */
458 return 5;
461 /* What does an IOR instruction cost?  See mcore_and_cost above. */
463 static int
464 mcore_ior_cost (rtx x)
466 HOST_WIDE_INT val;
468 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
469 return 2;
471 val = INTVAL (XEXP (x, 1));
473 /* Do it directly with bseti. */
474 if (CONST_OK_FOR_M (val))
475 return 2;
476 /* Takes one instruction to load. */
477 else if (const_ok_for_mcore (val))
478 return 3;
479 /* Takes two instructions to load. */
480 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
481 return 4;
483 /* Takes a lrw to load. */
484 return 5;
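/* Illustrative cost examples (not from the original source), using the
   constraint ranges described elsewhere in this file (K: 0..31, M: an
   exact power of two):
     (and rX 21)          -> 2  (fits K, done directly)
     (and rX 0xffffdfff)  -> 2  (~value is 1 << 13, a single bit to clear)
     (and rX 255)         -> 3  (one insn to load 255, then the and)
     (ior rX 0x2000)      -> 2  (a single bit to set)
     (ior rX 255)         -> 3  (one insn to load 255, then the or)  */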
487 static bool
488 mcore_rtx_costs (rtx x, int code, int outer_code, int * total,
489 bool speed ATTRIBUTE_UNUSED)
491 switch (code)
493 case CONST_INT:
494 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
495 return true;
496 case CONST:
497 case LABEL_REF:
498 case SYMBOL_REF:
499 *total = 5;
500 return true;
501 case CONST_DOUBLE:
502 *total = 10;
503 return true;
505 case AND:
506 *total = COSTS_N_INSNS (mcore_and_cost (x));
507 return true;
509 case IOR:
510 *total = COSTS_N_INSNS (mcore_ior_cost (x));
511 return true;
513 case DIV:
514 case UDIV:
515 case MOD:
516 case UMOD:
517 case FLOAT:
518 case FIX:
519 *total = COSTS_N_INSNS (100);
520 return true;
522 default:
523 return false;
527 /* Prepare the operands for a comparison.  Return true if the comparison
528 condition had to be inverted, so the branch/setcc must test the opposite sense. */
530 bool
531 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
533 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
534 bool invert;
536 if (GET_CODE (op1) == CONST_INT)
538 HOST_WIDE_INT val = INTVAL (op1);
540 switch (code)
542 case GTU:
543 /* Unsigned > 0 is the same as != 0; everything else is converted
544 below to LEU (reversed cmphs). */
545 if (val == 0)
546 code = NE;
547 break;
549 /* Check whether (LE A imm) can become (LT A imm + 1),
550 or (GT A imm) can become (GE A imm + 1). */
551 case GT:
552 case LE:
553 if (CONST_OK_FOR_J (val + 1))
555 op1 = GEN_INT (val + 1);
556 code = code == LE ? LT : GE;
558 break;
560 default:
561 break;
565 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
566 op1 = force_reg (SImode, op1);
568 /* cmpnei: 0-31 (K immediate)
569 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
570 invert = false;
571 switch (code)
573 case EQ: /* Use inverted condition, cmpne. */
574 code = NE;
575 invert = true;
576 /* Drop through. */
578 case NE: /* Use normal condition, cmpne. */
579 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
580 op1 = force_reg (SImode, op1);
581 break;
583 case LE: /* Use inverted condition, reversed cmplt. */
584 code = GT;
585 invert = true;
586 /* Drop through. */
588 case GT: /* Use normal condition, reversed cmplt. */
589 if (GET_CODE (op1) == CONST_INT)
590 op1 = force_reg (SImode, op1);
591 break;
593 case GE: /* Use inverted condition, cmplt. */
594 code = LT;
595 invert = true;
596 /* Drop through. */
598 case LT: /* Use normal condition, cmplt. */
599 if (GET_CODE (op1) == CONST_INT &&
600 /* covered by btsti x,31. */
601 INTVAL (op1) != 0 &&
602 ! CONST_OK_FOR_J (INTVAL (op1)))
603 op1 = force_reg (SImode, op1);
604 break;
606 case GTU: /* Use inverted condition, cmple. */
607 /* We coped with unsigned > 0 above. */
608 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
609 code = LEU;
610 invert = true;
611 /* Drop through. */
613 case LEU: /* Use normal condition, reversed cmphs. */
614 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
615 op1 = force_reg (SImode, op1);
616 break;
618 case LTU: /* Use inverted condition, cmphs. */
619 code = GEU;
620 invert = true;
621 /* Drop through. */
623 case GEU: /* Use normal condition, cmphs. */
624 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
625 op1 = force_reg (SImode, op1);
626 break;
628 default:
629 break;
632 emit_insn (gen_rtx_SET (VOIDmode,
633 cc_reg,
634 gen_rtx_fmt_ee (code, CCmode, op0, op1)));
635 return invert;
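/* Illustrative examples (not from the original source):
     (le rX 10)  is rewritten above to (lt rX 11), since 11 fits the J
                 range; invert stays false and a cmplti rX,11 results.
     (eq rX rY)  is handled as (ne rX rY) with invert == true, i.e. a
                 cmpne whose sense the caller must flip.  */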
639 mcore_symbolic_address_p (rtx x)
641 switch (GET_CODE (x))
643 case SYMBOL_REF:
644 case LABEL_REF:
645 return 1;
646 case CONST:
647 x = XEXP (x, 0);
648 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
649 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
650 && GET_CODE (XEXP (x, 1)) == CONST_INT);
651 default:
652 return 0;
656 /* Functions to output assembly code for a function call. */
658 char *
659 mcore_output_call (rtx operands[], int index)
661 static char buffer[20];
662 rtx addr = operands [index];
664 if (REG_P (addr))
666 if (TARGET_CG_DATA)
668 gcc_assert (mcore_current_function_name);
670 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
671 "unknown", 1);
674 sprintf (buffer, "jsr\t%%%d", index);
676 else
678 if (TARGET_CG_DATA)
680 gcc_assert (mcore_current_function_name);
681 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
683 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
684 XSTR (addr, 0), 0);
687 sprintf (buffer, "jbsr\t%%%d", index);
690 return buffer;
693 /* Can we load a constant with a single instruction ? */
696 const_ok_for_mcore (HOST_WIDE_INT value)
698 if (value >= 0 && value <= 127)
699 return 1;
701 /* Try exact power of two. */
702 if (CONST_OK_FOR_M (value))
703 return 1;
705 /* Try exact power of two - 1. */
706 if (CONST_OK_FOR_N (value) && value != -1)
707 return 1;
709 return 0;
712 /* Can we load a constant inline with up to 2 instructions ? */
715 mcore_const_ok_for_inline (HOST_WIDE_INT value)
717 HOST_WIDE_INT x, y;
719 return try_constant_tricks (value, & x, & y) > 0;
722 /* Are we loading the constant using a not ? */
725 mcore_const_trick_uses_not (HOST_WIDE_INT value)
727 HOST_WIDE_INT x, y;
729 return try_constant_tricks (value, & x, & y) == 2;
732 /* Try tricks to load a constant inline and return the trick number if
733 success (0 is non-inlinable).
735 0: not inlinable
736 1: single instruction (do the usual thing)
737 2: single insn followed by a 'not'
738 3: single insn followed by an addi
739 4: single insn followed by a subi
740 5: single insn followed by rsubi
741 6: single insn followed by bseti
742 7: single insn followed by bclri
743 8: single insn followed by rotli
744 9: single insn followed by lsli
745 10: single insn followed by ixh
746 11: single insn followed by ixw. */
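/* Worked examples (illustrative, not from the original source), assuming
   TARGET_HARDLIT:
     value == -256:  ~value == 255 == (1 << 8) - 1 is loadable, giving
                     trick 2:   bmaski rD,8    then   not rD
     value == 130:   value - 2 == 128 == 1 << 7 is loadable, giving
                     trick 3:   bgeni rD,7     then   addi rD,2  */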
748 static int
749 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
751 HOST_WIDE_INT i;
752 unsigned HOST_WIDE_INT bit, shf, rot;
754 if (const_ok_for_mcore (value))
755 return 1; /* Do the usual thing. */
757 if (! TARGET_HARDLIT)
758 return 0;
760 if (const_ok_for_mcore (~value))
762 *x = ~value;
763 return 2;
766 for (i = 1; i <= 32; i++)
768 if (const_ok_for_mcore (value - i))
770 *x = value - i;
771 *y = i;
773 return 3;
776 if (const_ok_for_mcore (value + i))
778 *x = value + i;
779 *y = i;
781 return 4;
785 bit = 0x80000000ULL;
787 for (i = 0; i <= 31; i++)
789 if (const_ok_for_mcore (i - value))
791 *x = i - value;
792 *y = i;
794 return 5;
797 if (const_ok_for_mcore (value & ~bit))
799 *y = bit;
800 *x = value & ~bit;
801 return 6;
804 if (const_ok_for_mcore (value | bit))
806 *y = ~bit;
807 *x = value | bit;
809 return 7;
812 bit >>= 1;
815 shf = value;
816 rot = value;
818 for (i = 1; i < 31; i++)
820 int c;
822 /* MCore has rotate left. */
823 c = rot << 31;
824 rot >>= 1;
825 rot &= 0x7FFFFFFF;
826 rot |= c; /* Simulate rotate. */
828 if (const_ok_for_mcore (rot))
830 *y = i;
831 *x = rot;
833 return 8;
836 if (shf & 1)
837 shf = 0; /* Can't use logical shift, low order bit is one. */
839 shf >>= 1;
841 if (shf != 0 && const_ok_for_mcore (shf))
843 *y = i;
844 *x = shf;
846 return 9;
850 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
852 *x = value / 3;
854 return 10;
857 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
859 *x = value / 5;
861 return 11;
864 return 0;
867 /* Check whether reg is dead at insn FIRST. This is done by searching ahead
868 for either the next use (i.e., reg is live), a death note, or a set of
869 reg. Don't just use dead_or_set_p() since reload does not always mark
870 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
871 can ignore subregs by extracting the actual register. BRC */
874 mcore_is_dead (rtx first, rtx reg)
876 rtx insn;
878 /* For mcore, subregs can't live independently of their parent regs. */
879 if (GET_CODE (reg) == SUBREG)
880 reg = SUBREG_REG (reg);
882 /* Dies immediately. */
883 if (dead_or_set_p (first, reg))
884 return 1;
886 /* Look for conclusive evidence of live/death, otherwise we have
887 to assume that it is live. */
888 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
890 if (GET_CODE (insn) == JUMP_INSN)
891 return 0; /* We lose track, assume it is alive. */
893 else if (GET_CODE(insn) == CALL_INSN)
895 /* Calls might use it for the target or for register parms. */
896 if (reg_referenced_p (reg, PATTERN (insn))
897 || find_reg_fusage (insn, USE, reg))
898 return 0;
899 else if (dead_or_set_p (insn, reg))
900 return 1;
902 else if (GET_CODE (insn) == INSN)
904 if (reg_referenced_p (reg, PATTERN (insn)))
905 return 0;
906 else if (dead_or_set_p (insn, reg))
907 return 1;
911 /* No conclusive evidence either way, we cannot take the chance
912 that control flow hid the use from us -- "I'm not dead yet". */
913 return 0;
916 /* Count the number of ones in mask. */
919 mcore_num_ones (HOST_WIDE_INT mask)
921 /* A trick to count set bits recently posted on comp.compilers. */
922 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
923 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
924 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
925 mask = ((mask >> 8) + mask);
927 return (mask + (mask >> 16)) & 0xff;
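/* Illustrative note (not from the original source): the steps above sum
   adjacent 1-bit fields into 2-bit fields, then 4-, 8- and 16-bit fields,
   so for example mcore_num_ones (0xff00ffff) returns 24.  */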
930 /* Count the number of zeros in mask. */
933 mcore_num_zeros (HOST_WIDE_INT mask)
935 return 32 - mcore_num_ones (mask);
938 /* Determine byte being masked. */
941 mcore_byte_offset (unsigned int mask)
943 if (mask == 0x00ffffffL)
944 return 0;
945 else if (mask == 0xff00ffffL)
946 return 1;
947 else if (mask == 0xffff00ffL)
948 return 2;
949 else if (mask == 0xffffff00L)
950 return 3;
952 return -1;
955 /* Determine halfword being masked. */
958 mcore_halfword_offset (unsigned int mask)
960 if (mask == 0x0000ffffL)
961 return 0;
962 else if (mask == 0xffff0000L)
963 return 1;
965 return -1;
968 /* Output a series of bseti's corresponding to mask. */
970 const char *
971 mcore_output_bseti (rtx dst, int mask)
973 rtx out_operands[2];
974 int bit;
976 out_operands[0] = dst;
978 for (bit = 0; bit < 32; bit++)
980 if ((mask & 0x1) == 0x1)
982 out_operands[1] = GEN_INT (bit);
984 output_asm_insn ("bseti\t%0,%1", out_operands);
986 mask >>= 1;
989 return "";
992 /* Output a series of bclri's corresponding to mask. */
994 const char *
995 mcore_output_bclri (rtx dst, int mask)
997 rtx out_operands[2];
998 int bit;
1000 out_operands[0] = dst;
1002 for (bit = 0; bit < 32; bit++)
1004 if ((mask & 0x1) == 0x0)
1006 out_operands[1] = GEN_INT (bit);
1008 output_asm_insn ("bclri\t%0,%1", out_operands);
1011 mask >>= 1;
1014 return "";
1017 /* Output a conditional move of two constants that are +/- 1 within each
1018 other. See the "movtK" patterns in mcore.md. I'm not sure this is
1019 really worth the effort. */
1021 const char *
1022 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1024 HOST_WIDE_INT load_value;
1025 HOST_WIDE_INT adjust_value;
1026 rtx out_operands[4];
1028 out_operands[0] = operands[0];
1030 /* Check to see which constant is loadable. */
1031 if (const_ok_for_mcore (INTVAL (operands[1])))
1033 out_operands[1] = operands[1];
1034 out_operands[2] = operands[2];
1036 else if (const_ok_for_mcore (INTVAL (operands[2])))
1038 out_operands[1] = operands[2];
1039 out_operands[2] = operands[1];
1041 /* Complement test since constants are swapped. */
1042 cmp_t = (cmp_t == 0);
1044 load_value = INTVAL (out_operands[1]);
1045 adjust_value = INTVAL (out_operands[2]);
1047 /* First output the test if folded into the pattern. */
1049 if (test)
1050 output_asm_insn (test, operands);
1052 /* Load the constant - for now, only support constants that can be
1053 generated with a single instruction. maybe add general inlinable
1054 constants later (this will increase the # of patterns since the
1055 instruction sequence has a different length attribute). */
1056 if (load_value >= 0 && load_value <= 127)
1057 output_asm_insn ("movi\t%0,%1", out_operands);
1058 else if (CONST_OK_FOR_M (load_value))
1059 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1060 else if (CONST_OK_FOR_N (load_value))
1061 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1063 /* Output the constant adjustment. */
1064 if (load_value > adjust_value)
1066 if (cmp_t)
1067 output_asm_insn ("decf\t%0", out_operands);
1068 else
1069 output_asm_insn ("dect\t%0", out_operands);
1071 else
1073 if (cmp_t)
1074 output_asm_insn ("incf\t%0", out_operands);
1075 else
1076 output_asm_insn ("inct\t%0", out_operands);
1079 return "";
1082 /* Outputs the peephole for moving a constant that gets not'ed followed
1083 by an and (i.e. combine the not and the and into andn). BRC */
1085 const char *
1086 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1088 HOST_WIDE_INT x, y;
1089 rtx out_operands[3];
1090 const char * load_op;
1091 char buf[256];
1092 int trick_no;
1094 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1095 gcc_assert (trick_no == 2);
1097 out_operands[0] = operands[0];
1098 out_operands[1] = GEN_INT (x);
1099 out_operands[2] = operands[2];
1101 if (x >= 0 && x <= 127)
1102 load_op = "movi\t%0,%1";
1104 /* Try exact power of two. */
1105 else if (CONST_OK_FOR_M (x))
1106 load_op = "bgeni\t%0,%P1";
1108 /* Try exact power of two - 1. */
1109 else if (CONST_OK_FOR_N (x))
1110 load_op = "bmaski\t%0,%N1";
1112 else
1114 load_op = "BADMOVI-andn\t%0, %1";
1115 gcc_unreachable ();
1118 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1119 output_asm_insn (buf, out_operands);
1121 return "";
1124 /* Output an inline constant. */
1126 static const char *
1127 output_inline_const (enum machine_mode mode, rtx operands[])
1129 HOST_WIDE_INT x = 0, y = 0;
1130 int trick_no;
1131 rtx out_operands[3];
1132 char buf[256];
1133 char load_op[256];
1134 const char *dst_fmt;
1135 HOST_WIDE_INT value;
1137 value = INTVAL (operands[1]);
1139 trick_no = try_constant_tricks (value, &x, &y);
1140 /* lrw's are handled separately: Large inlinable constants never get
1141 turned into lrw's. Our caller uses try_constant_tricks to back
1142 off to an lrw rather than calling this routine. */
1143 gcc_assert (trick_no != 0);
1145 if (trick_no == 1)
1146 x = value;
1148 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1149 out_operands[0] = operands[0];
1150 out_operands[1] = GEN_INT (x);
1152 if (trick_no > 2)
1153 out_operands[2] = GEN_INT (y);
1155 /* Select dst format based on mode. */
1156 if (mode == DImode && (! TARGET_LITTLE_END))
1157 dst_fmt = "%R0";
1158 else
1159 dst_fmt = "%0";
1161 if (x >= 0 && x <= 127)
1162 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1164 /* Try exact power of two. */
1165 else if (CONST_OK_FOR_M (x))
1166 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1168 /* Try exact power of two - 1. */
1169 else if (CONST_OK_FOR_N (x))
1170 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1172 else
1174 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1175 gcc_unreachable ();
1178 switch (trick_no)
1180 case 1:
1181 strcpy (buf, load_op);
1182 break;
1183 case 2: /* not */
1184 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1185 break;
1186 case 3: /* add */
1187 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1188 break;
1189 case 4: /* sub */
1190 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1191 break;
1192 case 5: /* rsub */
1193 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1194 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1195 break;
1196 case 6: /* bseti */
1197 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1198 break;
1199 case 7: /* bclr */
1200 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1201 break;
1202 case 8: /* rotl */
1203 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1204 break;
1205 case 9: /* lsl */
1206 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1207 break;
1208 case 10: /* ixh */
1209 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1210 break;
1211 case 11: /* ixw */
1212 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1213 break;
1214 default:
1215 return "";
1218 output_asm_insn (buf, out_operands);
1220 return "";
1223 /* Output a move of a word or less value. */
1225 const char *
1226 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1227 enum machine_mode mode ATTRIBUTE_UNUSED)
1229 rtx dst = operands[0];
1230 rtx src = operands[1];
1232 if (GET_CODE (dst) == REG)
1234 if (GET_CODE (src) == REG)
1236 if (REGNO (src) == CC_REG) /* r-c */
1237 return "mvc\t%0";
1238 else
1239 return "mov\t%0,%1"; /* r-r*/
1241 else if (GET_CODE (src) == MEM)
1243 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1244 return "lrw\t%0,[%1]"; /* a-R */
1245 else
1246 switch (GET_MODE (src)) /* r-m */
1248 case SImode:
1249 return "ldw\t%0,%1";
1250 case HImode:
1251 return "ld.h\t%0,%1";
1252 case QImode:
1253 return "ld.b\t%0,%1";
1254 default:
1255 gcc_unreachable ();
1258 else if (GET_CODE (src) == CONST_INT)
1260 HOST_WIDE_INT x, y;
1262 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1263 return "movi\t%0,%1";
1264 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1265 return "bgeni\t%0,%P1\t// %1 %x1";
1266 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1267 return "bmaski\t%0,%N1\t// %1 %x1";
1268 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1269 return output_inline_const (SImode, operands); /* 1-2 insns */
1270 else
1271 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1273 else
1274 return "lrw\t%0, %1"; /* Into the literal pool. */
1276 else if (GET_CODE (dst) == MEM) /* m-r */
1277 switch (GET_MODE (dst))
1279 case SImode:
1280 return "stw\t%1,%0";
1281 case HImode:
1282 return "st.h\t%1,%0";
1283 case QImode:
1284 return "st.b\t%1,%0";
1285 default:
1286 gcc_unreachable ();
1289 gcc_unreachable ();
1292 /* Return a sequence of instructions to perform DI or DF move.
1293 Since the MCORE cannot move a DI or DF in one instruction, we have
1294 to take care when we see overlapping source and dest registers. */
1296 const char *
1297 mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
1299 rtx dst = operands[0];
1300 rtx src = operands[1];
1302 if (GET_CODE (dst) == REG)
1304 if (GET_CODE (src) == REG)
1306 int dstreg = REGNO (dst);
1307 int srcreg = REGNO (src);
1309 /* Ensure the second source is not overwritten. */
1310 if (srcreg + 1 == dstreg)
1311 return "mov %R0,%R1\n\tmov %0,%1";
1312 else
1313 return "mov %0,%1\n\tmov %R0,%R1";
1315 else if (GET_CODE (src) == MEM)
1317 rtx memexp = XEXP (src, 0);
1318 int dstreg = REGNO (dst);
1319 int basereg = -1;
1321 if (GET_CODE (memexp) == LABEL_REF)
1322 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1323 else if (GET_CODE (memexp) == REG)
1324 basereg = REGNO (memexp);
1325 else if (GET_CODE (memexp) == PLUS)
1327 if (GET_CODE (XEXP (memexp, 0)) == REG)
1328 basereg = REGNO (XEXP (memexp, 0));
1329 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1330 basereg = REGNO (XEXP (memexp, 1));
1331 else
1332 gcc_unreachable ();
1334 else
1335 gcc_unreachable ();
1337 /* ??? length attribute is wrong here. */
1338 if (dstreg == basereg)
1340 /* Just load them in reverse order. */
1341 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1343 /* XXX: alternative: move basereg to basereg+1
1344 and then fall through. */
1346 else
1347 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1349 else if (GET_CODE (src) == CONST_INT)
1351 if (TARGET_LITTLE_END)
1353 if (CONST_OK_FOR_I (INTVAL (src)))
1354 output_asm_insn ("movi %0,%1", operands);
1355 else if (CONST_OK_FOR_M (INTVAL (src)))
1356 output_asm_insn ("bgeni %0,%P1", operands);
1357 else if (CONST_OK_FOR_N (INTVAL (src)))
1358 output_asm_insn ("bmaski %0,%N1", operands);
1359 else
1360 gcc_unreachable ();
1362 if (INTVAL (src) < 0)
1363 return "bmaski %R0,32";
1364 else
1365 return "movi %R0,0";
1367 else
1369 if (CONST_OK_FOR_I (INTVAL (src)))
1370 output_asm_insn ("movi %R0,%1", operands);
1371 else if (CONST_OK_FOR_M (INTVAL (src)))
1372 output_asm_insn ("bgeni %R0,%P1", operands);
1373 else if (CONST_OK_FOR_N (INTVAL (src)))
1374 output_asm_insn ("bmaski %R0,%N1", operands);
1375 else
1376 gcc_unreachable ();
1378 if (INTVAL (src) < 0)
1379 return "bmaski %0,32";
1380 else
1381 return "movi %0,0";
1384 else
1385 gcc_unreachable ();
1387 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1388 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1389 else
1390 gcc_unreachable ();
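/* Illustrative example (not from the original source): for a
   register-to-register DImode move with DST starting at r3 and SRC
   starting at r2, the register pairs overlap in r3, so the sequence
       mov  r4,r3
       mov  r3,r2
   is emitted -- r3 is copied out of the way before it is overwritten.  */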
1393 /* Predicates used by the templates. */
1396 mcore_arith_S_operand (rtx op)
1398 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1399 return 1;
1401 return 0;
1404 /* Expand insert bit field. BRC */
1407 mcore_expand_insv (rtx operands[])
1409 int width = INTVAL (operands[1]);
1410 int posn = INTVAL (operands[2]);
1411 int mask;
1412 rtx mreg, sreg, ereg;
1414 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1415 for width==1 must be removed. Look around line 368. This is something
1416 we really want the md part to do. */
1417 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1419 /* Do directly with bseti or bclri. */
1420 /* RBE: 2/97 consider only low bit of constant. */
1421 if ((INTVAL (operands[3]) & 1) == 0)
1423 mask = ~(1 << posn);
1424 emit_insn (gen_rtx_SET (SImode, operands[0],
1425 gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
1427 else
1429 mask = 1 << posn;
1430 emit_insn (gen_rtx_SET (SImode, operands[0],
1431 gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
1434 return 1;
1437 /* Look at some bit-field placements that we aren't interested
1438 in handling ourselves, unless specifically directed to do so. */
1439 if (! TARGET_W_FIELD)
1440 return 0; /* Generally, give up about now. */
1442 if (width == 8 && posn % 8 == 0)
1443 /* Byte sized and aligned; let caller break it up. */
1444 return 0;
1446 if (width == 16 && posn % 16 == 0)
1447 /* Short sized and aligned; let caller break it up. */
1448 return 0;
1450 /* The general case - we can do this a little bit better than what the
1451 machine independent part tries. This will get rid of all the subregs
1452 that mess up constant folding in combine when working with relaxed
1453 immediates. */
1455 /* If setting the entire field, do it directly. */
1456 if (GET_CODE (operands[3]) == CONST_INT
1457 && INTVAL (operands[3]) == ((1 << width) - 1))
1459 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1460 emit_insn (gen_rtx_SET (SImode, operands[0],
1461 gen_rtx_IOR (SImode, operands[0], mreg)));
1462 return 1;
1465 /* Generate the clear mask. */
1466 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1468 /* Clear the field, to overlay it later with the source. */
1469 emit_insn (gen_rtx_SET (SImode, operands[0],
1470 gen_rtx_AND (SImode, operands[0], mreg)));
1472 /* If the source is constant 0, we've nothing to add back. */
1473 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1474 return 1;
1476 /* XXX: Should we worry about more games with constant values?
1477 We've covered the high profile: set/clear single-bit and many-bit
1478 fields. How often do we see "arbitrary bit pattern" constants? */
1479 sreg = copy_to_mode_reg (SImode, operands[3]);
1481 /* Extract src as same width as dst (needed for signed values). We
1482 always have to do this since we widen everything to SImode.
1483 We don't have to mask if we're shifting this up against the
1484 MSB of the register (i.e., the shift will push out any hi-order
1485 bits). */
1486 if (width + posn != (int) GET_MODE_BITSIZE (SImode))
1488 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1489 emit_insn (gen_rtx_SET (SImode, sreg,
1490 gen_rtx_AND (SImode, sreg, ereg)));
1493 /* Insert source value in dest. */
1494 if (posn != 0)
1495 emit_insn (gen_rtx_SET (SImode, sreg,
1496 gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
1498 emit_insn (gen_rtx_SET (SImode, operands[0],
1499 gen_rtx_IOR (SImode, operands[0], sreg)));
1501 return 1;
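/* Illustrative walk-through (not from the original source): inserting a
   value into bits [11:8] of operand 0 (width 4, posn 8) in the general
   case above clears the field with an AND against ~(0xf << 8), masks the
   source with 0xf, shifts it left by 8, and ORs it back into operand 0.  */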
1504 /* ??? Block move stuff stolen from m88k. This code has not been
1505 verified for correctness. */
1507 /* Emit code to perform a block move. Choose the best method.
1509 OPERANDS[0] is the destination.
1510 OPERANDS[1] is the source.
1511 OPERANDS[2] is the size.
1512 OPERANDS[3] is the alignment safe to use. */
1514 /* Emit code to perform a block move with an offset sequence of ldw/st
1515 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1516 known constants. DEST and SRC are registers. OFFSET is the known
1517 starting point for the output pattern. */
1519 static const enum machine_mode mode_from_align[] =
1521 VOIDmode, QImode, HImode, VOIDmode, SImode,
1524 static void
1525 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1527 rtx temp[2];
1528 enum machine_mode mode[2];
1529 int amount[2];
1530 bool active[2];
1531 int phase = 0;
1532 int next;
1533 int offset_ld = 0;
1534 int offset_st = 0;
1535 rtx x;
1537 x = XEXP (dst_mem, 0);
1538 if (!REG_P (x))
1540 x = force_reg (Pmode, x);
1541 dst_mem = replace_equiv_address (dst_mem, x);
1544 x = XEXP (src_mem, 0);
1545 if (!REG_P (x))
1547 x = force_reg (Pmode, x);
1548 src_mem = replace_equiv_address (src_mem, x);
1551 active[0] = active[1] = false;
1555 next = phase;
1556 phase ^= 1;
1558 if (size > 0)
1560 int next_amount;
1562 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1563 next_amount = MIN (next_amount, align);
1565 amount[next] = next_amount;
1566 mode[next] = mode_from_align[next_amount];
1567 temp[next] = gen_reg_rtx (mode[next]);
1569 x = adjust_address (src_mem, mode[next], offset_ld);
1570 emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
1572 offset_ld += next_amount;
1573 size -= next_amount;
1574 active[next] = true;
1577 if (active[phase])
1579 active[phase] = false;
1581 x = adjust_address (dst_mem, mode[phase], offset_st);
1582 emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
1584 offset_st += amount[phase];
1587 while (active[next]);
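/* Illustrative example (not from the original source), using t0/t1 for the
   two temporaries: a 12 byte, word aligned copy comes out of the loop above
   as
       ldw  t0,(src,0)
       ldw  t1,(src,4)
       stw  t0,(dst,0)
       ldw  t0,(src,8)
       stw  t1,(dst,4)
       stw  t0,(dst,8)
   i.e. each load is issued one iteration ahead of the matching store.  */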
1590 bool
1591 mcore_expand_block_move (rtx *operands)
1593 HOST_WIDE_INT align, bytes, max;
1595 if (GET_CODE (operands[2]) != CONST_INT)
1596 return false;
1598 bytes = INTVAL (operands[2]);
1599 align = INTVAL (operands[3]);
1601 if (bytes <= 0)
1602 return false;
1603 if (align > 4)
1604 align = 4;
1606 switch (align)
1608 case 4:
1609 if (bytes & 1)
1610 max = 4*4;
1611 else if (bytes & 3)
1612 max = 8*4;
1613 else
1614 max = 16*4;
1615 break;
1616 case 2:
1617 max = 4*2;
1618 break;
1619 case 1:
1620 max = 4*1;
1621 break;
1622 default:
1623 gcc_unreachable ();
1626 if (bytes <= max)
1628 block_move_sequence (operands[0], operands[1], bytes, align);
1629 return true;
1632 return false;
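/* Illustrative consequence of the limits above (not from the original
   source): with word alignment, a copy whose length is a multiple of 4 is
   expanded inline up to 64 bytes, a multiple of 2 up to 32 bytes, and an
   odd length only up to 16 bytes; anything larger is left to the normal
   out-of-line copy.  */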
1636 /* Code to generate prologue and epilogue sequences. */
1637 static int number_of_regs_before_varargs;
1639 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1640 for a varargs function. */
1641 static int current_function_anonymous_args;
1643 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1644 #define STORE_REACH (64) /* Maximum displacement of word store + 4. */
1645 #define ADDI_REACH (32) /* Maximum addi operand. */
1647 static void
1648 layout_mcore_frame (struct mcore_frame * infp)
1650 int n;
1651 unsigned int i;
1652 int nbytes;
1653 int regarg;
1654 int localregarg;
1655 int localreg;
1656 int outbounds;
1657 unsigned int growths;
1658 int step;
1660 /* Might have to spill bytes to re-assemble a big argument that
1661 was passed partially in registers and partially on the stack. */
1662 nbytes = crtl->args.pretend_args_size;
1664 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1665 if (current_function_anonymous_args)
1666 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1668 infp->arg_size = nbytes;
1670 /* How much space to save non-volatile registers we stomp. */
1671 infp->reg_mask = calc_live_regs (& n);
1672 infp->reg_size = n * 4;
1674 /* And the rest of it... locals and space for overflowed outbounds. */
1675 infp->local_size = get_frame_size ();
1676 infp->outbound_size = crtl->outgoing_args_size;
1678 /* Make sure we have a whole number of words for the locals. */
1679 if (infp->local_size % STACK_BYTES)
1680 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1682 /* Only thing we know we have to pad is the outbound space, since
1683 we've aligned our locals assuming that base of locals is aligned. */
1684 infp->pad_local = 0;
1685 infp->pad_reg = 0;
1686 infp->pad_outbound = 0;
1687 if (infp->outbound_size % STACK_BYTES)
1688 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1690 /* Now we see how we want to stage the prologue so that it does
1691 the most appropriate stack growth and register saves to either:
1692 (1) run fast,
1693 (2) reduce instruction space, or
1694 (3) reduce stack space. */
1695 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1696 infp->growth[i] = 0;
1698 regarg = infp->reg_size + infp->arg_size;
1699 localregarg = infp->local_size + regarg;
1700 localreg = infp->local_size + infp->reg_size;
1701 outbounds = infp->outbound_size + infp->pad_outbound;
1702 growths = 0;
1704 /* XXX: Consider one where we consider localregarg + outbound too! */
1706 /* If the frame is <= 32 bytes and an stm would save <= 2 registers,
1707 use stw's with offsets and buy the frame in one shot. */
1708 if (localregarg <= ADDI_REACH
1709 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1711 /* Make sure we'll be aligned. */
1712 if (localregarg % STACK_BYTES)
1713 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1715 step = localregarg + infp->pad_reg;
1716 infp->reg_offset = infp->local_size;
1718 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1720 step += outbounds;
1721 infp->reg_offset += outbounds;
1722 outbounds = 0;
1725 infp->arg_offset = step - 4;
1726 infp->growth[growths++] = step;
1727 infp->reg_growth = growths;
1728 infp->local_growth = growths;
1730 /* If we haven't already folded it in. */
1731 if (outbounds)
1732 infp->growth[growths++] = outbounds;
1734 goto finish;
1737 /* Frame can't be done with a single subi, but can be done with 2
1738 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1739 shift some of the stack purchase into the first subi, so both are
1740 single instructions. */
1741 if (localregarg <= STORE_REACH
1742 && (infp->local_size > ADDI_REACH)
1743 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1745 int all;
1747 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1748 if (localregarg % STACK_BYTES)
1749 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1751 all = localregarg + infp->pad_reg + infp->pad_local;
1752 step = ADDI_REACH; /* As much up front as we can. */
1753 if (step > all)
1754 step = all;
1756 /* XXX: Consider whether step will still be aligned; we believe so. */
1757 infp->arg_offset = step - 4;
1758 infp->growth[growths++] = step;
1759 infp->reg_growth = growths;
1760 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1761 all -= step;
1763 /* Can we fold in any space required for outbounds? */
1764 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1766 all += outbounds;
1767 outbounds = 0;
1770 /* Get the rest of the locals in place. */
1771 step = all;
1772 infp->growth[growths++] = step;
1773 infp->local_growth = growths;
1774 all -= step;
1776 assert (all == 0);
1778 /* Finish off if we need to do so. */
1779 if (outbounds)
1780 infp->growth[growths++] = outbounds;
1782 goto finish;
1785 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1786 Then we buy the rest of the frame in 1 or 2 steps depending on
1787 whether we need a frame pointer. */
1788 if ((regarg % STACK_BYTES) == 0)
1790 infp->growth[growths++] = regarg;
1791 infp->reg_growth = growths;
1792 infp->arg_offset = regarg - 4;
1793 infp->reg_offset = 0;
1795 if (infp->local_size % STACK_BYTES)
1796 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1798 step = infp->local_size + infp->pad_local;
1800 if (!frame_pointer_needed)
1802 step += outbounds;
1803 outbounds = 0;
1806 infp->growth[growths++] = step;
1807 infp->local_growth = growths;
1809 /* If there's any left to be done. */
1810 if (outbounds)
1811 infp->growth[growths++] = outbounds;
1813 goto finish;
1816 /* XXX: optimizations that we'll want to play with....
1817 -- regarg is not aligned, but it's a small number of registers;
1818 use some of localsize so that regarg is aligned and then
1819 save the registers. */
1821 /* Simple encoding; plods down the stack buying the pieces as it goes.
1822 -- does not optimize space consumption.
1823 -- does not attempt to optimize instruction counts.
1824 -- but it is safe for all alignments. */
1825 if (regarg % STACK_BYTES != 0)
1826 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1828 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1829 infp->reg_growth = growths;
1830 infp->arg_offset = infp->growth[0] - 4;
1831 infp->reg_offset = 0;
1833 if (frame_pointer_needed)
1835 if (infp->local_size % STACK_BYTES != 0)
1836 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1838 infp->growth[growths++] = infp->local_size + infp->pad_local;
1839 infp->local_growth = growths;
1841 infp->growth[growths++] = outbounds;
1843 else
1845 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1846 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1848 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1849 infp->local_growth = growths;
1852 /* Anything else that we've forgotten?, plus a few consistency checks. */
1853 finish:
1854 assert (infp->reg_offset >= 0);
1855 assert (growths <= MAX_STACK_GROWS);
1857 for (i = 0; i < growths; i++)
1858 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1861 /* Define the offset between two registers, one to be eliminated, and
1862 the other its replacement, at the start of a routine. */
1865 mcore_initial_elimination_offset (int from, int to)
1867 int above_frame;
1868 int below_frame;
1869 struct mcore_frame fi;
1871 layout_mcore_frame (& fi);
1873 /* fp to ap */
1874 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1875 /* sp to fp */
1876 below_frame = fi.outbound_size + fi.pad_outbound;
1878 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1879 return above_frame;
1881 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1882 return above_frame + below_frame;
1884 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1885 return below_frame;
1887 gcc_unreachable ();
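/* Illustrative summary (not from the original source) of the offsets used
   above, from higher to lower addresses:

       arg pointer   ->
                        saved registers (+ pad_reg)
                        locals          (+ pad_local)
       frame pointer ->
                        outgoing args   (+ pad_outbound)
       stack pointer ->

   so AP - FP == local_size + pad_local + reg_size + pad_reg and
   FP - SP == outbound_size + pad_outbound.  */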
1890 /* Keep track of some information about varargs for the prolog. */
1892 static void
1893 mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
1894 enum machine_mode mode, tree type,
1895 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1896 int second_time ATTRIBUTE_UNUSED)
1898 current_function_anonymous_args = 1;
1900 /* We need to know how many argument registers are used before
1901 the varargs start, so that we can push the remaining argument
1902 registers during the prologue. */
1903 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1905 /* There is a bug somewhere in the arg handling code.
1906 Until I can find it this workaround always pushes the
1907 last named argument onto the stack. */
1908 number_of_regs_before_varargs = *args_so_far;
1910 /* The last named argument may be split between argument registers
1911 and the stack. Allow for this here. */
1912 if (number_of_regs_before_varargs > NPARM_REGS)
1913 number_of_regs_before_varargs = NPARM_REGS;
1916 void
1917 mcore_expand_prolog (void)
1919 struct mcore_frame fi;
1920 int space_allocated = 0;
1921 int growth = 0;
1923 /* Find out what we're doing. */
1924 layout_mcore_frame (&fi);
1926 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1927 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1929 if (TARGET_CG_DATA)
1931 /* Emit a symbol for this routine's frame size. */
1932 rtx x;
1934 x = DECL_RTL (current_function_decl);
1936 gcc_assert (GET_CODE (x) == MEM);
1938 x = XEXP (x, 0);
1940 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1942 if (mcore_current_function_name)
1943 free (mcore_current_function_name);
1945 mcore_current_function_name = xstrdup (XSTR (x, 0));
1947 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1949 if (cfun->calls_alloca)
1950 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1952 /* 970425: RBE:
1953 We're looking at how the 8-byte alignment affects stack layout
1954 and where we had to pad things. This emits information we can
1955 extract which tells us about frame sizes and the like. */
1956 fprintf (asm_out_file,
1957 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1958 mcore_current_function_name,
1959 fi.arg_size, fi.reg_size, fi.reg_mask,
1960 fi.local_size, fi.outbound_size,
1961 frame_pointer_needed);
1964 if (mcore_naked_function_p ())
1965 return;
1967 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
1968 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1970 /* If we have a parameter passed partially in regs and partially in memory,
1971 the registers will have been stored to memory already in function.c. So
1972 we only need to do something here for varargs functions. */
1973 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1975 int offset;
1976 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
1977 int remaining = fi.arg_size;
1979 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
1981 emit_insn (gen_movsi
1982 (gen_rtx_MEM (SImode,
1983 plus_constant (stack_pointer_rtx, offset)),
1984 gen_rtx_REG (SImode, rn)));
1988 /* Do we need another stack adjustment before we do the register saves? */
1989 if (growth < fi.reg_growth)
1990 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1992 if (fi.reg_size != 0)
1994 int i;
1995 int offs = fi.reg_offset;
1997 for (i = 15; i >= 0; i--)
1999 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2001 int first_reg = 15;
2003 while (fi.reg_mask & (1 << first_reg))
2004 first_reg--;
2005 first_reg++;
2007 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2008 gen_rtx_REG (SImode, first_reg),
2009 GEN_INT (16 - first_reg)));
2011 i -= (15 - first_reg);
2012 offs += (16 - first_reg) * 4;
2014 else if (fi.reg_mask & (1 << i))
2016 emit_insn (gen_movsi
2017 (gen_rtx_MEM (SImode,
2018 plus_constant (stack_pointer_rtx, offs)),
2019 gen_rtx_REG (SImode, i)));
2020 offs += 4;
2025 /* Figure the locals + outbounds. */
2026 if (frame_pointer_needed)
2028 /* If we haven't already purchased to 'fp'. */
2029 if (growth < fi.local_growth)
2030 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2032 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2034 /* ... and then go any remaining distance for outbounds, etc. */
2035 if (fi.growth[growth])
2036 output_stack_adjust (-1, fi.growth[growth++]);
2038 else
2040 if (growth < fi.local_growth)
2041 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2042 if (fi.growth[growth])
2043 output_stack_adjust (-1, fi.growth[growth++]);
2047 void
2048 mcore_expand_epilog (void)
2050 struct mcore_frame fi;
2051 int i;
2052 int offs;
2053 int growth = MAX_STACK_GROWS - 1;
2056 /* Find out what we're doing. */
2057 layout_mcore_frame(&fi);
2059 if (mcore_naked_function_p ())
2060 return;
2062 /* If we had a frame pointer, restore the sp from that. */
2063 if (frame_pointer_needed)
2065 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2066 growth = fi.local_growth - 1;
2068 else
2070 /* XXX: while loop should accumulate and do a single sell. */
2071 while (growth >= fi.local_growth)
2073 if (fi.growth[growth] != 0)
2074 output_stack_adjust (1, fi.growth[growth]);
2075 growth--;
2079 /* Make sure we've shrunk stack back to the point where the registers
2080 were laid down. This is typically 0/1 iterations. Then pull the
2081 register save information back off the stack. */
2082 while (growth >= fi.reg_growth)
2083 output_stack_adjust ( 1, fi.growth[growth--]);
2085 offs = fi.reg_offset;
2087 for (i = 15; i >= 0; i--)
2089 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2091 int first_reg;
2093 /* Find the starting register. */
2094 first_reg = 15;
2096 while (fi.reg_mask & (1 << first_reg))
2097 first_reg--;
2099 first_reg++;
2101 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2102 gen_rtx_MEM (SImode, stack_pointer_rtx),
2103 GEN_INT (16 - first_reg)));
2105 i -= (15 - first_reg);
2106 offs += (16 - first_reg) * 4;
2108 else if (fi.reg_mask & (1 << i))
2110 emit_insn (gen_movsi
2111 (gen_rtx_REG (SImode, i),
2112 gen_rtx_MEM (SImode,
2113 plus_constant (stack_pointer_rtx, offs))));
2114 offs += 4;
2118 /* Give back anything else. */
2119 /* XXX: Should accumulate total and then give it back. */
2120 while (growth >= 0)
2121 output_stack_adjust ( 1, fi.growth[growth--]);
2124 /* This code is borrowed from the SH port. */
2126 /* The MCORE cannot load a large constant into a register, constants have to
2127 come from a pc relative load. The reference of a pc relative load
2128 instruction must be less than 1k in front of the instruction. This
2129 means that we often have to dump a constant inside a function, and
2130 generate code to branch around it.
2132 It is important to minimize this, since the branches will slow things
2133 down and make things bigger.
2135 Worst case code looks like:
2137 lrw L1,r0
2138 br L2
2139 align
2140 L1: .long value
2144 lrw L3,r0
2145 br L4
2146 align
2147 L3: .long value
2151 We fix this by performing a scan before scheduling, which notices which
2152 instructions need to have their operands fetched from the constant table
2153 and builds the table.
2155 The algorithm is:
2157 scan, find an instruction which needs a pcrel move. Look forward, find the
2158 last barrier which is within MAX_COUNT bytes of the requirement.
2159 If there isn't one, make one. Process all the instructions between
2160 the find and the barrier.
2162 In the above example, we can tell that L3 is within 1k of L1, so
2163 the first move can be shrunk from the 2 insn+constant sequence into
2164 just 1 insn, and the constant moved to L3 to make:
2166 lrw L1,r0
2168 lrw L3,r0
2169 br L4
2170 align
2171 L3:.long value
2172 L4:.long value
2174 Then the second move becomes the target for the shortening process. */
2176 typedef struct
2178 rtx value; /* Value in table. */
2179 rtx label; /* Label of value. */
2180 } pool_node;
2182 /* The maximum number of constants that can fit into one pool, since
2183 the pc relative range is 0...1020 bytes and constants are at least 4
2184 bytes long. We subtract 4 from the range to allow for the case where
2185 we need to add a branch/align before the constant pool. */
2187 #define MAX_COUNT 1016
2188 #define MAX_POOL_SIZE (MAX_COUNT/4)
2189 static pool_node pool_vector[MAX_POOL_SIZE];
2190 static int pool_size;
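/* The following is an illustrative sketch only, kept out of the build with
   "#if 0", of the scan described in the comment above.  It works on a
   hypothetical, simplified insn list: struct toy_insn and the helpers
   toy_emit_barrier_before / toy_dump_pool_after are made up for the
   example and are not part of GCC's real rtl stream.  */
#if 0
struct toy_insn
{
  int needs_pool_ref;		/* Insn wants a pc-relative constant.  */
  int is_barrier;		/* Existing barrier; a pool may follow it.  */
  int length;			/* Size of the insn in bytes.  */
  struct toy_insn * next;
};

static struct toy_insn * toy_emit_barrier_before (struct toy_insn *);	/* Hypothetical.  */
static void toy_dump_pool_after (struct toy_insn *);			/* Hypothetical.  */

static void
toy_pool_scan (struct toy_insn * first)
{
  struct toy_insn * insn;

  for (insn = first; insn; insn = insn->next)
    if (insn->needs_pool_ref)
      {
	struct toy_insn * scan = insn;
	struct toy_insn * last_barrier = 0;
	int count = 0;

	/* Look forward for the last barrier within MAX_COUNT bytes.  */
	while (scan && count + scan->length < MAX_COUNT)
	  {
	    if (scan->is_barrier)
	      last_barrier = scan;
	    count += scan->length;
	    scan = scan->next;
	  }

	/* If there is no barrier in range, make one.  */
	if (last_barrier == 0)
	  last_barrier = toy_emit_barrier_before (scan);

	/* Process the insns between the find and the barrier: move their
	   constants into the pool dumped after the barrier.  */
	toy_dump_pool_after (last_barrier);
	insn = last_barrier;
      }
}
#endif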
2192 /* Dump out any constants accumulated in the final pass. These
2193 will only be labels. */
2195 const char *
2196 mcore_output_jump_label_table (void)
2198 int i;
2200 if (pool_size)
2202 fprintf (asm_out_file, "\t.align 2\n");
2204 for (i = 0; i < pool_size; i++)
2206 pool_node * p = pool_vector + i;
2208 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2210 output_asm_insn (".long %0", &p->value);
2213 pool_size = 0;
2216 return "";
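/* Illustrative shape of the table emitted above for a two-entry pool
   (label numbers and local-label syntax are hypothetical; they depend on
   the target's internal-label format, and the pooled values here are
   themselves labels):

	.align 2
   L<a>:
	.long	L<x>
   L<b>:
	.long	L<y>
*/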
2219 /* Check whether insn is a candidate for a conditional. */
2221 static cond_type
2222 is_cond_candidate (rtx insn)
2224 /* The only things we conditionalize are those that can be directly
2225 changed into a conditional. Only bother with SImode items. If
2226 we wanted to be a little more aggressive, we could also do other
2227 modes such as DImode with reg-reg move or load 0. */
2228 if (GET_CODE (insn) == INSN)
2230 rtx pat = PATTERN (insn);
2231 rtx src, dst;
2233 if (GET_CODE (pat) != SET)
2234 return COND_NO;
2236 dst = XEXP (pat, 0);
2238 if ((GET_CODE (dst) != REG &&
2239 GET_CODE (dst) != SUBREG) ||
2240 GET_MODE (dst) != SImode)
2241 return COND_NO;
2243 src = XEXP (pat, 1);
2245 if ((GET_CODE (src) == REG ||
2246 (GET_CODE (src) == SUBREG &&
2247 GET_CODE (SUBREG_REG (src)) == REG)) &&
2248 GET_MODE (src) == SImode)
2249 return COND_MOV_INSN;
2250 else if (GET_CODE (src) == CONST_INT &&
2251 INTVAL (src) == 0)
2252 return COND_CLR_INSN;
2253 else if (GET_CODE (src) == PLUS &&
2254 (GET_CODE (XEXP (src, 0)) == REG ||
2255 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2256 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2257 GET_MODE (XEXP (src, 0)) == SImode &&
2258 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2259 INTVAL (XEXP (src, 1)) == 1)
2260 return COND_INC_INSN;
2261 else if (((GET_CODE (src) == MINUS &&
2262 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2263 INTVAL (XEXP (src, 1)) == 1) ||
2264 (GET_CODE (src) == PLUS &&
2265 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2266 INTVAL (XEXP (src, 1)) == -1)) &&
2267 (GET_CODE (XEXP (src, 0)) == REG ||
2268 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2269 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2270 GET_MODE (XEXP (src, 0)) == SImode)
2271 return COND_DEC_INSN;
2273 /* Some insns that we don't bother with:
2274 (set (rx:DI) (ry:DI))
2275 (set (rx:DI) (const_int 0))
2279 else if (GET_CODE (insn) == JUMP_INSN &&
2280 GET_CODE (PATTERN (insn)) == SET &&
2281 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2282 return COND_BRANCH_INSN;
2284 return COND_NO;
2287 /* Emit a conditional version of insn and replace the old insn with the
2288 new one. Return the new insn if emitted. */
2290 static rtx
2291 emit_new_cond_insn (rtx insn, int cond)
2293 rtx c_insn = 0;
2294 rtx pat, dst, src;
2295 cond_type num;
2297 if ((num = is_cond_candidate (insn)) == COND_NO)
2298 return NULL;
2300 pat = PATTERN (insn);
2302 if (GET_CODE (insn) == INSN)
2304 dst = SET_DEST (pat);
2305 src = SET_SRC (pat);
2307 else
2309 dst = JUMP_LABEL (insn);
2310 src = NULL_RTX;
2313 switch (num)
2315 case COND_MOV_INSN:
2316 case COND_CLR_INSN:
2317 if (cond)
2318 c_insn = gen_movt0 (dst, src, dst);
2319 else
2320 c_insn = gen_movt0 (dst, dst, src);
2321 break;
2323 case COND_INC_INSN:
2324 if (cond)
2325 c_insn = gen_incscc (dst, dst);
2326 else
2327 c_insn = gen_incscc_false (dst, dst);
2328 break;
2330 case COND_DEC_INSN:
2331 if (cond)
2332 c_insn = gen_decscc (dst, dst);
2333 else
2334 c_insn = gen_decscc_false (dst, dst);
2335 break;
2337 case COND_BRANCH_INSN:
2338 if (cond)
2339 c_insn = gen_branch_true (dst);
2340 else
2341 c_insn = gen_branch_false (dst);
2342 break;
2344 default:
2345 return NULL;
2348 /* Only copy the notes if they exist. */
2349 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2351 /* We really don't need to bother with the notes and links at this
2352 point, but go ahead and save the notes. This will help is_dead()
2353 when applying peepholes (links don't matter since they are not
2354 used any more beyond this point for the mcore). */
2355 REG_NOTES (c_insn) = REG_NOTES (insn);
2358 if (num == COND_BRANCH_INSN)
2360 /* For jumps, we need to be a little bit careful and emit the new jump
2361 before the old one and to update the use count for the target label.
2362 This way, the barrier following the old (uncond) jump will get
2363 deleted, but the label won't. */
2364 c_insn = emit_jump_insn_before (c_insn, insn);
2366 ++ LABEL_NUSES (dst);
2368 JUMP_LABEL (c_insn) = dst;
2370 else
2371 c_insn = emit_insn_after (c_insn, insn);
2373 delete_insn (insn);
2375 return c_insn;
2378 /* Attempt to change a basic block into a series of conditional insns. This
2379 works by taking the branch at the end of the 1st block and scanning for the
2380 end of the 2nd block. If all instructions in the 2nd block have cond.
2381 versions and the label at the start of block 3 is the same as the target
2382 from the branch at block 1, then conditionalize all insns in block 2 using
2383 the inverse condition of the branch at block 1. (Note I'm bending the
2384 definition of basic block here.)
2386 e.g., change:
2388 bt L2 <-- end of block 1 (delete)
2389 mov r7,r8
2390 addu r7,1
2391 br L3 <-- end of block 2
2393 L2: ... <-- start of block 3 (NUSES==1)
2394 L3: ...
   to:
2398 movf r7,r8
2399 incf r7
2400 bf L3
2402 L3: ...
2404 We can delete the L2 label if NUSES==1 and re-apply the optimization
2405 starting at the last instruction of block 2. This may allow an entire
2406 if-then-else statement to be conditionalized. BRC */
2407 static rtx
2408 conditionalize_block (rtx first)
2410 rtx insn;
2411 rtx br_pat;
2412 rtx end_blk_1_br = 0;
2413 rtx end_blk_2_insn = 0;
2414 rtx start_blk_3_lab = 0;
2415 int cond;
2416 int br_lab_num;
2417 int blk_size = 0;
2420 /* Check that the first insn is a candidate conditional jump. This is
2421 the one that we'll eliminate. If not, advance to the next insn to
2422 try. */
2423 if (GET_CODE (first) != JUMP_INSN ||
2424 GET_CODE (PATTERN (first)) != SET ||
2425 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2426 return NEXT_INSN (first);
2428 /* Extract some information we need. */
2429 end_blk_1_br = first;
2430 br_pat = PATTERN (end_blk_1_br);
2432 /* Complement the condition since we use the reverse cond. for the insns. */
2433 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2435 /* Determine what kind of branch we have. */
2436 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2438 /* A normal branch, so extract label out of first arm. */
2439 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2441 else
2443 /* An inverse branch, so extract the label out of the 2nd arm
2444 and complement the condition. */
2445 cond = (cond == 0);
2446 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2449 /* Scan forward for the start of block 2: it must start with a
2450 label and that label must be the same as the branch target
2451 label from block 1. We don't care about whether block 2 actually
2452 ends with a branch or a label (an uncond. branch is
2453 conditionalizable). */
2454 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2456 enum rtx_code code;
2458 code = GET_CODE (insn);
2460 /* Look for the label at the start of block 3. */
2461 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2462 break;
2464 /* Skip barriers, notes, and conditionalizable insns. If the
2465 insn is not conditionalizable or makes this optimization fail,
2466 just return the next insn so we can start over from that point. */
2467 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2468 return NEXT_INSN (insn);
2470 /* Remember the last real insn before the label (i.e. end of block 2). */
2471 if (code == JUMP_INSN || code == INSN)
2473 blk_size ++;
2474 end_blk_2_insn = insn;
2478 if (!insn)
2479 return insn;
2481 /* It is possible for this optimization to slow performance if the blocks
2482 are long. This really depends upon whether the branch is likely taken
2483 or not. If the branch is taken, we slow performance in many cases. But,
2484 if the branch is not taken, we always help performance (for a single
2485 block, but for a double block (i.e. when the optimization is re-applied)
2486 this is not true since the 'right thing' depends on the overall length of
2487 the collapsed block). As a compromise, don't apply this optimization on
2488 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2489 The best threshold depends on the latencies of the instructions (i.e.,
2490 the branch penalty). */
2491 if (optimize > 1 && blk_size > 2)
2492 return insn;
2494 /* At this point, we've found the start of block 3 and we know that
2495 it is the destination of the branch from block 1. Also, all
2496 instructions in the block 2 are conditionalizable. So, apply the
2497 conditionalization and delete the branch. */
2498 start_blk_3_lab = insn;
2500 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2501 insn = NEXT_INSN (insn))
2503 rtx newinsn;
2505 if (INSN_DELETED_P (insn))
2506 continue;
2508 /* Try to form a conditional variant of the instruction and emit it. */
2509 if ((newinsn = emit_new_cond_insn (insn, cond)))
2511 if (end_blk_2_insn == insn)
2512 end_blk_2_insn = newinsn;
2514 insn = newinsn;
2518 /* Note whether we will delete the label starting blk 3 when the jump
2519 gets deleted. If so, we want to re-apply this optimization at the
2520 last real instruction right before the label. */
2521 if (LABEL_NUSES (start_blk_3_lab) == 1)
2523 start_blk_3_lab = 0;
2526 /* ??? We probably should redistribute the death notes for this insn, esp.
2527 the death of cc, but it doesn't really matter this late in the game.
2528 The peepholes all use is_dead() which will find the correct death
2529 regardless of whether there is a note. */
2530 delete_insn (end_blk_1_br);
2532 if (! start_blk_3_lab)
2533 return end_blk_2_insn;
2535 /* Return the insn right after the label at the start of block 3. */
2536 return NEXT_INSN (start_blk_3_lab);
2539 /* Apply the conditionalization of blocks optimization. This is the
2540 outer loop that traverses through the insns scanning for a branch
2541 that signifies an opportunity to apply the optimization. Note that
2542 this optimization is applied late. If we could apply it earlier,
2543 say before cse 2, it may expose more optimization opportunities.
2544 But the payback probably isn't really worth the effort (we'd have
2545 to update all reg/flow/notes/links/etc to make it work - and stick it
2546 in before cse 2). */
2548 static void
2549 conditionalize_optimization (void)
2551 rtx insn;
2553 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2554 continue;
2557 static int saved_warn_return_type = -1;
2558 static int saved_warn_return_type_count = 0;
2560 /* This is to handle loads from the constant pool. */
2562 static void
2563 mcore_reorg (void)
2565 /* Reset this variable. */
2566 current_function_anonymous_args = 0;
2568 /* Restore the warn_return_type if it has been altered. */
2569 if (saved_warn_return_type != -1)
2571 /* Only restore the value if we have reached another function.
2572 The test of warn_return_type occurs in finish_function () in
2573 c-decl.c a long time after the code for the function is generated,
2574 so we need a counter to tell us when we have finished parsing that
2575 function and can restore the flag. */
2576 if (--saved_warn_return_type_count == 0)
2578 warn_return_type = saved_warn_return_type;
2579 saved_warn_return_type = -1;
2583 if (optimize == 0)
2584 return;
2586 /* Conditionalize blocks where we can. */
2587 conditionalize_optimization ();
2589 /* Literal pool generation is now pushed off until the assembler. */
2593 /* Return true if X is something that can be moved directly into r15. */
2595 bool
2596 mcore_r15_operand_p (rtx x)
2598 switch (GET_CODE (x))
2600 case CONST_INT:
2601 return mcore_const_ok_for_inline (INTVAL (x));
2603 case REG:
2604 case SUBREG:
2605 case MEM:
2606 return 1;
2608 default:
2609 return 0;
2613 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2614 directly move X into it, use r1-r14 as a temporary. */
2616 enum reg_class
2617 mcore_secondary_reload_class (enum reg_class rclass,
2618 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2620 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2621 && !mcore_r15_operand_p (x))
2622 return LRW_REGS;
2623 return NO_REGS;
2626 /* Return the reg_class to use when reloading the rtx X into the class
2627 RCLASS. If X is too complex to move directly into r15, prefer to
2628 use LRW_REGS instead. */
2630 enum reg_class
2631 mcore_reload_class (rtx x, enum reg_class rclass)
2633 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2634 return LRW_REGS;
2636 return rclass;
2639 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2640 register. Note that the current version doesn't worry about whether
2641 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2642 in r2 matches an SImode in r2). Might think in the future about whether
2643 we want to be able to say something about modes. */
2646 mcore_is_same_reg (rtx x, rtx y)
2648 /* Strip any and all of the subreg wrappers. */
2649 while (GET_CODE (x) == SUBREG)
2650 x = SUBREG_REG (x);
2652 while (GET_CODE (y) == SUBREG)
2653 y = SUBREG_REG (y);
2655 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2656 return 1;
2658 return 0;
2661 void
2662 mcore_override_options (void)
2664 /* Only the m340 supports little endian code. */
2665 if (TARGET_LITTLE_END && ! TARGET_M340)
2666 target_flags |= MASK_M340;
2669 /* Compute the number of word sized registers needed to
2670 hold a function argument of mode MODE and type TYPE. */
2673 mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2675 int size;
2677 if (targetm.calls.must_pass_in_stack (mode, type))
2678 return 0;
2680 if (type && mode == BLKmode)
2681 size = int_size_in_bytes (type);
2682 else
2683 size = GET_MODE_SIZE (mode);
2685 return ROUND_ADVANCE (size);
2688 static rtx
2689 handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2691 int size;
2693 /* The MCore ABI defines that a structure whose size is not a whole multiple
2694 of machine words is passed packed into registers (or spilled onto the stack if
2695 not enough registers are available) with the last few bytes of the
2696 structure being packed, left-justified, into the last register/stack slot.
2697 GCC handles this correctly if the last word is in a stack slot, but we
2698 have to generate a special, PARALLEL RTX if the last word is in an
2699 argument register. */
2700 if (type
2701 && TYPE_MODE (type) == BLKmode
2702 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2703 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2704 && (size % UNITS_PER_WORD != 0)
2705 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2707 rtx arg_regs [NPARM_REGS];
2708 int nregs;
2709 rtx result;
2710 rtvec rtvec;
2712 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2714 arg_regs [nregs] =
2715 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2716 GEN_INT (nregs * UNITS_PER_WORD));
2717 nregs ++;
2720 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2721 assert (ARRAY_SIZE (arg_regs) == 6);
2722 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2723 arg_regs[3], arg_regs[4], arg_regs[5]);
2725 result = gen_rtx_PARALLEL (mode, rtvec);
2726 return result;
2729 return gen_rtx_REG (mode, reg);
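/* Worked example (illustrative): a 10-byte BLKmode struct whose first word
   lands in argument register REG takes three word slots, so the code above
   builds a PARALLEL of three EXPR_LISTs referencing REG, REG+1 and REG+2 at
   byte offsets 0, 4 and 8, with the final two bytes packed, left-justified,
   into the third register.  A struct whose size is an exact multiple of
   words simply gets the plain REG rtx returned at the end.  */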
2733 mcore_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
2735 enum machine_mode mode;
2736 int unsigned_p;
2738 mode = TYPE_MODE (valtype);
2740 mode = promote_mode (valtype, mode, &unsigned_p, 1);
2742 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2745 /* Define where to put the arguments to a function.
2746 Value is zero to push the argument on the stack,
2747 or a hard register in which to store the argument.
2749 MODE is the argument's machine mode.
2750 TYPE is the data type of the argument (as a tree).
2751 This is null for libcalls where that information may
2752 not be available.
2753 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2754 the preceding args and about the function being called.
2755 NAMED is nonzero if this argument is a named parameter
2756 (otherwise it is an extra parameter matching an ellipsis).
2758 On MCore the first args are normally in registers
2759 and the rest are pushed. Any arg that starts within the first
2760 NPARM_REGS words is at least partially passed in a register unless
2761 its data type forbids. */
2764 mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
2765 tree type, int named)
2767 int arg_reg;
2769 if (! named || mode == VOIDmode)
2770 return 0;
2772 if (targetm.calls.must_pass_in_stack (mode, type))
2773 return 0;
2775 arg_reg = ROUND_REG (cum, mode);
2777 if (arg_reg < NPARM_REGS)
2778 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2780 return 0;
2783 /* Returns the number of bytes of argument registers required to hold *part*
2784 of a parameter of machine mode MODE and type TYPE (which may be NULL if
2785 the type is not known). If the argument fits entirely in the argument
2786 registers, or entirely on the stack, then 0 is returned. CUM is the
2787 number of argument registers already used by earlier parameters to
2788 the function. */
2790 static int
2791 mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2792 tree type, bool named)
2794 int reg = ROUND_REG (*cum, mode);
2796 if (named == 0)
2797 return 0;
2799 if (targetm.calls.must_pass_in_stack (mode, type))
2800 return 0;
2802 /* REG is not the *hardware* register number of the register that holds
2803 the argument, it is the *argument* register number. So for example,
2804 the first argument to a function goes in argument register 0, which
2805 translates (for the MCore) into hardware register 2. The second
2806 argument goes into argument register 1, which translates into hardware
2807 register 3, and so on. NPARM_REGS is the number of argument registers
2808 supported by the target, not the maximum hardware register number of
2809 the target. */
2810 if (reg >= NPARM_REGS)
2811 return 0;
2813 /* If the argument fits entirely in registers, return 0. */
2814 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2815 return 0;
2817 /* The argument overflows the number of available argument registers.
2818 Compute how many argument registers have not yet been assigned to
2819 hold an argument. */
2820 reg = NPARM_REGS - reg;
2822 /* Return partially in registers and partially on the stack. */
2823 return reg * UNITS_PER_WORD;
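/* Worked example (illustrative, assuming NPARM_REGS == 6 and 4-byte words):
   an argument needing three words whose first word falls in argument
   register 4 does not fit entirely (4 + 3 > 6), so the code above returns
   (6 - 4) * UNITS_PER_WORD = 8 bytes passed in registers; the remaining
   word goes onto the stack.  */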
2826 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2829 mcore_dllexport_name_p (const char * symbol)
2831 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2834 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2837 mcore_dllimport_name_p (const char * symbol)
2839 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2842 /* Mark a DECL as being dllexport'd. */
2844 static void
2845 mcore_mark_dllexport (tree decl)
2847 const char * oldname;
2848 char * newname;
2849 rtx rtlname;
2850 tree idp;
2852 rtlname = XEXP (DECL_RTL (decl), 0);
2854 if (GET_CODE (rtlname) == MEM)
2855 rtlname = XEXP (rtlname, 0);
2856 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2857 oldname = XSTR (rtlname, 0);
2859 if (mcore_dllexport_name_p (oldname))
2860 return; /* Already done. */
2862 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2863 sprintf (newname, "@e.%s", oldname);
2865 /* We pass newname through get_identifier to ensure it has a unique
2866 address. RTL processing can sometimes peek inside the symbol ref
2867 and compare the string's addresses to see if two symbols are
2868 identical. */
2869 /* ??? At least I think that's why we do this. */
2870 idp = get_identifier (newname);
2872 XEXP (DECL_RTL (decl), 0) =
2873 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2876 /* Mark a DECL as being dllimport'd. */
2878 static void
2879 mcore_mark_dllimport (tree decl)
2881 const char * oldname;
2882 char * newname;
2883 tree idp;
2884 rtx rtlname;
2885 rtx newrtl;
2887 rtlname = XEXP (DECL_RTL (decl), 0);
2889 if (GET_CODE (rtlname) == MEM)
2890 rtlname = XEXP (rtlname, 0);
2891 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2892 oldname = XSTR (rtlname, 0);
2894 gcc_assert (!mcore_dllexport_name_p (oldname));
2895 if (mcore_dllimport_name_p (oldname))
2896 return; /* Already done. */
2898 /* ??? One can well ask why we're making these checks here,
2899 and that would be a good question. */
2901 /* Imported variables can't be initialized. */
2902 if (TREE_CODE (decl) == VAR_DECL
2903 && !DECL_VIRTUAL_P (decl)
2904 && DECL_INITIAL (decl))
2906 error ("initialized variable %q+D is marked dllimport", decl);
2907 return;
2910 /* `extern' needn't be specified with dllimport.
2911 Specify `extern' now and hope for the best. Sigh. */
2912 if (TREE_CODE (decl) == VAR_DECL
2913 /* ??? Is this test for vtables needed? */
2914 && !DECL_VIRTUAL_P (decl))
2916 DECL_EXTERNAL (decl) = 1;
2917 TREE_PUBLIC (decl) = 1;
2920 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2921 sprintf (newname, "@i.__imp_%s", oldname);
2923 /* We pass newname through get_identifier to ensure it has a unique
2924 address. RTL processing can sometimes peek inside the symbol ref
2925 and compare the string's addresses to see if two symbols are
2926 identical. */
2927 /* ??? At least I think that's why we do this. */
2928 idp = get_identifier (newname);
2930 newrtl = gen_rtx_MEM (Pmode,
2931 gen_rtx_SYMBOL_REF (Pmode,
2932 IDENTIFIER_POINTER (idp)));
2933 XEXP (DECL_RTL (decl), 0) = newrtl;
2936 static int
2937 mcore_dllexport_p (tree decl)
2939 if ( TREE_CODE (decl) != VAR_DECL
2940 && TREE_CODE (decl) != FUNCTION_DECL)
2941 return 0;
2943 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2946 static int
2947 mcore_dllimport_p (tree decl)
2949 if ( TREE_CODE (decl) != VAR_DECL
2950 && TREE_CODE (decl) != FUNCTION_DECL)
2951 return 0;
2953 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2956 /* We must mark dll symbols specially. Definitions of dllexport'd objects
2957 install some info in the .drectve (PE) or .exports (ELF) sections. */
2959 static void
2960 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
2962 /* Mark the decl so we can tell from the rtl whether the object is
2963 dllexport'd or dllimport'd. */
2964 if (mcore_dllexport_p (decl))
2965 mcore_mark_dllexport (decl);
2966 else if (mcore_dllimport_p (decl))
2967 mcore_mark_dllimport (decl);
2969 /* It might be that DECL has already been marked as dllimport, but
2970 a subsequent definition nullified that. The attribute is gone
2971 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
2972 else if ((TREE_CODE (decl) == FUNCTION_DECL
2973 || TREE_CODE (decl) == VAR_DECL)
2974 && DECL_RTL (decl) != NULL_RTX
2975 && GET_CODE (DECL_RTL (decl)) == MEM
2976 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
2977 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
2978 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
2980 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
2981 tree idp = get_identifier (oldname + 9);
2982 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2984 XEXP (DECL_RTL (decl), 0) = newrtl;
2986 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
2987 ??? We leave these alone for now. */
2991 /* Undo the effects of the above. */
2993 static const char *
2994 mcore_strip_name_encoding (const char * str)
2996 return str + (str[0] == '@' ? 3 : 0);
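/* For example (illustrative names): "@e.foo" is stripped back to "foo",
   "@i.__imp_bar" becomes "__imp_bar", and an undecorated name is returned
   unchanged.  */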
2999 /* MCore specific attribute support.
3000 dllexport - for exporting a function/variable that will live in a dll
3001 dllimport - for importing a function/variable from a dll
3002 naked - do not create a function prologue/epilogue. */
3004 /* Handle a "naked" attribute; arguments as in
3005 struct attribute_spec.handler. */
3007 static tree
3008 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3009 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3011 if (TREE_CODE (*node) == FUNCTION_DECL)
3013 /* PR14310 - don't complain about lack of return statement
3014 in naked functions. The solution here is a gross hack
3015 but this is the only way to solve the problem without
3016 adding a new feature to GCC. I did try submitting a patch
3017 that would add such a new feature, but it was (rightfully)
3018 rejected on the grounds that it was creeping featurism,
3019 hence this code. */
3020 if (warn_return_type)
3022 saved_warn_return_type = warn_return_type;
3023 warn_return_type = 0;
3024 saved_warn_return_type_count = 2;
3026 else if (saved_warn_return_type_count)
3027 saved_warn_return_type_count = 2;
3029 else
3031 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3032 name);
3033 *no_add_attrs = true;
3036 return NULL_TREE;
3039 /* ??? It looks like this is PE specific? Oh well, this is what the
3040 old code did as well. */
3042 static void
3043 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3045 int len;
3046 const char * name;
3047 char * string;
3048 const char * prefix;
3050 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3052 /* Strip off any encoding in name. */
3053 name = (* targetm.strip_name_encoding) (name);
3055 /* The object is put in, for example, section .text$foo.
3056 The linker will then ultimately place it in .text
3057 (everything from the $ on is stripped). */
3058 if (TREE_CODE (decl) == FUNCTION_DECL)
3059 prefix = ".text$";
3060 /* For compatibility with EPOC, we ignore the fact that the
3061 section might have relocs against it. */
3062 else if (decl_readonly_section (decl, 0))
3063 prefix = ".rdata$";
3064 else
3065 prefix = ".data$";
3067 len = strlen (name) + strlen (prefix);
3068 string = XALLOCAVEC (char, len + 1);
3070 sprintf (string, "%s%s", prefix, name);
3072 DECL_SECTION_NAME (decl) = build_string (len, string);
3076 mcore_naked_function_p (void)
3078 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3081 #ifdef OBJECT_FORMAT_ELF
3082 static void
3083 mcore_asm_named_section (const char *name,
3084 unsigned int flags ATTRIBUTE_UNUSED,
3085 tree decl ATTRIBUTE_UNUSED)
3087 fprintf (asm_out_file, "\t.section %s\n", name);
3089 #endif /* OBJECT_FORMAT_ELF */
3091 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3093 static void
3094 mcore_external_libcall (rtx fun)
3096 fprintf (asm_out_file, "\t.import\t");
3097 assemble_name (asm_out_file, XSTR (fun, 0));
3098 fprintf (asm_out_file, "\n");
3101 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3103 static bool
3104 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3106 const HOST_WIDE_INT size = int_size_in_bytes (type);
3107 return (size == -1 || size > 2 * UNITS_PER_WORD);
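/* For example, with 4-byte words any aggregate larger than 8 bytes, or any
   type whose size cannot be determined (int_size_in_bytes returns -1), is
   returned in memory rather than in registers.  */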