1 /* Output routines for Motorola MCore processor
2 Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
3 2009, 2010, 2011 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "mcore.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "flags.h"
36 #include "obstack.h"
37 #include "expr.h"
38 #include "reload.h"
39 #include "recog.h"
40 #include "function.h"
41 #include "ggc.h"
42 #include "diagnostic-core.h"
43 #include "target.h"
44 #include "target-def.h"
45 #include "df.h"
47 /* For dumping information about frame sizes. */
48 char * mcore_current_function_name = 0;
49 long mcore_current_compilation_timestamp = 0;
51 /* Global variables for machine-dependent things. */
53 /* Provides the class number of the smallest class containing
54 reg number. */
55 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
57 GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
58 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
59 LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
60 LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
61 GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
64 struct mcore_frame
66 int arg_size; /* Stdarg spills (bytes). */
67 int reg_size; /* Non-volatile reg saves (bytes). */
68 int reg_mask; /* Non-volatile reg saves. */
69 int local_size; /* Locals. */
70 int outbound_size; /* Arg overflow on calls out. */
71 int pad_outbound;
72 int pad_local;
73 int pad_reg;
74 /* Describe the steps we'll use to grow it. */
75 #define MAX_STACK_GROWS 4 /* Gives us some spare space. */
76 int growth[MAX_STACK_GROWS];
77 int arg_offset;
78 int reg_offset;
79 int reg_growth;
80 int local_growth;
83 typedef enum
85 COND_NO,
86 COND_MOV_INSN,
87 COND_CLR_INSN,
88 COND_INC_INSN,
89 COND_DEC_INSN,
90 COND_BRANCH_INSN
92 cond_type;
94 static void output_stack_adjust (int, int);
95 static int calc_live_regs (int *);
96 static int try_constant_tricks (long, HOST_WIDE_INT *, HOST_WIDE_INT *);
97 static const char * output_inline_const (enum machine_mode, rtx *);
98 static void layout_mcore_frame (struct mcore_frame *);
99 static void mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
100 static cond_type is_cond_candidate (rtx);
101 static rtx emit_new_cond_insn (rtx, int);
102 static rtx conditionalize_block (rtx);
103 static void conditionalize_optimization (void);
104 static void mcore_reorg (void);
105 static rtx handle_structs_in_regs (enum machine_mode, const_tree, int);
106 static void mcore_mark_dllexport (tree);
107 static void mcore_mark_dllimport (tree);
108 static int mcore_dllexport_p (tree);
109 static int mcore_dllimport_p (tree);
110 static tree mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
111 #ifdef OBJECT_FORMAT_ELF
112 static void mcore_asm_named_section (const char *,
113 unsigned int, tree);
114 #endif
115 static void mcore_print_operand (FILE *, rtx, int);
116 static void mcore_print_operand_address (FILE *, rtx);
117 static bool mcore_print_operand_punct_valid_p (unsigned char code);
118 static void mcore_unique_section (tree, int);
119 static void mcore_encode_section_info (tree, rtx, int);
120 static const char *mcore_strip_name_encoding (const char *);
121 static int mcore_const_costs (rtx, RTX_CODE);
122 static int mcore_and_cost (rtx);
123 static int mcore_ior_cost (rtx);
124 static bool mcore_rtx_costs (rtx, int, int, int *, bool);
125 static void mcore_external_libcall (rtx);
126 static bool mcore_return_in_memory (const_tree, const_tree);
127 static int mcore_arg_partial_bytes (CUMULATIVE_ARGS *,
128 enum machine_mode,
129 tree, bool);
130 static rtx mcore_function_arg (CUMULATIVE_ARGS *,
131 enum machine_mode,
132 const_tree, bool);
133 static void mcore_function_arg_advance (CUMULATIVE_ARGS *,
134 enum machine_mode,
135 const_tree, bool);
136 static unsigned int mcore_function_arg_boundary (enum machine_mode,
137 const_tree);
138 static void mcore_asm_trampoline_template (FILE *);
139 static void mcore_trampoline_init (rtx, tree, rtx);
140 static void mcore_option_override (void);
141 static bool mcore_legitimate_constant_p (enum machine_mode, rtx);
143 /* MCore specific attributes. */
145 static const struct attribute_spec mcore_attribute_table[] =
147 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
148 affects_type_identity } */
149 { "dllexport", 0, 0, true, false, false, NULL, false },
150 { "dllimport", 0, 0, true, false, false, NULL, false },
151 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute,
152 false },
153 { NULL, 0, 0, false, false, false, NULL, false }
156 /* Initialize the GCC target structure. */
157 #undef TARGET_ASM_EXTERNAL_LIBCALL
158 #define TARGET_ASM_EXTERNAL_LIBCALL mcore_external_libcall
160 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
161 #undef TARGET_MERGE_DECL_ATTRIBUTES
162 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
163 #endif
165 #ifdef OBJECT_FORMAT_ELF
166 #undef TARGET_ASM_UNALIGNED_HI_OP
167 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
168 #undef TARGET_ASM_UNALIGNED_SI_OP
169 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
170 #endif
172 #undef TARGET_PRINT_OPERAND
173 #define TARGET_PRINT_OPERAND mcore_print_operand
174 #undef TARGET_PRINT_OPERAND_ADDRESS
175 #define TARGET_PRINT_OPERAND_ADDRESS mcore_print_operand_address
176 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
177 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
179 #undef TARGET_ATTRIBUTE_TABLE
180 #define TARGET_ATTRIBUTE_TABLE mcore_attribute_table
181 #undef TARGET_ASM_UNIQUE_SECTION
182 #define TARGET_ASM_UNIQUE_SECTION mcore_unique_section
183 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
184 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
185 #undef TARGET_ENCODE_SECTION_INFO
186 #define TARGET_ENCODE_SECTION_INFO mcore_encode_section_info
187 #undef TARGET_STRIP_NAME_ENCODING
188 #define TARGET_STRIP_NAME_ENCODING mcore_strip_name_encoding
189 #undef TARGET_RTX_COSTS
190 #define TARGET_RTX_COSTS mcore_rtx_costs
191 #undef TARGET_ADDRESS_COST
192 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
193 #undef TARGET_MACHINE_DEPENDENT_REORG
194 #define TARGET_MACHINE_DEPENDENT_REORG mcore_reorg
196 #undef TARGET_PROMOTE_FUNCTION_MODE
197 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
198 #undef TARGET_PROMOTE_PROTOTYPES
199 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
201 #undef TARGET_RETURN_IN_MEMORY
202 #define TARGET_RETURN_IN_MEMORY mcore_return_in_memory
203 #undef TARGET_MUST_PASS_IN_STACK
204 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
205 #undef TARGET_PASS_BY_REFERENCE
206 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
207 #undef TARGET_ARG_PARTIAL_BYTES
208 #define TARGET_ARG_PARTIAL_BYTES mcore_arg_partial_bytes
209 #undef TARGET_FUNCTION_ARG
210 #define TARGET_FUNCTION_ARG mcore_function_arg
211 #undef TARGET_FUNCTION_ARG_ADVANCE
212 #define TARGET_FUNCTION_ARG_ADVANCE mcore_function_arg_advance
213 #undef TARGET_FUNCTION_ARG_BOUNDARY
214 #define TARGET_FUNCTION_ARG_BOUNDARY mcore_function_arg_boundary
216 #undef TARGET_SETUP_INCOMING_VARARGS
217 #define TARGET_SETUP_INCOMING_VARARGS mcore_setup_incoming_varargs
219 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
220 #define TARGET_ASM_TRAMPOLINE_TEMPLATE mcore_asm_trampoline_template
221 #undef TARGET_TRAMPOLINE_INIT
222 #define TARGET_TRAMPOLINE_INIT mcore_trampoline_init
224 #undef TARGET_OPTION_OVERRIDE
225 #define TARGET_OPTION_OVERRIDE mcore_option_override
227 #undef TARGET_LEGITIMATE_CONSTANT_P
228 #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
230 struct gcc_target targetm = TARGET_INITIALIZER;
232 /* Adjust the stack by SIZE bytes in DIRECTION: -1 grows the stack, +1 shrinks it.  */
233 static void
234 output_stack_adjust (int direction, int size)
236 /* If extending stack a lot, we do it incrementally. */
237 if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
239 rtx tmp = gen_rtx_REG (SImode, 1);
240 rtx memref;
242 emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
245 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
246 memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
247 MEM_VOLATILE_P (memref) = 1;
248 emit_insn (gen_movsi (memref, stack_pointer_rtx));
249 size -= mcore_stack_increment;
251 while (size > mcore_stack_increment);
253 /* SIZE is now the residual for the last adjustment,
254 which doesn't require a probe. */
257 if (size)
259 rtx insn;
260 rtx val = GEN_INT (size);
262 if (size > 32)
264 rtx nval = gen_rtx_REG (SImode, 1);
265 emit_insn (gen_movsi (nval, val));
266 val = nval;
269 if (direction > 0)
270 insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
271 else
272 insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
274 emit_insn (insn);
278 /* Work out the registers which need to be saved,
279 both as a mask and a count. */
281 static int
282 calc_live_regs (int * count)
284 int reg;
285 int live_regs_mask = 0;
287 * count = 0;
289 for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
291 if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
293 (*count)++;
294 live_regs_mask |= (1 << reg);
298 return live_regs_mask;
301 /* Print the operand address in x to the stream. */
303 static void
304 mcore_print_operand_address (FILE * stream, rtx x)
306 switch (GET_CODE (x))
308 case REG:
309 fprintf (stream, "(%s)", reg_names[REGNO (x)]);
310 break;
312 case PLUS:
314 rtx base = XEXP (x, 0);
315 rtx index = XEXP (x, 1);
317 if (GET_CODE (base) != REG)
319 /* Ensure that BASE is a register (one of them must be). */
320 rtx temp = base;
321 base = index;
322 index = temp;
325 switch (GET_CODE (index))
327 case CONST_INT:
328 fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
329 reg_names[REGNO(base)], INTVAL (index));
330 break;
332 default:
333 gcc_unreachable ();
337 break;
339 default:
340 output_addr_const (stream, x);
341 break;
345 static bool
346 mcore_print_operand_punct_valid_p (unsigned char code)
348 return (code == '.' || code == '#' || code == '*' || code == '^'
349 || code == '!');
352 /* Print operand x (an rtx) in assembler syntax to file stream
353 according to modifier code.
355 'R' print the next register or memory location along, i.e. the lsw in
356 a double word value
357 'O' print a constant without the #
358 'M' print a constant as its negative
359 'P' print log2 of a power of two
360 'Q' print log2 of an inverse of a power of two
361 'U' print register for ldm/stm instruction
362 'X' print byte number for xtrbN instruction
'N' print the bmaski width of a constant (log2 of the constant plus one, or 32 for -1)
'x' print a constant in hex.  */
364 static void
365 mcore_print_operand (FILE * stream, rtx x, int code)
367 switch (code)
369 case 'N':
370 if (INTVAL(x) == -1)
371 fprintf (asm_out_file, "32");
372 else
373 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
374 break;
375 case 'P':
376 fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
377 break;
378 case 'Q':
379 fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
380 break;
381 case 'O':
382 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
383 break;
384 case 'M':
385 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
386 break;
387 case 'R':
388 /* Next location along in memory or register. */
389 switch (GET_CODE (x))
391 case REG:
392 fputs (reg_names[REGNO (x) + 1], (stream));
393 break;
394 case MEM:
395 mcore_print_operand_address
396 (stream, XEXP (adjust_address (x, SImode, 4), 0));
397 break;
398 default:
399 gcc_unreachable ();
401 break;
402 case 'U':
403 fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
404 reg_names[REGNO (x) + 3]);
405 break;
406 case 'x':
407 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
408 break;
409 case 'X':
410 fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
411 break;
413 default:
414 switch (GET_CODE (x))
416 case REG:
417 fputs (reg_names[REGNO (x)], (stream));
418 break;
419 case MEM:
420 output_address (XEXP (x, 0));
421 break;
422 default:
423 output_addr_const (stream, x);
424 break;
426 break;
430 /* What does a constant cost ? */
432 static int
433 mcore_const_costs (rtx exp, enum rtx_code code)
435 HOST_WIDE_INT val = INTVAL (exp);
437 /* Easy constants. */
438 if ( CONST_OK_FOR_I (val)
439 || CONST_OK_FOR_M (val)
440 || CONST_OK_FOR_N (val)
441 || (code == PLUS && CONST_OK_FOR_L (val)))
442 return 1;
443 else if (code == AND
444 && ( CONST_OK_FOR_M (~val)
445 || CONST_OK_FOR_N (~val)))
446 return 2;
447 else if (code == PLUS
448 && ( CONST_OK_FOR_I (-val)
449 || CONST_OK_FOR_M (-val)
450 || CONST_OK_FOR_N (-val)))
451 return 2;
453 return 5;
456 /* What does an and instruction cost?  We do this because immediates may
457 have been relaxed.  We want to ensure that cse will cse relaxed immediates
458 out.  Otherwise we'll get bad code (multiple reloads of the same const).  */
460 static int
461 mcore_and_cost (rtx x)
463 HOST_WIDE_INT val;
465 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
466 return 2;
468 val = INTVAL (XEXP (x, 1));
470 /* Do it directly. */
471 if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
472 return 2;
473 /* Takes one instruction to load. */
474 else if (const_ok_for_mcore (val))
475 return 3;
476 /* Takes two instructions to load. */
477 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
478 return 4;
480 /* Takes a lrw to load. */
481 return 5;
484 /* What does an or instruction cost?  See mcore_and_cost() above.  */
486 static int
487 mcore_ior_cost (rtx x)
489 HOST_WIDE_INT val;
491 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
492 return 2;
494 val = INTVAL (XEXP (x, 1));
496 /* Do it directly with bclri. */
497 if (CONST_OK_FOR_M (val))
498 return 2;
499 /* Takes one instruction to load. */
500 else if (const_ok_for_mcore (val))
501 return 3;
502 /* Takes two instructions to load. */
503 else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
504 return 4;
506 /* Takes a lrw to load. */
507 return 5;
510 static bool
511 mcore_rtx_costs (rtx x, int code, int outer_code, int * total,
512 bool speed ATTRIBUTE_UNUSED)
514 switch (code)
516 case CONST_INT:
517 *total = mcore_const_costs (x, (enum rtx_code) outer_code);
518 return true;
519 case CONST:
520 case LABEL_REF:
521 case SYMBOL_REF:
522 *total = 5;
523 return true;
524 case CONST_DOUBLE:
525 *total = 10;
526 return true;
528 case AND:
529 *total = COSTS_N_INSNS (mcore_and_cost (x));
530 return true;
532 case IOR:
533 *total = COSTS_N_INSNS (mcore_ior_cost (x));
534 return true;
536 case DIV:
537 case UDIV:
538 case MOD:
539 case UMOD:
540 case FLOAT:
541 case FIX:
542 *total = COSTS_N_INSNS (100);
543 return true;
545 default:
546 return false;
550 /* Prepare the operands for a comparison. Return whether the branch/setcc
551 should reverse the operands. */
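/* For example, an EQ test is emitted below as its inverse (cmpne) and INVERT
   is returned as true, so the caller uses the complementary branch/setcc form.  */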
553 bool
554 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
556 rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
557 bool invert;
559 if (GET_CODE (op1) == CONST_INT)
561 HOST_WIDE_INT val = INTVAL (op1);
563 switch (code)
565 case GTU:
566 /* Unsigned > 0 is the same as != 0; everything else is converted
567 below to LEU (reversed cmphs). */
568 if (val == 0)
569 code = NE;
570 break;
572 /* Check whether (LE A imm) can become (LT A imm + 1),
573 or (GT A imm) can become (GE A imm + 1). */
574 case GT:
575 case LE:
576 if (CONST_OK_FOR_J (val + 1))
578 op1 = GEN_INT (val + 1);
579 code = code == LE ? LT : GE;
581 break;
583 default:
584 break;
588 if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
589 op1 = force_reg (SImode, op1);
591 /* cmpnei: 0-31 (K immediate)
592 cmplti: 1-32 (J immediate, 0 using btsti x,31). */
593 invert = false;
594 switch (code)
596 case EQ: /* Use inverted condition, cmpne. */
597 code = NE;
598 invert = true;
599 /* Drop through. */
601 case NE: /* Use normal condition, cmpne. */
602 if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
603 op1 = force_reg (SImode, op1);
604 break;
606 case LE: /* Use inverted condition, reversed cmplt. */
607 code = GT;
608 invert = true;
609 /* Drop through. */
611 case GT: /* Use normal condition, reversed cmplt. */
612 if (GET_CODE (op1) == CONST_INT)
613 op1 = force_reg (SImode, op1);
614 break;
616 case GE: /* Use inverted condition, cmplt. */
617 code = LT;
618 invert = true;
619 /* Drop through. */
621 case LT: /* Use normal condition, cmplt. */
622 if (GET_CODE (op1) == CONST_INT &&
623 /* covered by btsti x,31. */
624 INTVAL (op1) != 0 &&
625 ! CONST_OK_FOR_J (INTVAL (op1)))
626 op1 = force_reg (SImode, op1);
627 break;
629 case GTU: /* Use inverted condition, cmple. */
630 /* We coped with unsigned > 0 above. */
631 gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
632 code = LEU;
633 invert = true;
634 /* Drop through. */
636 case LEU: /* Use normal condition, reversed cmphs. */
637 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
638 op1 = force_reg (SImode, op1);
639 break;
641 case LTU: /* Use inverted condition, cmphs. */
642 code = GEU;
643 invert = true;
644 /* Drop through. */
646 case GEU: /* Use normal condition, cmphs. */
647 if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
648 op1 = force_reg (SImode, op1);
649 break;
651 default:
652 break;
655 emit_insn (gen_rtx_SET (VOIDmode,
656 cc_reg,
657 gen_rtx_fmt_ee (code, CCmode, op0, op1)));
658 return invert;
662 mcore_symbolic_address_p (rtx x)
664 switch (GET_CODE (x))
666 case SYMBOL_REF:
667 case LABEL_REF:
668 return 1;
669 case CONST:
670 x = XEXP (x, 0);
671 return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
672 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
673 && GET_CODE (XEXP (x, 1)) == CONST_INT);
674 default:
675 return 0;
679 /* Functions to output assembly code for a function call. */
681 char *
682 mcore_output_call (rtx operands[], int index)
684 static char buffer[20];
685 rtx addr = operands [index];
687 if (REG_P (addr))
689 if (TARGET_CG_DATA)
691 gcc_assert (mcore_current_function_name);
693 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
694 "unknown", 1);
697 sprintf (buffer, "jsr\t%%%d", index);
699 else
701 if (TARGET_CG_DATA)
703 gcc_assert (mcore_current_function_name);
704 gcc_assert (GET_CODE (addr) == SYMBOL_REF);
706 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
707 XSTR (addr, 0), 0);
710 sprintf (buffer, "jbsr\t%%%d", index);
713 return buffer;
716 /* Can we load a constant with a single instruction ? */
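/* In other words: movi covers 0..127, bgeni an exact power of two, and bmaski
   a (power of two - 1) value; output_inline_const below relies on this mapping.  */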
719 const_ok_for_mcore (HOST_WIDE_INT value)
721 if (value >= 0 && value <= 127)
722 return 1;
724 /* Try exact power of two. */
725 if (CONST_OK_FOR_M (value))
726 return 1;
728 /* Try exact power of two - 1. */
729 if (CONST_OK_FOR_N (value) && value != -1)
730 return 1;
732 return 0;
735 /* Can we load a constant inline with up to 2 instructions ? */
738 mcore_const_ok_for_inline (HOST_WIDE_INT value)
740 HOST_WIDE_INT x, y;
742 return try_constant_tricks (value, & x, & y) > 0;
745 /* Are we loading the constant using a not ? */
748 mcore_const_trick_uses_not (HOST_WIDE_INT value)
750 HOST_WIDE_INT x, y;
752 return try_constant_tricks (value, & x, & y) == 2;
755 /* Try tricks to load a constant inline and return the trick number on
756 success (0 means the constant is not inlinable).
758 0: not inlinable
759 1: single instruction (do the usual thing)
760 2: single insn followed by a 'not'
761 3: single insn followed by an addi
762 4: single insn followed by a subi
763 5: single insn followed by rsubi
764 6: single insn followed by bseti
765 7: single insn followed by bclri
766 8: single insn followed by rotli
767 9: single insn followed by lsli
768 10: single insn followed by ixh
769 11: single insn followed by ixw. */
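/* Worked example (requires TARGET_HARDLIT): 129 is not loadable directly, but
   129 - 1 == 128 is an exact power of two, so the function returns 3 with
   *X == 128 and *Y == 1, i.e. a bgeni followed by an addi of 1.  */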
771 static int
772 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
774 HOST_WIDE_INT i;
775 unsigned HOST_WIDE_INT bit, shf, rot;
777 if (const_ok_for_mcore (value))
778 return 1; /* Do the usual thing. */
780 if (! TARGET_HARDLIT)
781 return 0;
783 if (const_ok_for_mcore (~value))
785 *x = ~value;
786 return 2;
789 for (i = 1; i <= 32; i++)
791 if (const_ok_for_mcore (value - i))
793 *x = value - i;
794 *y = i;
796 return 3;
799 if (const_ok_for_mcore (value + i))
801 *x = value + i;
802 *y = i;
804 return 4;
808 bit = 0x80000000ULL;
810 for (i = 0; i <= 31; i++)
812 if (const_ok_for_mcore (i - value))
814 *x = i - value;
815 *y = i;
817 return 5;
820 if (const_ok_for_mcore (value & ~bit))
822 *y = bit;
823 *x = value & ~bit;
824 return 6;
827 if (const_ok_for_mcore (value | bit))
829 *y = ~bit;
830 *x = value | bit;
832 return 7;
835 bit >>= 1;
838 shf = value;
839 rot = value;
841 for (i = 1; i < 31; i++)
843 int c;
845 /* MCore has rotate left. */
846 c = rot << 31;
847 rot >>= 1;
848 rot &= 0x7FFFFFFF;
849 rot |= c; /* Simulate rotate. */
851 if (const_ok_for_mcore (rot))
853 *y = i;
854 *x = rot;
856 return 8;
859 if (shf & 1)
860 shf = 0; /* Can't use logical shift, low order bit is one. */
862 shf >>= 1;
864 if (shf != 0 && const_ok_for_mcore (shf))
866 *y = i;
867 *x = shf;
869 return 9;
873 if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
875 *x = value / 3;
877 return 10;
880 if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
882 *x = value / 5;
884 return 11;
887 return 0;
890 /* Check whether REG is dead at insn FIRST.  This is done by searching ahead
891 for either the next use (i.e., reg is live), a death note, or a set of
892 reg. Don't just use dead_or_set_p() since reload does not always mark
893 deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
894 can ignore subregs by extracting the actual register. BRC */
897 mcore_is_dead (rtx first, rtx reg)
899 rtx insn;
901 /* For mcore, subregs can't live independently of their parent regs. */
902 if (GET_CODE (reg) == SUBREG)
903 reg = SUBREG_REG (reg);
905 /* Dies immediately. */
906 if (dead_or_set_p (first, reg))
907 return 1;
909 /* Look for conclusive evidence of live/death, otherwise we have
910 to assume that it is live. */
911 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
913 if (GET_CODE (insn) == JUMP_INSN)
914 return 0; /* We lose track, assume it is alive. */
916 else if (GET_CODE(insn) == CALL_INSN)
918 /* Calls might use it for the target or for register parms.  */
919 if (reg_referenced_p (reg, PATTERN (insn))
920 || find_reg_fusage (insn, USE, reg))
921 return 0;
922 else if (dead_or_set_p (insn, reg))
923 return 1;
925 else if (GET_CODE (insn) == INSN)
927 if (reg_referenced_p (reg, PATTERN (insn)))
928 return 0;
929 else if (dead_or_set_p (insn, reg))
930 return 1;
934 /* No conclusive evidence either way, we cannot take the chance
935 that control flow hid the use from us -- "I'm not dead yet". */
936 return 0;
939 /* Count the number of ones in mask. */
942 mcore_num_ones (HOST_WIDE_INT mask)
944 /* A trick to count set bits recently posted on comp.compilers. */
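/* Illustration: for mask == 0xF0 the reductions below give 0xA0, 0x40, 0x04,
   0x04, and the final sum returns 4.  */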
945 mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
946 mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
947 mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
948 mask = ((mask >> 8) + mask);
950 return (mask + (mask >> 16)) & 0xff;
953 /* Count the number of zeros in mask. */
956 mcore_num_zeros (HOST_WIDE_INT mask)
958 return 32 - mcore_num_ones (mask);
961 /* Determine byte being masked. */
964 mcore_byte_offset (unsigned int mask)
966 if (mask == 0x00ffffffL)
967 return 0;
968 else if (mask == 0xff00ffffL)
969 return 1;
970 else if (mask == 0xffff00ffL)
971 return 2;
972 else if (mask == 0xffffff00L)
973 return 3;
975 return -1;
978 /* Determine halfword being masked. */
981 mcore_halfword_offset (unsigned int mask)
983 if (mask == 0x0000ffffL)
984 return 0;
985 else if (mask == 0xffff0000L)
986 return 1;
988 return -1;
991 /* Output a series of bseti's corresponding to mask. */
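/* For example, a MASK of 0x11 emits a bseti for bit 0 and a bseti for bit 4.  */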
993 const char *
994 mcore_output_bseti (rtx dst, int mask)
996 rtx out_operands[2];
997 int bit;
999 out_operands[0] = dst;
1001 for (bit = 0; bit < 32; bit++)
1003 if ((mask & 0x1) == 0x1)
1005 out_operands[1] = GEN_INT (bit);
1007 output_asm_insn ("bseti\t%0,%1", out_operands);
1009 mask >>= 1;
1012 return "";
1015 /* Output a series of bclri's corresponding to mask. */
1017 const char *
1018 mcore_output_bclri (rtx dst, int mask)
1020 rtx out_operands[2];
1021 int bit;
1023 out_operands[0] = dst;
1025 for (bit = 0; bit < 32; bit++)
1027 if ((mask & 0x1) == 0x0)
1029 out_operands[1] = GEN_INT (bit);
1031 output_asm_insn ("bclri\t%0,%1", out_operands);
1034 mask >>= 1;
1037 return "";
1040 /* Output a conditional move of two constants that are within +/- 1 of each
1041 other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
1042 really worth the effort. */
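/* For example, with operands[1] == 10 and operands[2] == 11, the 10 is loaded
   with a movi and then an inct or incf (chosen from CMP_T) adjusts it to 11.  */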
1044 const char *
1045 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1047 HOST_WIDE_INT load_value;
1048 HOST_WIDE_INT adjust_value;
1049 rtx out_operands[4];
1051 out_operands[0] = operands[0];
1053 /* Check to see which constant is loadable. */
1054 if (const_ok_for_mcore (INTVAL (operands[1])))
1056 out_operands[1] = operands[1];
1057 out_operands[2] = operands[2];
1059 else if (const_ok_for_mcore (INTVAL (operands[2])))
1061 out_operands[1] = operands[2];
1062 out_operands[2] = operands[1];
1064 /* Complement test since constants are swapped. */
1065 cmp_t = (cmp_t == 0);
1067 load_value = INTVAL (out_operands[1]);
1068 adjust_value = INTVAL (out_operands[2]);
1070 /* First output the test if folded into the pattern. */
1072 if (test)
1073 output_asm_insn (test, operands);
1075 /* Load the constant - for now, only support constants that can be
1076 generated with a single instruction.  Maybe add general inlinable
1077 constants later (this will increase the # of patterns since the
1078 instruction sequence has a different length attribute). */
1079 if (load_value >= 0 && load_value <= 127)
1080 output_asm_insn ("movi\t%0,%1", out_operands);
1081 else if (CONST_OK_FOR_M (load_value))
1082 output_asm_insn ("bgeni\t%0,%P1", out_operands);
1083 else if (CONST_OK_FOR_N (load_value))
1084 output_asm_insn ("bmaski\t%0,%N1", out_operands);
1086 /* Output the constant adjustment. */
1087 if (load_value > adjust_value)
1089 if (cmp_t)
1090 output_asm_insn ("decf\t%0", out_operands);
1091 else
1092 output_asm_insn ("dect\t%0", out_operands);
1094 else
1096 if (cmp_t)
1097 output_asm_insn ("incf\t%0", out_operands);
1098 else
1099 output_asm_insn ("inct\t%0", out_operands);
1102 return "";
1105 /* Outputs the peephole for moving a constant that gets not'ed followed
1106 by an and (i.e. combine the not and the and into andn). BRC */
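/* For example, the constant 0xffffff00 (-256) is not loadable directly, but its
   complement 0xff is (a bmaski), so the output is that load followed by an andn.  */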
1108 const char *
1109 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1111 HOST_WIDE_INT x, y;
1112 rtx out_operands[3];
1113 const char * load_op;
1114 char buf[256];
1115 int trick_no;
1117 trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1118 gcc_assert (trick_no == 2);
1120 out_operands[0] = operands[0];
1121 out_operands[1] = GEN_INT (x);
1122 out_operands[2] = operands[2];
1124 if (x >= 0 && x <= 127)
1125 load_op = "movi\t%0,%1";
1127 /* Try exact power of two. */
1128 else if (CONST_OK_FOR_M (x))
1129 load_op = "bgeni\t%0,%P1";
1131 /* Try exact power of two - 1. */
1132 else if (CONST_OK_FOR_N (x))
1133 load_op = "bmaski\t%0,%N1";
1135 else
1137 load_op = "BADMOVI-andn\t%0, %1";
1138 gcc_unreachable ();
1141 sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1142 output_asm_insn (buf, out_operands);
1144 return "";
1147 /* Output an inline constant. */
1149 static const char *
1150 output_inline_const (enum machine_mode mode, rtx operands[])
1152 HOST_WIDE_INT x = 0, y = 0;
1153 int trick_no;
1154 rtx out_operands[3];
1155 char buf[256];
1156 char load_op[256];
1157 const char *dst_fmt;
1158 HOST_WIDE_INT value;
1160 value = INTVAL (operands[1]);
1162 trick_no = try_constant_tricks (value, &x, &y);
1163 /* lrw's are handled separately: Large inlinable constants never get
1164 turned into lrw's. Our caller uses try_constant_tricks to back
1165 off to an lrw rather than calling this routine. */
1166 gcc_assert (trick_no != 0);
1168 if (trick_no == 1)
1169 x = value;
1171 /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment. */
1172 out_operands[0] = operands[0];
1173 out_operands[1] = GEN_INT (x);
1175 if (trick_no > 2)
1176 out_operands[2] = GEN_INT (y);
1178 /* Select dst format based on mode. */
1179 if (mode == DImode && (! TARGET_LITTLE_END))
1180 dst_fmt = "%R0";
1181 else
1182 dst_fmt = "%0";
1184 if (x >= 0 && x <= 127)
1185 sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1187 /* Try exact power of two. */
1188 else if (CONST_OK_FOR_M (x))
1189 sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1191 /* Try exact power of two - 1. */
1192 else if (CONST_OK_FOR_N (x))
1193 sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1195 else
1197 sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1198 gcc_unreachable ();
1201 switch (trick_no)
1203 case 1:
1204 strcpy (buf, load_op);
1205 break;
1206 case 2: /* not */
1207 sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1208 break;
1209 case 3: /* add */
1210 sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1211 break;
1212 case 4: /* sub */
1213 sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1214 break;
1215 case 5: /* rsub */
1216 /* Never happens unless -mrsubi, see try_constant_tricks(). */
1217 sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1218 break;
1219 case 6: /* bseti */
1220 sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1221 break;
1222 case 7: /* bclr */
1223 sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1224 break;
1225 case 8: /* rotl */
1226 sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1227 break;
1228 case 9: /* lsl */
1229 sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1230 break;
1231 case 10: /* ixh */
1232 sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1233 break;
1234 case 11: /* ixw */
1235 sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1236 break;
1237 default:
1238 return "";
1241 output_asm_insn (buf, out_operands);
1243 return "";
1246 /* Output a move of a value that is one word or smaller.  */
1248 const char *
1249 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1250 enum machine_mode mode ATTRIBUTE_UNUSED)
1252 rtx dst = operands[0];
1253 rtx src = operands[1];
1255 if (GET_CODE (dst) == REG)
1257 if (GET_CODE (src) == REG)
1259 if (REGNO (src) == CC_REG) /* r-c */
1260 return "mvc\t%0";
1261 else
1262 return "mov\t%0,%1"; /* r-r*/
1264 else if (GET_CODE (src) == MEM)
1266 if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1267 return "lrw\t%0,[%1]"; /* a-R */
1268 else
1269 switch (GET_MODE (src)) /* r-m */
1271 case SImode:
1272 return "ldw\t%0,%1";
1273 case HImode:
1274 return "ld.h\t%0,%1";
1275 case QImode:
1276 return "ld.b\t%0,%1";
1277 default:
1278 gcc_unreachable ();
1281 else if (GET_CODE (src) == CONST_INT)
1283 HOST_WIDE_INT x, y;
1285 if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
1286 return "movi\t%0,%1";
1287 else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
1288 return "bgeni\t%0,%P1\t// %1 %x1";
1289 else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
1290 return "bmaski\t%0,%N1\t// %1 %x1";
1291 else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
1292 return output_inline_const (SImode, operands); /* 1-2 insns */
1293 else
1294 return "lrw\t%0,%x1\t// %1"; /* Get it from literal pool. */
1296 else
1297 return "lrw\t%0, %1"; /* Into the literal pool. */
1299 else if (GET_CODE (dst) == MEM) /* m-r */
1300 switch (GET_MODE (dst))
1302 case SImode:
1303 return "stw\t%1,%0";
1304 case HImode:
1305 return "st.h\t%1,%0";
1306 case QImode:
1307 return "st.b\t%1,%0";
1308 default:
1309 gcc_unreachable ();
1312 gcc_unreachable ();
1315 /* Return a sequence of instructions to perform DI or DF move.
1316 Since the MCORE cannot move a DI or DF in one instruction, we have
1317 to take care when we see overlapping source and dest registers. */
1319 const char *
1320 mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
1322 rtx dst = operands[0];
1323 rtx src = operands[1];
1325 if (GET_CODE (dst) == REG)
1327 if (GET_CODE (src) == REG)
1329 int dstreg = REGNO (dst);
1330 int srcreg = REGNO (src);
1332 /* Ensure the second source is not overwritten.  */
1333 if (srcreg + 1 == dstreg)
1334 return "mov %R0,%R1\n\tmov %0,%1";
1335 else
1336 return "mov %0,%1\n\tmov %R0,%R1";
1338 else if (GET_CODE (src) == MEM)
1340 rtx memexp = XEXP (src, 0);
1341 int dstreg = REGNO (dst);
1342 int basereg = -1;
1344 if (GET_CODE (memexp) == LABEL_REF)
1345 return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1346 else if (GET_CODE (memexp) == REG)
1347 basereg = REGNO (memexp);
1348 else if (GET_CODE (memexp) == PLUS)
1350 if (GET_CODE (XEXP (memexp, 0)) == REG)
1351 basereg = REGNO (XEXP (memexp, 0));
1352 else if (GET_CODE (XEXP (memexp, 1)) == REG)
1353 basereg = REGNO (XEXP (memexp, 1));
1354 else
1355 gcc_unreachable ();
1357 else
1358 gcc_unreachable ();
1360 /* ??? length attribute is wrong here. */
1361 if (dstreg == basereg)
1363 /* Just load them in reverse order. */
1364 return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1366 /* XXX: alternative: move basereg to basereg+1
1367 and then fall through. */
1369 else
1370 return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1372 else if (GET_CODE (src) == CONST_INT)
1374 if (TARGET_LITTLE_END)
1376 if (CONST_OK_FOR_I (INTVAL (src)))
1377 output_asm_insn ("movi %0,%1", operands);
1378 else if (CONST_OK_FOR_M (INTVAL (src)))
1379 output_asm_insn ("bgeni %0,%P1", operands);
1380 else if (CONST_OK_FOR_N (INTVAL (src)))
1381 output_asm_insn ("bmaski %0,%N1", operands);
1382 else
1383 gcc_unreachable ();
1385 if (INTVAL (src) < 0)
1386 return "bmaski %R0,32";
1387 else
1388 return "movi %R0,0";
1390 else
1392 if (CONST_OK_FOR_I (INTVAL (src)))
1393 output_asm_insn ("movi %R0,%1", operands);
1394 else if (CONST_OK_FOR_M (INTVAL (src)))
1395 output_asm_insn ("bgeni %R0,%P1", operands);
1396 else if (CONST_OK_FOR_N (INTVAL (src)))
1397 output_asm_insn ("bmaski %R0,%N1", operands);
1398 else
1399 gcc_unreachable ();
1401 if (INTVAL (src) < 0)
1402 return "bmaski %0,32";
1403 else
1404 return "movi %0,0";
1407 else
1408 gcc_unreachable ();
1410 else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1411 return "stw\t%1,%0\n\tstw\t%R1,%R0";
1412 else
1413 gcc_unreachable ();
1416 /* Predicates used by the templates. */
1419 mcore_arith_S_operand (rtx op)
1421 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1422 return 1;
1424 return 0;
1427 /* Expand insert bit field. BRC */
1430 mcore_expand_insv (rtx operands[])
1432 int width = INTVAL (operands[1]);
1433 int posn = INTVAL (operands[2]);
1434 int mask;
1435 rtx mreg, sreg, ereg;
1437 /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1438 for width==1 must be removed. Look around line 368. This is something
1439 we really want the md part to do. */
1440 if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1442 /* Do directly with bseti or bclri. */
1443 /* RBE: 2/97 consider only low bit of constant. */
1444 if ((INTVAL (operands[3]) & 1) == 0)
1446 mask = ~(1 << posn);
1447 emit_insn (gen_rtx_SET (SImode, operands[0],
1448 gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
1450 else
1452 mask = 1 << posn;
1453 emit_insn (gen_rtx_SET (SImode, operands[0],
1454 gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
1457 return 1;
1460 /* Look at some bit-field placements that we aren't interested
1461 in handling ourselves, unless specifically directed to do so. */
1462 if (! TARGET_W_FIELD)
1463 return 0; /* Generally, give up about now. */
1465 if (width == 8 && posn % 8 == 0)
1466 /* Byte sized and aligned; let caller break it up. */
1467 return 0;
1469 if (width == 16 && posn % 16 == 0)
1470 /* Short sized and aligned; let caller break it up. */
1471 return 0;
1473 /* The general case - we can do this a little bit better than what the
1474 machine independent part tries. This will get rid of all the subregs
1475 that mess up constant folding in combine when working with relaxed
1476 immediates. */
1478 /* If setting the entire field, do it directly. */
1479 if (GET_CODE (operands[3]) == CONST_INT
1480 && INTVAL (operands[3]) == ((1 << width) - 1))
1482 mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1483 emit_insn (gen_rtx_SET (SImode, operands[0],
1484 gen_rtx_IOR (SImode, operands[0], mreg)));
1485 return 1;
1488 /* Generate the clear mask. */
1489 mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1491 /* Clear the field, to overlay it later with the source. */
1492 emit_insn (gen_rtx_SET (SImode, operands[0],
1493 gen_rtx_AND (SImode, operands[0], mreg)));
1495 /* If the source is constant 0, we've nothing to add back. */
1496 if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1497 return 1;
1499 /* XXX: Should we worry about more games with constant values?
1500 We've covered the high profile: set/clear single-bit and many-bit
1501 fields. How often do we see "arbitrary bit pattern" constants? */
1502 sreg = copy_to_mode_reg (SImode, operands[3]);
1504 /* Extract src as same width as dst (needed for signed values). We
1505 always have to do this since we widen everything to SImode.
1506 We don't have to mask if we're shifting this up against the
1507 MSB of the register (i.e., the shift will push out any hi-order
1508 bits).  */
1509 if (width + posn != (int) GET_MODE_SIZE (SImode))
1511 ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1512 emit_insn (gen_rtx_SET (SImode, sreg,
1513 gen_rtx_AND (SImode, sreg, ereg)));
1516 /* Insert source value in dest. */
1517 if (posn != 0)
1518 emit_insn (gen_rtx_SET (SImode, sreg,
1519 gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
1521 emit_insn (gen_rtx_SET (SImode, operands[0],
1522 gen_rtx_IOR (SImode, operands[0], sreg)));
1524 return 1;
1527 /* ??? Block move stuff stolen from m88k. This code has not been
1528 verified for correctness. */
1530 /* Emit code to perform a block move. Choose the best method.
1532 OPERANDS[0] is the destination.
1533 OPERANDS[1] is the source.
1534 OPERANDS[2] is the size.
1535 OPERANDS[3] is the alignment safe to use. */
1537 /* Emit code to perform a block move with an offset sequence of ldw/st
1538 instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
1539 known constants. DEST and SRC are registers. OFFSET is the known
1540 starting point for the output pattern. */
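/* For example, with SIZE == 8 and ALIGN == 4 the loop below emits ldw at
   offset 0, ldw at 4, stw at 0, stw at 4, keeping one load in flight ahead of
   each store.  */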
1542 static const enum machine_mode mode_from_align[] =
1544 VOIDmode, QImode, HImode, VOIDmode, SImode,
1547 static void
1548 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1550 rtx temp[2];
1551 enum machine_mode mode[2];
1552 int amount[2];
1553 bool active[2];
1554 int phase = 0;
1555 int next;
1556 int offset_ld = 0;
1557 int offset_st = 0;
1558 rtx x;
1560 x = XEXP (dst_mem, 0);
1561 if (!REG_P (x))
1563 x = force_reg (Pmode, x);
1564 dst_mem = replace_equiv_address (dst_mem, x);
1567 x = XEXP (src_mem, 0);
1568 if (!REG_P (x))
1570 x = force_reg (Pmode, x);
1571 src_mem = replace_equiv_address (src_mem, x);
1574 active[0] = active[1] = false;
1578 next = phase;
1579 phase ^= 1;
1581 if (size > 0)
1583 int next_amount;
1585 next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1586 next_amount = MIN (next_amount, align);
1588 amount[next] = next_amount;
1589 mode[next] = mode_from_align[next_amount];
1590 temp[next] = gen_reg_rtx (mode[next]);
1592 x = adjust_address (src_mem, mode[next], offset_ld);
1593 emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));
1595 offset_ld += next_amount;
1596 size -= next_amount;
1597 active[next] = true;
1600 if (active[phase])
1602 active[phase] = false;
1604 x = adjust_address (dst_mem, mode[phase], offset_st);
1605 emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));
1607 offset_st += amount[phase];
1610 while (active[next]);
1613 bool
1614 mcore_expand_block_move (rtx *operands)
1616 HOST_WIDE_INT align, bytes, max;
1618 if (GET_CODE (operands[2]) != CONST_INT)
1619 return false;
1621 bytes = INTVAL (operands[2]);
1622 align = INTVAL (operands[3]);
1624 if (bytes <= 0)
1625 return false;
1626 if (align > 4)
1627 align = 4;
1629 switch (align)
1631 case 4:
1632 if (bytes & 1)
1633 max = 4*4;
1634 else if (bytes & 3)
1635 max = 8*4;
1636 else
1637 max = 16*4;
1638 break;
1639 case 2:
1640 max = 4*2;
1641 break;
1642 case 1:
1643 max = 4*1;
1644 break;
1645 default:
1646 gcc_unreachable ();
1649 if (bytes <= max)
1651 block_move_sequence (operands[0], operands[1], bytes, align);
1652 return true;
1655 return false;
1659 /* Code to generate prologue and epilogue sequences. */
1660 static int number_of_regs_before_varargs;
1662 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1663 for a varargs function. */
1664 static int current_function_anonymous_args;
1666 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1667 #define STORE_REACH (64) /* Maximum displace of word store + 4. */
1668 #define ADDI_REACH (32) /* Maximum addi operand. */
1670 static void
1671 layout_mcore_frame (struct mcore_frame * infp)
1673 int n;
1674 unsigned int i;
1675 int nbytes;
1676 int regarg;
1677 int localregarg;
1678 int outbounds;
1679 unsigned int growths;
1680 int step;
1682 /* Might have to spill bytes to re-assemble a big argument that
1683 was passed partially in registers and partially on the stack. */
1684 nbytes = crtl->args.pretend_args_size;
1686 /* Determine how much space for spilled anonymous args (e.g., stdarg). */
1687 if (current_function_anonymous_args)
1688 nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1690 infp->arg_size = nbytes;
1692 /* How much space to save non-volatile registers we stomp. */
1693 infp->reg_mask = calc_live_regs (& n);
1694 infp->reg_size = n * 4;
1696 /* And the rest of it... locals and space for overflowed outbounds. */
1697 infp->local_size = get_frame_size ();
1698 infp->outbound_size = crtl->outgoing_args_size;
1700 /* Make sure we have a whole number of words for the locals. */
1701 if (infp->local_size % STACK_BYTES)
1702 infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1704 /* Only thing we know we have to pad is the outbound space, since
1705 we've aligned our locals assuming that base of locals is aligned. */
1706 infp->pad_local = 0;
1707 infp->pad_reg = 0;
1708 infp->pad_outbound = 0;
1709 if (infp->outbound_size % STACK_BYTES)
1710 infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1712 /* Now we see how we want to stage the prologue so that it does
1713 the most appropriate stack growth and register saves to either:
1714 (1) run fast,
1715 (2) reduce instruction space, or
1716 (3) reduce stack space. */
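/* For instance, a function with 8 bytes of locals, two saved registers and no
   outbound arguments has localregarg == 16 <= ADDI_REACH, so the first case
   below buys the whole frame with a single subi.  */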
1717 for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1718 infp->growth[i] = 0;
1720 regarg = infp->reg_size + infp->arg_size;
1721 localregarg = infp->local_size + regarg;
1722 outbounds = infp->outbound_size + infp->pad_outbound;
1723 growths = 0;
1725 /* XXX: Consider one where we consider localregarg + outbound too! */
1727 /* If the frame is <= 32 bytes and an stm would only cover <= 2 registers,
1728 use stw's with offsets and buy the frame in one shot.  */
1729 if (localregarg <= ADDI_REACH
1730 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1732 /* Make sure we'll be aligned. */
1733 if (localregarg % STACK_BYTES)
1734 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1736 step = localregarg + infp->pad_reg;
1737 infp->reg_offset = infp->local_size;
1739 if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1741 step += outbounds;
1742 infp->reg_offset += outbounds;
1743 outbounds = 0;
1746 infp->arg_offset = step - 4;
1747 infp->growth[growths++] = step;
1748 infp->reg_growth = growths;
1749 infp->local_growth = growths;
1751 /* If we haven't already folded it in. */
1752 if (outbounds)
1753 infp->growth[growths++] = outbounds;
1755 goto finish;
1758 /* Frame can't be done with a single subi, but can be done with 2
1759 insns. If the 'stm' is getting <= 2 registers, we use stw's and
1760 shift some of the stack purchase into the first subi, so both are
1761 single instructions. */
1762 if (localregarg <= STORE_REACH
1763 && (infp->local_size > ADDI_REACH)
1764 && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1766 int all;
1768 /* Make sure we'll be aligned; use either pad_reg or pad_local. */
1769 if (localregarg % STACK_BYTES)
1770 infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1772 all = localregarg + infp->pad_reg + infp->pad_local;
1773 step = ADDI_REACH; /* As much up front as we can. */
1774 if (step > all)
1775 step = all;
1777 /* XXX: Consider whether step will still be aligned; we believe so. */
1778 infp->arg_offset = step - 4;
1779 infp->growth[growths++] = step;
1780 infp->reg_growth = growths;
1781 infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1782 all -= step;
1784 /* Can we fold in any space required for outbounds? */
1785 if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1787 all += outbounds;
1788 outbounds = 0;
1791 /* Get the rest of the locals in place. */
1792 step = all;
1793 infp->growth[growths++] = step;
1794 infp->local_growth = growths;
1795 all -= step;
1797 gcc_assert (all == 0);
1799 /* Finish off if we need to do so. */
1800 if (outbounds)
1801 infp->growth[growths++] = outbounds;
1803 goto finish;
1806 /* Registers + args is nicely aligned, so we'll buy that in one shot.
1807 Then we buy the rest of the frame in 1 or 2 steps depending on
1808 whether we need a frame pointer. */
1809 if ((regarg % STACK_BYTES) == 0)
1811 infp->growth[growths++] = regarg;
1812 infp->reg_growth = growths;
1813 infp->arg_offset = regarg - 4;
1814 infp->reg_offset = 0;
1816 if (infp->local_size % STACK_BYTES)
1817 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1819 step = infp->local_size + infp->pad_local;
1821 if (!frame_pointer_needed)
1823 step += outbounds;
1824 outbounds = 0;
1827 infp->growth[growths++] = step;
1828 infp->local_growth = growths;
1830 /* If there's any left to be done. */
1831 if (outbounds)
1832 infp->growth[growths++] = outbounds;
1834 goto finish;
1837 /* XXX: optimizations that we'll want to play with....
1838 -- regarg is not aligned, but it's a small number of registers;
1839 use some of localsize so that regarg is aligned and then
1840 save the registers. */
1842 /* Simple encoding; plods down the stack buying the pieces as it goes.
1843 -- does not optimize space consumption.
1844 -- does not attempt to optimize instruction counts.
1845 -- but it is safe for all alignments. */
1846 if (regarg % STACK_BYTES != 0)
1847 infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1849 infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1850 infp->reg_growth = growths;
1851 infp->arg_offset = infp->growth[0] - 4;
1852 infp->reg_offset = 0;
1854 if (frame_pointer_needed)
1856 if (infp->local_size % STACK_BYTES != 0)
1857 infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1859 infp->growth[growths++] = infp->local_size + infp->pad_local;
1860 infp->local_growth = growths;
1862 infp->growth[growths++] = outbounds;
1864 else
1866 if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1867 infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1869 infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1870 infp->local_growth = growths;
1873 /* Anything else that we've forgotten?  Plus a few consistency checks.  */
1874 finish:
1875 gcc_assert (infp->reg_offset >= 0);
1876 gcc_assert (growths <= MAX_STACK_GROWS);
1878 for (i = 0; i < growths; i++)
1879 gcc_assert (!(infp->growth[i] % STACK_BYTES));
1882 /* Define the offset between two registers, one to be eliminated, and
1883 the other its replacement, at the start of a routine. */
1886 mcore_initial_elimination_offset (int from, int to)
1888 int above_frame;
1889 int below_frame;
1890 struct mcore_frame fi;
1892 layout_mcore_frame (& fi);
1894 /* fp to ap */
1895 above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1896 /* sp to fp */
1897 below_frame = fi.outbound_size + fi.pad_outbound;
1899 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1900 return above_frame;
1902 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1903 return above_frame + below_frame;
1905 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1906 return below_frame;
1908 gcc_unreachable ();
1911 /* Keep track of some information about varargs for the prolog. */
1913 static void
1914 mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
1915 enum machine_mode mode, tree type,
1916 int * ptr_pretend_size ATTRIBUTE_UNUSED,
1917 int second_time ATTRIBUTE_UNUSED)
1919 current_function_anonymous_args = 1;
1921 /* We need to know how many argument registers are used before
1922 the varargs start, so that we can push the remaining argument
1923 registers during the prologue. */
1924 number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1926 /* There is a bug somewhere in the arg handling code.
1927 Until I can find it this workaround always pushes the
1928 last named argument onto the stack. */
1929 number_of_regs_before_varargs = *args_so_far;
1931 /* The last named argument may be split between argument registers
1932 and the stack. Allow for this here. */
1933 if (number_of_regs_before_varargs > NPARM_REGS)
1934 number_of_regs_before_varargs = NPARM_REGS;
1937 void
1938 mcore_expand_prolog (void)
1940 struct mcore_frame fi;
1941 int space_allocated = 0;
1942 int growth = 0;
1944 /* Find out what we're doing. */
1945 layout_mcore_frame (&fi);
1947 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1948 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1950 if (TARGET_CG_DATA)
1952 /* Emit a symbol for this routine's frame size. */
1953 rtx x;
1955 x = DECL_RTL (current_function_decl);
1957 gcc_assert (GET_CODE (x) == MEM);
1959 x = XEXP (x, 0);
1961 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1963 free (mcore_current_function_name);
1965 mcore_current_function_name = xstrdup (XSTR (x, 0));
1967 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1969 if (cfun->calls_alloca)
1970 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1972 /* 970425: RBE:
1973 We're looking at how the 8byte alignment affects stack layout
1974 and where we had to pad things. This emits information we can
1975 extract which tells us about frame sizes and the like. */
1976 fprintf (asm_out_file,
1977 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1978 mcore_current_function_name,
1979 fi.arg_size, fi.reg_size, fi.reg_mask,
1980 fi.local_size, fi.outbound_size,
1981 frame_pointer_needed);
1984 if (mcore_naked_function_p ())
1985 return;
1987 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
1988 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
1990 /* If we have a parameter passed partially in regs and partially in memory,
1991 the registers will have been stored to memory already in function.c. So
1992 we only need to do something here for varargs functions. */
1993 if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1995 int offset;
1996 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
1997 int remaining = fi.arg_size;
1999 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2001 emit_insn (gen_movsi
2002 (gen_rtx_MEM (SImode,
2003 plus_constant (stack_pointer_rtx, offset)),
2004 gen_rtx_REG (SImode, rn)));
2008 /* Do we need another stack adjustment before we do the register saves? */
2009 if (growth < fi.reg_growth)
2010 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2012 if (fi.reg_size != 0)
2014 int i;
2015 int offs = fi.reg_offset;
2017 for (i = 15; i >= 0; i--)
2019 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2021 int first_reg = 15;
2023 while (fi.reg_mask & (1 << first_reg))
2024 first_reg--;
2025 first_reg++;
2027 emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2028 gen_rtx_REG (SImode, first_reg),
2029 GEN_INT (16 - first_reg)));
2031 i -= (15 - first_reg);
2032 offs += (16 - first_reg) * 4;
2034 else if (fi.reg_mask & (1 << i))
2036 emit_insn (gen_movsi
2037 (gen_rtx_MEM (SImode,
2038 plus_constant (stack_pointer_rtx, offs)),
2039 gen_rtx_REG (SImode, i)));
2040 offs += 4;
2045 /* Figure the locals + outbounds. */
2046 if (frame_pointer_needed)
2048 /* If we haven't already purchased to 'fp'. */
2049 if (growth < fi.local_growth)
2050 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2052 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2054 /* ... and then go any remaining distance for outbounds, etc. */
2055 if (fi.growth[growth])
2056 output_stack_adjust (-1, fi.growth[growth++]);
2058 else
2060 if (growth < fi.local_growth)
2061 output_stack_adjust (-1, fi.growth[growth++]); /* Grows it. */
2062 if (fi.growth[growth])
2063 output_stack_adjust (-1, fi.growth[growth++]);
2067 void
2068 mcore_expand_epilog (void)
2070 struct mcore_frame fi;
2071 int i;
2072 int offs;
2073 int growth = MAX_STACK_GROWS - 1 ;
2076 /* Find out what we're doing. */
2077 layout_mcore_frame(&fi);
2079 if (mcore_naked_function_p ())
2080 return;
2082 /* If we had a frame pointer, restore the sp from that. */
2083 if (frame_pointer_needed)
2085 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2086 growth = fi.local_growth - 1;
2088 else
2090 /* XXX: while loop should accumulate and do a single sell. */
2091 while (growth >= fi.local_growth)
2093 if (fi.growth[growth] != 0)
2094 output_stack_adjust (1, fi.growth[growth]);
2095 growth--;
2099 /* Make sure we've shrunk stack back to the point where the registers
2100 were laid down. This is typically 0/1 iterations. Then pull the
2101 register save information back off the stack. */
2102 while (growth >= fi.reg_growth)
2103 output_stack_adjust ( 1, fi.growth[growth--]);
2105 offs = fi.reg_offset;
2107 for (i = 15; i >= 0; i--)
2109 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2111 int first_reg;
2113 /* Find the starting register. */
2114 first_reg = 15;
2116 while (fi.reg_mask & (1 << first_reg))
2117 first_reg--;
2119 first_reg++;
2121 emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2122 gen_rtx_MEM (SImode, stack_pointer_rtx),
2123 GEN_INT (16 - first_reg)));
2125 i -= (15 - first_reg);
2126 offs += (16 - first_reg) * 4;
2128 else if (fi.reg_mask & (1 << i))
2130 emit_insn (gen_movsi
2131 (gen_rtx_REG (SImode, i),
2132 gen_rtx_MEM (SImode,
2133 plus_constant (stack_pointer_rtx, offs))));
2134 offs += 4;
2138 /* Give back anything else. */
2139 /* XXX: Should accumulate total and then give it back. */
2140 while (growth >= 0)
2141 output_stack_adjust ( 1, fi.growth[growth--]);
2144 /* This code is borrowed from the SH port. */
2146 /* The MCORE cannot load a large constant into a register; constants have to
2147 come from a pc relative load. The reference of a pc relative load
2148 instruction must be less than 1k in front of the instruction. This
2149 means that we often have to dump a constant inside a function, and
2150 generate code to branch around it.
2152 It is important to minimize this, since the branches will slow things
2153 down and make things bigger.
2155 Worst case code looks like:
2157 lrw L1,r0
2158 br L2
2159 align
2160 L1: .long value
2164 lrw L3,r0
2165 br L4
2166 align
2167 L3: .long value
2171 We fix this by performing a scan before scheduling, which notices which
2172 instructions need to have their operands fetched from the constant table
2173 and builds the table.
2175 The algorithm is:
2177 scan, find an instruction which needs a pcrel move. Look forward, find the
2178 last barrier which is within MAX_COUNT bytes of the requirement.
2179 If there isn't one, make one. Process all the instructions between
2180 the find and the barrier.
2182 In the above example, we can tell that L3 is within 1k of L1, so
2183 the first move can be shrunk from the 2 insn+constant sequence into
2184 just 1 insn, and the constant moved to L3 to make:
2186 lrw L1,r0
2188 lrw L3,r0
2189 bra L4
2190 align
2191 L3:.long value
2192 L4:.long value
2194 Then the second move becomes the target for the shortening process. */
2196 typedef struct
2198 rtx value; /* Value in table. */
2199 rtx label; /* Label of value. */
2200 } pool_node;
2202 /* The maximum number of constants that can fit into one pool, since
2203 the pc relative range is 0...1020 bytes and constants are at least 4
2204 bytes long. We subtract 4 from the range to allow for the case where
2205 we need to add a branch/align before the constant pool. */
2207 #define MAX_COUNT 1016
2208 #define MAX_POOL_SIZE (MAX_COUNT/4)
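/* Illustrative arithmetic only: with a 0...1020 byte pc-relative range and
   4 bytes reserved for the branch/align before the pool, MAX_COUNT is 1016,
   and with 4-byte entries MAX_POOL_SIZE works out to 1016/4 = 254 constants
   per pool.  */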
2209 static pool_node pool_vector[MAX_POOL_SIZE];
2210 static int pool_size;
2212 /* Dump out any constants accumulated in the final pass. These
2213 will only be labels. */
2215 const char *
2216 mcore_output_jump_label_table (void)
2218 int i;
2220 if (pool_size)
2222 fprintf (asm_out_file, "\t.align 2\n");
2224 for (i = 0; i < pool_size; i++)
2226 pool_node * p = pool_vector + i;
2228 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2230 output_asm_insn (".long %0", &p->value);
2233 pool_size = 0;
2236 return "";
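/* Purely illustrative (label numbers and the local-label prefix are
   hypothetical): with two pooled labels the emitted table might look like

	.align 2
   .L21:
	.long .L7
   .L22:
	.long .L9
*/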
2239 /* Check whether insn is a candidate for a conditional. */
2241 static cond_type
2242 is_cond_candidate (rtx insn)
2244 /* The only things we conditionalize are those that can be directly
2245 changed into a conditional. Only bother with SImode items. If
2246 we wanted to be a little more aggressive, we could also do other
2247 modes such as DImode with reg-reg move or load 0. */
2248 if (GET_CODE (insn) == INSN)
2250 rtx pat = PATTERN (insn);
2251 rtx src, dst;
2253 if (GET_CODE (pat) != SET)
2254 return COND_NO;
2256 dst = XEXP (pat, 0);
2258 if ((GET_CODE (dst) != REG &&
2259 GET_CODE (dst) != SUBREG) ||
2260 GET_MODE (dst) != SImode)
2261 return COND_NO;
2263 src = XEXP (pat, 1);
2265 if ((GET_CODE (src) == REG ||
2266 (GET_CODE (src) == SUBREG &&
2267 GET_CODE (SUBREG_REG (src)) == REG)) &&
2268 GET_MODE (src) == SImode)
2269 return COND_MOV_INSN;
2270 else if (GET_CODE (src) == CONST_INT &&
2271 INTVAL (src) == 0)
2272 return COND_CLR_INSN;
2273 else if (GET_CODE (src) == PLUS &&
2274 (GET_CODE (XEXP (src, 0)) == REG ||
2275 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2276 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2277 GET_MODE (XEXP (src, 0)) == SImode &&
2278 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2279 INTVAL (XEXP (src, 1)) == 1)
2280 return COND_INC_INSN;
2281 else if (((GET_CODE (src) == MINUS &&
2282 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2283 INTVAL( XEXP (src, 1)) == 1) ||
2284 (GET_CODE (src) == PLUS &&
2285 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2286 INTVAL (XEXP (src, 1)) == -1)) &&
2287 (GET_CODE (XEXP (src, 0)) == REG ||
2288 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2289 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2290 GET_MODE (XEXP (src, 0)) == SImode)
2291 return COND_DEC_INSN;
2293 /* Some insns that we don't bother with:
2294 (set (rx:DI) (ry:DI))
2295 (set (rx:DI) (const_int 0))
2299 else if (GET_CODE (insn) == JUMP_INSN &&
2300 GET_CODE (PATTERN (insn)) == SET &&
2301 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2302 return COND_BRANCH_INSN;
2304 return COND_NO;
2307 /* Emit a conditional version of insn and replace the old insn with the
2308 new one. Return the new insn if emitted. */
2310 static rtx
2311 emit_new_cond_insn (rtx insn, int cond)
2313 rtx c_insn = 0;
2314 rtx pat, dst, src;
2315 cond_type num;
2317 if ((num = is_cond_candidate (insn)) == COND_NO)
2318 return NULL;
2320 pat = PATTERN (insn);
2322 if (GET_CODE (insn) == INSN)
2324 dst = SET_DEST (pat);
2325 src = SET_SRC (pat);
2327 else
2329 dst = JUMP_LABEL (insn);
2330 src = NULL_RTX;
2333 switch (num)
2335 case COND_MOV_INSN:
2336 case COND_CLR_INSN:
2337 if (cond)
2338 c_insn = gen_movt0 (dst, src, dst);
2339 else
2340 c_insn = gen_movt0 (dst, dst, src);
2341 break;
2343 case COND_INC_INSN:
2344 if (cond)
2345 c_insn = gen_incscc (dst, dst);
2346 else
2347 c_insn = gen_incscc_false (dst, dst);
2348 break;
2350 case COND_DEC_INSN:
2351 if (cond)
2352 c_insn = gen_decscc (dst, dst);
2353 else
2354 c_insn = gen_decscc_false (dst, dst);
2355 break;
2357 case COND_BRANCH_INSN:
2358 if (cond)
2359 c_insn = gen_branch_true (dst);
2360 else
2361 c_insn = gen_branch_false (dst);
2362 break;
2364 default:
2365 return NULL;
2368 /* Only copy the notes if they exist. */
2369 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2371 /* We really don't need to bother with the notes and links at this
2372 point, but go ahead and save the notes. This will help is_dead()
2373 when applying peepholes (links don't matter since they are not
2374 used any more beyond this point for the mcore). */
2375 REG_NOTES (c_insn) = REG_NOTES (insn);
2378 if (num == COND_BRANCH_INSN)
2380 /* For jumps, we need to be a little bit careful and emit the new jump
2381 before the old one and to update the use count for the target label.
2382 This way, the barrier following the old (uncond) jump will get
2383 deleted, but the label won't. */
2384 c_insn = emit_jump_insn_before (c_insn, insn);
2386 ++ LABEL_NUSES (dst);
2388 JUMP_LABEL (c_insn) = dst;
2390 else
2391 c_insn = emit_insn_after (c_insn, insn);
2393 delete_insn (insn);
2395 return c_insn;
2398 /* Attempt to change a basic block into a series of conditional insns. This
2399 works by taking the branch at the end of the 1st block and scanning for the
2400 end of the 2nd block. If all instructions in the 2nd block have cond.
2401 versions and the label at the start of block 3 is the same as the target
2402	   from the branch at block 1, then conditionalize all insns in block 2 using
2403 the inverse condition of the branch at block 1. (Note I'm bending the
2404 definition of basic block here.)
2406 e.g., change:
2408 bt L2 <-- end of block 1 (delete)
2409 mov r7,r8
2410 addu r7,1
2411 br L3 <-- end of block 2
2413 L2: ... <-- start of block 3 (NUSES==1)
2414 L3: ...
2418 movf r7,r8
2419 incf r7
2420 bf L3
2422 L3: ...
2424 we can delete the L2 label if NUSES==1 and re-apply the optimization
2425 starting at the last instruction of block 2. This may allow an entire
2426 if-then-else statement to be conditionalized. BRC */
2427 static rtx
2428 conditionalize_block (rtx first)
2430 rtx insn;
2431 rtx br_pat;
2432 rtx end_blk_1_br = 0;
2433 rtx end_blk_2_insn = 0;
2434 rtx start_blk_3_lab = 0;
2435 int cond;
2436 int br_lab_num;
2437 int blk_size = 0;
2440 /* Check that the first insn is a candidate conditional jump. This is
2441 the one that we'll eliminate. If not, advance to the next insn to
2442 try. */
2443 if (GET_CODE (first) != JUMP_INSN ||
2444 GET_CODE (PATTERN (first)) != SET ||
2445 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2446 return NEXT_INSN (first);
2448 /* Extract some information we need. */
2449 end_blk_1_br = first;
2450 br_pat = PATTERN (end_blk_1_br);
2452 /* Complement the condition since we use the reverse cond. for the insns. */
2453 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2455 /* Determine what kind of branch we have. */
2456 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2458 /* A normal branch, so extract label out of first arm. */
2459 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2461 else
2463 /* An inverse branch, so extract the label out of the 2nd arm
2464 and complement the condition. */
2465 cond = (cond == 0);
2466 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2469 /* Scan forward for the start of block 2: it must start with a
2470 label and that label must be the same as the branch target
2471 label from block 1. We don't care about whether block 2 actually
2472 ends with a branch or a label (an uncond. branch is
2473 conditionalizable). */
2474 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2476 enum rtx_code code;
2478 code = GET_CODE (insn);
2480 /* Look for the label at the start of block 3. */
2481 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2482 break;
2484 /* Skip barriers, notes, and conditionalizable insns. If the
2485 insn is not conditionalizable or makes this optimization fail,
2486 just return the next insn so we can start over from that point. */
2487 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2488 return NEXT_INSN (insn);
2490 /* Remember the last real insn before the label (i.e. end of block 2). */
2491 if (code == JUMP_INSN || code == INSN)
2493 blk_size ++;
2494 end_blk_2_insn = insn;
2498 if (!insn)
2499 return insn;
2501 /* It is possible for this optimization to slow performance if the blocks
2502 are long. This really depends upon whether the branch is likely taken
2503 or not. If the branch is taken, we slow performance in many cases. But,
2504 if the branch is not taken, we always help performance (for a single
2505 block, but for a double block (i.e. when the optimization is re-applied)
2506 this is not true since the 'right thing' depends on the overall length of
2507 the collapsed block). As a compromise, don't apply this optimization on
2508 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2509	   The best threshold depends on the latencies of the instructions (i.e.,
2510 the branch penalty). */
2511 if (optimize > 1 && blk_size > 2)
2512 return insn;
2514 /* At this point, we've found the start of block 3 and we know that
2515 it is the destination of the branch from block 1. Also, all
2516 instructions in the block 2 are conditionalizable. So, apply the
2517 conditionalization and delete the branch. */
2518 start_blk_3_lab = insn;
2520 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2521 insn = NEXT_INSN (insn))
2523 rtx newinsn;
2525 if (INSN_DELETED_P (insn))
2526 continue;
2528 /* Try to form a conditional variant of the instruction and emit it. */
2529 if ((newinsn = emit_new_cond_insn (insn, cond)))
2531 if (end_blk_2_insn == insn)
2532 end_blk_2_insn = newinsn;
2534 insn = newinsn;
2538 /* Note whether we will delete the label starting blk 3 when the jump
2539 gets deleted. If so, we want to re-apply this optimization at the
2540 last real instruction right before the label. */
2541 if (LABEL_NUSES (start_blk_3_lab) == 1)
2543 start_blk_3_lab = 0;
2546 /* ??? we probably should redistribute the death notes for this insn, esp.
2547 the death of cc, but it doesn't really matter this late in the game.
2548 The peepholes all use is_dead() which will find the correct death
2549 regardless of whether there is a note. */
2550 delete_insn (end_blk_1_br);
2552 if (! start_blk_3_lab)
2553 return end_blk_2_insn;
2555 /* Return the insn right after the label at the start of block 3. */
2556 return NEXT_INSN (start_blk_3_lab);
2559 /* Apply the conditionalization of blocks optimization. This is the
2560 outer loop that traverses through the insns scanning for a branch
2561 that signifies an opportunity to apply the optimization. Note that
2562 this optimization is applied late. If we could apply it earlier,
2563 say before cse 2, it may expose more optimization opportunities.
2564	   But the payback probably isn't really worth the effort (we'd have
2565 to update all reg/flow/notes/links/etc to make it work - and stick it
2566 in before cse 2). */
2568 static void
2569 conditionalize_optimization (void)
2571 rtx insn;
2573 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2574 continue;
2577 static int saved_warn_return_type = -1;
2578 static int saved_warn_return_type_count = 0;
2580 /* This is to handle loads from the constant pool. */
2582 static void
2583 mcore_reorg (void)
2585 /* Reset this variable. */
2586 current_function_anonymous_args = 0;
2588 /* Restore the warn_return_type if it has been altered. */
2589 if (saved_warn_return_type != -1)
2591 /* Only restore the value if we have reached another function.
2592		 The test of warn_return_type occurs in finish_function () in
2593 c-decl.c a long time after the code for the function is generated,
2594 so we need a counter to tell us when we have finished parsing that
2595 function and can restore the flag. */
2596 if (--saved_warn_return_type_count == 0)
2598 warn_return_type = saved_warn_return_type;
2599 saved_warn_return_type = -1;
2603 if (optimize == 0)
2604 return;
2606 /* Conditionalize blocks where we can. */
2607 conditionalize_optimization ();
2609 /* Literal pool generation is now pushed off until the assembler. */
2613 /* Return true if X is something that can be moved directly into r15. */
2615 bool
2616 mcore_r15_operand_p (rtx x)
2618 switch (GET_CODE (x))
2620 case CONST_INT:
2621 return mcore_const_ok_for_inline (INTVAL (x));
2623 case REG:
2624 case SUBREG:
2625 case MEM:
2626 return 1;
2628 default:
2629 return 0;
2633 /* Implement SECONDARY_RELOAD_CLASS. If RCLASS contains r15, and we can't
2634 directly move X into it, use r1-r14 as a temporary. */
2636 enum reg_class
2637 mcore_secondary_reload_class (enum reg_class rclass,
2638 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2640 if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2641 && !mcore_r15_operand_p (x))
2642 return LRW_REGS;
2643 return NO_REGS;
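/* Sketch of when this triggers (not taken from the sources): moving a value
   that mcore_r15_operand_p rejects -- e.g. a constant for which
   mcore_const_ok_for_inline fails -- into a class containing r15 is done
   via a scratch register drawn from r1-r14 (LRW_REGS).  */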
2646 /* Return the reg_class to use when reloading the rtx X into the class
2647 RCLASS. If X is too complex to move directly into r15, prefer to
2648 use LRW_REGS instead. */
2650 enum reg_class
2651 mcore_reload_class (rtx x, enum reg_class rclass)
2653 if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2654 return LRW_REGS;
2656 return rclass;
2659 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2660 register. Note that the current version doesn't worry about whether
2661	   they are the same mode or not (e.g., a QImode in r2 matches an HImode
2662	   in r2 matches an SImode in r2).  Might think in the future about whether
2663 we want to be able to say something about modes. */
2666 mcore_is_same_reg (rtx x, rtx y)
2668 /* Strip any and all of the subreg wrappers. */
2669 while (GET_CODE (x) == SUBREG)
2670 x = SUBREG_REG (x);
2672 while (GET_CODE (y) == SUBREG)
2673 y = SUBREG_REG (y);
2675 if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2676 return 1;
2678 return 0;
2681 static void
2682 mcore_option_override (void)
2684 /* Only the m340 supports little endian code. */
2685 if (TARGET_LITTLE_END && ! TARGET_M340)
2686 target_flags |= MASK_M340;
2690 /* Compute the number of word sized registers needed to
2691 hold a function argument of mode MODE and type TYPE. */
2694 mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2696 int size;
2698 if (targetm.calls.must_pass_in_stack (mode, type))
2699 return 0;
2701 if (type && mode == BLKmode)
2702 size = int_size_in_bytes (type);
2703 else
2704 size = GET_MODE_SIZE (mode);
2706 return ROUND_ADVANCE (size);
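/* For example (assuming 4-byte words), a 6-byte BLKmode structure needs
   ROUND_ADVANCE (6) == 2 word-sized argument registers.  */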
2709 static rtx
2710 handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2712 int size;
2714 /* The MCore ABI defines that a structure whose size is not a whole multiple
2715	     of words is passed packed into registers (or spilled onto the stack if
2716 not enough registers are available) with the last few bytes of the
2717 structure being packed, left-justified, into the last register/stack slot.
2718 GCC handles this correctly if the last word is in a stack slot, but we
2719 have to generate a special, PARALLEL RTX if the last word is in an
2720 argument register. */
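  /* Illustrative sketch (assuming the first parameter register is r2): a
     6-byte structure passed as the first argument becomes a PARALLEL of two
     SImode pieces, (reg r2) at byte offset 0 and (reg r3) at byte offset 4,
     with the final two bytes left-justified in r3.  */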
2721 if (type
2722 && TYPE_MODE (type) == BLKmode
2723 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2724 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2725 && (size % UNITS_PER_WORD != 0)
2726 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2728 rtx arg_regs [NPARM_REGS];
2729 int nregs;
2730 rtx result;
2731 rtvec rtvec;
2733 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2735 arg_regs [nregs] =
2736 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2737 GEN_INT (nregs * UNITS_PER_WORD));
2738 nregs ++;
2741 /* We assume here that NPARM_REGS == 6. The assert checks this. */
2742 gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2743 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2744 arg_regs[3], arg_regs[4], arg_regs[5]);
2746 result = gen_rtx_PARALLEL (mode, rtvec);
2747 return result;
2750 return gen_rtx_REG (mode, reg);
2754 mcore_function_value (const_tree valtype, const_tree func)
2756 enum machine_mode mode;
2757 int unsigned_p;
2759 mode = TYPE_MODE (valtype);
2761 /* Since we promote return types, we must promote the mode here too. */
2762 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2764 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2767 /* Define where to put the arguments to a function.
2768 Value is zero to push the argument on the stack,
2769 or a hard register in which to store the argument.
2771 MODE is the argument's machine mode.
2772 TYPE is the data type of the argument (as a tree).
2773 This is null for libcalls where that information may
2774 not be available.
2775 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2776 the preceding args and about the function being called.
2777 NAMED is nonzero if this argument is a named parameter
2778 (otherwise it is an extra parameter matching an ellipsis).
2780 On MCore the first args are normally in registers
2781 and the rest are pushed. Any arg that starts within the first
2782 NPARM_REGS words is at least partially passed in a register unless
2783 its data type forbids. */
2785 static rtx
2786 mcore_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2787 const_tree type, bool named)
2789 int arg_reg;
2791 if (! named || mode == VOIDmode)
2792 return 0;
2794 if (targetm.calls.must_pass_in_stack (mode, type))
2795 return 0;
2797 arg_reg = ROUND_REG (*cum, mode);
2799 if (arg_reg < NPARM_REGS)
2800 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2802 return 0;
2805 static void
2806 mcore_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2807 const_tree type, bool named ATTRIBUTE_UNUSED)
2809 *cum = (ROUND_REG (*cum, mode)
2810 + (int)named * mcore_num_arg_regs (mode, type));
2813 static unsigned int
2814 mcore_function_arg_boundary (enum machine_mode mode,
2815 const_tree type ATTRIBUTE_UNUSED)
2817 /* Doubles must be aligned to an 8 byte boundary. */
2818 return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2819 ? BIGGEST_ALIGNMENT
2820 : PARM_BOUNDARY);
2823 /* Returns the number of bytes of argument registers required to hold *part*
2824 of a parameter of machine mode MODE and type TYPE (which may be NULL if
2825 the type is not known). If the argument fits entirely in the argument
2826 registers, or entirely on the stack, then 0 is returned. CUM is the
2827 number of argument registers already used by earlier parameters to
2828 the function. */
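/* Worked example (a sketch, assuming NPARM_REGS == 6): a 16-byte structure
   whose first word lands in argument register 5 keeps that one word in a
   register and spills the remaining 12 bytes to the stack, so this hook
   returns (6 - 5) * UNITS_PER_WORD == 4.  */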
2830 static int
2831 mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2832 tree type, bool named)
2834 int reg = ROUND_REG (*cum, mode);
2836 if (named == 0)
2837 return 0;
2839 if (targetm.calls.must_pass_in_stack (mode, type))
2840 return 0;
2842 /* REG is not the *hardware* register number of the register that holds
2843 the argument, it is the *argument* register number. So for example,
2844 the first argument to a function goes in argument register 0, which
2845 translates (for the MCore) into hardware register 2. The second
2846 argument goes into argument register 1, which translates into hardware
2847 register 3, and so on. NPARM_REGS is the number of argument registers
2848 supported by the target, not the maximum hardware register number of
2849 the target. */
2850 if (reg >= NPARM_REGS)
2851 return 0;
2853 /* If the argument fits entirely in registers, return 0. */
2854 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2855 return 0;
2857 /* The argument overflows the number of available argument registers.
2858 Compute how many argument registers have not yet been assigned to
2859 hold an argument. */
2860 reg = NPARM_REGS - reg;
2862 /* Return partially in registers and partially on the stack. */
2863 return reg * UNITS_PER_WORD;
2866 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
2869 mcore_dllexport_name_p (const char * symbol)
2871 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2874 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
2877 mcore_dllimport_name_p (const char * symbol)
2879 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
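/* For example, once the marking routines below have run, a dllexport'd
   symbol "foo" is renamed "@e.foo" and a dllimport'd one becomes
   "@i.__imp_foo" (see mcore_mark_dllexport and mcore_mark_dllimport).  */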
2882 /* Mark a DECL as being dllexport'd. */
2884 static void
2885 mcore_mark_dllexport (tree decl)
2887 const char * oldname;
2888 char * newname;
2889 rtx rtlname;
2890 tree idp;
2892 rtlname = XEXP (DECL_RTL (decl), 0);
2894 if (GET_CODE (rtlname) == MEM)
2895 rtlname = XEXP (rtlname, 0);
2896 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2897 oldname = XSTR (rtlname, 0);
2899 if (mcore_dllexport_name_p (oldname))
2900 return; /* Already done. */
2902 newname = XALLOCAVEC (char, strlen (oldname) + 4);
2903 sprintf (newname, "@e.%s", oldname);
2905 /* We pass newname through get_identifier to ensure it has a unique
2906 address. RTL processing can sometimes peek inside the symbol ref
2907 and compare the string's addresses to see if two symbols are
2908 identical. */
2909 /* ??? At least I think that's why we do this. */
2910 idp = get_identifier (newname);
2912 XEXP (DECL_RTL (decl), 0) =
2913 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2916 /* Mark a DECL as being dllimport'd. */
2918 static void
2919 mcore_mark_dllimport (tree decl)
2921 const char * oldname;
2922 char * newname;
2923 tree idp;
2924 rtx rtlname;
2925 rtx newrtl;
2927 rtlname = XEXP (DECL_RTL (decl), 0);
2929 if (GET_CODE (rtlname) == MEM)
2930 rtlname = XEXP (rtlname, 0);
2931 gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2932 oldname = XSTR (rtlname, 0);
2934 gcc_assert (!mcore_dllexport_name_p (oldname));
2935 if (mcore_dllimport_name_p (oldname))
2936 return; /* Already done. */
2938 /* ??? One can well ask why we're making these checks here,
2939 and that would be a good question. */
2941 /* Imported variables can't be initialized. */
2942 if (TREE_CODE (decl) == VAR_DECL
2943 && !DECL_VIRTUAL_P (decl)
2944 && DECL_INITIAL (decl))
2946 error ("initialized variable %q+D is marked dllimport", decl);
2947 return;
2950 /* `extern' needn't be specified with dllimport.
2951 Specify `extern' now and hope for the best. Sigh. */
2952 if (TREE_CODE (decl) == VAR_DECL
2953 /* ??? Is this test for vtables needed? */
2954 && !DECL_VIRTUAL_P (decl))
2956 DECL_EXTERNAL (decl) = 1;
2957 TREE_PUBLIC (decl) = 1;
2960 newname = XALLOCAVEC (char, strlen (oldname) + 11);
2961 sprintf (newname, "@i.__imp_%s", oldname);
2963 /* We pass newname through get_identifier to ensure it has a unique
2964 address. RTL processing can sometimes peek inside the symbol ref
2965 and compare the string's addresses to see if two symbols are
2966 identical. */
2967 /* ??? At least I think that's why we do this. */
2968 idp = get_identifier (newname);
2970 newrtl = gen_rtx_MEM (Pmode,
2971 gen_rtx_SYMBOL_REF (Pmode,
2972 IDENTIFIER_POINTER (idp)));
2973 XEXP (DECL_RTL (decl), 0) = newrtl;
2976 static int
2977 mcore_dllexport_p (tree decl)
2979 if ( TREE_CODE (decl) != VAR_DECL
2980 && TREE_CODE (decl) != FUNCTION_DECL)
2981 return 0;
2983 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2986 static int
2987 mcore_dllimport_p (tree decl)
2989 if ( TREE_CODE (decl) != VAR_DECL
2990 && TREE_CODE (decl) != FUNCTION_DECL)
2991 return 0;
2993 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2996 /* We must mark dll symbols specially. Definitions of dllexport'd objects
2997	   install some info in the .drectve (PE) or .exports (ELF) sections.  */
2999 static void
3000 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3002 /* Mark the decl so we can tell from the rtl whether the object is
3003 dllexport'd or dllimport'd. */
3004 if (mcore_dllexport_p (decl))
3005 mcore_mark_dllexport (decl);
3006 else if (mcore_dllimport_p (decl))
3007 mcore_mark_dllimport (decl);
3009 /* It might be that DECL has already been marked as dllimport, but
3010 a subsequent definition nullified that. The attribute is gone
3011 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3012 else if ((TREE_CODE (decl) == FUNCTION_DECL
3013 || TREE_CODE (decl) == VAR_DECL)
3014 && DECL_RTL (decl) != NULL_RTX
3015 && GET_CODE (DECL_RTL (decl)) == MEM
3016 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3017 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3018 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3020 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3021 tree idp = get_identifier (oldname + 9);
3022 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3024 XEXP (DECL_RTL (decl), 0) = newrtl;
3026 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3027 ??? We leave these alone for now. */
3031 /* Undo the effects of the above. */
3033 static const char *
3034 mcore_strip_name_encoding (const char * str)
3036 return str + (str[0] == '@' ? 3 : 0);
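/* E.g. "@e.foo" strips back to "foo" and "@i.__imp_foo" strips back to
   "__imp_foo"; only the three-character "@x." prefix is removed.  */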
3039 /* MCore specific attribute support.
3040 dllexport - for exporting a function/variable that will live in a dll
3041 dllimport - for importing a function/variable from a dll
3042 naked - do not create a function prologue/epilogue. */
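/* For illustration, user code might apply these attributes as follows
   (hypothetical declarations):

     void isr (void) __attribute__ ((naked));
     extern int shared_var __attribute__ ((dllimport));
     int exported_fn (int) __attribute__ ((dllexport));
*/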
3044 /* Handle a "naked" attribute; arguments as in
3045 struct attribute_spec.handler. */
3047 static tree
3048 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3049 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3051 if (TREE_CODE (*node) == FUNCTION_DECL)
3053 /* PR14310 - don't complain about lack of return statement
3054 in naked functions. The solution here is a gross hack
3055 but this is the only way to solve the problem without
3056 adding a new feature to GCC. I did try submitting a patch
3057 that would add such a new feature, but it was (rightfully)
3058 rejected on the grounds that it was creeping featurism,
3059 so hence this code. */
3060 if (warn_return_type)
3062 saved_warn_return_type = warn_return_type;
3063 warn_return_type = 0;
3064 saved_warn_return_type_count = 2;
3066 else if (saved_warn_return_type_count)
3067 saved_warn_return_type_count = 2;
3069 else
3071 warning (OPT_Wattributes, "%qE attribute only applies to functions",
3072 name);
3073 *no_add_attrs = true;
3076 return NULL_TREE;
3079 /* ??? It looks like this is PE specific? Oh well, this is what the
3080 old code did as well. */
3082 static void
3083 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3085 int len;
3086 const char * name;
3087 char * string;
3088 const char * prefix;
3090 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3092 /* Strip off any encoding in name. */
3093 name = (* targetm.strip_name_encoding) (name);
3095 /* The object is put in, for example, section .text$foo.
3096	     The linker will then ultimately place it in .text
3097 (everything from the $ on is stripped). */
3098 if (TREE_CODE (decl) == FUNCTION_DECL)
3099 prefix = ".text$";
3100 /* For compatibility with EPOC, we ignore the fact that the
3101 section might have relocs against it. */
3102 else if (decl_readonly_section (decl, 0))
3103 prefix = ".rdata$";
3104 else
3105 prefix = ".data$";
3107 len = strlen (name) + strlen (prefix);
3108 string = XALLOCAVEC (char, len + 1);
3110 sprintf (string, "%s%s", prefix, name);
3112 DECL_SECTION_NAME (decl) = build_string (len, string);
3116 mcore_naked_function_p (void)
3118 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3121 #ifdef OBJECT_FORMAT_ELF
3122 static void
3123 mcore_asm_named_section (const char *name,
3124 unsigned int flags ATTRIBUTE_UNUSED,
3125 tree decl ATTRIBUTE_UNUSED)
3127 fprintf (asm_out_file, "\t.section %s\n", name);
3129 #endif /* OBJECT_FORMAT_ELF */
3131 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3133 static void
3134 mcore_external_libcall (rtx fun)
3136 fprintf (asm_out_file, "\t.import\t");
3137 assemble_name (asm_out_file, XSTR (fun, 0));
3138 fprintf (asm_out_file, "\n");
3141 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3143 static bool
3144 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3146 const HOST_WIDE_INT size = int_size_in_bytes (type);
3147 return (size == -1 || size > 2 * UNITS_PER_WORD);
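/* For example, an aggregate of exactly 2 * UNITS_PER_WORD (8) bytes is still
   returned in registers; anything larger, or of non-constant size (-1), is
   returned in memory.  */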
3150 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3151 Output assembler code for a block containing the constant parts
3152 of a trampoline, leaving space for the variable parts.
3154 On the MCore, the trampoline looks like:
3155 lrw r1, function
3156 lrw r13, area
3157 jmp r13
3158 or r0, r0
3159 .literals */
3161 static void
3162 mcore_asm_trampoline_template (FILE *f)
3164 fprintf (f, "\t.short 0x7102\n");
3165 fprintf (f, "\t.short 0x7d02\n");
3166 fprintf (f, "\t.short 0x00cd\n");
3167 fprintf (f, "\t.short 0x1e00\n");
3168 fprintf (f, "\t.long 0\n");
3169 fprintf (f, "\t.long 0\n");
3172 /* Worker function for TARGET_TRAMPOLINE_INIT. */
3174 static void
3175 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3177 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3178 rtx mem;
3180 emit_block_move (m_tramp, assemble_trampoline_template (),
3181 GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3183 mem = adjust_address (m_tramp, SImode, 8);
3184 emit_move_insn (mem, chain_value);
3185 mem = adjust_address (m_tramp, SImode, 12);
3186 emit_move_insn (mem, fnaddr);
3189 /* Implement TARGET_LEGITIMATE_CONSTANT_P
3191 On the MCore, allow anything but a double. */
3193 static bool
3194 mcore_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3196 return GET_CODE (x) != CONST_DOUBLE;