/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001 Free Software Foundation, Inc.

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GNU CC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
21 #include "config.h"
22 #include "system.h"
23 #include "rtl.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "assert.h"
27 #include "mcore.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "flags.h"
36 #include "obstack.h"
37 #include "expr.h"
38 #include "reload.h"
39 #include "recog.h"
40 #include "function.h"
41 #include "ggc.h"
42 #include "toplev.h"
43 #include "target.h"
44 #include "target-def.h"
/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int          mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;
/* Provides the class number of the smallest class containing
   reg number.  */
int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,     /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,     /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,     /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,     /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};
struct mcore_frame
{
  int arg_size;			/* stdarg spills (bytes) */
  int reg_size;			/* non-volatile reg saves (bytes) */
  int reg_mask;			/* non-volatile reg saves */
  int local_size;		/* locals */
  int outbound_size;		/* arg overflow on calls out */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* gives us some spare space */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};
typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;
static void       output_stack_adjust          PARAMS ((int, int));
static int        calc_live_regs               PARAMS ((int *));
static int        const_ok_for_mcore           PARAMS ((int));
static int        try_constant_tricks          PARAMS ((long, int *, int *));
static const char * output_inline_const        PARAMS ((enum machine_mode, rtx *));
static void       block_move_sequence          PARAMS ((rtx, rtx, rtx, rtx, int, int, int));
static void       layout_mcore_frame           PARAMS ((struct mcore_frame *));
static cond_type  is_cond_candidate            PARAMS ((rtx));
static rtx        emit_new_cond_insn           PARAMS ((rtx, int));
static rtx        conditionalize_block         PARAMS ((rtx));
static void       conditionalize_optimization  PARAMS ((rtx));
static void       mcore_add_gc_roots           PARAMS ((void));
static rtx        handle_structs_in_regs       PARAMS ((enum machine_mode, tree, int));
static void       mcore_mark_dllexport         PARAMS ((tree));
static void       mcore_mark_dllimport         PARAMS ((tree));
static int        mcore_dllexport_p            PARAMS ((tree));
static int        mcore_dllimport_p            PARAMS ((tree));
const struct attribute_spec mcore_attribute_table[];
static tree       mcore_handle_naked_attribute PARAMS ((tree *, tree, tree, int, bool *));
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      PARAMS ((const char *,
							unsigned int));
#endif
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mcore_attribute_table

struct gcc_target targetm = TARGET_INITIALIZER;
/* Adjust the stack by SIZE bytes: DIRECTION < 0 grows the stack
   (subtracts from the stack pointer), DIRECTION > 0 shrinks it.  */
static void
output_stack_adjust (direction, size)
     int direction;
     int size;
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx (REG, SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx (REG, SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
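/* A minimal standalone sketch of the probing schedule the loop above
   produces, guarded by #if 0 so it is never compiled here.  The frame
   size and the 4096-byte increment are illustrative assumptions only
   (the real increment comes from STACK_UNITS_MAXSTEP).  */
#if 0
#include <stdio.h>

int
main (void)
{
  int size = 10000;            /* hypothetical total frame size */
  int increment = 4096;        /* assumed mcore_stack_increment */

  /* Each full step adjusts by INCREMENT and probes the new stack top
     with a volatile store, mirroring the do/while loop above.  */
  while (size > increment)
    {
      printf ("adjust sp by -%d, then probe\n", increment);
      size -= increment;
    }

  /* The residual adjustment needs no probe.  */
  printf ("adjust sp by -%d (residual, no probe)\n", size);
  return 0;
}
#endif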
/* Work out the registers which need to be saved,
   both as a mask and a count.  */
static int
calc_live_regs (count)
     int * count;
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}
/* Print the operand address in x to the stream.  */
void
mcore_print_operand_address (stream, x)
     FILE * stream;
     rtx x;
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s,%d)", reg_names[REGNO(base)],
		     INTVAL (index));
	    break;

	  default:
	    debug_rtx (x);
	    abort ();
	  }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'N'  print the log2 of one more than a mask constant (32 for -1),
        as used by bmaski
   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'x'  print a constant in hexadecimal
   'X'  print byte number for xtrbN instruction.  */
void
mcore_print_operand (stream, x, code)
     FILE * stream;
     rtx x;
     int code;
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, "%d", INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, "%d", - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  abort ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, "0x%x", INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, "%d", 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
/* What does a constant cost ?  */
int
mcore_const_costs (exp, code)
     rtx exp;
     enum rtx_code code;
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}
/* What does an AND instruction cost?  We do this because immediates may
   have been relaxed.  We want to ensure that CSE will eliminate relaxed
   immediates; otherwise we get bad code (multiple reloads of the same
   constant).  */
int
mcore_and_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
/* What does an OR cost?  See mcore_and_cost ().  */
int
mcore_ior_cost (x)
     rtx x;
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bclri.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */
int
mcore_modify_comparison (code)
     enum rtx_code code;
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
	{
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      arch_compare_op1 = GEN_INT (val + 1);
	      return 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return 0;
}
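/* A minimal standalone sketch of the rewrite above, guarded by #if 0 so
   it is never compiled here.  A signed "x <= 5" is tested as "x < 6"
   when the bumped constant is still a cheap cmplti immediate; the J
   range of 1..32 is taken from the comment in mcore_gen_compare_reg
   below, and the sample constant is an illustrative assumption.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int val = 5;                       /* original comparison constant */
  int bumped = val + 1;              /* candidate for cmplti */

  if (bumped >= 1 && bumped <= 32)   /* stand-in for CONST_OK_FOR_J */
    printf ("LE %d becomes LT %d (one cmplti, no constant load)\n",
	    val, bumped);
  return 0;
}
#endif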
/* Prepare the operands for a comparison.  */
rtx
mcore_gen_compare_reg (code)
     enum rtx_code code;
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* drop through */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* drop through */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* drop through */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31 */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
	{
	  /* Unsigned > 0 is the same as != 0, but we need
	     to invert the condition, so we want to set
	     code = EQ.  This cannot be done however, as the
	     mcore does not support such a test.  Instead we
	     cope with this case in the "bgtu" pattern itself
	     so we should never reach this point.  */
	  /* code = EQ; */
	  abort ();
	  break;
	}
      code = LEU;
      /* drop through */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* drop through */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}
/* Nonzero if X is a symbolic address (a SYMBOL_REF, a LABEL_REF, or
   either plus a constant offset).  */
int
mcore_symbolic_address_p (x)
     rtx x;
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Nonzero if X is a valid address for a function call.  */
int
mcore_call_address_operand (x, mode)
     rtx x;
     enum machine_mode mode;
{
  return register_operand (x, mode) || CONSTANT_P (x);
}
/* Functions to output assembly code for a function call.  */
char *
mcore_output_call (operands, index)
     rtx operands[];
     int index;
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  if (GET_CODE (addr) != SYMBOL_REF)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
/* Can we load a constant with a single instruction ?  */
static int
const_ok_for_mcore (value)
     int value;
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}
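/* A minimal standalone sketch of the three classes the predicate above
   accepts, paired with the instruction the output routines later in
   this file would emit for each.  Guarded by #if 0 so it is never
   compiled; the sample values are illustrative only, and the sketch
   uses unsigned arithmetic to avoid overflow on INT_MAX + 1.  */
#if 0
#include <stdio.h>

static const char *
classify (unsigned value)
{
  if (value <= 127)
    return "movi";                    /* small immediate */
  if ((value & (value - 1)) == 0)
    return "bgeni";                   /* exact power of two */
  if ((value & (value + 1)) == 0)
    return "bmaski";                  /* 2^n - 1, a low-bit mask */
  return "needs more than one instruction";
}

int
main (void)
{
  unsigned samples[] = { 100, 4096, 0x7fffffff, 1000 };
  int i;

  for (i = 0; i < 4; i++)
    printf ("%#x -> %s\n", samples[i], classify (samples[i]));
  return 0;
}
#endif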
/* Can we load a constant inline with up to 2 instructions ?  */
int
mcore_const_ok_for_inline (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */
int
mcore_const_trick_uses_not (value)
     long value;
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
static int
try_constant_tricks (value, x, y)
     long value;
     int * x;
     int * y;
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
	{
	  *x = ~value;
	  return 2;
	}

      for (i = 1; i <= 32; i++)
	{
	  if (const_ok_for_mcore (value - i))
	    {
	      *x = value - i;
	      *y = i;

	      return 3;
	    }

	  if (const_ok_for_mcore (value + i))
	    {
	      *x = value + i;
	      *y = i;

	      return 4;
	    }
	}

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
	{
	  if (const_ok_for_mcore (i - value))
	    {
	      *x = i - value;
	      *y = i;

	      return 5;
	    }

	  if (const_ok_for_mcore (value & ~bit))
	    {
	      *y = bit;
	      *x = value & ~bit;

	      return 6;
	    }

	  if (const_ok_for_mcore (value | bit))
	    {
	      *y = ~bit;
	      *x = value | bit;

	      return 7;
	    }

	  bit >>= 1;
	}

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
	{
	  int c;

	  /* MCore has rotate left.  */
	  c = rot << 31;
	  rot >>= 1;
	  rot &= 0x7FFFFFFF;
	  rot |= c;   /* Simulate rotate.  */

	  if (const_ok_for_mcore (rot))
	    {
	      *y = i;
	      *x = rot;

	      return 8;
	    }

	  if (shf & 1)
	    shf = 0;	/* Can't use logical shift, low order bit is one.  */

	  shf >>= 1;

	  if (shf != 0 && const_ok_for_mcore (shf))
	    {
	      *y = i;
	      *x = shf;

	      return 9;
	    }
	}

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
	{
	  *x = value / 3;

	  return 10;
	}

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
	{
	  *x = value / 5;

	  return 11;
	}
    }

  return 0;
}
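/* A minimal standalone sketch of trick 8 in isolation, guarded by #if 0
   so it is never compiled here.  Rotating the target value right one
   bit at a time and testing each intermediate for cheap loadability
   mirrors emitting a one-instruction load followed by "rotli %0,I".
   The sample value is an illustrative assumption; in the full routine
   an earlier trick would usually claim it first.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned rot = 0x80000001u;   /* not loadable in one instruction */
  int i;

  for (i = 1; i < 31; i++)
    {
      unsigned c = rot << 31;   /* the bit that falls off the right... */

      rot = (rot >> 1) & 0x7FFFFFFF;
      rot |= c;                 /* ...re-enters on the left */

      /* Stand-in for const_ok_for_mcore ().  */
      if (rot <= 127 || (rot & (rot - 1)) == 0 || (rot & (rot + 1)) == 0)
	{
	  printf ("load %#x, then rotli by %d\n", rot, i);
	  break;
	}
    }

  return 0;
}
#endif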
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */
int
mcore_is_dead (first, reg)
     rtx first;
     rtx reg;
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE(insn) == CALL_INSN)
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}
/* Count the number of ones in mask.  */
int
mcore_num_ones (mask)
     int mask;
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
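/* A minimal standalone check of the comp.compilers bit-count trick
   above against a naive loop, guarded by #if 0 so it is never compiled
   here.  Each step adds neighbouring fields in parallel: 16 two-bit
   sums, then 8 four-bit sums, and so on.  The test value is an
   illustrative assumption.  */
#if 0
#include <stdio.h>

static int
tree_count (unsigned mask)
{
  mask = ((mask >> 1) & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);
  return (mask + (mask >> 16)) & 0xff;
}

int
main (void)
{
  unsigned v = 0xdeadbeef;
  unsigned t;
  int naive = 0;

  for (t = v; t != 0; t >>= 1)
    naive += t & 1;

  printf ("tree=%d naive=%d\n", tree_count (v), naive);
  return 0;
}
#endif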
/* Count the number of zeros in mask.  */
int
mcore_num_zeros (mask)
     int mask;
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */
int
mcore_byte_offset (mask)
     unsigned int mask;
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */
int
mcore_halfword_offset (mask)
     unsigned int mask;
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}
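/* A minimal standalone sketch of the byte numbering used above, guarded
   by #if 0 so it is never compiled here: each recognised mask keeps
   every byte except the one being replaced, counted from the most
   significant byte downwards.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned masks[4] = { 0x00ffffff, 0xff00ffff, 0xffff00ff, 0xffffff00 };
  int byte;

  for (byte = 0; byte < 4; byte++)
    printf ("byte %d is cleared by mask %#010x\n", byte, masks[byte]);
  return 0;
}
#endif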
/* Output a series of bseti's corresponding to mask.  */
const char *
mcore_output_bseti (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */
const char *
mcore_output_bclri (dst, mask)
     rtx dst;
     int mask;
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */
const char *
mcore_output_cmov (operands, cmp_t, test)
     rtx operands[];
     int cmp_t;
     const char * test;
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }

  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */
const char *
mcore_output_andn (insn, operands)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT(x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
/* Output an inline constant.  */
static const char *
output_inline_const (mode, operands)
     enum machine_mode mode;
     rtx operands[];
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately:  Large inlinable constants
	 never get turned into lrw's.  Our caller uses try_constant_tricks
	 to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
/* Output a move of a word or less value.  */
const char *
mcore_output_move (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    return "ldw\t%0,%1";                /* r-m */
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  int x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    return "stw\t%1,%0";

  abort ();
}
/* Outputs a constant inline -- regardless of the cost.
   Useful for things where we've gotten into trouble and think we'd
   be doing an lrw into r15 (forbidden).  This lets us get out of
   that pickle even after register allocation.  */
const char *
mcore_output_inline_const_forced (insn, operands, mode)
     rtx insn ATTRIBUTE_UNUSED;
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  unsigned long value = INTVAL (operands[1]);
  unsigned long ovalue = value;
  struct piece
  {
    int low;
    int shift;
  }
  part[6];
  int i;

  if (mcore_const_ok_for_inline (value))
    return output_inline_const (SImode, operands);

  for (i = 0; (unsigned) i < ARRAY_SIZE (part); i++)
    {
      part[i].shift = 0;
      part[i].low = (value & 0x1F);
      value -= part[i].low;

      if (mcore_const_ok_for_inline (value))
	break;
      else
	{
	  value >>= 5;
	  part[i].shift = 5;

	  while ((value & 1) == 0)
	    {
	      part[i].shift++;
	      value >>= 1;
	    }

	  if (mcore_const_ok_for_inline (value))
	    break;
	}
    }

  /* 5 bits per iteration, a maximum of 5 times == 25 bits and leaves
     7 bits left in the constant -- which we know we can cover with
     a movi.  The final value can't be zero otherwise we'd have stopped
     in the previous iteration.  */
  if (value == 0 || ! mcore_const_ok_for_inline (value))
    abort ();

  /* Now, work our way backwards emitting the constant.  */

  /* Emit the value that remains -- it will be non-zero.  */
  operands[1] = GEN_INT (value);
  output_asm_insn (output_inline_const (SImode, operands), operands);

  while (i >= 0)
    {
      /* Shift anything we've already loaded.  */
      if (part[i].shift)
	{
	  operands[2] = GEN_INT (part[i].shift);
	  output_asm_insn ("lsli %0,%2", operands);
	  value <<= part[i].shift;
	}

      /* Add anything we need into the low 5 bits.  */
      if (part[i].low != 0)
	{
	  operands[2] = GEN_INT (part[i].low);
	  output_asm_insn ("addi %0,%2", operands);
	  value += part[i].low;
	}

      i--;
    }

  if (value != ovalue)		/* sanity */
    abort ();

  /* We've output all the instructions.  */
  return "";
}
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
const char *
mcore_output_movedouble (operands, mode)
     rtx operands[];
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov	%R0,%R1\n\tmov	%0,%1";
	  else
	    return "mov	%0,%1\n\tmov	%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		abort ();
	    }
	  else
	    abort ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski	%0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski	%R0,32";
	      else
		return "movi	%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%R0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski	%R0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%R0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski	%0,32";
	      else
		return "movi	%0,0";
	    }
	}
      else
	abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}
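/* A minimal standalone sketch of why the register/register case above
   checks "srcreg + 1 == dstreg", guarded by #if 0 so it is never
   compiled here.  Copying the pair in the wrong order clobbers the
   second source word before it has been read; the register numbers are
   illustrative assumptions.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int r[4] = { 0, 11, 22, 0 };   /* r1:r2 holds the source pair */
  int src = 1, dst = 2;          /* dst overlaps the high source reg */

  if (src + 1 == dst)
    {
      r[dst + 1] = r[src + 1];   /* mov %R0,%R1 first ...  */
      r[dst]     = r[src];       /* ... then mov %0,%1 */
    }
  else
    {
      r[dst]     = r[src];
      r[dst + 1] = r[src + 1];
    }

  printf ("dst pair = %d,%d (expected 11,22)\n", r[dst], r[dst + 1]);
  return 0;
}
#endif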
/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */
int
mcore_general_movsrc_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
    return 1;

  return general_operand (op, mode);
}

/* Nonzero if OP can be destination of a simple move operation.  */
int
mcore_general_movdst_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
    return 0;

  return general_operand (op, mode);
}

/* Nonzero if OP is a normal arithmetic register.  */
int
mcore_arith_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

  return 1;
}

/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */
int
mcore_reload_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (mcore_arith_reg_operand (op, mode))
    return 1;

  if (! reload_in_progress)
    return 0;

  return GET_CODE (op) == MEM;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */
int
mcore_arith_J_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */
int
mcore_arith_K_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */
int
mcore_arith_K_operand_not_0 (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)
    return 1;

  return 0;
}

/* Nonzero if OP is a register, a K immediate, or the complement of an
   M immediate.  */
int
mcore_arith_K_S_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
	return 1;
    }

  return 0;
}

/* Nonzero if OP is a constant whose complement is an M immediate.  */
int
mcore_arith_S_operand (op)
     rtx op;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a register or an M immediate.  */
int
mcore_arith_M_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for loading.  */
int
mcore_arith_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a register or any constant integer.  */
int
mcore_arith_any_imm_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */
int
mcore_arith_O_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */
int
mcore_literal_K_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */
int
mcore_addsub_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants from
	 being returned as valid source operands for an add/sub insn.  While
	 large constants may not directly be used in an add/sub, they may if
	 first loaded into a register.  Thus, this predicate should indicate
	 that they are valid, and the constraint in mcore.md should control
	 whether an additional load to register is needed.  (see mcore.md,
	 addsi). -- DAC 4/2/1998  */
      /*
	if (CONST_OK_FOR_J(INTVAL(op)) || CONST_OK_FOR_L(INTVAL(op)))
	  return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */
int
mcore_compare_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}
/* Expand insert bit field.  BRC  */
int
mcore_expand_insv (operands)
     rtx operands[];
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL(operands[3])&1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bitfield placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT &&
      INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
			  gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
			  gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
			gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}
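/* A minimal standalone sketch of the clear/shift/or sequence the
   expander above emits, applied to plain integers and guarded by #if 0
   so it is never compiled here.  The field position, width, and values
   are illustrative assumptions.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned dst = 0xffffffff;
  unsigned src = 0x5;          /* value to insert */
  int width = 4, posn = 8;     /* field occupies bits 8..11 */

  unsigned clear = ~(((1u << width) - 1) << posn);

  dst &= clear;                        /* clear the field */
  src &= (1u << width) - 1;            /* truncate source to WIDTH bits */
  dst |= src << posn;                  /* drop it into place */

  printf ("result = %#x (field reads %#x)\n",
	  dst, (dst >> posn) & ((1u << width) - 1));
  return 0;
}
#endif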
/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */
int
mcore_load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt))    != (unsigned) (dest_regno + i)
	  || GET_CODE (SET_SRC (elt))  != MEM
	  || GET_MODE (SET_SRC (elt))  != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}

/* Similar, but tests for store multiple.  */
int
mcore_store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt))  != REG
	  || GET_MODE (SET_SRC (elt))  != SImode
	  || REGNO (SET_SRC (elt))     != (unsigned) (src_regno + i)
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */
static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};

static void
block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
     rtx dest, dst_mem;
     rtx src, src_mem;
     int size;
     int align;
     int offset;
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;

  mode[0] = mode_from_align[align];
  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
	{
	  /* Change modes as the sequence tails off.  */
	  if (size < amount[next])
	    {
	      amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	      mode[next] = mode_from_align[amount[next]];
	      temp[next] = gen_reg_rtx (mode[next]);
	    }

	  size -= amount[next];
	  srcp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
#else
			  mode[next],
#endif
			  gen_rtx (PLUS, Pmode, src,
				   gen_rtx (CONST_INT, SImode, offset_ld)));

	  RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
	  MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
	  MEM_IN_STRUCT_P (srcp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
	  offset_ld += amount[next];
	  active[next] = TRUE;
	}

      if (active[phase])
	{
	  active[phase] = FALSE;

	  dstp = gen_rtx (MEM,
#if 0
			  MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
#else
			  mode[phase],
#endif
			  gen_rtx (PLUS, Pmode, dest,
				   gen_rtx (CONST_INT, SImode, offset_st)));

	  RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
	  MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
	  MEM_IN_STRUCT_P (dstp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
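/* A minimal standalone sketch of the interleaved ld/st schedule the
   two-phase loop above produces, modelled on plain memory and guarded
   by #if 0 so it is never compiled here.  Two loads stay in flight so
   each store pairs with the next load.  The tail-off mode switching is
   omitted: the size is assumed to be a multiple of the alignment, and
   the buffer contents are illustrative.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char src[12] = "hello world";
  char dst[12];
  int size = 12, align = 4;
  int off_ld = 0, off_st = 0;
  unsigned temp[2];
  int active[2] = { 0, 0 };
  int phase = 0, next;

  do
    {
      next = phase;
      phase = !phase;

      if (size > 0)
	{
	  memcpy (&temp[next], src + off_ld, align);   /* ldw */
	  printf ("ldw  offset %d\n", off_ld);
	  off_ld += align;
	  size -= align;
	  active[next] = 1;
	}

      if (active[phase])
	{
	  active[phase] = 0;
	  memcpy (dst + off_st, &temp[phase], align);  /* stw */
	  printf ("stw  offset %d\n", off_st);
	  off_st += align;
	}
    }
  while (active[next]);

  printf ("copied: %.12s\n", dst);
  return 0;
}
#endif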
void
mcore_expand_block_move (dst_mem, src_mem, operands)
     rtx dst_mem;
     rtx src_mem;
     rtx * operands;
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
	return;
      if (align > 4)
	align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
	 we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
			  || ((bytes & 01) == 0 && bytes <= 8*4)
			  || ((bytes & 03) == 0 && bytes <= 16*4)))
	  || (align == 2 && bytes <= 4*2)
	  || (align == 1 && bytes <= 4*1))
	{
	  block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
			       bytes, align, 0);
	  return;
	}
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
		     operands[0], Pmode, operands[1], Pmode, operands[2],
		     SImode);
}
/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */
static void
layout_mcore_frame (infp)
     struct mcore_frame * infp;
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
	{
	  step += outbounds;
	  infp->reg_offset += outbounds;
	  outbounds = 0;
	}

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
	step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
	{
	  all += outbounds;
	  outbounds = 0;
	}

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
	{
	  step += outbounds;
	  outbounds = 0;
	}

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
	use some of localsize so that regarg is aligned and then
	save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten, plus a few consistency checks.  */
 finish:
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    {
      if (infp->growth[i] % STACK_BYTES)
	{
	  fprintf (stderr,"stack growth of %d is not %d aligned\n",
		   infp->growth[i], STACK_BYTES);
	  abort ();
	}
    }
}
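/* A minimal standalone sketch of the rounding arithmetic used
   throughout the layout code above, guarded by #if 0 so it is never
   compiled here.  STACK_BYTES is assumed to be 8 (a 64-bit
   STACK_BOUNDARY, per the 8-byte alignment note in
   mcore_expand_prolog); ADDI_REACH is 32 as defined earlier.  The
   frame sizes are illustrative assumptions.  */
#if 0
#include <stdio.h>

#define STACK_BYTES 8
#define ADDI_REACH 32

int
main (void)
{
  int arg_size = 0, reg_size = 12, local_size = 10;
  int localregarg = local_size + reg_size + arg_size;   /* 22 */
  int pad_reg = 0;

  /* The small-frame case: buy everything in one subi, padded up to a
     whole number of stack units.  */
  if (localregarg <= ADDI_REACH)
    {
      if (localregarg % STACK_BYTES)
	pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      printf ("single subi of %d (pad %d), regs saved at offset %d\n",
	      localregarg + pad_reg, pad_reg, local_size);
    }

  return 0;
}
#endif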
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */
int
mcore_initial_elimination_offset (from, to)
     int from;
     int to;
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  abort ();

  return 0;
}
2292 /* Keep track of some information about varargs for the prolog. */
2294 void
2295 mcore_setup_incoming_varargs (args_so_far, mode, type, ptr_pretend_size)
2296 CUMULATIVE_ARGS args_so_far;
2297 enum machine_mode mode;
2298 tree type;
2299 int * ptr_pretend_size ATTRIBUTE_UNUSED;
2301 current_function_anonymous_args = 1;
2303 /* We need to know how many argument registers are used before
2304 the varargs start, so that we can push the remaining argument
2305 registers during the prologue. */
2306 number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);
2308   /* There is a bug somewhere in the arg handling code.
2309      Until I can find it, this workaround always pushes the
2310      last named argument onto the stack.  */
2311 number_of_regs_before_varargs = args_so_far;
2313 /* The last named argument may be split between argument registers
2314 and the stack. Allow for this here. */
2315 if (number_of_regs_before_varargs > NPARM_REGS)
2316 number_of_regs_before_varargs = NPARM_REGS;
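
  /* Hypothetical example: for "int f (int a, ...)" with args_so_far == 1
     when the ellipsis is reached, the workaround above leaves
     number_of_regs_before_varargs == 1, so the prologue spills the
     remaining argument registers -- including the one holding the last
     named argument, as the comment above explains.  */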
2319 void
2320 mcore_expand_prolog ()
2322 struct mcore_frame fi;
2323 int space_allocated = 0;
2324 int growth = 0;
2326 /* Find out what we're doing. */
2327 layout_mcore_frame (&fi);
2329 space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
2330 fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
2332 if (TARGET_CG_DATA)
2334 /* Emit a symbol for this routine's frame size. */
2335 rtx x;
2336 int len;
2338 x = DECL_RTL (current_function_decl);
2340 if (GET_CODE (x) != MEM)
2341 abort ();
2343 x = XEXP (x, 0);
2345 if (GET_CODE (x) != SYMBOL_REF)
2346 abort ();
2348 if (mcore_current_function_name)
2349 free (mcore_current_function_name);
2351 len = strlen (XSTR (x, 0)) + 1;
2352 mcore_current_function_name = (char *) xmalloc (len);
2354 memcpy (mcore_current_function_name, XSTR (x, 0), len);
2356 ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
2358 if (current_function_calls_alloca)
2359 ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
2361 /* 970425: RBE:
2362          We're looking at how the 8-byte alignment affects stack layout
2363 and where we had to pad things. This emits information we can
2364 extract which tells us about frame sizes and the like. */
2365 fprintf (asm_out_file,
2366 "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
2367 mcore_current_function_name,
2368 fi.arg_size, fi.reg_size, fi.reg_mask,
2369 fi.local_size, fi.outbound_size,
2370 frame_pointer_needed);
2373 if (mcore_naked_function_p ())
2374 return;
2376 /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
2377 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2379 /* If we have a parameter passed partially in regs and partially in memory,
2380 the registers will have been stored to memory already in function.c. So
2381 we only need to do something here for varargs functions. */
2382 if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
2384 int offset;
2385 int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2386 int remaining = fi.arg_size;
2388 for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2390 emit_insn (gen_movsi
2391 (gen_rtx (MEM, SImode,
2392 plus_constant (stack_pointer_rtx, offset)),
2393 gen_rtx (REG, SImode, rn)));
2397 /* Do we need another stack adjustment before we do the register saves? */
2398 if (growth < fi.reg_growth)
2399 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2401 if (fi.reg_size != 0)
2403 int i;
2404 int offs = fi.reg_offset;
2406 for (i = 15; i >= 0; i--)
2408 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2410 int first_reg = 15;
2412 while (fi.reg_mask & (1 << first_reg))
2413 first_reg--;
2414 first_reg++;
2416 emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
2417 gen_rtx (REG, SImode, first_reg),
2418 GEN_INT (16 - first_reg)));
2420 i -= (15 - first_reg);
2421 offs += (16 - first_reg) * 4;
2423 else if (fi.reg_mask & (1 << i))
2425 emit_insn (gen_movsi
2426 (gen_rtx (MEM, SImode,
2427 plus_constant (stack_pointer_rtx, offs)),
2428 gen_rtx (REG, SImode, i)));
2429 offs += 4;
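
      /* Illustrative case (not from the original source): with
	 reg_offset == 0 and reg_mask == 0xf000 (r12 through r15 live),
	 the first iteration above takes the store-multiple path:
	 first_reg is computed as 12, one store_multiple of
	 16 - 12 = 4 registers is emitted at (sp), the loop index skips
	 past the stored registers, and offs advances to 16.  Registers
	 not covered by such a run are stored with individual movsi
	 insns at increasing offsets.  */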
2434 /* Figure the locals + outbounds. */
2435 if (frame_pointer_needed)
2437 /* If we haven't already purchased to 'fp'. */
2438 if (growth < fi.local_growth)
2439 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2441 emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2443 /* ... and then go any remaining distance for outbounds, etc. */
2444 if (fi.growth[growth])
2445 output_stack_adjust (-1, fi.growth[growth++]);
2447 else
2449 if (growth < fi.local_growth)
2450 output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
2451 if (fi.growth[growth])
2452 output_stack_adjust (-1, fi.growth[growth++]);
2456 void
2457 mcore_expand_epilog ()
2459 struct mcore_frame fi;
2460 int i;
2461 int offs;
2462   int growth = MAX_STACK_GROWS - 1;
2465 /* Find out what we're doing. */
2466   layout_mcore_frame (&fi);
2468 if (mcore_naked_function_p ())
2469 return;
2471 /* If we had a frame pointer, restore the sp from that. */
2472 if (frame_pointer_needed)
2474 emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2475 growth = fi.local_growth - 1;
2477 else
2479 /* XXX: while loop should accumulate and do a single sell. */
2480 while (growth >= fi.local_growth)
2482 if (fi.growth[growth] != 0)
2483 output_stack_adjust (1, fi.growth[growth]);
2484 growth--;
2488 /* Make sure we've shrunk stack back to the point where the registers
2489 were laid down. This is typically 0/1 iterations. Then pull the
2490 register save information back off the stack. */
2491 while (growth >= fi.reg_growth)
2492     output_stack_adjust (1, fi.growth[growth--]);
2494 offs = fi.reg_offset;
2496 for (i = 15; i >= 0; i--)
2498 if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2500 int first_reg;
2502 /* Find the starting register. */
2503 first_reg = 15;
2505 while (fi.reg_mask & (1 << first_reg))
2506 first_reg--;
2508 first_reg++;
2510 emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
2511 gen_rtx (MEM, SImode, stack_pointer_rtx),
2512 GEN_INT (16 - first_reg)));
2514 i -= (15 - first_reg);
2515 offs += (16 - first_reg) * 4;
2517 else if (fi.reg_mask & (1 << i))
2519 emit_insn (gen_movsi
2520 (gen_rtx (REG, SImode, i),
2521 gen_rtx (MEM, SImode,
2522 plus_constant (stack_pointer_rtx, offs))));
2523 offs += 4;
2527 /* Give back anything else. */
2528   /* XXX: Should accumulate the total and then give it back.  */
2529 while (growth >= 0)
2530     output_stack_adjust (1, fi.growth[growth--]);
2533 /* This code is borrowed from the SH port. */
2535 /* The MCORE cannot load a large constant into a register; constants have to
2536    come from a pc relative load.  The reference of a pc relative load
2537    instruction must be less than 1k in front of the instruction.  This
2538 means that we often have to dump a constant inside a function, and
2539 generate code to branch around it.
2541 It is important to minimize this, since the branches will slow things
2542 down and make things bigger.
2544 Worst case code looks like:
2546 lrw L1,r0
2547 br L2
2548 align
2549 L1: .long value
2553 lrw L3,r0
2554 br L4
2555 align
2556 L3: .long value
2560 We fix this by performing a scan before scheduling, which notices which
2561 instructions need to have their operands fetched from the constant table
2562 and builds the table.
2564 The algorithm is:
2566 scan, find an instruction which needs a pcrel move. Look forward, find the
2567 last barrier which is within MAX_COUNT bytes of the requirement.
2568 If there isn't one, make one. Process all the instructions between
2569 the find and the barrier.
2571 In the above example, we can tell that L3 is within 1k of L1, so
2572 the first move can be shrunk from the 2 insn+constant sequence into
2573 just 1 insn, and the constant moved to L3 to make:
2575 lrw L1,r0
2577 lrw L3,r0
2578 bra L4
2579 align
2580 L3:.long value
2581 L4:.long value
2583 Then the second move becomes the target for the shortening process. */
2585 typedef struct
2587 rtx value; /* Value in table. */
2588 rtx label; /* Label of value. */
2589 } pool_node;
2591 /* The maximum number of constants that can fit into one pool, since
2592 the pc relative range is 0...1020 bytes and constants are at least 4
2593    bytes long.  We subtract 4 from the range to allow for the case where
2594 we need to add a branch/align before the constant pool. */
2596 #define MAX_COUNT 1016
2597 #define MAX_POOL_SIZE (MAX_COUNT/4)
2598 static pool_node pool_vector[MAX_POOL_SIZE];
2599 static int pool_size;
2601 /* Dump out any constants accumulated in the final pass. These
2602 will only be labels. */
2604 const char *
2605 mcore_output_jump_label_table ()
2607 int i;
2609 if (pool_size)
2611 fprintf (asm_out_file, "\t.align 2\n");
2613 for (i = 0; i < pool_size; i++)
2615 pool_node * p = pool_vector + i;
2617 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2619 output_asm_insn (".long %0", &p->value);
2622 pool_size = 0;
2625 return "";
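
/* Sketch of the emitted table (assumed label numbering): for a pool
   containing two label constants the output resembles

       .align 2
     L12:
       .long L7
     L13:
       .long L9

   after which pool_size is reset ready for the next function.  */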
2628 /* Check whether insn is a candidate for a conditional. */
2630 static cond_type
2631 is_cond_candidate (insn)
2632 rtx insn;
2634 /* The only things we conditionalize are those that can be directly
2635 changed into a conditional. Only bother with SImode items. If
2636 we wanted to be a little more aggressive, we could also do other
2637 modes such as DImode with reg-reg move or load 0. */
2638 if (GET_CODE (insn) == INSN)
2640 rtx pat = PATTERN (insn);
2641 rtx src, dst;
2643 if (GET_CODE (pat) != SET)
2644 return COND_NO;
2646 dst = XEXP (pat, 0);
2648 if ((GET_CODE (dst) != REG &&
2649 GET_CODE (dst) != SUBREG) ||
2650 GET_MODE (dst) != SImode)
2651 return COND_NO;
2653 src = XEXP (pat, 1);
2655 if ((GET_CODE (src) == REG ||
2656 (GET_CODE (src) == SUBREG &&
2657 GET_CODE (SUBREG_REG (src)) == REG)) &&
2658 GET_MODE (src) == SImode)
2659 return COND_MOV_INSN;
2660 else if (GET_CODE (src) == CONST_INT &&
2661 INTVAL (src) == 0)
2662 return COND_CLR_INSN;
2663 else if (GET_CODE (src) == PLUS &&
2664 (GET_CODE (XEXP (src, 0)) == REG ||
2665 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2666 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2667 GET_MODE (XEXP (src, 0)) == SImode &&
2668 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2669 INTVAL (XEXP (src, 1)) == 1)
2670 return COND_INC_INSN;
2671 else if (((GET_CODE (src) == MINUS &&
2672 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2673 INTVAL( XEXP (src, 1)) == 1) ||
2674 (GET_CODE (src) == PLUS &&
2675 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2676 INTVAL (XEXP (src, 1)) == -1)) &&
2677 (GET_CODE (XEXP (src, 0)) == REG ||
2678 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2679 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2680 GET_MODE (XEXP (src, 0)) == SImode)
2681 return COND_DEC_INSN;
2683       /* Some insns that we don't bother with:
2684 (set (rx:DI) (ry:DI))
2685 (set (rx:DI) (const_int 0))
2689 else if (GET_CODE (insn) == JUMP_INSN &&
2690 GET_CODE (PATTERN (insn)) == SET &&
2691 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2692 return COND_BRANCH_INSN;
2694 return COND_NO;
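
/* Classification examples (illustrative RTL, not from the source):

     (set (reg:SI 2) (reg:SI 3))                           -> COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                        -> COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   -> COND_INC_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int -1)))  -> COND_DEC_INSN

   Anything else -- wider modes, memory operands, other arithmetic --
   falls through to COND_NO.  */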
2697 /* Emit a conditional version of insn and replace the old insn with the
2698 new one. Return the new insn if emitted. */
2700 static rtx
2701 emit_new_cond_insn (insn, cond)
2702 rtx insn;
2703 int cond;
2705 rtx c_insn = 0;
2706 rtx pat, dst, src;
2707 cond_type num;
2709 if ((num = is_cond_candidate (insn)) == COND_NO)
2710 return NULL;
2712 pat = PATTERN (insn);
2714 if (GET_CODE (insn) == INSN)
2716 dst = SET_DEST (pat);
2717 src = SET_SRC (pat);
2719 else
2721 dst = JUMP_LABEL (insn);
2722 src = NULL_RTX;
2725 switch (num)
2727 case COND_MOV_INSN:
2728 case COND_CLR_INSN:
2729 if (cond)
2730 c_insn = gen_movt0 (dst, src, dst);
2731 else
2732 c_insn = gen_movt0 (dst, dst, src);
2733 break;
2735 case COND_INC_INSN:
2736 if (cond)
2737 c_insn = gen_incscc (dst, dst);
2738 else
2739 c_insn = gen_incscc_false (dst, dst);
2740 break;
2742 case COND_DEC_INSN:
2743 if (cond)
2744 c_insn = gen_decscc (dst, dst);
2745 else
2746 c_insn = gen_decscc_false (dst, dst);
2747 break;
2749 case COND_BRANCH_INSN:
2750 if (cond)
2751 c_insn = gen_branch_true (dst);
2752 else
2753 c_insn = gen_branch_false (dst);
2754 break;
2756 default:
2757 return NULL;
2760 /* Only copy the notes if they exist. */
2761 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2763 /* We really don't need to bother with the notes and links at this
2764 point, but go ahead and save the notes. This will help is_dead()
2765 when applying peepholes (links don't matter since they are not
2766 used any more beyond this point for the mcore). */
2767 REG_NOTES (c_insn) = REG_NOTES (insn);
2770 if (num == COND_BRANCH_INSN)
2772 /* For jumps, we need to be a little bit careful and emit the new jump
2773 before the old one and to update the use count for the target label.
2774 This way, the barrier following the old (uncond) jump will get
2775 deleted, but the label won't. */
2776 c_insn = emit_jump_insn_before (c_insn, insn);
2778 ++ LABEL_NUSES (dst);
2780 JUMP_LABEL (c_insn) = dst;
2782 else
2783 c_insn = emit_insn_after (c_insn, insn);
2785 delete_insn (insn);
2787 return c_insn;
2790 /* Attempt to change a basic block into a series of conditional insns. This
2791 works by taking the branch at the end of the 1st block and scanning for the
2792 end of the 2nd block. If all instructions in the 2nd block have cond.
2793 versions and the label at the start of block 3 is the same as the target
2794 from the branch at block 1, then conditionalize all insn in block 2 using
2795 the inverse condition of the branch at block 1. (Note I'm bending the
2796 definition of basic block here.)
2798 e.g., change:
2800 bt L2 <-- end of block 1 (delete)
2801 mov r7,r8
2802 addu r7,1
2803 br L3 <-- end of block 2
2805 L2: ... <-- start of block 3 (NUSES==1)
2806 L3: ...
   to:

2810        movf r7,r8
2811 incf r7
2812 bf L3
2814 L3: ...
2816 we can delete the L2 label if NUSES==1 and re-apply the optimization
2817 starting at the last instruction of block 2. This may allow an entire
2818 if-then-else statement to be conditionalized. BRC */
2819 static rtx
2820 conditionalize_block (first)
2821 rtx first;
2823 rtx insn;
2824 rtx br_pat;
2825 rtx end_blk_1_br = 0;
2826 rtx end_blk_2_insn = 0;
2827 rtx start_blk_3_lab = 0;
2828 int cond;
2829 int br_lab_num;
2830 int blk_size = 0;
2833 /* Check that the first insn is a candidate conditional jump. This is
2834 the one that we'll eliminate. If not, advance to the next insn to
2835 try. */
2836 if (GET_CODE (first) != JUMP_INSN ||
2837 GET_CODE (PATTERN (first)) != SET ||
2838 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2839 return NEXT_INSN (first);
2841 /* Extract some information we need. */
2842 end_blk_1_br = first;
2843 br_pat = PATTERN (end_blk_1_br);
2845 /* Complement the condition since we use the reverse cond. for the insns. */
2846 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2848 /* Determine what kind of branch we have. */
2849 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2851 /* A normal branch, so extract label out of first arm. */
2852 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2854 else
2856 /* An inverse branch, so extract the label out of the 2nd arm
2857 and complement the condition. */
2858 cond = (cond == 0);
2859 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2862 /* Scan forward for the start of block 2: it must start with a
2863 label and that label must be the same as the branch target
2864 label from block 1. We don't care about whether block 2 actually
2865 ends with a branch or a label (an uncond. branch is
2866 conditionalizable). */
2867 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2869 enum rtx_code code;
2871 code = GET_CODE (insn);
2873 /* Look for the label at the start of block 3. */
2874 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2875 break;
2877 /* Skip barriers, notes, and conditionalizable insns. If the
2878 insn is not conditionalizable or makes this optimization fail,
2879 just return the next insn so we can start over from that point. */
2880 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2881 return NEXT_INSN (insn);
2883 /* Remember the last real insn before the label (ie end of block 2). */
2884 if (code == JUMP_INSN || code == INSN)
2886 blk_size ++;
2887 end_blk_2_insn = insn;
2891 if (!insn)
2892 return insn;
2894 /* It is possible for this optimization to slow performance if the blocks
2895 are long. This really depends upon whether the branch is likely taken
2896 or not. If the branch is taken, we slow performance in many cases. But,
2897 if the branch is not taken, we always help performance (for a single
2898 block, but for a double block (i.e. when the optimization is re-applied)
2899 this is not true since the 'right thing' depends on the overall length of
2900 the collapsed block). As a compromise, don't apply this optimization on
2901 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2902      The best threshold depends on the latencies of the instructions (i.e.,
2903 the branch penalty). */
2904 if (optimize > 1 && blk_size > 2)
2905 return insn;
2907 /* At this point, we've found the start of block 3 and we know that
2908 it is the destination of the branch from block 1. Also, all
2909 instructions in the block 2 are conditionalizable. So, apply the
2910 conditionalization and delete the branch. */
2911 start_blk_3_lab = insn;
2913 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2914 insn = NEXT_INSN (insn))
2916 rtx newinsn;
2918 if (INSN_DELETED_P (insn))
2919 continue;
2921 /* Try to form a conditional variant of the instruction and emit it. */
2922 if ((newinsn = emit_new_cond_insn (insn, cond)))
2924 if (end_blk_2_insn == insn)
2925 end_blk_2_insn = newinsn;
2927 insn = newinsn;
2931 /* Note whether we will delete the label starting blk 3 when the jump
2932 gets deleted. If so, we want to re-apply this optimization at the
2933 last real instruction right before the label. */
2934 if (LABEL_NUSES (start_blk_3_lab) == 1)
2936 start_blk_3_lab = 0;
2939   /* ??? We probably should redistribute the death notes for this insn, esp.
2940 the death of cc, but it doesn't really matter this late in the game.
2941 The peepholes all use is_dead() which will find the correct death
2942 regardless of whether there is a note. */
2943 delete_insn (end_blk_1_br);
2945 if (! start_blk_3_lab)
2946 return end_blk_2_insn;
2948 /* Return the insn right after the label at the start of block 3. */
2949 return NEXT_INSN (start_blk_3_lab);
2952 /* Apply the conditionalization of blocks optimization. This is the
2953 outer loop that traverses through the insns scanning for a branch
2954 that signifies an opportunity to apply the optimization. Note that
2955 this optimization is applied late. If we could apply it earlier,
2956 say before cse 2, it may expose more optimization opportunities.
2957 but, the pay back probably isn't really worth the effort (we'd have
2958 to update all reg/flow/notes/links/etc to make it work - and stick it
2959 in before cse 2). */
2961 static void
2962 conditionalize_optimization (first)
2963 rtx first;
2965 rtx insn;
2967 for (insn = first; insn; insn = conditionalize_block (insn))
2968 continue;
2971 static int saved_warn_return_type = -1;
2972 static int saved_warn_return_type_count = 0;
2974 /* This function is called from toplev.c before reorg. */
2976 void
2977 mcore_dependent_reorg (first)
2978 rtx first;
2980 /* Reset this variable. */
2981 current_function_anonymous_args = 0;
2983 /* Restore the warn_return_type if it has been altered. */
2984 if (saved_warn_return_type != -1)
2986 /* Only restore the value if we have reached another function.
2987          The test of warn_return_type occurs in finish_function () in
2988 c-decl.c a long time after the code for the function is generated,
2989 so we need a counter to tell us when we have finished parsing that
2990 function and can restore the flag. */
2991 if (--saved_warn_return_type_count == 0)
2993 warn_return_type = saved_warn_return_type;
2994 saved_warn_return_type = -1;
2998 if (optimize == 0)
2999 return;
3001 /* Conditionalize blocks where we can. */
3002 conditionalize_optimization (first);
3004 /* Literal pool generation is now pushed off until the assembler. */
3008 /* Return the reg_class to use when reloading the rtx X into the class
3009 CLASS. */
3011 /* If the input is (PLUS REG CONSTANT) representing a stack slot address,
3012    then we want to restrict the class to LRW_REGS since that ensures that
3013    we will be able to safely load the constant.
3015 If the input is a constant that should be loaded with mvir1, then use
3016 ONLYR1_REGS.
3018 ??? We don't handle the case where we have (PLUS REG CONSTANT) and
3019 the constant should be loaded with mvir1, because that can lead to cases
3020 where an instruction needs two ONLYR1_REGS reloads. */
3021 enum reg_class
3022 mcore_reload_class (x, class)
3023 rtx x;
3024 enum reg_class class;
3026 enum reg_class new_class;
3028 if (class == GENERAL_REGS && CONSTANT_P (x)
3029 && (GET_CODE (x) != CONST_INT
3030 || ( ! CONST_OK_FOR_I (INTVAL (x))
3031 && ! CONST_OK_FOR_M (INTVAL (x))
3032 && ! CONST_OK_FOR_N (INTVAL (x)))))
3033 new_class = LRW_REGS;
3034 else
3035 new_class = class;
3037 return new_class;
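
/* Hedged example (the exact constraint ranges are defined elsewhere):
   reloading a large literal such as (const_int 0x12345678) into
   GENERAL_REGS is narrowed to LRW_REGS above, since only an lrw
   (constant pool load) can materialize it; an immediate that already
   satisfies one of the I/M/N constraints keeps the requested class.  */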
3040 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
3041 register. Note that the current version doesn't worry about whether
3042    register.  Note that the current version doesn't worry about whether
3043    they are the same mode or not (e.g., a QImode in r2 matches an HImode
3044 we want to be able to say something about modes. */
3046 mcore_is_same_reg (x, y)
3047 rtx x;
3048 rtx y;
3050 /* Strip any and all of the subreg wrappers. */
3051 while (GET_CODE (x) == SUBREG)
3052 x = SUBREG_REG (x);
3054 while (GET_CODE (y) == SUBREG)
3055 y = SUBREG_REG (y);
3057   if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
3058 return 1;
3060 return 0;
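
/* For example, (subreg:HI (reg:SI 2) 0) and (reg:SI 2) are treated as
   the same register here: both strip down to register number 2, and
   the mode difference is deliberately ignored.  */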
3063 /* Called to register all of our global variables with the garbage
3064 collector. */
3065 static void
3066 mcore_add_gc_roots ()
3068 ggc_add_rtx_root (&arch_compare_op0, 1);
3069 ggc_add_rtx_root (&arch_compare_op1, 1);
3072 void
3073 mcore_override_options ()
3075 if (mcore_stack_increment_string)
3077 mcore_stack_increment = atoi (mcore_stack_increment_string);
3079 if (mcore_stack_increment < 0
3080 || (mcore_stack_increment == 0
3081 && (mcore_stack_increment_string[0] != '0'
3082 || mcore_stack_increment_string[1] != 0)))
3083 error ("invalid option `-mstack-increment=%s'",
3084 mcore_stack_increment_string);
3087 /* Only the m340 supports little endian code. */
3088 if (TARGET_LITTLE_END && ! TARGET_M340)
3089 target_flags |= M340_BIT;
3091 mcore_add_gc_roots ();
3095 mcore_must_pass_on_stack (mode, type)
3096 enum machine_mode mode ATTRIBUTE_UNUSED;
3097 tree type;
3099 if (type == NULL)
3100 return 0;
3102   /* If the argument can have its address taken, it must
3103 be placed on the stack. */
3104 if (TREE_ADDRESSABLE (type))
3105 return 1;
3107 return 0;
3110 /* Compute the number of word sized registers needed to
3111 hold a function argument of mode MODE and type TYPE. */
3113 mcore_num_arg_regs (mode, type)
3114 enum machine_mode mode;
3115 tree type;
3117 int size;
3119 if (MUST_PASS_IN_STACK (mode, type))
3120 return 0;
3122 if (type && mode == BLKmode)
3123 size = int_size_in_bytes (type);
3124 else
3125 size = GET_MODE_SIZE (mode);
3127 return ROUND_ADVANCE (size);
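
/* Example (assuming UNITS_PER_WORD == 4 and that ROUND_ADVANCE rounds
   a byte count up to whole words): a 10 byte BLKmode structure needs
   3 argument registers, while an SImode scalar needs just 1.  */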
3130 static rtx
3131 handle_structs_in_regs (mode, type, reg)
3132 enum machine_mode mode;
3133 tree type;
3134 int reg;
3136 int size;
3138   /* The MCore ABI defines that a structure whose size is not a whole multiple
3139      of words is passed packed into registers (or spilled onto the stack if
3140 not enough registers are available) with the last few bytes of the
3141 structure being packed, left-justified, into the last register/stack slot.
3142 GCC handles this correctly if the last word is in a stack slot, but we
3143 have to generate a special, PARALLEL RTX if the last word is in an
3144 argument register. */
3145 if (type
3146 && TYPE_MODE (type) == BLKmode
3147 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
3148 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
3149 && (size % UNITS_PER_WORD != 0)
3150 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
3152 rtx arg_regs [NPARM_REGS];
3153 int nregs;
3154 rtx result;
3155 rtvec rtvec;
3157 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
3159 arg_regs [nregs] =
3160 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
3161 GEN_INT (nregs * UNITS_PER_WORD));
3162 nregs ++;
3165 /* We assume here that NPARM_REGS == 6. The assert checks this. */
3166 assert (ARRAY_SIZE (arg_regs) == 6);
3167 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
3168 arg_regs[3], arg_regs[4], arg_regs[5]);
3170 result = gen_rtx_PARALLEL (mode, rtvec);
3171 return result;
3174 return gen_rtx_REG (mode, reg);
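
/* Worked example (illustrative): a 10 byte BLKmode struct whose first
   word lands in hardware register 4 (with UNITS_PER_WORD == 4) spans
   3 registers, and since 10 % 4 != 0 the code above returns

     (parallel [(expr_list (reg:SI 4) (const_int 0))
                (expr_list (reg:SI 5) (const_int 4))
                (expr_list (reg:SI 6) (const_int 8))])

   so that the trailing, left-justified bytes are passed correctly.  */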
3178 mcore_function_value (valtype, func)
3179 tree valtype;
3180 tree func ATTRIBUTE_UNUSED;
3182 enum machine_mode mode;
3183 int unsigned_p;
3185 mode = TYPE_MODE (valtype);
3187 PROMOTE_MODE (mode, unsigned_p, NULL);
3189 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
3192 /* Define where to put the arguments to a function.
3193 Value is zero to push the argument on the stack,
3194 or a hard register in which to store the argument.
3196 MODE is the argument's machine mode.
3197 TYPE is the data type of the argument (as a tree).
3198 This is null for libcalls where that information may
3199 not be available.
3200 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3201 the preceding args and about the function being called.
3202 NAMED is nonzero if this argument is a named parameter
3203 (otherwise it is an extra parameter matching an ellipsis).
3205 On MCore the first args are normally in registers
3206 and the rest are pushed. Any arg that starts within the first
3207 NPARM_REGS words is at least partially passed in a register unless
3208 its data type forbids. */
3210 mcore_function_arg (cum, mode, type, named)
3211 CUMULATIVE_ARGS cum;
3212 enum machine_mode mode;
3213 tree type;
3214 int named;
3216 int arg_reg;
3218 if (! named)
3219 return 0;
3221 if (MUST_PASS_IN_STACK (mode, type))
3222 return 0;
3224 arg_reg = ROUND_REG (cum, mode);
3226 if (arg_reg < NPARM_REGS)
3227 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
3229 return 0;
3232 /* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
3233 Returns the number of argument registers required to hold *part* of
3234 a parameter of machine mode MODE and type TYPE (which may be NULL if
3235    the type is not known).  If the argument fits entirely in the argument
3236 registers, or entirely on the stack, then 0 is returned. CUM is the
3237 number of argument registers already used by earlier parameters to
3238 the function. */
3240 mcore_function_arg_partial_nregs (cum, mode, type, named)
3241 CUMULATIVE_ARGS cum;
3242 enum machine_mode mode;
3243 tree type;
3244 int named;
3246 int reg = ROUND_REG (cum, mode);
3248 if (named == 0)
3249 return 0;
3251 if (MUST_PASS_IN_STACK (mode, type))
3252 return 0;
3254 /* REG is not the *hardware* register number of the register that holds
3255 the argument, it is the *argument* register number. So for example,
3256 the first argument to a function goes in argument register 0, which
3257 translates (for the MCore) into hardware register 2. The second
3258 argument goes into argument register 1, which translates into hardware
3259 register 3, and so on. NPARM_REGS is the number of argument registers
3260 supported by the target, not the maximum hardware register number of
3261 the target. */
3262 if (reg >= NPARM_REGS)
3263 return 0;
3265 /* If the argument fits entirely in registers, return 0. */
3266 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
3267 return 0;
3269 /* The argument overflows the number of available argument registers.
3270 Compute how many argument registers have not yet been assigned to
3271 hold an argument. */
3272 reg = NPARM_REGS - reg;
3274 /* Return partially in registers and partially on the stack. */
3275 return reg;
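
/* Worked example (assuming NPARM_REGS == 6): a 12 byte argument whose
   first word would land in argument register 4 needs 3 words, but only
   argument registers 4 and 5 remain, so 6 - 4 = 2 is returned and the
   last word goes on the stack.  An argument that fits in the remaining
   registers, or that starts beyond them, returns 0.  */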
3278 /* Return non-zero if SYMBOL is marked as being dllexport'd. */
3280 mcore_dllexport_name_p (symbol)
3281 const char * symbol;
3283 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
3286 /* Return non-zero if SYMBOL is marked as being dllimport'd. */
3288 mcore_dllimport_name_p (symbol)
3289 const char * symbol;
3291 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
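
/* Name encoding examples (following the markers tested above and
   applied by the mark routines below):

     foo            ordinary symbol
     @e.foo         symbol marked dllexport
     @i.__imp_foo   symbol marked dllimport (wrapped in a MEM)

   The encodings are stripped again via STRIP_NAME_ENCODING when the
   names are actually output (see mcore_unique_section below).  */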
3294 /* Mark a DECL as being dllexport'd. */
3295 static void
3296 mcore_mark_dllexport (decl)
3297 tree decl;
3299 const char * oldname;
3300 char * newname;
3301 rtx rtlname;
3302 tree idp;
3304 rtlname = XEXP (DECL_RTL (decl), 0);
3306 if (GET_CODE (rtlname) == SYMBOL_REF)
3307 oldname = XSTR (rtlname, 0);
3308 else if ( GET_CODE (rtlname) == MEM
3309 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3310 oldname = XSTR (XEXP (rtlname, 0), 0);
3311 else
3312 abort ();
3314 if (mcore_dllexport_name_p (oldname))
3315 return; /* Already done. */
3317 newname = alloca (strlen (oldname) + 4);
3318 sprintf (newname, "@e.%s", oldname);
3320 /* We pass newname through get_identifier to ensure it has a unique
3321 address. RTL processing can sometimes peek inside the symbol ref
3322 and compare the string's addresses to see if two symbols are
3323 identical. */
3324 /* ??? At least I think that's why we do this. */
3325 idp = get_identifier (newname);
3327 XEXP (DECL_RTL (decl), 0) =
3328 gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3331 /* Mark a DECL as being dllimport'd. */
3332 static void
3333 mcore_mark_dllimport (decl)
3334 tree decl;
3336 const char * oldname;
3337 char * newname;
3338 tree idp;
3339 rtx rtlname;
3340 rtx newrtl;
3342 rtlname = XEXP (DECL_RTL (decl), 0);
3344 if (GET_CODE (rtlname) == SYMBOL_REF)
3345 oldname = XSTR (rtlname, 0);
3346 else if ( GET_CODE (rtlname) == MEM
3347 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3348 oldname = XSTR (XEXP (rtlname, 0), 0);
3349 else
3350 abort ();
3352 if (mcore_dllexport_name_p (oldname))
3353 abort (); /* This shouldn't happen. */
3354 else if (mcore_dllimport_name_p (oldname))
3355 return; /* Already done. */
3357 /* ??? One can well ask why we're making these checks here,
3358 and that would be a good question. */
3360 /* Imported variables can't be initialized. */
3361 if (TREE_CODE (decl) == VAR_DECL
3362 && !DECL_VIRTUAL_P (decl)
3363 && DECL_INITIAL (decl))
3365 error_with_decl (decl, "initialized variable `%s' is marked dllimport");
3366 return;
3369 /* `extern' needn't be specified with dllimport.
3370 Specify `extern' now and hope for the best. Sigh. */
3371 if (TREE_CODE (decl) == VAR_DECL
3372 /* ??? Is this test for vtables needed? */
3373 && !DECL_VIRTUAL_P (decl))
3375 DECL_EXTERNAL (decl) = 1;
3376 TREE_PUBLIC (decl) = 1;
3379 newname = alloca (strlen (oldname) + 11);
3380 sprintf (newname, "@i.__imp_%s", oldname);
3382 /* We pass newname through get_identifier to ensure it has a unique
3383 address. RTL processing can sometimes peek inside the symbol ref
3384 and compare the string's addresses to see if two symbols are
3385 identical. */
3386 /* ??? At least I think that's why we do this. */
3387 idp = get_identifier (newname);
3389 newrtl = gen_rtx (MEM, Pmode,
3390 gen_rtx (SYMBOL_REF, Pmode,
3391 IDENTIFIER_POINTER (idp)));
3392 XEXP (DECL_RTL (decl), 0) = newrtl;
3395 static int
3396 mcore_dllexport_p (decl)
3397 tree decl;
3399 if ( TREE_CODE (decl) != VAR_DECL
3400 && TREE_CODE (decl) != FUNCTION_DECL)
3401 return 0;
3403 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
3406 static int
3407 mcore_dllimport_p (decl)
3408 tree decl;
3410 if ( TREE_CODE (decl) != VAR_DECL
3411 && TREE_CODE (decl) != FUNCTION_DECL)
3412 return 0;
3414 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3417 /* Cover function to implement ENCODE_SECTION_INFO. */
3418 void
3419 mcore_encode_section_info (decl)
3420 tree decl;
3422 /* This bit is copied from arm.h. */
3423 if (optimize > 0
3424 && TREE_CONSTANT (decl)
3425 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
3427 rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
3428 ? TREE_CST_RTL (decl) : DECL_RTL (decl));
3429 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
3432 /* Mark the decl so we can tell from the rtl whether the object is
3433 dllexport'd or dllimport'd. */
3434 if (mcore_dllexport_p (decl))
3435 mcore_mark_dllexport (decl);
3436 else if (mcore_dllimport_p (decl))
3437 mcore_mark_dllimport (decl);
3439 /* It might be that DECL has already been marked as dllimport, but
3440 a subsequent definition nullified that. The attribute is gone
3441 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3442 else if ((TREE_CODE (decl) == FUNCTION_DECL
3443 || TREE_CODE (decl) == VAR_DECL)
3444 && DECL_RTL (decl) != NULL_RTX
3445 && GET_CODE (DECL_RTL (decl)) == MEM
3446 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3447 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3448 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3450 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3451 tree idp = get_identifier (oldname + 9);
3452 rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
3454 XEXP (DECL_RTL (decl), 0) = newrtl;
3456 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3457 ??? We leave these alone for now. */
3461 /* MCore specific attribute support.
3462 dllexport - for exporting a function/variable that will live in a dll
3463 dllimport - for importing a function/variable from a dll
3464 naked - do not create a function prologue/epilogue. */
3466 const struct attribute_spec mcore_attribute_table[] =
3468 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
3469 { "dllexport", 0, 0, true, false, false, NULL },
3470 { "dllimport", 0, 0, true, false, false, NULL },
3471 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute },
3472 { NULL, 0, 0, false, false, false, NULL }
3475 /* Handle a "naked" attribute; arguments as in
3476 struct attribute_spec.handler. */
3477 static tree
3478 mcore_handle_naked_attribute (node, name, args, flags, no_add_attrs)
3479 tree *node;
3480 tree name;
3481 tree args ATTRIBUTE_UNUSED;
3482 int flags ATTRIBUTE_UNUSED;
3483 bool *no_add_attrs;
3485 if (TREE_CODE (*node) == FUNCTION_DECL)
3487 /* PR14310 - don't complain about lack of return statement
3488 in naked functions. The solution here is a gross hack
3489 but this is the only way to solve the problem without
3490 adding a new feature to GCC. I did try submitting a patch
3491 that would add such a new feature, but it was (rightfully)
3492 rejected on the grounds that it was creeping featurism,
3493 so hence this code. */
3494 if (warn_return_type)
3496 saved_warn_return_type = warn_return_type;
3497 warn_return_type = 0;
3498 saved_warn_return_type_count = 2;
3500 else if (saved_warn_return_type_count)
3501 saved_warn_return_type_count = 2;
3503 else
3505 warning ("`%s' attribute only applies to functions",
3506 IDENTIFIER_POINTER (name));
3507 *no_add_attrs = true;
3510 return NULL_TREE;
3513 /* Cover function for UNIQUE_SECTION. */
3515 void
3516 mcore_unique_section (decl, reloc)
3517 tree decl;
3518 int reloc ATTRIBUTE_UNUSED;
3520 int len;
3521 char * name;
3522 char * string;
3523 const char * prefix;
3525 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3527 /* Strip off any encoding in name. */
3528 STRIP_NAME_ENCODING (name, name);
3530 /* The object is put in, for example, section .text$foo.
3531      The linker will then ultimately place it in .text
3532 (everything from the $ on is stripped). */
3533 if (TREE_CODE (decl) == FUNCTION_DECL)
3534 prefix = ".text$";
3535 /* For compatibility with EPOC, we ignore the fact that the
3536 section might have relocs against it. */
3537 else if (DECL_READONLY_SECTION (decl, 0))
3538 prefix = ".rdata$";
3539 else
3540 prefix = ".data$";
3542 len = strlen (name) + strlen (prefix);
3543 string = alloca (len + 1);
3545 sprintf (string, "%s%s", prefix, name);
3547 DECL_SECTION_NAME (decl) = build_string (len, string);
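
  /* Example: a function "bar" is given DECL_SECTION_NAME ".text$bar",
     while a read-only object "baz" gets ".rdata$baz"; as noted above,
     the linker strips everything from the '$' onwards when it merges
     these into .text / .rdata.  */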
3551 mcore_naked_function_p ()
3553 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3556 #ifdef OBJECT_FORMAT_ELF
3557 static void
3558 mcore_asm_named_section (name, flags)
3559 const char *name;
3560 unsigned int flags ATTRIBUTE_UNUSED;
3562 fprintf (asm_out_file, "\t.section %s\n", name);
3564 #endif /* OBJECT_FORMAT_ELF */