/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"
/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int          mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;
/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;
/* Provides the class number of the smallest class containing
   reg number.  */
const int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};
/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,     /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,     /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,     /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS,/* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,     /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};
struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;
static void       output_stack_adjust          (int, int);
static int        calc_live_regs               (int *);
static int        const_ok_for_mcore           (int);
static int        try_constant_tricks          (long, int *, int *);
static const char * output_inline_const        (enum machine_mode, rtx *);
static void       block_move_sequence          (rtx, rtx, rtx, rtx, int, int, int);
static void       layout_mcore_frame           (struct mcore_frame *);
static cond_type  is_cond_candidate            (rtx);
static rtx        emit_new_cond_insn           (rtx, int);
static rtx        conditionalize_block         (rtx);
static void       conditionalize_optimization  (void);
static void       mcore_reorg                  (void);
static rtx        handle_structs_in_regs       (enum machine_mode, tree, int);
static void       mcore_mark_dllexport         (tree);
static void       mcore_mark_dllimport         (tree);
static int        mcore_dllexport_p            (tree);
static int        mcore_dllimport_p            (tree);
const struct attribute_spec mcore_attribute_table[];
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      (const char *, unsigned int);
#endif
static void       mcore_unique_section         (tree, int);
static void       mcore_encode_section_info    (tree, rtx, int);
static const char * mcore_strip_name_encoding  (const char *);
static int        mcore_const_costs            (rtx, RTX_CODE);
static int        mcore_and_cost               (rtx);
static int        mcore_ior_cost               (rtx);
static bool       mcore_rtx_costs              (rtx, int, int, int *);
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE          mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION       mcore_unique_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO      mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING      mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS                mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST             hook_int_rtx_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG  mcore_reorg

struct gcc_target targetm = TARGET_INITIALIZER;
/* Adjust the stack by SIZE bytes: DIRECTION < 0 grows it, DIRECTION > 0
   shrinks it.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx (REG, SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx (REG, SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}
      
      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      
      emit_insn (insn);
    }
}
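
/* A sketch of what the incremental path above emits (illustrative, not
   from the original source; assumes mcore_stack_increment == 32 and an
   initial request of 100 bytes):

       r1 <- 32            load the step into a work register
       sp <- sp - r1       grow one step...
       mem[sp] <- sp       ...and probe it with a volatile store
       sp <- sp - r1
       mem[sp] <- sp
       sp <- sp - r1
       mem[sp] <- sp       100 - 3*32 = 4 bytes left
       sp <- sp - 4        residual adjustment, no probe required

   Every increment is touched as the stack grows, which is the point of
   the probe stores.  */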
/* Work out the registers which need to be saved,
   both as a mask and a count.  */
static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (regs_ever_live[reg] && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}
/* Print the operand address in x to the stream.  */
void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;
      
    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO (base)], INTVAL (index));
	    break;

	  default:
	    abort ();
	  }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw
        in a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */
void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  abort ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;
    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
/* What does a constant cost ?  */
static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}
/* What does an and instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that cse will cse relaxed immediates
   out.  Otherwise we'll get bad code (multiple reloads of the same const).  */
static int
mcore_and_cost (rtx x)
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
/* What does an or cost - see mcore_and_cost().  */
static int
mcore_ior_cost (rtx x)
{
  int val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int * total)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, outer_code);
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV: case UDIV: case MOD: case UMOD:
    case FLOAT: case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}
/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */
int
mcore_modify_comparison (enum rtx_code code)
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
	{
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      arch_compare_op1 = GEN_INT (val + 1);
	      return 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return 0;
}
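
/* Example of the transformation above (illustrative, not from the
   original source): a signed "x <= 31" has no direct cmplti encoding,
   but since 31 + 1 == 32 fits the J immediate range (1-32), the test
   can be rewritten as "x < 32" and emitted as a single cmplti.  */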
/* Prepare the operands for a comparison.  */
rtx
mcore_gen_compare_reg (enum rtx_code code)
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
	{
	  /* Unsigned > 0 is the same as != 0, but we need
	     to invert the condition, so we want to set
	     code = EQ.  This cannot be done however, as the
	     mcore does not support such a test.  Instead we
	     cope with this case in the "bgtu" pattern itself
	     so we should never reach this point.  */
	  abort ();
	}
      code = LEU;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));

  return cc_reg;
}
int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}
int
mcore_call_address_operand (rtx x, enum machine_mode mode)
{
  return register_operand (x, mode) || CONSTANT_P (x);
}
/* Functions to output assembly code for a function call.  */
const char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands[index];

  if (GET_CODE (addr) == REG)
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  if (mcore_current_function_name == 0)
	    abort ();

	  if (GET_CODE (addr) != SYMBOL_REF)
	    abort ();

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
/* Can we load a constant with a single instruction ?  */
static int
const_ok_for_mcore (int value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}
/* Can we load a constant inline with up to 2 instructions ?  */
int
mcore_const_ok_for_inline (long value)
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */
int
mcore_const_trick_uses_not (long value)
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.

   (The addi/subi entries are listed here in the order the corresponding
   cases are emitted below.)  */
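
/* Worked examples (illustrative, not from the original source):
   0xFFFFFF80 is trick 2: movi 127, then not.
   260 (0x104) is trick 3: bgeni to load 256, then addi 4.
   0x600 (3 << 9) is trick 9: movi 3, then lsli 9.  */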
static int
try_constant_tricks (long value, int * x, int * y)
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
	{
	  *x = ~value;
	  return 2;
	}

      for (i = 1; i <= 32; i++)
	{
	  if (const_ok_for_mcore (value - i))
	    {
	      *x = value - i;
	      *y = i;
	      return 3;		/* Followed by addi.  */
	    }

	  if (const_ok_for_mcore (value + i))
	    {
	      *x = value + i;
	      *y = i;
	      return 4;		/* Followed by subi.  */
	    }
	}

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
	{
	  if (const_ok_for_mcore (i - value))
	    {
	      *x = i - value;
	      *y = i;
	      return 5;		/* Followed by rsubi.  */
	    }

	  if (const_ok_for_mcore (value & ~bit))
	    {
	      *y = bit;
	      *x = value & ~bit;
	      return 6;		/* Followed by bseti.  */
	    }

	  if (const_ok_for_mcore (value | bit))
	    {
	      *y = ~bit;
	      *x = value | bit;
	      return 7;		/* Followed by bclri.  */
	    }

	  bit >>= 1;
	}

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
	{
	  int c;

	  /* MCore has rotate left.  */
	  c = rot << 31;
	  rot >>= 1;
	  rot &= 0x7FFFFFFF;
	  rot |= c;   /* Simulate rotate.  */

	  if (const_ok_for_mcore (rot))
	    {
	      *y = i;
	      *x = rot;
	      return 8;		/* Followed by rotli.  */
	    }

	  if (shf & 1)
	    shf = 0;	/* Can't use logical shift, low order bit is one.  */

	  shf >>= 1;

	  if (shf != 0 && const_ok_for_mcore (shf))
	    {
	      *y = i;
	      *x = shf;
	      return 9;		/* Followed by lsli.  */
	    }
	}

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
	{
	  *x = value / 3;
	  return 10;		/* Followed by ixh.  */
	}

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
	{
	  *x = value / 5;
	  return 11;		/* Followed by ixw.  */
	}
    }

  return 0;
}
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */
int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
	{
	  /* Calls might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we can not take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}
/* Count the number of ones in mask.  */
int
mcore_num_ones (int mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
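
/* For example, mcore_num_ones (0x0000F00F) is 8: each step above folds
   bit counts in parallel (pairs, nibbles, bytes, halfwords), so the
   whole population count takes a handful of ALU operations instead of
   a 32-iteration loop.  */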
/* Count the number of zeros in mask.  */
int
mcore_num_zeros (int mask)
{
  return 32 - mcore_num_ones (mask);
}
/* Determine byte being masked.  */
int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}
/* Determine halfword being masked.  */
int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}
/* Output a series of bseti's corresponding to mask.  */
const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}

      mask >>= 1;
    }

  return "";
}
975 mcore_output_bclri (rtx dst
, int mask
)
980 out_operands
[0] = dst
;
982 for (bit
= 0; bit
< 32; bit
++)
984 if ((mask
& 0x1) == 0x0)
986 out_operands
[1] = GEN_INT (bit
);
988 output_asm_insn ("bclri\t%0,%1", out_operands
);
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */
const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  int load_value;
  int adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }

  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */
  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */
const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  int x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
    abort ();

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";

  else
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
/* Output an inline constant.  */
static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  int x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char * dst_fmt;
  int value;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
    {
      /* lrw's are handled separately: Large inlinable constants
	 never get turned into lrw's.  Our caller uses try_constant_tricks
	 to back off to an lrw rather than calling this routine.  */
      abort ();
    }

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bset */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
/* Output a move of a word or less value.  */
const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r */
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case SImode:
		return "ldw\t%0,%1";
	      case HImode:
		return "ld.h\t%0,%1";
	      case QImode:
		return "ld.b\t%0,%1";
	      default:
		abort ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  int x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
	return "stw\t%1,%0";
      case HImode:
	return "st.h\t%1,%0";
      case QImode:
	return "st.b\t%1,%0";
      default:
	abort ();
      }

  abort ();
}
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source is not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov %R0,%R1\n\tmov %0,%1";
	  else
	    return "mov %0,%1\n\tmov %R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		abort ();
	    }
	  else
	    abort ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski %0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski %R0,32";
	      else
		return "movi %R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi %R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni %R0,%P1", operands);
	      else if (INTVAL (src) == -1)
		output_asm_insn ("bmaski %R0,32", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski %R0,%N1", operands);
	      else
		abort ();

	      if (INTVAL (src) < 0)
		return "bmaski %0,32";
	      else
		return "movi %0,0";
	    }
	}
      else
	abort ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    abort ();
}
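
/* Overlap example for the reg-reg case above (illustrative): moving the
   pair r2/r3 into r3/r4 has srcreg + 1 == dstreg, so emitting
   "mov r3,r2" first would clobber r3, the second source word; hence the
   high word is moved first ("mov r4,r3" then "mov r3,r2").  The memory
   case has the same hazard when the base register equals the low
   destination register, which is why the two ldw's are issued in
   reverse order there.  */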
/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */
int
mcore_general_movsrc_operand (rtx op, enum machine_mode mode)
{
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
    return 1;

  return general_operand (op, mode);
}

/* Nonzero if OP can be destination of a simple move operation.  */
int
mcore_general_movdst_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
    return 0;

  return general_operand (op, mode);
}
/* Nonzero if OP is a normal arithmetic register.  */
int
mcore_arith_reg_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

  return 1;
}
/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */
int
mcore_reload_operand (rtx op, enum machine_mode mode)
{
  if (mcore_arith_reg_operand (op, mode))
    return 1;

  if (! reload_in_progress)
    return 0;

  return GET_CODE (op) == MEM;
}
/* Nonzero if OP is a valid source operand for an arithmetic insn.  */
int
mcore_arith_J_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */
int
mcore_arith_K_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */
int
mcore_arith_K_operand_not_0 (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)
    return 1;

  return 0;
}

int
mcore_arith_K_S_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
	return 1;
    }

  return 0;
}

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_M_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for loading.  */
int
mcore_arith_imm_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
    return 1;

  return 0;
}

int
mcore_arith_any_imm_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */
int
mcore_arith_O_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for a btsti.  */
int
mcore_literal_K_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
    return 1;

  return 0;
}

/* Nonzero if OP is a valid source operand for an add/sub insn.  */
int
mcore_addsub_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    {
      return 1;

      /* The following is removed because it precludes large constants from being
	 returned as valid source operands for an add/sub insn.  While large
	 constants may not directly be used in an add/sub, they may if first loaded
	 into a register.  Thus, this predicate should indicate that they are valid,
	 and the constraint in mcore.md should control whether an additional load to
	 register is needed.  (see mcore.md, addsi).  -- DAC 4/2/1998  */
      /*
	if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
	  return 1;
      */
    }

  return 0;
}

/* Nonzero if OP is a valid source operand for a compare operation.  */
int
mcore_compare_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
    return 1;

  return 0;
}
/* Expand insert bit field.  BRC  */
int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx (SET, SImode, operands[0],
			      gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT &&
      INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx (SET, SImode, operands[0],
			  gen_rtx (IOR, SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (AND, SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx (SET, SImode, sreg,
			  gen_rtx (AND, SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx (SET, SImode, sreg,
			gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx (SET, SImode, operands[0],
		      gen_rtx (IOR, SImode, operands[0], sreg)));

  return 1;
}
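
/* A sketch of the general path above (illustrative values): inserting a
   4-bit field at bit position 8 forces ~(0xF << 8) into a register and
   ANDs it into operands[0], masks the source down to 0xF, shifts it
   left by 8, and ORs it in; four SImode operations with no subregs for
   combine to trip over.  */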
/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */
int
mcore_load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt))    != (unsigned) (dest_regno + i)
	  || GET_CODE (SET_SRC (elt))  != MEM
	  || GET_MODE (SET_SRC (elt))  != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}
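
/* The PARALLEL accepted above has this shape (illustrative RTL, with sp
   standing in for the base register's number):

     (parallel [(set (reg:SI 4) (mem:SI (reg:SI sp)))
                (set (reg:SI 5) (mem:SI (plus:SI (reg:SI sp) (const_int 4))))
                (set (reg:SI 6) (mem:SI (plus:SI (reg:SI sp) (const_int 8))))])

   i.e. consecutive registers loaded from consecutive words off a single
   base address, which is what the ldm instruction provides.  */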
/* Similar, but tests for store multiple.  */
int
mcore_store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count = XVECLEN (op, 0);
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt))  != REG
	  || GET_MODE (SET_SRC (elt))  != SImode
	  || REGNO (SET_SRC (elt))     != (unsigned) (src_regno + i)
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */
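
/* For instance (illustrative): an 8-byte word-aligned copy becomes
   ldw t0,(src,0); ldw t1,(src,4); stw t0,(dst,0); stw t1,(dst,4).
   The double-buffered "phase"/"next" logic below keeps one load in
   flight while the previously loaded value is stored.  */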
static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};
static void
block_move_sequence (rtx dest, rtx dst_mem, rtx src, rtx src_mem,
		     int size, int align, int offset)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  int active[2];
  int phase = 0;
  int next;
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;

  mode[0] = mode_from_align[align];

  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
    {
      mode[1] = mode[0];
      temp[1] = gen_reg_rtx (mode[1]);
    }

  do
    {
      rtx srcp, dstp;

      next = phase;
      phase = !phase;

      if (size > 0)
	{
	  /* Change modes as the sequence tails off.  */
	  if (size < amount[next])
	    {
	      amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	      mode[next] = mode_from_align[amount[next]];
	      temp[next] = gen_reg_rtx (mode[next]);
	    }

	  size -= amount[next];
	  srcp = gen_rtx (MEM,
			  MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
			  gen_rtx (PLUS, Pmode, src,
				   gen_rtx (CONST_INT, SImode, offset_ld)));

	  RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
	  MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
	  MEM_IN_STRUCT_P (srcp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
	  offset_ld += amount[next];
	  active[next] = TRUE;
	}

      if (active[phase])
	{
	  active[phase] = FALSE;

	  dstp = gen_rtx (MEM,
			  MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
			  gen_rtx (PLUS, Pmode, dest,
				   gen_rtx (CONST_INT, SImode, offset_st)));

	  RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
	  MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
	  MEM_IN_STRUCT_P (dstp) = 1;
	  emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
void
mcore_expand_block_move (rtx dst_mem, rtx src_mem, rtx * operands)
{
  int align = INTVAL (operands[3]);
  int bytes;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      bytes = INTVAL (operands[2]);

      if (bytes <= 0)
	return;
      if (align > 4)
	align = 4;

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
	 we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
			  || ((bytes & 01) == 0 && bytes <= 8*4)
			  || ((bytes & 03) == 0 && bytes <= 16*4)))
	  || (align == 2 && bytes <= 4*2)
	  || (align == 1 && bytes <= 4*1))
	{
	  block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
			       bytes, align, 0);
	  return;
	}
    }

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
		     operands[0], Pmode, operands[1], Pmode, operands[2],
		     SImode);
}
/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)		/* Maximum addi operand.  */
static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  unsigned int i;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES - 1);

  /* The only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too! */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     Use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
	{
	  step += outbounds;
	  infp->reg_offset += outbounds;
	  outbounds = 0;
	}

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
	step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
	{
	  all += outbounds;
	  outbounds = 0;
	}

      /* Get the rest of the locals in place.  */
      step = all - step;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
	{
	  step += outbounds;
	  outbounds = 0;
	}

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
	use some of localsize so that regarg is aligned and then
	save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
 finish:
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    {
      if (infp->growth[i] % STACK_BYTES)
	{
	  fprintf (stderr, "stack growth of %d is not %d aligned\n",
		   infp->growth[i], STACK_BYTES);
	  abort ();
	}
    }
}
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */
int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  abort ();

  return 0;
}
/* Keep track of some information about varargs for the prolog.  */
void
mcore_setup_incoming_varargs (CUMULATIVE_ARGS args_so_far,
			      enum machine_mode mode, tree type,
			      int * ptr_pretend_size ATTRIBUTE_UNUSED)
{
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}
void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      if (GET_CODE (x) != MEM)
	abort ();

      x = XEXP (x, 0);

      if (GET_CODE (x) != SYMBOL_REF)
	abort ();

      if (mcore_current_function_name)
	free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (current_function_calls_alloca)
	ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
	 We're looking at how the 8byte alignment affects stack layout
	 and where we had to pad things.  This emits information we can
	 extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
	       "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
	       mcore_current_function_name,
	       fi.arg_size, fi.reg_size, fi.reg_mask,
	       fi.local_size, fi.outbound_size,
	       frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
	{
	  emit_insn (gen_movsi
		     (gen_rtx (MEM, SImode,
			       plus_constant (stack_pointer_rtx, offset)),
		      gen_rtx (REG, SImode, rn)));
	}
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
	{
	  if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	    {
	      int first_reg = 15;

	      while (fi.reg_mask & (1 << first_reg))
		first_reg--;
	      first_reg++;

	      emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
					     gen_rtx (REG, SImode, first_reg),
					     GEN_INT (16 - first_reg)));

	      i -= (15 - first_reg);
	      offs += (16 - first_reg) * 4;
	    }
	  else if (fi.reg_mask & (1 << i))
	    {
	      emit_insn (gen_movsi
			 (gen_rtx (MEM, SImode,
				   plus_constant (stack_pointer_rtx, offs)),
			  gen_rtx (REG, SImode, i)));
	      offs += 4;
	    }
	}
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
}
void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: while loop should accumulate and do a single sell.  */
      while (growth >= fi.local_growth)
	{
	  if (fi.growth[growth] != 0)
	    output_stack_adjust (1, fi.growth[growth]);
	  growth--;
	}
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	{
	  int first_reg;

	  /* Find the starting register.  */
	  first_reg = 15;

	  while (fi.reg_mask & (1 << first_reg))
	    first_reg--;
	  first_reg++;

	  emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
					gen_rtx (MEM, SImode, stack_pointer_rtx),
					GEN_INT (16 - first_reg)));

	  i -= (15 - first_reg);
	  offs += (16 - first_reg) * 4;
	}
      else if (fi.reg_mask & (1 << i))
	{
	  emit_insn (gen_movsi
		     (gen_rtx (REG, SImode, i),
		      gen_rtx (MEM, SImode,
			       plus_constant (stack_pointer_rtx, offs))));
	  offs += 4;
	}
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust ( 1, fi.growth[growth--]);
}
/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:
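
   [The example itself is missing from this copy of the file.  It was
    borrowed from the SH port along with this comment; the shape there
    is approximately:]

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..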
   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:
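
   [Again reconstructed from the SH port's version of this comment; the
    shrunken form is approximately:]

   lrw   L1,r0
   ..
   lrw   L3,r0
   bra   L4
   align
   L3:   .long value
   L4:   .long value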
   Then the second move becomes the target for the shortening process.  */
typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */
#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */
const char *
mcore_output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
	{
	  pool_node * p = pool_vector + i;

	  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

	  output_asm_insn (".long %0", &p->value);
	}

      pool_size = 0;
    }

  return "";
}
/* Check whether insn is a candidate for a conditional.  */

static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
	return COND_NO;

      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
	   GET_CODE (dst) != SUBREG) ||
	  GET_MODE (dst) != SImode)
	return COND_NO;

      src = XEXP (pat, 1);

      if ((GET_CODE (src) == REG ||
	   (GET_CODE (src) == SUBREG &&
	    GET_CODE (SUBREG_REG (src)) == REG)) &&
	  GET_MODE (src) == SImode)
	return COND_MOV_INSN;
      else if (GET_CODE (src) == CONST_INT &&
	       INTVAL (src) == 0)
	return COND_CLR_INSN;
      else if (GET_CODE (src) == PLUS &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode &&
	       GET_CODE (XEXP (src, 1)) == CONST_INT &&
	       INTVAL (XEXP (src, 1)) == 1)
	return COND_INC_INSN;
      else if (((GET_CODE (src) == MINUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL (XEXP (src, 1)) == 1) ||
		(GET_CODE (src) == PLUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL (XEXP (src, 1)) == -1)) &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode)
	return COND_DEC_INSN;

      /* Some insns that we don't bother with:
	 (set (rx:DI) (ry:DI))
	 (set (rx:DI) (const_int 0))  */
    }
  else if (GET_CODE (insn) == JUMP_INSN &&
	   GET_CODE (PATTERN (insn)) == SET &&
	   GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}
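/* For reference, the patterns accepted above look like this (register and
   label numbers illustrative):

     (set (reg:SI 2) (reg:SI 3))                           -> COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                        -> COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   -> COND_INC_INSN
     (set (reg:SI 2) (minus:SI (reg:SI 2) (const_int 1)))  -> COND_DEC_INSN
     (set (pc) (label_ref 42))                             -> COND_BRANCH_INSN  */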
/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */

static rtx
emit_new_cond_insn (rtx insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (GET_CODE (insn) == INSN)
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
	c_insn = gen_movt0 (dst, src, dst);
      else
	c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
	c_insn = gen_incscc (dst, dst);
      else
	c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
	c_insn = gen_decscc (dst, dst);
      else
	c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
	c_insn = gen_branch_true (dst);
      else
	c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
	 point, but go ahead and save the notes.  This will help is_dead()
	 when applying peepholes (links don't matter since they are not
	 used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
	 before the old one and to update the use count for the target label.
	 This way, the barrier following the old (uncond) jump will get
	 deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return c_insn;
}
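/* For example (illustrative, assuming the movt0 pattern selects between
   the MCore movt/movf instructions by operand order): with COND nonzero, a
   candidate `mov r7,r8' is re-emitted as `movt r7,r8', which copies only
   when the condition bit c is set; with COND zero it becomes `movf r7,r8'.  */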
/* Attempt to change a basic block into a series of conditional insns.  This
   works by taking the branch at the end of the 1st block and scanning for the
   end of the 2nd block.  If all instructions in the 2nd block have cond.
   versions and the label at the start of block 3 is the same as the target
   from the branch at block 1, then conditionalize all insn in block 2 using
   the inverse condition of the branch at block 1.  (Note I'm bending the
   definition of basic block here.)

   e.g., change:

		bt	L2             <-- end of block 1 (delete)
		mov	r7,r8
		addu	r7,1
		br	L3             <-- end of block 2

	L2:	...                    <-- start of block 3 (NUSES==1)
	L3:	...

   to:

		movf	r7,r8
		incf	r7
		bf	L3

	L3:	...

   we can delete the L2 label if NUSES==1 and re-apply the optimization
   starting at the last instruction of block 2.  This may allow an entire
   if-then-else statement to be conditionalized.  BRC  */

static rtx
conditionalize_block (rtx first)
{
  rtx insn;
  rtx br_pat;
  rtx end_blk_1_br = 0;
  rtx end_blk_2_insn = 0;
  rtx start_blk_3_lab = 0;
  int cond;
  int br_lab_num;
  int blk_size = 0;

  /* Check that the first insn is a candidate conditional jump.  This is
     the one that we'll eliminate.  If not, advance to the next insn to
     try.  */
  if (GET_CODE (first) != JUMP_INSN ||
      GET_CODE (PATTERN (first)) != SET ||
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
    return NEXT_INSN (first);

  /* Extract some information we need.  */
  end_blk_1_br = first;
  br_pat = PATTERN (end_blk_1_br);

  /* Complement the condition since we use the reverse cond. for the insns.  */
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);

  /* Determine what kind of branch we have.  */
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
    {
      /* A normal branch, so extract label out of first arm.  */
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
    }
  else
    {
      /* An inverse branch, so extract the label out of the 2nd arm
	 and complement the condition.  */
      cond = (cond == 0);
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
    }

  /* Scan forward for the start of block 2: it must start with a
     label and that label must be the same as the branch target
     label from block 1.  We don't care about whether block 2 actually
     ends with a branch or a label (an uncond. branch is
     conditionalizable).  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      enum rtx_code code;

      code = GET_CODE (insn);

      /* Look for the label at the start of block 3.  */
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
	break;

      /* Skip barriers, notes, and conditionalizable insns.  If the
	 insn is not conditionalizable or makes this optimization fail,
	 just return the next insn so we can start over from that point.  */
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
	return NEXT_INSN (insn);

      /* Remember the last real insn before the label (ie end of block 2).  */
      if (code == JUMP_INSN || code == INSN)
	{
	  blk_size ++;
	  end_blk_2_insn = insn;
	}
    }

  if (!insn)
    return insn;

  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return insn;

  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.  Also, all
     instructions in the block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx newinsn;

      if (INSN_DELETED_P (insn))
	continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
	{
	  if (end_blk_2_insn == insn)
	    end_blk_2_insn = newinsn;

	  insn = newinsn;
	}
    }

  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    start_blk_3_lab = 0;

  /* ??? we probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}
/* Apply the conditionalization of blocks optimization.  This is the
   outer loop that traverses through the insns scanning for a branch
   that signifies an opportunity to apply the optimization.  Note that
   this optimization is applied late.  If we could apply it earlier,
   say before cse 2, it might expose more optimization opportunities,
   but the payback probably isn't really worth the effort (we'd have
   to update all reg/flow/notes/links/etc to make it work - and stick it
   in before cse 2).  */

static void
conditionalize_optimization (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
    continue;
}
static int saved_warn_return_type = -1;
static int saved_warn_return_type_count = 0;

/* This is to handle loads from the constant pool.  */

static void
mcore_reorg (void)
{
  /* Reset this variable.  */
  current_function_anonymous_args = 0;

  /* Restore the warn_return_type if it has been altered.  */
  if (saved_warn_return_type != -1)
    {
      /* Only restore the value if we have reached another function.
	 The test of warn_return_type occurs in finish_function () in
	 c-decl.c a long time after the code for the function is generated,
	 so we need a counter to tell us when we have finished parsing that
	 function and can restore the flag.  */
      if (--saved_warn_return_type_count == 0)
	{
	  warn_return_type = saved_warn_return_type;
	  saved_warn_return_type = -1;
	}
    }

  if (optimize == 0)
    return;

  /* Conditionalize blocks where we can.  */
  conditionalize_optimization ();

  /* Literal pool generation is now pushed off until the assembler.  */
}
/* Return true if X is something that can be moved directly into r15.  */

bool
mcore_r15_operand_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      return mcore_const_ok_for_inline (INTVAL (x));

    case REG:
    case SUBREG:
    case MEM:
      return 1;

    default:
      return 0;
    }
}
/* Implement SECONDARY_RELOAD_CLASS.  If CLASS contains r15, and we can't
   directly move X into it, use r1-r14 as a temporary.  */

enum reg_class
mcore_secondary_reload_class (enum reg_class class,
			      enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (TEST_HARD_REG_BIT (reg_class_contents[class], 15)
      && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return NO_REGS;
}
/* Return the reg_class to use when reloading the rtx X into the class
   CLASS.  If X is too complex to move directly into r15, prefer to
   use LRW_REGS instead.  */

enum reg_class
mcore_reload_class (rtx x, enum reg_class class)
{
  if (reg_class_subset_p (LRW_REGS, class) && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return class;
}
/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register.  Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2 matches an SImode in r2).  Might think in the future about whether
   we want to be able to say something about modes.  */

int
mcore_is_same_reg (rtx x, rtx y)
{
  /* Strip any and all of the subreg wrappers.  */
  while (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  while (GET_CODE (y) == SUBREG)
    y = SUBREG_REG (y);

  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
    return 1;

  return 0;
}
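/* Thus, for example, (subreg:HI (reg:SI 2) 0) and (reg:SI 2) count as the
   same register here, while (reg:SI 2) and (reg:SI 3) do not.  */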
void
mcore_override_options (void)
{
  if (mcore_stack_increment_string)
    {
      mcore_stack_increment = atoi (mcore_stack_increment_string);

      if (mcore_stack_increment < 0
	  || (mcore_stack_increment == 0
	      && (mcore_stack_increment_string[0] != '0'
		  || mcore_stack_increment_string[1] != 0)))
	error ("invalid option `-mstack-increment=%s'",
	       mcore_stack_increment_string);
    }

  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= M340_BIT;
}
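/* Examples of the parsing above (values illustrative):
   "-mstack-increment=4096" sets mcore_stack_increment to 4096, and
   "-mstack-increment=0" is accepted because the string is literally "0".
   "-mstack-increment=abc" makes atoi() return 0 while the string is not
   "0", so it is diagnosed by the error() call.  */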
int
mcore_must_pass_on_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
{
  if (type == NULL)
    return 0;

  /* If the argument can have its address taken, it must
     be placed on the stack.  */
  if (TREE_ADDRESSABLE (type))
    return 1;

  return 0;
}
/* Compute the number of word sized registers needed to
   hold a function argument of mode MODE and type TYPE.  */

int
mcore_num_arg_regs (enum machine_mode mode, tree type)
{
  int size;

  if (MUST_PASS_IN_STACK (mode, type))
    return 0;

  if (type && mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return ROUND_ADVANCE (size);
}
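/* Worked example, assuming UNITS_PER_WORD is 4 and ROUND_ADVANCE rounds a
   byte count up to whole words: a 10-byte BLKmode structure needs
   ROUND_ADVANCE (10) == 3 argument registers, the last of which is only
   partially occupied.  */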
static rtx
handle_structs_in_regs (enum machine_mode mode, tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole
     number of words is passed packed into registers (or spilled onto the
     stack if not enough registers are available) with the last few bytes of
     the structure being packed, left-justified, into the last register/stack
     slot.  GCC handles this correctly if the last word is in a stack slot,
     but we have to generate a special, PARALLEL RTX if the last word is in
     an argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
	{
	  arg_regs [nregs] =
	    gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg++),
			       GEN_INT (nregs * UNITS_PER_WORD));
	  nregs++;
	}

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      assert (ARRAY_SIZE (arg_regs) == 6);
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
			 arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
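/* Illustrative result, assuming FIRST_PARM_REG is r2 and 4-byte words: a
   10-byte structure passed from r2 yields

     (parallel [(expr_list (reg:SI 2) (const_int 0))
		(expr_list (reg:SI 3) (const_int 4))
		(expr_list (reg:SI 4) (const_int 8))])

   so the trailing partial word lands left-justified in r4, as the ABI
   requires.  */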
rtx
mcore_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  PROMOTE_MODE (mode, unsigned_p, NULL);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On MCore the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */

rtx
mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
		    tree type, int named)
{
  int arg_reg;

  if (! named || mode == VOIDmode)
    return 0;

  if (MUST_PASS_IN_STACK (mode, type))
    return 0;

  arg_reg = ROUND_REG (cum, mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}
/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
   Returns the number of argument registers required to hold *part* of
   a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */

int
mcore_function_arg_partial_nregs (CUMULATIVE_ARGS cum, enum machine_mode mode,
				  tree type, int named)
{
  int reg = ROUND_REG (cum, mode);

  if (named == 0)
    return 0;

  if (MUST_PASS_IN_STACK (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg;
}
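/* Worked example with NPARM_REGS == 6: a 12-byte argument arriving after
   four argument registers are already occupied starts at argument register
   4 and needs 3 registers.  Since 4 + 3 > 6, the code above returns
   6 - 4 == 2: two words travel in registers and the third goes on the
   stack.  */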
/* Return nonzero if SYMBOL is marked as being dllexport'd.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
}

/* Return nonzero if SYMBOL is marked as being dllimport'd.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}
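/* Under the encoding applied by mcore_mark_dllexport and
   mcore_mark_dllimport below, "@e.foo" names a dllexport'd "foo" and
   "@i.__imp_foo" names the import reference for a dllimport'd "foo".  */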
/* Mark a DECL as being dllexport'd.  */

static void
mcore_mark_dllexport (tree decl)
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == SYMBOL_REF)
    oldname = XSTR (rtlname, 0);
  else if (   GET_CODE (rtlname) == MEM
	   && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
    oldname = XSTR (XEXP (rtlname, 0), 0);
  else
    abort ();

  if (mcore_dllexport_name_p (oldname))
    return;  /* Already done.  */

  newname = alloca (strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
}
/* Mark a DECL as being dllimport'd.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == SYMBOL_REF)
    oldname = XSTR (rtlname, 0);
  else if (   GET_CODE (rtlname) == MEM
	   && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
    oldname = XSTR (XEXP (rtlname, 0), 0);
  else
    abort ();

  if (mcore_dllexport_name_p (oldname))
    abort (); /* This shouldn't happen.  */
  else if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("%Jinitialized variable '%D' is marked dllimport", decl, decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = alloca (strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  newrtl = gen_rtx (MEM, Pmode,
		    gen_rtx (SYMBOL_REF, Pmode,
			     IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}
static int
mcore_dllexport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
}

static int
mcore_dllimport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
}
/* We must mark dll symbols specially.  Definitions of dllexport'd objects
   install some info in the .drective (PE) or .exports (ELF) sections.  */

static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED,
			   int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if ((TREE_CODE (decl) == FUNCTION_DECL
	    || TREE_CODE (decl) == VAR_DECL)
	   && DECL_RTL (decl) != NULL_RTX
	   && GET_CODE (DECL_RTL (decl)) == MEM
	   && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
	   && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
	   && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
	 ??? We leave these alone for now.  */
    }
}
/* Undo the effects of the above.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  return str + (str[0] == '@' ? 3 : 0);
}
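/* E.g. "@e.foo" strips back to "foo", while "@i.__imp_foo" strips to
   "__imp_foo" (only the three-character "@x." prefix is removed).  */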
/* MCore specific attribute support.
   dllexport - for exporting a function/variable that will live in a dll
   dllimport - for importing a function/variable from a dll
   naked     - do not create a function prologue/epilogue.  */

const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "dllexport", 0, 0, true,  false, false, NULL },
  { "dllimport", 0, 0, true,  false, false, NULL },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute },
  { NULL,        0, 0, false, false, false, NULL }
};
/* Handle a "naked" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
{
  if (TREE_CODE (*node) == FUNCTION_DECL)
    {
      /* PR14310 - don't complain about lack of return statement
	 in naked functions.  The solution here is a gross hack
	 but this is the only way to solve the problem without
	 adding a new feature to GCC.  I did try submitting a patch
	 that would add such a new feature, but it was (rightfully)
	 rejected on the grounds that it was creeping featurism,
	 so hence this code.  */
      if (warn_return_type)
	{
	  saved_warn_return_type = warn_return_type;
	  warn_return_type = 0;
	  saved_warn_return_type_count = 2;
	}
      else if (saved_warn_return_type_count)
	saved_warn_return_type_count = 2;
    }
  else
    {
      warning ("`%s' attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
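/* Typical use from C source (function name illustrative):

     void my_isr (void) __attribute__ ((naked));

   The compiler then emits no prologue or epilogue for my_isr, so its body
   must preserve any registers it uses itself.  */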
/* ??? It looks like this is PE specific?  Oh well, this is what the
   old code did as well.  */

static void
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  int len;
  const char * name;
  char * string;
  const char * prefix;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Strip off any encoding in name.  */
  name = (* targetm.strip_name_encoding) (name);

  /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place them in .text
     (everything from the $ on is stripped).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    prefix = ".text$";
  /* For compatibility with EPOC, we ignore the fact that the
     section might have relocs against it.  */
  else if (decl_readonly_section (decl, 0))
    prefix = ".rdata$";
  else
    prefix = ".data$";

  len = strlen (name) + strlen (prefix);
  string = alloca (len + 1);

  sprintf (string, "%s%s", prefix, name);

  DECL_SECTION_NAME (decl) = build_string (len, string);
}
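/* For example (names illustrative): a function `foo' is placed in section
   ".text$foo" and a writable variable `bar' in ".data$bar"; the linker
   strips everything from the `$' onwards when it finally places them in
   .text and .data.  */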
int
mcore_naked_function_p (void)
{
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}
#ifdef OBJECT_FORMAT_ELF
static void
mcore_asm_named_section (const char * name, unsigned int flags ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section %s\n", name);
}
#endif /* OBJECT_FORMAT_ELF */