/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "insn-codes.h"
#include "diagnostic-core.h"
#include "tm-constrs.h"
#include "dominance.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "target-def.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }
    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
	break;
      }
    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
      /* These are the least significant word in a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;
      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */
    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}
/* Count the number of FP registers that have to be saved.  */

static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names [i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
	 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9) (const_int -1*4))) (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9) (const_int -N*4))) (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      mask &= ~ (mask & - mask);
    }
  return count;
}
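/* Sanity check of the loop above (illustrative; not from the original
   sources): mask & -mask isolates the least significant set bit, so
   each iteration clears exactly one bit.  For example,
   popcount (0x3c000) iterates once per bit 14-17 and returns 4.  */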
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg = 0;
      int xsize;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */
#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
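/* Worked examples for the size macros above (illustrative; not from
   the original sources).  SIZE_ADD_SP counts the encoding bytes of
   "add imm,sp": SIZE_ADD_SP (-16) == 3 (imm8 form), SIZE_ADD_SP (-200)
   == 4 (imm16), SIZE_ADD_SP (-40000) == 6 (imm32).  SIZE_ADD_AX is one
   byte cheaper in the imm8 case because the An form has a shorter
   encoding.  SIZE_FMOV_SP (S, N) then prices N fmov saves whose SP
   offsets start at S; once an offset crosses the (1 << 8) or (1 << 24)
   displacement limits, each remaining fmov costs 6 or 7 bytes instead
   of 4.  */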
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs [FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs [FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}
      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  addr = gen_rtx_PLUS (SImode,
				       stack_pointer_rtx,
				       GEN_INT (xsize));
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;
	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			+ reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }
	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs [FIRST_ADDRESS_REGNUM+1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 byte.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }
	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }
  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)) */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */

static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
/* Return an RTX to represent where a value with mode MODE will be passed
   to a function.  If the result is NULL_RTX, the argument is pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
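/* Illustrative example (not from the original sources): for a call
   such as f (int a, int b, int c), the code above places A in the
   first argument register and B in the second; by the time C is
   considered, cum->nbytes has reached 8, so C falls through to the
   stack and NULL_RTX is returned for it.  */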
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}
/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));
  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}

/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}
/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
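/* Illustrative sketch of the transformation above (not from the
   original sources): an address like
   (plus (reg X) (const (plus (symbol_ref Y) (const_int -100000))))
   is rewritten so that X + (-100000) is computed into a register
   first and Y is added last, avoiding an indexed address whose
   "base" would be the out-of-segment value Y-100000.  */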
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
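/* Illustrative summary (not from the original sources): for a local
   symbol or label, the code above materializes sym@GOTOFF in REG and
   then adds the PIC register to it; a global SYMBOL_REF is instead
   loaded indirectly through its GOT slot, i.e. from
   (mem (plus pic_offset_table_rtx sym@GOT)).  The REG_EQUAL note lets
   later passes know REG holds the original address.  */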
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (REG_P (base))
    {
      if (REG_P (index))
	{
	  /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	     addressing is hard to satisfy.  */
	  if (!TARGET_AM33)
	    return false;

	  return (REGNO_GENERAL_P (REGNO (base), strict)
		  && REGNO_GENERAL_P (REGNO (index), strict));
	}

      if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
	return false;

      if (CONST_INT_P (index))
	return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

      if (CONSTANT_ADDRESS_P (index))
	return !flag_pic || mn10300_legitimate_pic_operand_p (index);
    }

  return false;
}
bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      if (!reg_renumber)
	return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
	return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}
rtx
mn10300_legitimize_reload_address (rtx x,
				   machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      break;
    }

  return true;
}
2120 mn10300_delegitimize_address (rtx orig_x
)
2122 rtx x
= orig_x
, ret
, addend
= NULL
;
2127 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
2130 if (XEXP (x
, 0) == pic_offset_table_rtx
)
2132 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2133 some odd-looking "addresses" that were never valid in the first place.
2134 We need to look harder to avoid warnings being emitted. */
2135 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
2137 rtx x0
= XEXP (x
, 0);
2138 rtx x00
= XEXP (x0
, 0);
2139 rtx x01
= XEXP (x0
, 1);
2141 if (x00
== pic_offset_table_rtx
)
2143 else if (x01
== pic_offset_table_rtx
)
2153 if (GET_CODE (x
) != CONST
)
2156 if (GET_CODE (x
) != UNSPEC
)
2159 ret
= XVECEXP (x
, 0, 0);
2160 if (XINT (x
, 1) == UNSPEC_GOTOFF
)
2162 else if (XINT (x
, 1) == UNSPEC_GOT
)
2167 gcc_assert (GET_CODE (ret
) == SYMBOL_REF
);
2168 if (need_mem
!= MEM_P (orig_x
))
2170 if (need_mem
&& addend
)
2173 ret
= gen_rtx_PLUS (Pmode
, addend
, ret
);
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
	return speed ? 0 : 3;
      return speed ? 0 : 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, MEM, 0, speed);
    }
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      if (! TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Given lack of the form of the address, this must be speed-relative,
   though we should never be less expensive than a size-relative register
   move cost above.  This is not a problem.  */

static int
mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		   int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 0;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    case BSWAP:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  case 1:
	  case 2:
	    total = 1;
	    goto alldone;
	  case 3:
	  case 4:
	    total = 2;
	    goto alldone;
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      break;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
				    MEM_ADDR_SPACE (x), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

 alldone:
  *ptotal = total;
  return true;
}
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;

  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
2555 mn10300_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
2557 rtx mem
, disp
, fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
2559 /* This is a strict alignment target, which means that we play
2560 some games to make sure that the locations at which we need
2561 to store <chain> and <disp> wind up at aligned addresses.
2564 0xfc 0xdd mov chain,a1
2566 0xf8 0xed 0x00 btst 0,d1
2570 Note that the two extra insns are effectively nops; they
2571 clobber the flags but do not affect the contents of D0 or D1. */
2573 disp
= expand_binop (SImode
, sub_optab
, fnaddr
,
2574 plus_constant (Pmode
, XEXP (m_tramp
, 0), 11),
2575 NULL_RTX
, 1, OPTAB_DIRECT
);
2577 mem
= adjust_address (m_tramp
, SImode
, 0);
2578 emit_move_insn (mem
, gen_int_mode (0xddfc0028, SImode
));
2579 mem
= adjust_address (m_tramp
, SImode
, 4);
2580 emit_move_insn (mem
, chain_value
);
2581 mem
= adjust_address (m_tramp
, SImode
, 8);
2582 emit_move_insn (mem
, gen_int_mode (0xdc00edf8, SImode
));
2583 mem
= adjust_address (m_tramp
, SImode
, 12);
2584 emit_move_insn (mem
, disp
);
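/* For illustration (not from the original sources): on this
   little-endian target the four SImode stores above lay down the byte
   stream 28 00 fc dd <chain> f8 ed 00 dc <disp>, i.e. exactly the insn
   sequence shown in the comment, with <chain> at offset 4 and <disp>
   at offset 12.  DISP is computed relative to m_tramp + 11 because
   offset 11 is where the jmp opcode byte (0xdc) lives.  */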
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
                             tree          thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta,
                             HOST_WIDE_INT vcall_offset,
                             tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  fputc ('\n', file);
}
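
/* As an illustration, with DELTA == 4 and VCALL_OFFSET == 8, and
   assuming THIS arrives in d0 with a1 free as the scratch register,
   the thunk would read:

        add 4, d0
        mov d0, a1
        mov (a1), a1
        add 8, a1
        mov (a1), a1
        add a1, d0
        jmp function  */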
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                             const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}
bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    {
      if (REGNO_REG_CLASS (regno) == DATA_REGS
          || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
          || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
        return GET_MODE_SIZE (mode) <= 4;
    }

  return false;
}
bool
mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  if (TARGET_AM33
      || mode1 == mode2
      || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
    return true;

  return false;
}
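
/* In short: integer values of four bytes or less may live in the data,
   extended and (on the AM33) address registers, with sub-word values
   further restricted to even register numbers; floating-point values
   are confined to even-numbered FP registers; and anything wider than
   four bytes must be split up or live in memory.  */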
static int
cc_flags_for_mode (machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}
static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:   /* Z */
    case NE:   /* ~Z */
      return CC_FLAG_Z;
    case LT:   /* N^V */
    case GE:   /* ~(N^V) */
      return CC_FLAG_N | CC_FLAG_V;
    case GT:   /* ~(Z|(N^V)) */
    case LE:   /* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
    case GEU:  /* ~C */
    case LTU:  /* C */
      return CC_FLAG_C;
    case GTU:  /* ~(C | Z) */
    case LEU:  /* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;
    case ORDERED:
    case UNORDERED:
    case LTGT:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return -1;
    default:
      gcc_unreachable ();
    }
}
machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
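
/* For example, a signed GT comparison needs Z, N and V and therefore
   CCmode; an unsigned GTU needs only Z and C and can use CCZNCmode;
   and a simple EQ test needs just Z, so the least demanding mode,
   CCZNmode, suffices.  */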
static bool
set_is_load_p (rtx set)
{
  return MEM_P (SET_SRC (set));
}

static bool
set_is_store_p (rtx set)
{
  return MEM_P (SET_DEST (set));
}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  /* We are only interested in pairs of SET.  */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
           && ! set_is_store_p (insn_set)
           && ! JUMP_P (insn)
           && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
           && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual
     where it says:

       "When the preceding instruction is a CPU load or
        store instruction, a following FPU instruction
        cannot be executed until the CPU completes the
        latency period even though there are no register
        or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU
     instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
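
/* The timings attribute appears to encode throughput and latency as
   decimal digits: a value such as 23 yields a latency of 3 cycles
   (23 % 10), while a longer value such as 211 yields 11 (211 % 100).  */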
static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
           i <= LAST_EXTENDED_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
           i <= LAST_FP_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_ADJUST.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
                       vec<const char *> &/*constraints*/,
                       vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
  SET_HARD_REG_BIT (clobbered_regs, CC_REG);
  return NULL;
}
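
/* The effect is that every inline asm is treated as if the user had
   listed "cc" in its clobbers, which is what code written for the old
   cc0-based compiler implicitly assumed.  */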
/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (pc_rtx, x);
  emit_jump_insn (x);
}
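
/* For example, a branch on (gt (reg:SI d0) (reg:SI d1)) to LABEL is
   rewritten as two insns along the lines of:

     (set (reg:CC cc) (compare:CC (reg:SI d0) (reg:SI d1)))
     (set (pc) (if_then_else (gt (reg:CC cc) (const_int 0))
                             (label_ref LABEL) (pc)))  */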
/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int))
     (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
        return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise we
         would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      if (count < 0)
        return 0;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return -count;
    }
}
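
/* Two worked examples, assuming SImode values and -Os: AND with -4
   (0xfffffffc) gives exact_log2 (4) == 2, so +2 is returned and the
   insn can become "lsr 2" followed by "asl 2" (clearing the low two
   bits); AND with 15 gives exact_log2 (16) == 4, so -4 is returned and
   the insn can become "asl 28" followed by "lsr 28" (clearing the high
   28 bits).  */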
/* The data needed by the LIW bundler about one candidate insn.  */

struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
         arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
         have been eliminated given that bundling only happens with
         optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
        return false;

      if (REG_P (pliw2->src))
        return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
        {
          if (! REG_P (pliw1->src)
              && (pliw2->op == LIW_OP_AND
                  || pliw2->op == LIW_OP_OR
                  || pliw2->op == LIW_OP_XOR))
            return false;

          pliw2->src = pliw1->src;
          return true;
        }

      return false;
    }

  /* Everything else is OK.  */
  return true;
}
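
/* For instance, "mov d1, d2" followed by "add d2, d3" can still be
   bundled: the MOV's source register is forwarded into the second
   operation, so the pair executes as "mov d1, d2 | add d1, d3" with
   the same final register state as the sequential version.  */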
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
        continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
        continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
        continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
        {
          struct liw_data temp;

          temp = liw1;
          liw1 = liw2;
          liw2 = temp;
        }

      delete_insn (insn2);

      rtx insn2_pat;

      if (liw1.op == LIW_OP_CMP)
        insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
                                 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
        insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
                                 GEN_INT (liw1.op));
      else
        insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
                             GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
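
/* The swap above makes sure that an operation restricted to a
   particular LIW slot ends up in that slot before the two SETs are
   re-emitted as a single liw (or cmp_liw / liw_cmp) pattern, which the
   machine description prints as one of the AM33's two-operation long
   instruction words.  */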
#define DUMP(reason, insn)                              \
  do                                                    \
    {                                                   \
      if (dump_file)                                    \
        {                                               \
          fprintf (dump_file, reason "\n");             \
          if (insn != NULL_RTX)                         \
            print_rtl_single (dump_file, insn);         \
          fprintf (dump_file, "\n");                    \
        }                                               \
    }                                                   \
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
         and as a loop-back point for the loop.  We need to separate
         these two functions so that the SETLB happens upon entry,
         but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}
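
/* On a simple counted loop the net effect is to plant a SETLB just
   ahead of the loop body and to replace the backwards conditional
   branch with the matching Lcc (or FLcc for FP comparisons), so that
   the processor's loop instruction buffer can supply the loop body on
   each iteration.  */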
static bool
mn10300_block_contains_call (basic_block block)
{
  rtx_insn *insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}

static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
        result = true;
        break;
      }

  free (bbs);

  return result;
}
static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
         then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
        reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
        /* FIXME: We could handle loops that span multiple blocks,
           but this requires a lot more work tracking down the branches
           that need altering, so for now keep things simple.  */
        reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
        reason = "it contains CALL insns";
      else
        {
          rtx_insn *branch = BB_END (loop->latch);

          gcc_assert (JUMP_P (branch));
          if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
            /* We cannot optimize tablejumps and the like.  */
            /* FIXME: We could handle unconditional jumps.  */
            reason = "it is not a simple loop";
          else
            {
              rtx label;

              if (dump_file)
                flow_loop_dump (loop, dump_file, NULL, 0);

              label = BB_HEAD (loop->header);
              gcc_assert (LABEL_P (label));

              mn10300_insert_setlb_lcc (label, branch);
            }
        }

      if (dump_file && reason != NULL)
        fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
                 INSN_UID (BB_HEAD (loop->header)),
                 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
        mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
        mn10300_bundle_liw ();
    }
}
/* Initialize the GCC target structure.  */

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef  TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef  TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef  TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef  TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef  TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef  TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef  TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef  TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef  TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef  TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef  TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef  TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef  TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

#undef  TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;