1 /* Subroutines for insn-output.c for Matsushita MN10300 series
2 Copyright (C) 1996-2014 Free Software Foundation, Inc.
3 Contributed by Jeff Law (law@cygnus.com).
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "stor-layout.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-attr.h"
43 #include "diagnostic-core.h"
45 #include "tm-constrs.h"
47 #include "target-def.h"
54 /* This is used in the am33_2.0-linux-gnu port, in which global symbol
55 names are not prefixed by underscores, to tell whether to prefix a
56 label with a plus sign or not, so that the assembler can tell
57 symbol names from register names. */
/* See the comment above: non-zero when labels must be emitted with a
   '+' prefix so the assembler can tell symbols from register names.  */
int mn10300_protect_label;
60 /* Selected processor type for tuning. */
61 enum processor_type mn10300_tune_cpu
= PROCESSOR_DEFAULT
;
/* Forward declarations of the condition-code helpers used by the
   operand-printing code later in this file.  */
static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);
71 /* Implement TARGET_OPTION_OVERRIDE. */
73 mn10300_option_override (void)
76 target_flags
&= ~MASK_MULT_BUG
;
79 /* Disable scheduling for the MN10300 as we do
80 not have timing information available for it. */
81 flag_schedule_insns
= 0;
82 flag_schedule_insns_after_reload
= 0;
84 /* Force enable splitting of wide types, as otherwise it is trivial
85 to run out of registers. Indeed, this works so well that register
86 allocation problems are now more common *without* optimization,
87 when this flag is not enabled by default. */
88 flag_split_wide_types
= 1;
91 if (mn10300_tune_string
)
93 if (strcasecmp (mn10300_tune_string
, "mn10300") == 0)
94 mn10300_tune_cpu
= PROCESSOR_MN10300
;
95 else if (strcasecmp (mn10300_tune_string
, "am33") == 0)
96 mn10300_tune_cpu
= PROCESSOR_AM33
;
97 else if (strcasecmp (mn10300_tune_string
, "am33-2") == 0)
98 mn10300_tune_cpu
= PROCESSOR_AM33_2
;
99 else if (strcasecmp (mn10300_tune_string
, "am34") == 0)
100 mn10300_tune_cpu
= PROCESSOR_AM34
;
102 error ("-mtune= expects mn10300, am33, am33-2, or am34");
107 mn10300_file_start (void)
109 default_file_start ();
112 fprintf (asm_out_file
, "\t.am33_2\n");
113 else if (TARGET_AM33
)
114 fprintf (asm_out_file
, "\t.am33\n");
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
127 /* Print operand X using operand code CODE to assembly language output file
131 mn10300_print_operand (FILE *file
, rtx x
, int code
)
137 unsigned int liw_op
= UINTVAL (x
);
139 gcc_assert (TARGET_ALLOW_LIW
);
140 gcc_assert (liw_op
< LIW_OP_MAX
);
141 fputs (liw_op_names
[liw_op
], file
);
148 enum rtx_code cmp
= GET_CODE (x
);
149 enum machine_mode mode
= GET_MODE (XEXP (x
, 0));
154 cmp
= reverse_condition (cmp
);
155 have_flags
= cc_flags_for_mode (mode
);
166 /* bge is smaller than bnc. */
167 str
= (have_flags
& CC_FLAG_V
? "ge" : "nc");
170 str
= (have_flags
& CC_FLAG_V
? "lt" : "ns");
218 gcc_checking_assert ((cc_flags_for_code (cmp
) & ~have_flags
) == 0);
224 /* This is used for the operand to a call instruction;
225 if it's a REG, enclose it in parens, else output
226 the operand normally. */
230 mn10300_print_operand (file
, x
, 0);
234 mn10300_print_operand (file
, x
, 0);
238 switch (GET_CODE (x
))
242 output_address (XEXP (x
, 0));
247 fprintf (file
, "fd%d", REGNO (x
) - 18);
255 /* These are the least significant word in a 64bit value. */
257 switch (GET_CODE (x
))
261 output_address (XEXP (x
, 0));
266 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
270 fprintf (file
, "%s", reg_names
[subreg_regno (x
)]);
278 switch (GET_MODE (x
))
281 REAL_VALUE_FROM_CONST_DOUBLE (rv
, x
);
282 REAL_VALUE_TO_TARGET_DOUBLE (rv
, val
);
283 fprintf (file
, "0x%lx", val
[0]);
286 REAL_VALUE_FROM_CONST_DOUBLE (rv
, x
);
287 REAL_VALUE_TO_TARGET_SINGLE (rv
, val
[0]);
288 fprintf (file
, "0x%lx", val
[0]);
292 mn10300_print_operand_address (file
,
293 GEN_INT (CONST_DOUBLE_LOW (x
)));
304 split_double (x
, &low
, &high
);
305 fprintf (file
, "%ld", (long)INTVAL (low
));
314 /* Similarly, but for the most significant word. */
316 switch (GET_CODE (x
))
320 x
= adjust_address (x
, SImode
, 4);
321 output_address (XEXP (x
, 0));
326 fprintf (file
, "%s", reg_names
[REGNO (x
) + 1]);
330 fprintf (file
, "%s", reg_names
[subreg_regno (x
) + 1]);
338 switch (GET_MODE (x
))
341 REAL_VALUE_FROM_CONST_DOUBLE (rv
, x
);
342 REAL_VALUE_TO_TARGET_DOUBLE (rv
, val
);
343 fprintf (file
, "0x%lx", val
[1]);
349 mn10300_print_operand_address (file
,
350 GEN_INT (CONST_DOUBLE_HIGH (x
)));
361 split_double (x
, &low
, &high
);
362 fprintf (file
, "%ld", (long)INTVAL (high
));
373 if (REG_P (XEXP (x
, 0)))
374 output_address (gen_rtx_PLUS (SImode
, XEXP (x
, 0), const0_rtx
));
376 output_address (XEXP (x
, 0));
381 gcc_assert (INTVAL (x
) >= -128 && INTVAL (x
) <= 255);
382 fprintf (file
, "%d", (int)((~INTVAL (x
)) & 0xff));
386 gcc_assert (INTVAL (x
) >= -128 && INTVAL (x
) <= 255);
387 fprintf (file
, "%d", (int)(INTVAL (x
) & 0xff));
390 /* For shift counts. The hardware ignores the upper bits of
391 any immediate, but the assembler will flag an out of range
392 shift count as an error. So we mask off the high bits
393 of the immediate here. */
397 fprintf (file
, "%d", (int)(INTVAL (x
) & 0x1f));
403 switch (GET_CODE (x
))
407 output_address (XEXP (x
, 0));
416 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
420 fprintf (file
, "%s", reg_names
[subreg_regno (x
)]);
423 /* This will only be single precision.... */
429 REAL_VALUE_FROM_CONST_DOUBLE (rv
, x
);
430 REAL_VALUE_TO_TARGET_SINGLE (rv
, val
);
431 fprintf (file
, "0x%lx", val
);
441 mn10300_print_operand_address (file
, x
);
450 /* Output assembly language output for the address ADDR to FILE. */
453 mn10300_print_operand_address (FILE *file
, rtx addr
)
455 switch (GET_CODE (addr
))
458 mn10300_print_operand (file
, XEXP (addr
, 0), 0);
463 mn10300_print_operand (file
, XEXP (addr
, 0), 0);
466 mn10300_print_operand (file
, XEXP (addr
, 1), 0);
470 mn10300_print_operand (file
, addr
, 0);
474 rtx base
= XEXP (addr
, 0);
475 rtx index
= XEXP (addr
, 1);
477 if (REG_P (index
) && !REG_OK_FOR_INDEX_P (index
))
483 gcc_assert (REG_P (index
) && REG_OK_FOR_INDEX_P (index
));
485 gcc_assert (REG_OK_FOR_BASE_P (base
));
487 mn10300_print_operand (file
, index
, 0);
489 mn10300_print_operand (file
, base
, 0);
493 output_addr_const (file
, addr
);
496 output_addr_const (file
, addr
);
501 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
503 Used for PIC-specific UNSPECs. */
506 mn10300_asm_output_addr_const_extra (FILE *file
, rtx x
)
508 if (GET_CODE (x
) == UNSPEC
)
513 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
514 output_addr_const (file
, XVECEXP (x
, 0, 0));
517 output_addr_const (file
, XVECEXP (x
, 0, 0));
518 fputs ("@GOT", file
);
521 output_addr_const (file
, XVECEXP (x
, 0, 0));
522 fputs ("@GOTOFF", file
);
525 output_addr_const (file
, XVECEXP (x
, 0, 0));
526 fputs ("@PLT", file
);
528 case UNSPEC_GOTSYM_OFF
:
529 assemble_name (file
, GOT_SYMBOL_NAME
);
531 output_addr_const (file
, XVECEXP (x
, 0, 0));
543 /* Count the number of FP registers that have to be saved. */
545 fp_regs_to_save (void)
552 for (i
= FIRST_FP_REGNUM
; i
<= LAST_FP_REGNUM
; ++i
)
553 if (df_regs_ever_live_p (i
) && ! call_really_used_regs
[i
])
559 /* Print a set of registers in the format required by "movm" and "ret".
560 Register K is saved if bit K of MASK is set. The data and address
561 registers can be stored individually, but the extended registers cannot.
562 We assume that the mask already takes that into account. For instance,
563 bits 14 to 17 must have the same value. */
566 mn10300_print_reg_list (FILE *file
, int mask
)
574 for (i
= 0; i
< FIRST_EXTENDED_REGNUM
; i
++)
575 if ((mask
& (1 << i
)) != 0)
579 fputs (reg_names
[i
], file
);
583 if ((mask
& 0x3c000) != 0)
585 gcc_assert ((mask
& 0x3c000) == 0x3c000);
588 fputs ("exreg1", file
);
595 /* If the MDR register is never clobbered, we can use the RETF instruction
596 which takes the address from the MDR register. This is 3 cycles faster
597 than having to load the address from the stack. */
600 mn10300_can_use_retf_insn (void)
602 /* Don't bother if we're not optimizing. In this case we won't
603 have proper access to df_regs_ever_live_p. */
607 /* EH returns alter the saved return address; MDR is not current. */
608 if (crtl
->calls_eh_return
)
611 /* Obviously not if MDR is ever clobbered. */
612 if (df_regs_ever_live_p (MDR_REG
))
615 /* ??? Careful not to use this during expand_epilogue etc. */
616 gcc_assert (!in_sequence_p ());
617 return leaf_function_p ();
621 mn10300_can_use_rets_insn (void)
623 return !mn10300_initial_offset (ARG_POINTER_REGNUM
, STACK_POINTER_REGNUM
);
626 /* Returns the set of live, callee-saved registers as a bitmask. The
627 callee-saved extended registers cannot be stored individually, so
628 all of them will be included in the mask if any one of them is used.
629 Also returns the number of bytes in the registers in the mask if
630 BYTES_SAVED is not NULL. */
633 mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved
)
640 for (i
= 0; i
<= LAST_EXTENDED_REGNUM
; i
++)
641 if (df_regs_ever_live_p (i
) && ! call_really_used_regs
[i
])
647 if ((mask
& 0x3c000) != 0)
649 for (i
= 0x04000; i
< 0x40000; i
<<= 1)
657 * bytes_saved
= count
* UNITS_PER_WORD
;
665 RTX_FRAME_RELATED_P (r
) = 1;
669 /* Generate an instruction that pushes several registers onto the stack.
670 Register K will be saved if bit K in MASK is set. The function does
671 nothing if MASK is zero.
673 To be compatible with the "movm" instruction, the lowest-numbered
674 register must be stored in the lowest slot. If MASK is the set
675 { R1,...,RN }, where R1...RN are ordered least first, the generated
676 instruction will have the form:
679 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
680 (set (mem:SI (plus:SI (reg:SI 9)
684 (set (mem:SI (plus:SI (reg:SI 9)
689 mn10300_gen_multiple_store (unsigned int mask
)
691 /* The order in which registers are stored, from SP-4 through SP-N*4. */
692 static const unsigned int store_order
[8] = {
693 /* e2, e3: never saved */
694 FIRST_EXTENDED_REGNUM
+ 4,
695 FIRST_EXTENDED_REGNUM
+ 5,
696 FIRST_EXTENDED_REGNUM
+ 6,
697 FIRST_EXTENDED_REGNUM
+ 7,
698 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
699 FIRST_DATA_REGNUM
+ 2,
700 FIRST_DATA_REGNUM
+ 3,
701 FIRST_ADDRESS_REGNUM
+ 2,
702 FIRST_ADDRESS_REGNUM
+ 3,
703 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
713 for (i
= count
= 0; i
< ARRAY_SIZE(store_order
); ++i
)
715 unsigned regno
= store_order
[i
];
717 if (((mask
>> regno
) & 1) == 0)
721 x
= plus_constant (Pmode
, stack_pointer_rtx
, count
* -4);
722 x
= gen_frame_mem (SImode
, x
);
723 x
= gen_rtx_SET (VOIDmode
, x
, gen_rtx_REG (SImode
, regno
));
726 /* Remove the register from the mask so that... */
727 mask
&= ~(1u << regno
);
730 /* ... we can make sure that we didn't try to use a register
731 not listed in the store order. */
732 gcc_assert (mask
== 0);
734 /* Create the instruction that updates the stack pointer. */
735 x
= plus_constant (Pmode
, stack_pointer_rtx
, count
* -4);
736 x
= gen_rtx_SET (VOIDmode
, stack_pointer_rtx
, x
);
739 /* We need one PARALLEL element to update the stack pointer and
740 an additional element for each register that is stored. */
741 x
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (count
+ 1, elts
));
/* Return the number of bits set in MASK.  Used to size the register
   save area when reporting static stack usage.  */

static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      /* Clear the least-significant set bit.  */
      mask &= ~ (mask & - mask);
    }
  return count;
}
759 mn10300_expand_prologue (void)
761 HOST_WIDE_INT size
= mn10300_frame_size ();
764 mask
= mn10300_get_live_callee_saved_regs (NULL
);
765 /* If we use any of the callee-saved registers, save them now. */
766 mn10300_gen_multiple_store (mask
);
768 if (flag_stack_usage_info
)
769 current_function_static_stack_size
= size
+ popcount (mask
) * 4;
771 if (TARGET_AM33_2
&& fp_regs_to_save ())
773 int num_regs_to_save
= fp_regs_to_save (), i
;
779 save_sp_partial_merge
,
783 unsigned int strategy_size
= (unsigned)-1, this_strategy_size
;
786 if (flag_stack_usage_info
)
787 current_function_static_stack_size
+= num_regs_to_save
* 4;
789 /* We have several different strategies to save FP registers.
790 We can store them using SP offsets, which is beneficial if
791 there are just a few registers to save, or we can use `a0' in
792 post-increment mode (`a0' is the only call-clobbered address
793 register that is never used to pass information to a
794 function). Furthermore, if we don't need a frame pointer, we
795 can merge the two SP adds into a single one, but this isn't
796 always beneficial; sometimes we can just split the two adds
797 so that we don't exceed a 16-bit constant size. The code
798 below will select which strategy to use, so as to generate
799 smallest code. Ties are broken in favor or shorter sequences
800 (in terms of number of instructions). */
802 #define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
803 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
804 #define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
805 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
807 /* We add 0 * (S) in two places to promote to the type of S,
808 so that all arms of the conditional have the same type. */
809 #define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
810 (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
811 : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
812 + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
814 #define SIZE_FMOV_SP_(S,N) \
815 (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
816 SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
817 (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
818 #define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
820 /* Consider alternative save_sp_merge only if we don't need the
821 frame pointer and size is nonzero. */
822 if (! frame_pointer_needed
&& size
)
824 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
825 this_strategy_size
= SIZE_ADD_SP (-(size
+ 4 * num_regs_to_save
));
826 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
827 this_strategy_size
+= SIZE_FMOV_SP (size
, num_regs_to_save
);
829 if (this_strategy_size
< strategy_size
)
831 strategy
= save_sp_merge
;
832 strategy_size
= this_strategy_size
;
836 /* Consider alternative save_sp_no_merge unconditionally. */
837 /* Insn: add -4 * num_regs_to_save, sp. */
838 this_strategy_size
= SIZE_ADD_SP (-4 * num_regs_to_save
);
839 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
840 this_strategy_size
+= SIZE_FMOV_SP (0, num_regs_to_save
);
843 /* Insn: add -size, sp. */
844 this_strategy_size
+= SIZE_ADD_SP (-size
);
847 if (this_strategy_size
< strategy_size
)
849 strategy
= save_sp_no_merge
;
850 strategy_size
= this_strategy_size
;
853 /* Consider alternative save_sp_partial_merge only if we don't
854 need a frame pointer and size is reasonably large. */
855 if (! frame_pointer_needed
&& size
+ 4 * num_regs_to_save
> 128)
857 /* Insn: add -128, sp. */
858 this_strategy_size
= SIZE_ADD_SP (-128);
859 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
860 this_strategy_size
+= SIZE_FMOV_SP (128 - 4 * num_regs_to_save
,
864 /* Insn: add 128-size, sp. */
865 this_strategy_size
+= SIZE_ADD_SP (128 - size
);
868 if (this_strategy_size
< strategy_size
)
870 strategy
= save_sp_partial_merge
;
871 strategy_size
= this_strategy_size
;
875 /* Consider alternative save_a0_merge only if we don't need a
876 frame pointer, size is nonzero and the user hasn't
877 changed the calling conventions of a0. */
878 if (! frame_pointer_needed
&& size
879 && call_really_used_regs
[FIRST_ADDRESS_REGNUM
]
880 && ! fixed_regs
[FIRST_ADDRESS_REGNUM
])
882 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
883 this_strategy_size
= SIZE_ADD_SP (-(size
+ 4 * num_regs_to_save
));
884 /* Insn: mov sp, a0. */
885 this_strategy_size
++;
888 /* Insn: add size, a0. */
889 this_strategy_size
+= SIZE_ADD_AX (size
);
891 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
892 this_strategy_size
+= 3 * num_regs_to_save
;
894 if (this_strategy_size
< strategy_size
)
896 strategy
= save_a0_merge
;
897 strategy_size
= this_strategy_size
;
901 /* Consider alternative save_a0_no_merge if the user hasn't
902 changed the calling conventions of a0. */
903 if (call_really_used_regs
[FIRST_ADDRESS_REGNUM
]
904 && ! fixed_regs
[FIRST_ADDRESS_REGNUM
])
906 /* Insn: add -4 * num_regs_to_save, sp. */
907 this_strategy_size
= SIZE_ADD_SP (-4 * num_regs_to_save
);
908 /* Insn: mov sp, a0. */
909 this_strategy_size
++;
910 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
911 this_strategy_size
+= 3 * num_regs_to_save
;
914 /* Insn: add -size, sp. */
915 this_strategy_size
+= SIZE_ADD_SP (-size
);
918 if (this_strategy_size
< strategy_size
)
920 strategy
= save_a0_no_merge
;
921 strategy_size
= this_strategy_size
;
925 /* Emit the initial SP add, common to all strategies. */
928 case save_sp_no_merge
:
929 case save_a0_no_merge
:
930 F (emit_insn (gen_addsi3 (stack_pointer_rtx
,
932 GEN_INT (-4 * num_regs_to_save
))));
936 case save_sp_partial_merge
:
937 F (emit_insn (gen_addsi3 (stack_pointer_rtx
,
940 xsize
= 128 - 4 * num_regs_to_save
;
946 F (emit_insn (gen_addsi3 (stack_pointer_rtx
,
948 GEN_INT (-(size
+ 4 * num_regs_to_save
)))));
949 /* We'll have to adjust FP register saves according to the
952 /* Since we've already created the stack frame, don't do it
953 again at the end of the function. */
961 /* Now prepare register a0, if we have decided to use it. */
965 case save_sp_no_merge
:
966 case save_sp_partial_merge
:
971 case save_a0_no_merge
:
972 reg
= gen_rtx_REG (SImode
, FIRST_ADDRESS_REGNUM
);
973 F (emit_insn (gen_movsi (reg
, stack_pointer_rtx
)));
975 F (emit_insn (gen_addsi3 (reg
, reg
, GEN_INT (xsize
))));
976 reg
= gen_rtx_POST_INC (SImode
, reg
);
983 /* Now actually save the FP registers. */
984 for (i
= FIRST_FP_REGNUM
; i
<= LAST_FP_REGNUM
; ++i
)
985 if (df_regs_ever_live_p (i
) && ! call_really_used_regs
[i
])
993 /* If we aren't using `a0', use an SP offset. */
996 addr
= gen_rtx_PLUS (SImode
,
1001 addr
= stack_pointer_rtx
;
1006 F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode
, addr
),
1007 gen_rtx_REG (SFmode
, i
))));
1011 /* Now put the frame pointer into the frame pointer register. */
1012 if (frame_pointer_needed
)
1013 F (emit_move_insn (frame_pointer_rtx
, stack_pointer_rtx
));
1015 /* Allocate stack for this frame. */
1017 F (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1021 if (flag_pic
&& df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM
))
1022 emit_insn (gen_load_pic ());
1026 mn10300_expand_epilogue (void)
1028 HOST_WIDE_INT size
= mn10300_frame_size ();
1029 unsigned int reg_save_bytes
;
1031 mn10300_get_live_callee_saved_regs (& reg_save_bytes
);
1033 if (TARGET_AM33_2
&& fp_regs_to_save ())
1035 int num_regs_to_save
= fp_regs_to_save (), i
;
1038 /* We have several options to restore FP registers. We could
1039 load them from SP offsets, but, if there are enough FP
1040 registers to restore, we win if we use a post-increment
1043 /* If we have a frame pointer, it's the best option, because we
1044 already know it has the value we want. */
1045 if (frame_pointer_needed
)
1046 reg
= gen_rtx_REG (SImode
, FRAME_POINTER_REGNUM
);
1047 /* Otherwise, we may use `a1', since it's call-clobbered and
1048 it's never used for return values. But only do so if it's
1049 smaller than using SP offsets. */
1052 enum { restore_sp_post_adjust
,
1053 restore_sp_pre_adjust
,
1054 restore_sp_partial_adjust
,
1055 restore_a1
} strategy
;
1056 unsigned int this_strategy_size
, strategy_size
= (unsigned)-1;
1058 /* Consider using sp offsets before adjusting sp. */
1059 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1060 this_strategy_size
= SIZE_FMOV_SP (size
, num_regs_to_save
);
1061 /* If size is too large, we'll have to adjust SP with an
1063 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1065 /* Insn: add size + 4 * num_regs_to_save, sp. */
1066 this_strategy_size
+= SIZE_ADD_SP (size
+ 4 * num_regs_to_save
);
1068 /* If we don't have to restore any non-FP registers,
1069 we'll be able to save one byte by using rets. */
1070 if (! reg_save_bytes
)
1071 this_strategy_size
--;
1073 if (this_strategy_size
< strategy_size
)
1075 strategy
= restore_sp_post_adjust
;
1076 strategy_size
= this_strategy_size
;
1079 /* Consider using sp offsets after adjusting sp. */
1080 /* Insn: add size, sp. */
1081 this_strategy_size
= SIZE_ADD_SP (size
);
1082 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1083 this_strategy_size
+= SIZE_FMOV_SP (0, num_regs_to_save
);
1084 /* We're going to use ret to release the FP registers
1085 save area, so, no savings. */
1087 if (this_strategy_size
< strategy_size
)
1089 strategy
= restore_sp_pre_adjust
;
1090 strategy_size
= this_strategy_size
;
1093 /* Consider using sp offsets after partially adjusting sp.
1094 When size is close to 32Kb, we may be able to adjust SP
1095 with an imm16 add instruction while still using fmov
1097 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1099 /* Insn: add size + 4 * num_regs_to_save
1100 + reg_save_bytes - 252,sp. */
1101 this_strategy_size
= SIZE_ADD_SP (size
+ 4 * num_regs_to_save
1102 + (int) reg_save_bytes
- 252);
1103 /* Insn: fmov (##,sp),fs#, fo each fs# to be restored. */
1104 this_strategy_size
+= SIZE_FMOV_SP (252 - reg_save_bytes
1105 - 4 * num_regs_to_save
,
1107 /* We're going to use ret to release the FP registers
1108 save area, so, no savings. */
1110 if (this_strategy_size
< strategy_size
)
1112 strategy
= restore_sp_partial_adjust
;
1113 strategy_size
= this_strategy_size
;
1117 /* Consider using a1 in post-increment mode, as long as the
1118 user hasn't changed the calling conventions of a1. */
1119 if (call_really_used_regs
[FIRST_ADDRESS_REGNUM
+ 1]
1120 && ! fixed_regs
[FIRST_ADDRESS_REGNUM
+1])
1122 /* Insn: mov sp,a1. */
1123 this_strategy_size
= 1;
1126 /* Insn: add size,a1. */
1127 this_strategy_size
+= SIZE_ADD_AX (size
);
1129 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1130 this_strategy_size
+= 3 * num_regs_to_save
;
1131 /* If size is large enough, we may be able to save a
1133 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1135 /* Insn: mov a1,sp. */
1136 this_strategy_size
+= 2;
1138 /* If we don't have to restore any non-FP registers,
1139 we'll be able to save one byte by using rets. */
1140 if (! reg_save_bytes
)
1141 this_strategy_size
--;
1143 if (this_strategy_size
< strategy_size
)
1145 strategy
= restore_a1
;
1146 strategy_size
= this_strategy_size
;
1152 case restore_sp_post_adjust
:
1155 case restore_sp_pre_adjust
:
1156 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1162 case restore_sp_partial_adjust
:
1163 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1165 GEN_INT (size
+ 4 * num_regs_to_save
1166 + reg_save_bytes
- 252)));
1167 size
= 252 - reg_save_bytes
- 4 * num_regs_to_save
;
1171 reg
= gen_rtx_REG (SImode
, FIRST_ADDRESS_REGNUM
+ 1);
1172 emit_insn (gen_movsi (reg
, stack_pointer_rtx
));
1174 emit_insn (gen_addsi3 (reg
, reg
, GEN_INT (size
)));
1182 /* Adjust the selected register, if any, for post-increment. */
1184 reg
= gen_rtx_POST_INC (SImode
, reg
);
1186 for (i
= FIRST_FP_REGNUM
; i
<= LAST_FP_REGNUM
; ++i
)
1187 if (df_regs_ever_live_p (i
) && ! call_really_used_regs
[i
])
1195 /* If we aren't using a post-increment register, use an
1197 addr
= gen_rtx_PLUS (SImode
,
1202 addr
= stack_pointer_rtx
;
1206 emit_insn (gen_movsf (gen_rtx_REG (SFmode
, i
),
1207 gen_rtx_MEM (SFmode
, addr
)));
1210 /* If we were using the restore_a1 strategy and the number of
1211 bytes to be released won't fit in the `ret' byte, copy `a1'
1212 to `sp', to avoid having to use `add' to adjust it. */
1213 if (! frame_pointer_needed
&& reg
&& size
+ reg_save_bytes
> 255)
1215 emit_move_insn (stack_pointer_rtx
, XEXP (reg
, 0));
1220 /* Maybe cut back the stack, except for the register save area.
1222 If the frame pointer exists, then use the frame pointer to
1225 If the stack size + register save area is more than 255 bytes,
1226 then the stack must be cut back here since the size + register
1227 save size is too big for a ret/retf instruction.
1229 Else leave it alone, it will be cut back as part of the
1230 ret/retf instruction, or there wasn't any stack to begin with.
1232 Under no circumstances should the register save area be
1233 deallocated here, that would leave a window where an interrupt
1234 could occur and trash the register save area. */
1235 if (frame_pointer_needed
)
1237 emit_move_insn (stack_pointer_rtx
, frame_pointer_rtx
);
1240 else if (size
+ reg_save_bytes
> 255)
1242 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1248 /* Adjust the stack and restore callee-saved registers, if any. */
1249 if (mn10300_can_use_rets_insn ())
1250 emit_jump_insn (ret_rtx
);
1252 emit_jump_insn (gen_return_ret (GEN_INT (size
+ reg_save_bytes
)));
1255 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1256 This function is for MATCH_PARALLEL and so assumes OP is known to be
1257 parallel. If OP is a multiple store, return a mask indicating which
1258 registers it saves. Return 0 otherwise. */
1261 mn10300_store_multiple_regs (rtx op
)
1269 count
= XVECLEN (op
, 0);
1273 /* Check that first instruction has the form (set (sp) (plus A B)) */
1274 elt
= XVECEXP (op
, 0, 0);
1275 if (GET_CODE (elt
) != SET
1276 || (! REG_P (SET_DEST (elt
)))
1277 || REGNO (SET_DEST (elt
)) != STACK_POINTER_REGNUM
1278 || GET_CODE (SET_SRC (elt
)) != PLUS
)
1281 /* Check that A is the stack pointer and B is the expected stack size.
1282 For OP to match, each subsequent instruction should push a word onto
1283 the stack. We therefore expect the first instruction to create
1284 COUNT-1 stack slots. */
1285 elt
= SET_SRC (elt
);
1286 if ((! REG_P (XEXP (elt
, 0)))
1287 || REGNO (XEXP (elt
, 0)) != STACK_POINTER_REGNUM
1288 || (! CONST_INT_P (XEXP (elt
, 1)))
1289 || INTVAL (XEXP (elt
, 1)) != -(count
- 1) * 4)
1293 for (i
= 1; i
< count
; i
++)
1295 /* Check that element i is a (set (mem M) R). */
1296 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1297 Remember: the ordering is *not* monotonic. */
1298 elt
= XVECEXP (op
, 0, i
);
1299 if (GET_CODE (elt
) != SET
1300 || (! MEM_P (SET_DEST (elt
)))
1301 || (! REG_P (SET_SRC (elt
))))
1304 /* Remember which registers are to be saved. */
1305 last
= REGNO (SET_SRC (elt
));
1306 mask
|= (1 << last
);
1308 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1309 elt
= XEXP (SET_DEST (elt
), 0);
1310 if (GET_CODE (elt
) != PLUS
1311 || (! REG_P (XEXP (elt
, 0)))
1312 || REGNO (XEXP (elt
, 0)) != STACK_POINTER_REGNUM
1313 || (! CONST_INT_P (XEXP (elt
, 1)))
1314 || INTVAL (XEXP (elt
, 1)) != -i
* 4)
1318 /* All or none of the callee-saved extended registers must be in the set. */
1319 if ((mask
& 0x3c000) != 0
1320 && (mask
& 0x3c000) != 0x3c000)
1326 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1329 mn10300_preferred_reload_class (rtx x
, reg_class_t rclass
)
1331 if (x
== stack_pointer_rtx
&& rclass
!= SP_REGS
)
1332 return (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
1335 && !HARD_REGISTER_P (x
))
1336 || (GET_CODE (x
) == SUBREG
1337 && REG_P (SUBREG_REG (x
))
1338 && !HARD_REGISTER_P (SUBREG_REG (x
))))
1339 return LIMIT_RELOAD_CLASS (GET_MODE (x
), rclass
);
1344 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1347 mn10300_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
1349 if (x
== stack_pointer_rtx
&& rclass
!= SP_REGS
)
1350 return (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
1354 /* Implement TARGET_SECONDARY_RELOAD. */
1357 mn10300_secondary_reload (bool in_p
, rtx x
, reg_class_t rclass_i
,
1358 enum machine_mode mode
, secondary_reload_info
*sri
)
1360 enum reg_class rclass
= (enum reg_class
) rclass_i
;
1361 enum reg_class xclass
= NO_REGS
;
1362 unsigned int xregno
= INVALID_REGNUM
;
1367 if (xregno
>= FIRST_PSEUDO_REGISTER
)
1368 xregno
= true_regnum (x
);
1369 if (xregno
!= INVALID_REGNUM
)
1370 xclass
= REGNO_REG_CLASS (xregno
);
1375 /* Memory load/stores less than a full word wide can't have an
1376 address or stack pointer destination. They must use a data
1377 register as an intermediate register. */
1378 if (rclass
!= DATA_REGS
1379 && (mode
== QImode
|| mode
== HImode
)
1380 && xclass
== NO_REGS
)
1383 /* We can only move SP to/from an address register. */
1385 && rclass
== SP_REGS
1386 && xclass
!= ADDRESS_REGS
)
1387 return ADDRESS_REGS
;
1389 && xclass
== SP_REGS
1390 && rclass
!= ADDRESS_REGS
1391 && rclass
!= SP_OR_ADDRESS_REGS
)
1392 return ADDRESS_REGS
;
1395 /* We can't directly load sp + const_int into a register;
1396 we must use an address register as an scratch. */
1398 && rclass
!= SP_REGS
1399 && rclass
!= SP_OR_ADDRESS_REGS
1400 && rclass
!= SP_OR_GENERAL_REGS
1401 && GET_CODE (x
) == PLUS
1402 && (XEXP (x
, 0) == stack_pointer_rtx
1403 || XEXP (x
, 1) == stack_pointer_rtx
))
1405 sri
->icode
= CODE_FOR_reload_plus_sp_const
;
1409 /* We can only move MDR to/from a data register. */
1410 if (rclass
== MDR_REGS
&& xclass
!= DATA_REGS
)
1412 if (xclass
== MDR_REGS
&& rclass
!= DATA_REGS
)
1415 /* We can't load/store an FP register from a constant address. */
1417 && (rclass
== FP_REGS
|| xclass
== FP_REGS
)
1418 && (xclass
== NO_REGS
|| rclass
== NO_REGS
))
1422 if (xregno
>= FIRST_PSEUDO_REGISTER
&& xregno
!= INVALID_REGNUM
)
1424 addr
= reg_equiv_mem (xregno
);
1426 addr
= XEXP (addr
, 0);
1431 if (addr
&& CONSTANT_ADDRESS_P (addr
))
1432 return GENERAL_REGS
;
1434 /* Otherwise assume no secondary reloads are needed. */
1439 mn10300_frame_size (void)
1441 /* size includes the fixed stack space needed for function calls. */
1442 int size
= get_frame_size () + crtl
->outgoing_args_size
;
1444 /* And space for the return pointer. */
1445 size
+= crtl
->outgoing_args_size
? 4 : 0;
1451 mn10300_initial_offset (int from
, int to
)
1455 gcc_assert (from
== ARG_POINTER_REGNUM
|| from
== FRAME_POINTER_REGNUM
);
1456 gcc_assert (to
== FRAME_POINTER_REGNUM
|| to
== STACK_POINTER_REGNUM
);
1458 if (to
== STACK_POINTER_REGNUM
)
1459 diff
= mn10300_frame_size ();
1461 /* The difference between the argument pointer and the frame pointer
1462 is the size of the callee register save area. */
1463 if (from
== ARG_POINTER_REGNUM
)
1465 unsigned int reg_save_bytes
;
1467 mn10300_get_live_callee_saved_regs (& reg_save_bytes
);
1468 diff
+= reg_save_bytes
;
1469 diff
+= 4 * fp_regs_to_save ();
1475 /* Worker function for TARGET_RETURN_IN_MEMORY. */
1478 mn10300_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
1480 /* Return values > 8 bytes in length in memory. */
1481 return (int_size_in_bytes (type
) > 8
1482 || int_size_in_bytes (type
) == 0
1483 || TYPE_MODE (type
) == BLKmode
);
1486 /* Flush the argument registers to the stack for a stdarg function;
1487 return the new argument pointer. */
1489 mn10300_builtin_saveregs (void)
1492 tree fntype
= TREE_TYPE (current_function_decl
);
1493 int argadj
= ((!stdarg_p (fntype
))
1494 ? UNITS_PER_WORD
: 0);
1495 alias_set_type set
= get_varargs_alias_set ();
1498 offset
= plus_constant (Pmode
, crtl
->args
.arg_offset_rtx
, argadj
);
1500 offset
= crtl
->args
.arg_offset_rtx
;
1502 mem
= gen_rtx_MEM (SImode
, crtl
->args
.internal_arg_pointer
);
1503 set_mem_alias_set (mem
, set
);
1504 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
1506 mem
= gen_rtx_MEM (SImode
,
1507 plus_constant (Pmode
,
1508 crtl
->args
.internal_arg_pointer
, 4));
1509 set_mem_alias_set (mem
, set
);
1510 emit_move_insn (mem
, gen_rtx_REG (SImode
, 1));
1512 return copy_to_reg (expand_binop (Pmode
, add_optab
,
1513 crtl
->args
.internal_arg_pointer
,
1514 offset
, 0, 0, OPTAB_LIB_WIDEN
));
1518 mn10300_va_start (tree valist
, rtx nextarg
)
1520 nextarg
= expand_builtin_saveregs ();
1521 std_expand_builtin_va_start (valist
, nextarg
);
1524 /* Return true when a parameter should be passed by reference. */
1527 mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED
,
1528 enum machine_mode mode
, const_tree type
,
1529 bool named ATTRIBUTE_UNUSED
)
1531 unsigned HOST_WIDE_INT size
;
1534 size
= int_size_in_bytes (type
);
1536 size
= GET_MODE_SIZE (mode
);
1538 return (size
> 8 || size
== 0);
1541 /* Return an RTX to represent where a value with mode MODE will be returned
1542 from a function. If the result is NULL_RTX, the argument is pushed. */
1545 mn10300_function_arg (cumulative_args_t cum_v
, enum machine_mode mode
,
1546 const_tree type
, bool named ATTRIBUTE_UNUSED
)
1548 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1549 rtx result
= NULL_RTX
;
1552 /* We only support using 2 data registers as argument registers. */
1555 /* Figure out the size of the object to be passed. */
1556 if (mode
== BLKmode
)
1557 size
= int_size_in_bytes (type
);
1559 size
= GET_MODE_SIZE (mode
);
1561 cum
->nbytes
= (cum
->nbytes
+ 3) & ~3;
1563 /* Don't pass this arg via a register if all the argument registers
1565 if (cum
->nbytes
> nregs
* UNITS_PER_WORD
)
1568 /* Don't pass this arg via a register if it would be split between
1569 registers and memory. */
1570 if (type
== NULL_TREE
1571 && cum
->nbytes
+ size
> nregs
* UNITS_PER_WORD
)
1574 switch (cum
->nbytes
/ UNITS_PER_WORD
)
1577 result
= gen_rtx_REG (mode
, FIRST_ARGUMENT_REGNUM
);
1580 result
= gen_rtx_REG (mode
, FIRST_ARGUMENT_REGNUM
+ 1);
1589 /* Update the data in CUM to advance over an argument
1590 of mode MODE and data type TYPE.
1591 (TYPE is null for libcalls where that information may not be available.) */
1594 mn10300_function_arg_advance (cumulative_args_t cum_v
, enum machine_mode mode
,
1595 const_tree type
, bool named ATTRIBUTE_UNUSED
)
1597 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1599 cum
->nbytes
+= (mode
!= BLKmode
1600 ? (GET_MODE_SIZE (mode
) + 3) & ~3
1601 : (int_size_in_bytes (type
) + 3) & ~3);
1604 /* Return the number of bytes of registers to use for an argument passed
1605 partially in registers and partially in memory. */
1608 mn10300_arg_partial_bytes (cumulative_args_t cum_v
, enum machine_mode mode
,
1609 tree type
, bool named ATTRIBUTE_UNUSED
)
1611 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1614 /* We only support using 2 data registers as argument registers. */
1617 /* Figure out the size of the object to be passed. */
1618 if (mode
== BLKmode
)
1619 size
= int_size_in_bytes (type
);
1621 size
= GET_MODE_SIZE (mode
);
1623 cum
->nbytes
= (cum
->nbytes
+ 3) & ~3;
1625 /* Don't pass this arg via a register if all the argument registers
1627 if (cum
->nbytes
> nregs
* UNITS_PER_WORD
)
1630 if (cum
->nbytes
+ size
<= nregs
* UNITS_PER_WORD
)
1633 /* Don't pass this arg via a register if it would be split between
1634 registers and memory. */
1635 if (type
== NULL_TREE
1636 && cum
->nbytes
+ size
> nregs
* UNITS_PER_WORD
)
1639 return nregs
* UNITS_PER_WORD
- cum
->nbytes
;
1642 /* Return the location of the function's value. This will be either
1643 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1644 $d0 and $a0 if the -mreturn-pointer-on-do flag is set. Note that
1645 we only return the PARALLEL for outgoing values; we do not want
1646 callers relying on this extra copy. */
1649 mn10300_function_value (const_tree valtype
,
1650 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1654 enum machine_mode mode
= TYPE_MODE (valtype
);
1656 if (! POINTER_TYPE_P (valtype
))
1657 return gen_rtx_REG (mode
, FIRST_DATA_REGNUM
);
1658 else if (! TARGET_PTR_A0D0
|| ! outgoing
1659 || cfun
->returns_struct
)
1660 return gen_rtx_REG (mode
, FIRST_ADDRESS_REGNUM
);
1662 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (2));
1664 = gen_rtx_EXPR_LIST (VOIDmode
,
1665 gen_rtx_REG (mode
, FIRST_ADDRESS_REGNUM
),
1669 = gen_rtx_EXPR_LIST (VOIDmode
,
1670 gen_rtx_REG (mode
, FIRST_DATA_REGNUM
),
1675 /* Implements TARGET_LIBCALL_VALUE. */
1678 mn10300_libcall_value (enum machine_mode mode
,
1679 const_rtx fun ATTRIBUTE_UNUSED
)
1681 return gen_rtx_REG (mode
, FIRST_DATA_REGNUM
);
1684 /* Implements FUNCTION_VALUE_REGNO_P. */
1687 mn10300_function_value_regno_p (const unsigned int regno
)
1689 return (regno
== FIRST_DATA_REGNUM
|| regno
== FIRST_ADDRESS_REGNUM
);
1692 /* Output an addition operation. */
1695 mn10300_output_add (rtx operands
[3], bool need_flags
)
1697 rtx dest
, src1
, src2
;
1698 unsigned int dest_regnum
, src1_regnum
, src2_regnum
;
1699 enum reg_class src1_class
, src2_class
, dest_class
;
1705 dest_regnum
= true_regnum (dest
);
1706 src1_regnum
= true_regnum (src1
);
1708 dest_class
= REGNO_REG_CLASS (dest_regnum
);
1709 src1_class
= REGNO_REG_CLASS (src1_regnum
);
1711 if (CONST_INT_P (src2
))
1713 gcc_assert (dest_regnum
== src1_regnum
);
1715 if (src2
== const1_rtx
&& !need_flags
)
1717 if (INTVAL (src2
) == 4 && !need_flags
&& dest_class
!= DATA_REGS
)
1720 gcc_assert (!need_flags
|| dest_class
!= SP_REGS
);
1723 else if (CONSTANT_P (src2
))
1726 src2_regnum
= true_regnum (src2
);
1727 src2_class
= REGNO_REG_CLASS (src2_regnum
);
1729 if (dest_regnum
== src1_regnum
)
1731 if (dest_regnum
== src2_regnum
)
1734 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1735 this directly, as below, but when optimizing for space we can sometimes
1736 do better by using a mov+add. For MN103, we claimed that we could
1737 implement a three-operand add because the various move and add insns
1738 change sizes across register classes, and we can often do better than
1739 reload in choosing which operand to move. */
1740 if (TARGET_AM33
&& optimize_insn_for_speed_p ())
1741 return "add %2,%1,%0";
1743 /* Catch cases where no extended register was used. */
1744 if (src1_class
!= EXTENDED_REGS
1745 && src2_class
!= EXTENDED_REGS
1746 && dest_class
!= EXTENDED_REGS
)
1748 /* We have to copy one of the sources into the destination, then
1749 add the other source to the destination.
1751 Carefully select which source to copy to the destination; a
1752 naive implementation will waste a byte when the source classes
1753 are different and the destination is an address register.
1754 Selecting the lowest cost register copy will optimize this
1756 if (src1_class
== dest_class
)
1757 return "mov %1,%0\n\tadd %2,%0";
1759 return "mov %2,%0\n\tadd %1,%0";
1762 /* At least one register is an extended register. */
1764 /* The three operand add instruction on the am33 is a win iff the
1765 output register is an extended register, or if both source
1766 registers are extended registers. */
1767 if (dest_class
== EXTENDED_REGS
|| src1_class
== src2_class
)
1768 return "add %2,%1,%0";
1770 /* It is better to copy one of the sources to the destination, then
1771 perform a 2 address add. The destination in this case must be
1772 an address or data register and one of the sources must be an
1773 extended register and the remaining source must not be an extended
1776 The best code for this case is to copy the extended reg to the
1777 destination, then emit a two address add. */
1778 if (src1_class
== EXTENDED_REGS
)
1779 return "mov %1,%0\n\tadd %2,%0";
1781 return "mov %2,%0\n\tadd %1,%0";
1784 /* Return 1 if X contains a symbolic expression. We know these
1785 expressions will have one of a few well defined forms, so
1786 we need only check those forms. */
1789 mn10300_symbolic_operand (rtx op
,
1790 enum machine_mode mode ATTRIBUTE_UNUSED
)
1792 switch (GET_CODE (op
))
1799 return ((GET_CODE (XEXP (op
, 0)) == SYMBOL_REF
1800 || GET_CODE (XEXP (op
, 0)) == LABEL_REF
)
1801 && CONST_INT_P (XEXP (op
, 1)));
1807 /* Try machine dependent ways of modifying an illegitimate address
1808 to be legitimate. If we find one, return the new valid address.
1809 This macro is used in only one place: `memory_address' in explow.c.
1811 OLDX is the address as it was before break_out_memory_refs was called.
1812 In some cases it is useful to look at this to decide what needs to be done.
1814 Normally it is always safe for this macro to do nothing. It exists to
1815 recognize opportunities to optimize the output.
1817 But on a few ports with segmented architectures and indexed addressing
1818 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
1821 mn10300_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1822 enum machine_mode mode ATTRIBUTE_UNUSED
)
1824 if (flag_pic
&& ! mn10300_legitimate_pic_operand_p (x
))
1825 x
= mn10300_legitimize_pic_address (oldx
, NULL_RTX
);
1827 /* Uh-oh. We might have an address for x[n-100000]. This needs
1828 special handling to avoid creating an indexed memory address
1829 with x-100000 as the base. */
1830 if (GET_CODE (x
) == PLUS
1831 && mn10300_symbolic_operand (XEXP (x
, 1), VOIDmode
))
1833 /* Ugly. We modify things here so that the address offset specified
1834 by the index expression is computed first, then added to x to form
1835 the entire address. */
1837 rtx regx1
, regy1
, regy2
, y
;
1839 /* Strip off any CONST. */
1841 if (GET_CODE (y
) == CONST
)
1844 if (GET_CODE (y
) == PLUS
|| GET_CODE (y
) == MINUS
)
1846 regx1
= force_reg (Pmode
, force_operand (XEXP (x
, 0), 0));
1847 regy1
= force_reg (Pmode
, force_operand (XEXP (y
, 0), 0));
1848 regy2
= force_reg (Pmode
, force_operand (XEXP (y
, 1), 0));
1849 regx1
= force_reg (Pmode
,
1850 gen_rtx_fmt_ee (GET_CODE (y
), Pmode
, regx1
,
1852 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
, regx1
, regy1
));
1858 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1859 @GOTOFF in `reg'. */
1862 mn10300_legitimize_pic_address (rtx orig
, rtx reg
)
1866 if (GET_CODE (orig
) == LABEL_REF
1867 || (GET_CODE (orig
) == SYMBOL_REF
1868 && (CONSTANT_POOL_ADDRESS_P (orig
)
1869 || ! MN10300_GLOBAL_P (orig
))))
1872 reg
= gen_reg_rtx (Pmode
);
1874 x
= gen_rtx_UNSPEC (SImode
, gen_rtvec (1, orig
), UNSPEC_GOTOFF
);
1875 x
= gen_rtx_CONST (SImode
, x
);
1876 emit_move_insn (reg
, x
);
1878 x
= emit_insn (gen_addsi3 (reg
, reg
, pic_offset_table_rtx
));
1880 else if (GET_CODE (orig
) == SYMBOL_REF
)
1883 reg
= gen_reg_rtx (Pmode
);
1885 x
= gen_rtx_UNSPEC (SImode
, gen_rtvec (1, orig
), UNSPEC_GOT
);
1886 x
= gen_rtx_CONST (SImode
, x
);
1887 x
= gen_rtx_PLUS (SImode
, pic_offset_table_rtx
, x
);
1888 x
= gen_const_mem (SImode
, x
);
1890 x
= emit_move_insn (reg
, x
);
1895 set_unique_reg_note (x
, REG_EQUAL
, orig
);
1899 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1900 isn't protected by a PIC unspec; nonzero otherwise. */
1903 mn10300_legitimate_pic_operand_p (rtx x
)
1908 if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
1911 if (GET_CODE (x
) == UNSPEC
1912 && (XINT (x
, 1) == UNSPEC_PIC
1913 || XINT (x
, 1) == UNSPEC_GOT
1914 || XINT (x
, 1) == UNSPEC_GOTOFF
1915 || XINT (x
, 1) == UNSPEC_PLT
1916 || XINT (x
, 1) == UNSPEC_GOTSYM_OFF
))
1919 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1920 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1926 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1927 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x
, i
, j
)))
1930 else if (fmt
[i
] == 'e'
1931 && ! mn10300_legitimate_pic_operand_p (XEXP (x
, i
)))
1938 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1939 legitimate, and FALSE otherwise.
1941 On the mn10300, the value in the address register must be
1942 in the same memory space/segment as the effective address.
1944 This is problematical for reload since it does not understand
1945 that base+index != index+base in a memory reference.
1947 Note it is still possible to use reg+reg addressing modes,
1948 it's just much more difficult. For a discussion of a possible
1949 workaround and solution, see the comments in pa.c before the
1950 function record_unscaled_index_insn_codes. */
1953 mn10300_legitimate_address_p (enum machine_mode mode
, rtx x
, bool strict
)
1957 if (CONSTANT_ADDRESS_P (x
))
1958 return !flag_pic
|| mn10300_legitimate_pic_operand_p (x
);
1960 if (RTX_OK_FOR_BASE_P (x
, strict
))
1963 if (TARGET_AM33
&& (mode
== SImode
|| mode
== SFmode
|| mode
== HImode
))
1965 if (GET_CODE (x
) == POST_INC
)
1966 return RTX_OK_FOR_BASE_P (XEXP (x
, 0), strict
);
1967 if (GET_CODE (x
) == POST_MODIFY
)
1968 return (RTX_OK_FOR_BASE_P (XEXP (x
, 0), strict
)
1969 && CONSTANT_ADDRESS_P (XEXP (x
, 1)));
1972 if (GET_CODE (x
) != PLUS
)
1976 index
= XEXP (x
, 1);
1982 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1983 addressing is hard to satisfy. */
1987 return (REGNO_GENERAL_P (REGNO (base
), strict
)
1988 && REGNO_GENERAL_P (REGNO (index
), strict
));
1991 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base
), strict
))
1994 if (CONST_INT_P (index
))
1995 return IN_RANGE (INTVAL (index
), -1 - 0x7fffffff, 0x7fffffff);
1997 if (CONSTANT_ADDRESS_P (index
))
1998 return !flag_pic
|| mn10300_legitimate_pic_operand_p (index
);
2004 mn10300_regno_in_class_p (unsigned regno
, int rclass
, bool strict
)
2006 if (regno
>= FIRST_PSEUDO_REGISTER
)
2012 regno
= reg_renumber
[regno
];
2013 if (regno
== INVALID_REGNUM
)
2016 return TEST_HARD_REG_BIT (reg_class_contents
[rclass
], regno
);
2020 mn10300_legitimize_reload_address (rtx x
,
2021 enum machine_mode mode ATTRIBUTE_UNUSED
,
2022 int opnum
, int type
,
2023 int ind_levels ATTRIBUTE_UNUSED
)
2025 bool any_change
= false;
2027 /* See above re disabling reg+reg addressing for MN103. */
2031 if (GET_CODE (x
) != PLUS
)
2034 if (XEXP (x
, 0) == stack_pointer_rtx
)
2036 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2037 GENERAL_REGS
, GET_MODE (x
), VOIDmode
, 0, 0,
2038 opnum
, (enum reload_type
) type
);
2041 if (XEXP (x
, 1) == stack_pointer_rtx
)
2043 push_reload (XEXP (x
, 1), NULL_RTX
, &XEXP (x
, 1), NULL
,
2044 GENERAL_REGS
, GET_MODE (x
), VOIDmode
, 0, 0,
2045 opnum
, (enum reload_type
) type
);
2049 return any_change
? x
: NULL_RTX
;
2052 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
2053 constant. Note that some "constants" aren't valid, such as TLS
2054 symbols and unconverted GOT-based references, so we eliminate
2058 mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
2060 switch (GET_CODE (x
))
2065 if (GET_CODE (x
) == PLUS
)
2067 if (! CONST_INT_P (XEXP (x
, 1)))
2072 /* Only some unspecs are valid as "constants". */
2073 if (GET_CODE (x
) == UNSPEC
)
2075 switch (XINT (x
, 1))
2087 /* We must have drilled down to a symbol. */
2088 if (! mn10300_symbolic_operand (x
, Pmode
))
2099 /* Undo pic address legitimization for the benefit of debug info. */
2102 mn10300_delegitimize_address (rtx orig_x
)
2104 rtx x
= orig_x
, ret
, addend
= NULL
;
2109 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
2112 if (XEXP (x
, 0) == pic_offset_table_rtx
)
2114 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2115 some odd-looking "addresses" that were never valid in the first place.
2116 We need to look harder to avoid warnings being emitted. */
2117 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
2119 rtx x0
= XEXP (x
, 0);
2120 rtx x00
= XEXP (x0
, 0);
2121 rtx x01
= XEXP (x0
, 1);
2123 if (x00
== pic_offset_table_rtx
)
2125 else if (x01
== pic_offset_table_rtx
)
2135 if (GET_CODE (x
) != CONST
)
2138 if (GET_CODE (x
) != UNSPEC
)
2141 ret
= XVECEXP (x
, 0, 0);
2142 if (XINT (x
, 1) == UNSPEC_GOTOFF
)
2144 else if (XINT (x
, 1) == UNSPEC_GOT
)
2149 gcc_assert (GET_CODE (ret
) == SYMBOL_REF
);
2150 if (need_mem
!= MEM_P (orig_x
))
2152 if (need_mem
&& addend
)
2155 ret
= gen_rtx_PLUS (Pmode
, addend
, ret
);
2159 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2160 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2161 with an address register. */
2164 mn10300_address_cost (rtx x
, enum machine_mode mode ATTRIBUTE_UNUSED
,
2165 addr_space_t as ATTRIBUTE_UNUSED
, bool speed
)
2170 switch (GET_CODE (x
))
2175 /* We assume all of these require a 32-bit constant, even though
2176 some symbol and label references can be relaxed. */
2177 return speed
? 1 : 4;
2185 /* Assume any symbolic offset is a 32-bit constant. */
2186 i
= (CONST_INT_P (XEXP (x
, 1)) ? INTVAL (XEXP (x
, 1)) : 0x12345678);
2187 if (IN_RANGE (i
, -128, 127))
2188 return speed
? 0 : 1;
2191 if (IN_RANGE (i
, -0x800000, 0x7fffff))
2197 index
= XEXP (x
, 1);
2198 if (register_operand (index
, SImode
))
2200 /* Attempt to minimize the number of registers in the address.
2201 This is similar to what other ports do. */
2202 if (register_operand (base
, SImode
))
2206 index
= XEXP (x
, 0);
2209 /* Assume any symbolic offset is a 32-bit constant. */
2210 i
= (CONST_INT_P (XEXP (x
, 1)) ? INTVAL (XEXP (x
, 1)) : 0x12345678);
2211 if (IN_RANGE (i
, -128, 127))
2212 return speed
? 0 : 1;
2213 if (IN_RANGE (i
, -32768, 32767))
2214 return speed
? 0 : 2;
2215 return speed
? 2 : 6;
2218 return rtx_cost (x
, MEM
, 0, speed
);
2222 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2224 Recall that the base value of 2 is required by assumptions elsewhere
2225 in the body of the compiler, and that cost 2 is special-cased as an
2226 early exit from reload meaning no work is required. */
2229 mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED
,
2230 reg_class_t ifrom
, reg_class_t ito
)
2232 enum reg_class from
= (enum reg_class
) ifrom
;
2233 enum reg_class to
= (enum reg_class
) ito
;
2234 enum reg_class scratch
, test
;
2236 /* Simplify the following code by unifying the fp register classes. */
2237 if (to
== FP_ACC_REGS
)
2239 if (from
== FP_ACC_REGS
)
2242 /* Diagnose invalid moves by costing them as two moves. */
2247 scratch
= (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
2248 else if (to
== MDR_REGS
)
2249 scratch
= DATA_REGS
;
2250 else if (to
== FP_REGS
&& to
!= from
)
2251 scratch
= GENERAL_REGS
;
2255 if (from
== SP_REGS
)
2256 scratch
= (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
2257 else if (from
== MDR_REGS
)
2258 scratch
= DATA_REGS
;
2259 else if (from
== FP_REGS
&& to
!= from
)
2260 scratch
= GENERAL_REGS
;
2262 if (scratch
!= NO_REGS
&& !reg_class_subset_p (test
, scratch
))
2263 return (mn10300_register_move_cost (VOIDmode
, from
, scratch
)
2264 + mn10300_register_move_cost (VOIDmode
, scratch
, to
));
2266 /* From here on, all we need consider are legal combinations. */
2270 /* The scale here is bytes * 2. */
2272 if (from
== to
&& (to
== ADDRESS_REGS
|| to
== DATA_REGS
))
2275 if (from
== SP_REGS
)
2276 return (to
== ADDRESS_REGS
? 2 : 6);
2278 /* For MN103, all remaining legal moves are two bytes. */
2283 return (from
== ADDRESS_REGS
? 4 : 6);
2285 if ((from
== ADDRESS_REGS
|| from
== DATA_REGS
)
2286 && (to
== ADDRESS_REGS
|| to
== DATA_REGS
))
2289 if (to
== EXTENDED_REGS
)
2290 return (to
== from
? 6 : 4);
2292 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2297 /* The scale here is cycles * 2. */
2301 if (from
== FP_REGS
)
2304 /* All legal moves between integral registers are single cycle. */
2309 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2311 Given lack of the form of the address, this must be speed-relative,
2312 though we should never be less expensive than a size-relative register
2313 move cost above. This is not a problem. */
2316 mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED
,
2317 reg_class_t iclass
, bool in ATTRIBUTE_UNUSED
)
2319 enum reg_class rclass
= (enum reg_class
) iclass
;
2321 if (rclass
== FP_REGS
)
2326 /* Implement the TARGET_RTX_COSTS hook.
2328 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2329 to represent cycles. Size-relative costs are in bytes. */
2332 mn10300_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2333 int *ptotal
, bool speed
)
2335 /* This value is used for SYMBOL_REF etc where we want to pretend
2336 we have a full 32-bit constant. */
2337 HOST_WIDE_INT i
= 0x12345678;
2347 if (outer_code
== SET
)
2349 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2350 if (IN_RANGE (i
, -32768, 32767))
2351 total
= COSTS_N_INSNS (1);
2353 total
= COSTS_N_INSNS (2);
2357 /* 16-bit integer operands don't affect latency;
2358 24-bit and 32-bit operands add a cycle. */
2359 if (IN_RANGE (i
, -32768, 32767))
2362 total
= COSTS_N_INSNS (1);
2367 if (outer_code
== SET
)
2371 else if (IN_RANGE (i
, -128, 127))
2373 else if (IN_RANGE (i
, -32768, 32767))
2380 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2381 if (IN_RANGE (i
, -128, 127))
2383 else if (IN_RANGE (i
, -32768, 32767))
2385 else if (TARGET_AM33
&& IN_RANGE (i
, -0x01000000, 0x00ffffff))
2397 /* We assume all of these require a 32-bit constant, even though
2398 some symbol and label references can be relaxed. */
2402 switch (XINT (x
, 1))
2408 case UNSPEC_GOTSYM_OFF
:
2409 /* The PIC unspecs also resolve to a 32-bit constant. */
2413 /* Assume any non-listed unspec is some sort of arithmetic. */
2414 goto do_arith_costs
;
2418 /* Notice the size difference of INC and INC4. */
2419 if (!speed
&& outer_code
== SET
&& CONST_INT_P (XEXP (x
, 1)))
2421 i
= INTVAL (XEXP (x
, 1));
2422 if (i
== 1 || i
== 4)
2424 total
= 1 + rtx_cost (XEXP (x
, 0), PLUS
, 0, speed
);
2428 goto do_arith_costs
;
2442 total
= (speed
? COSTS_N_INSNS (1) : 2);
2446 /* Notice the size difference of ASL2 and variants. */
2447 if (!speed
&& CONST_INT_P (XEXP (x
, 1)))
2448 switch (INTVAL (XEXP (x
, 1)))
2463 total
= (speed
? COSTS_N_INSNS (1) : 3);
2467 total
= (speed
? COSTS_N_INSNS (3) : 2);
2474 total
= (speed
? COSTS_N_INSNS (39)
2475 /* Include space to load+retrieve MDR. */
2476 : code
== MOD
|| code
== UMOD
? 6 : 4);
2480 total
= mn10300_address_cost (XEXP (x
, 0), GET_MODE (x
),
2481 MEM_ADDR_SPACE (x
), speed
);
2483 total
= COSTS_N_INSNS (2 + total
);
2487 /* Probably not implemented. Assume external call. */
2488 total
= (speed
? COSTS_N_INSNS (10) : 7);
2500 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2501 may access it using GOTOFF instead of GOT. */
2504 mn10300_encode_section_info (tree decl
, rtx rtl
, int first
)
2508 default_encode_section_info (decl
, rtl
, first
);
2513 symbol
= XEXP (rtl
, 0);
2514 if (GET_CODE (symbol
) != SYMBOL_REF
)
2518 SYMBOL_REF_FLAG (symbol
) = (*targetm
.binds_local_p
) (decl
);
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
2534 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2537 mn10300_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
2539 rtx mem
, disp
, fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
2541 /* This is a strict alignment target, which means that we play
2542 some games to make sure that the locations at which we need
2543 to store <chain> and <disp> wind up at aligned addresses.
2546 0xfc 0xdd mov chain,a1
2548 0xf8 0xed 0x00 btst 0,d1
2552 Note that the two extra insns are effectively nops; they
2553 clobber the flags but do not affect the contents of D0 or D1. */
2555 disp
= expand_binop (SImode
, sub_optab
, fnaddr
,
2556 plus_constant (Pmode
, XEXP (m_tramp
, 0), 11),
2557 NULL_RTX
, 1, OPTAB_DIRECT
);
2559 mem
= adjust_address (m_tramp
, SImode
, 0);
2560 emit_move_insn (mem
, gen_int_mode (0xddfc0028, SImode
));
2561 mem
= adjust_address (m_tramp
, SImode
, 4);
2562 emit_move_insn (mem
, chain_value
);
2563 mem
= adjust_address (m_tramp
, SImode
, 8);
2564 emit_move_insn (mem
, gen_int_mode (0xdc00edf8, SImode
));
2565 mem
= adjust_address (m_tramp
, SImode
, 12);
2566 emit_move_insn (mem
, disp
);
2569 /* Output the assembler code for a C++ thunk function.
2570 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2571 is the decl for the target function. DELTA is an immediate constant
2572 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2573 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2574 additionally added to THIS. Finally jump to the entry point of
2578 mn10300_asm_output_mi_thunk (FILE * file
,
2579 tree thunk_fndecl ATTRIBUTE_UNUSED
,
2580 HOST_WIDE_INT delta
,
2581 HOST_WIDE_INT vcall_offset
,
2586 /* Get the register holding the THIS parameter. Handle the case
2587 where there is a hidden first argument for a returned structure. */
2588 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
2589 _this
= reg_names
[FIRST_ARGUMENT_REGNUM
+ 1];
2591 _this
= reg_names
[FIRST_ARGUMENT_REGNUM
];
2593 fprintf (file
, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START
);
2596 fprintf (file
, "\tadd %d, %s\n", (int) delta
, _this
);
2600 const char * scratch
= reg_names
[FIRST_ADDRESS_REGNUM
+ 1];
2602 fprintf (file
, "\tmov %s, %s\n", _this
, scratch
);
2603 fprintf (file
, "\tmov (%s), %s\n", scratch
, scratch
);
2604 fprintf (file
, "\tadd %d, %s\n", (int) vcall_offset
, scratch
);
2605 fprintf (file
, "\tmov (%s), %s\n", scratch
, scratch
);
2606 fprintf (file
, "\tadd %s, %s\n", scratch
, _this
);
2609 fputs ("\tjmp ", file
);
2610 assemble_name (file
, XSTR (XEXP (DECL_RTL (function
), 0), 0));
2614 /* Return true if mn10300_output_mi_thunk would be able to output the
2615 assembler code for the thunk function specified by the arguments
2616 it is passed, and false otherwise. */
2619 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED
,
2620 HOST_WIDE_INT delta ATTRIBUTE_UNUSED
,
2621 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
2622 const_tree function ATTRIBUTE_UNUSED
)
2628 mn10300_hard_regno_mode_ok (unsigned int regno
, enum machine_mode mode
)
2630 if (REGNO_REG_CLASS (regno
) == FP_REGS
2631 || REGNO_REG_CLASS (regno
) == FP_ACC_REGS
)
2632 /* Do not store integer values in FP registers. */
2633 return GET_MODE_CLASS (mode
) == MODE_FLOAT
&& ((regno
& 1) == 0);
2635 if (! TARGET_AM33
&& REGNO_REG_CLASS (regno
) == EXTENDED_REGS
)
2638 if (((regno
) & 1) == 0 || GET_MODE_SIZE (mode
) == 4)
2641 if (REGNO_REG_CLASS (regno
) == DATA_REGS
2642 || (TARGET_AM33
&& REGNO_REG_CLASS (regno
) == ADDRESS_REGS
)
2643 || REGNO_REG_CLASS (regno
) == EXTENDED_REGS
)
2644 return GET_MODE_SIZE (mode
) <= 4;
2650 mn10300_modes_tieable (enum machine_mode mode1
, enum machine_mode mode2
)
2652 if (GET_MODE_CLASS (mode1
) == MODE_FLOAT
2653 && GET_MODE_CLASS (mode2
) != MODE_FLOAT
)
2656 if (GET_MODE_CLASS (mode2
) == MODE_FLOAT
2657 && GET_MODE_CLASS (mode1
) != MODE_FLOAT
)
2662 || (GET_MODE_SIZE (mode1
) <= 4 && GET_MODE_SIZE (mode2
) <= 4))
2669 cc_flags_for_mode (enum machine_mode mode
)
2674 return CC_FLAG_Z
| CC_FLAG_N
| CC_FLAG_C
| CC_FLAG_V
;
2676 return CC_FLAG_Z
| CC_FLAG_N
| CC_FLAG_C
;
2678 return CC_FLAG_Z
| CC_FLAG_N
;
2687 cc_flags_for_code (enum rtx_code code
)
2700 case GT
: /* ~(Z|(N^V)) */
2701 case LE
: /* Z|(N^V) */
2702 return CC_FLAG_Z
| CC_FLAG_N
| CC_FLAG_V
;
2708 case GTU
: /* ~(C | Z) */
2709 case LEU
: /* C | Z */
2710 return CC_FLAG_Z
| CC_FLAG_C
;
2728 mn10300_select_cc_mode (enum rtx_code code
, rtx x
, rtx y ATTRIBUTE_UNUSED
)
2732 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2733 return CC_FLOATmode
;
2735 req
= cc_flags_for_code (code
);
2737 if (req
& CC_FLAG_V
)
2739 if (req
& CC_FLAG_C
)
2745 set_is_load_p (rtx set
)
2747 return MEM_P (SET_SRC (set
));
2751 set_is_store_p (rtx set
)
2753 return MEM_P (SET_DEST (set
));
2756 /* Update scheduling costs for situations that cannot be
2757 described using the attributes and DFA machinery.
2758 DEP is the insn being scheduled.
2759 INSN is the previous insn.
2760 COST is the current cycle cost for DEP. */
2763 mn10300_adjust_sched_cost (rtx_insn
*insn
, rtx link
, rtx_insn
*dep
, int cost
)
2772 /* We are only interested in pairs of SET. */
2773 insn_set
= single_set (insn
);
2777 dep_set
= single_set (dep
);
2781 /* For the AM34 a load instruction that follows a
2782 store instruction incurs an extra cycle of delay. */
2783 if (mn10300_tune_cpu
== PROCESSOR_AM34
2784 && set_is_load_p (dep_set
)
2785 && set_is_store_p (insn_set
))
2788 /* For the AM34 a non-store, non-branch FPU insn that follows
2789 another FPU insn incurs a one cycle throughput increase. */
2790 else if (mn10300_tune_cpu
== PROCESSOR_AM34
2791 && ! set_is_store_p (insn_set
)
2793 && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set
))) == MODE_FLOAT
2794 && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set
))) == MODE_FLOAT
)
2797 /* Resolve the conflict described in section 1-7-4 of
2798 Chapter 3 of the MN103E Series Instruction Manual
2801 "When the preceding instruction is a CPU load or
2802 store instruction, a following FPU instruction
2803 cannot be executed until the CPU completes the
2804 latency period even though there are no register
2805 or flag dependencies between them." */
2807 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2808 if (! TARGET_AM33_2
)
2811 /* If a data dependence already exists then the cost is correct. */
2812 if (REG_NOTE_KIND (link
) == 0)
2815 /* Check that the instruction about to scheduled is an FPU instruction. */
2816 if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set
))) != MODE_FLOAT
)
2819 /* Now check to see if the previous instruction is a load or store. */
2820 if (! set_is_load_p (insn_set
) && ! set_is_store_p (insn_set
))
2823 /* XXX: Verify: The text of 1-7-4 implies that the restriction
2824 only applies when an INTEGER load/store precedes an FPU
2825 instruction, but is this true ? For now we assume that it is. */
2826 if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set
))) != MODE_INT
)
2829 /* Extract the latency value from the timings attribute. */
2830 timings
= get_attr_timings (insn
);
2831 return timings
< 100 ? (timings
% 10) : (timings
% 100);
2835 mn10300_conditional_register_usage (void)
2841 for (i
= FIRST_EXTENDED_REGNUM
;
2842 i
<= LAST_EXTENDED_REGNUM
; i
++)
2843 fixed_regs
[i
] = call_used_regs
[i
] = 1;
2847 for (i
= FIRST_FP_REGNUM
;
2848 i
<= LAST_FP_REGNUM
; i
++)
2849 fixed_regs
[i
] = call_used_regs
[i
] = 1;
2852 fixed_regs
[PIC_OFFSET_TABLE_REGNUM
] =
2853 call_used_regs
[PIC_OFFSET_TABLE_REGNUM
] = 1;
2856 /* Worker function for TARGET_MD_ASM_CLOBBERS.
2857 We do this in the mn10300 backend to maintain source compatibility
2858 with the old cc0-based compiler. */
2861 mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED
,
2862 tree inputs ATTRIBUTE_UNUSED
,
2865 clobbers
= tree_cons (NULL_TREE
, build_string (5, "EPSW"),
2870 /* A helper function for splitting cbranch patterns after reload. */
2873 mn10300_split_cbranch (enum machine_mode cmp_mode
, rtx cmp_op
, rtx label_ref
)
2877 flags
= gen_rtx_REG (cmp_mode
, CC_REG
);
2878 x
= gen_rtx_COMPARE (cmp_mode
, XEXP (cmp_op
, 0), XEXP (cmp_op
, 1));
2879 x
= gen_rtx_SET (VOIDmode
, flags
, x
);
2882 x
= gen_rtx_fmt_ee (GET_CODE (cmp_op
), VOIDmode
, flags
, const0_rtx
);
2883 x
= gen_rtx_IF_THEN_ELSE (VOIDmode
, x
, label_ref
, pc_rtx
);
2884 x
= gen_rtx_SET (VOIDmode
, pc_rtx
, x
);
/* A helper function for matching parallels that set the flags.
   INSN is a PARALLEL whose second element is expected to be a SET of
   the flags register from a COMPARE.  Return true if the mode in which
   the flags are set is compatible with CC_MODE, i.e. it provides at
   least the condition-code bits that CC_MODE requires.  */

bool
mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
{
  rtx op1, flags;
  enum machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int))
     (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   values means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
	return 0;
      /* This is only size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
/* Information extracted from one candidate LIW insn: the pipeline
   slot(s) it may occupy, the LIW operation it performs, and its
   destination and source operands.
   NOTE(review): the struct header was not visible in this extract;
   the field set is confirmed by the uses in extract_bundle and
   check_liw_constraints below.  */

struct liw_data
{
  enum attr_liw slot;      /* Which LIW pipeline slot(s) the insn can use.  */
  enum attr_liw_op op;     /* The LIW operation performed by the insn.  */
  rtx dest;                /* Destination operand (input operand for CMP).  */
  rtx src;                 /* Source operand.  */
};
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL_RTX)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      /* For a compare, "dest" and "src" are the two values compared;
	 the real destination is the flags register.  */
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
	 arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.
   May rewrite PLIW2->src (move forwarding) when that makes bundling legal.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  /* NOTE(review): this inner test can never be true here, since
	     the enclosing condition already requires REG_P (pliw1->src);
	     it appears to be dead code, kept for fidelity.  */
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  /* Forward the source of the move into OP2.  */
	  pliw2->src = pliw1->src;
	  return true;
	}
      return false;
    }

  /* Everything else is OK.  */
  return true;
}
/* Combine pairs of insns into LIW bundles.  Walks the insn stream,
   and whenever two adjacent insns can both be LIW candidates and do
   not conflict, deletes them and emits a single bundled insn in their
   place.  */

static void
mn10300_bundle_liw (void)
{
  rtx r;

  for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
    {
      rtx insn1, insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      /* Ensure that the operands end up in the slots that the hardware
	 expects: swap if the first insn needs slot 2 or the second needs
	 slot 1.  */
      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      /* A compare must always occupy the flag-setting position of the
	 bundle, hence the three distinct generator patterns.  */
      if (liw1.op == LIW_OP_CMP)
	insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
			     GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
			     GEN_INT (liw1.op));
      else
	insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			 GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
/* Write REASON, and optionally the rtl of INSN, to the dump file.
   Only active when a dump file has been requested.  */
#define DUMP(reason, insn)			\
  do						\
    {						\
      if (dump_file)				\
	{					\
	  fprintf (dump_file, reason "\n");	\
	  if (insn != NULL_RTX)			\
	    print_rtl_single (dump_file, insn);	\
	  fprintf(dump_file, "\n");		\
	}					\
    }						\
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  /* Use the FP variant of the Lcc insn for float comparisons.  */
  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  lcc = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
  JUMP_LABEL (lcc) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", lcc);
  delete_insn (branch);
}
/* Return true if basic block BLOCK contains at least one CALL insn.  */

static bool
mn10300_block_contains_call (basic_block block)
{
  rtx insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}
3224 mn10300_loop_contains_call_insn (loop_p loop
)
3227 bool result
= false;
3230 bbs
= get_loop_body (loop
);
3232 for (i
= 0; i
< loop
->num_nodes
; i
++)
3233 if (mn10300_block_contains_call (bbs
[i
]))
/* Scan the current function for innermost loops that can use the
   SETLB/Lcc hardware loop instructions, and convert each suitable
   loop's back-edge branch.  Unsuitable loops are reported to the
   dump file together with the reason.  */

static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
3312 mn10300_reorg (void)
3314 /* These are optimizations, so only run them if optimizing. */
3315 if (TARGET_AM33
&& (optimize
> 0 || optimize_size
))
3317 if (TARGET_ALLOW_SETLB
)
3318 mn10300_scan_for_setlb_lcc ();
3320 if (TARGET_ALLOW_LIW
)
3321 mn10300_bundle_liw ();
/* Initialize the GCC target structure.  Each hook below overrides the
   corresponding default in TARGET_INITIALIZER.  */

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef  TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef  TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef  TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef  TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef  TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef  TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef  TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef  TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef  TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef  TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef  TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef  TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef  TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers

#undef  TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

/* The one and only target structure instance.  */
struct gcc_target targetm = TARGET_INITIALIZER;