/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2013 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "reload.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "tm-constrs.h"
#include "target-def.h"
#include "df.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }
    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	enum machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;
    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    case 'L':
      /* These are the least significant word in a 64-bit value.  */
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;

	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;

	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;

	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
    case 'H':
      /* Similarly, but for the most significant word.  */
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;

	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;

	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;
      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
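      /* Worked example (not in the original file): a shift count of 33
	 is printed as 1, since 33 & 0x1f == 1, which matches what the
	 hardware would do with the unmasked value.  */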
    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	/* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    }
}
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;

    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }

    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;

    default:
      output_addr_const (file, addr);
      break;
    }
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;

	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;

	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;

	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;

	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;

	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}
/* Count the number of FP registers that have to be saved.  */

static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

static void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names [i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);

      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -N*4)))
	      (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  if (flag_stack_usage_info)
    current_function_static_stack_size = size;

  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs (NULL));

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg;
      unsigned int xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */
#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
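/* Worked examples (not in the original file): with the definitions
   above, SIZE_ADD_SP (-4) is 3 bytes (the 8-bit immediate form),
   SIZE_ADD_SP (-200) is 4 bytes (16-bit form), and SIZE_ADD_SP (-40000)
   is 6 bytes (32-bit form); SIZE_ADD_AX follows the same ranges but its
   smallest form is 2 bytes.  */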
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  /* Insn: add 128-size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (128 - size);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}
      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}
      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  addr = gen_rtx_PLUS (SImode,
				       stack_pointer_rtx,
				       GEN_INT (xsize));
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;
      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 register.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;
	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			+ reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 byte.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }
	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }
  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_operation (rtx op,
				  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B))  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4))  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}
/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (mode == SImode
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
HOST_WIDE_INT
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}
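/* Worked example (not in the original file): a function with 20 bytes
   of locals and 8 bytes of outgoing arguments gets a frame of
   20 + 8 + 4 = 32 bytes, the extra 4 being the return pointer slot
   that is only needed when there are outgoing arguments.  */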
HOST_WIDE_INT
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */

static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   enum machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
/* Return an RTX to represent where an argument with mode MODE will be
   passed to a function.  If the result is NULL_RTX, the argument is
   pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
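/* Illustrative sketch (an assumption about the convention above, not
   taken from the original file): with two data registers available,
   the first word-sized argument lands in FIRST_ARGUMENT_REGNUM and the
   second in FIRST_ARGUMENT_REGNUM + 1, so for f (int a, int b, int c)
   the third argument c is already pushed on the stack.  */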
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}
/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  enum machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));
  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}
/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (enum machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}
/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
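/* Illustrative examples (not in the original file, assuming the
   templates above): "d0 = d0 + 1" with dead flags emits "inc d0";
   "a2 = d1 + e4" copies the extended register first, giving
   "mov e4,a2" then "add d1,a2"; and "e1 = e2 + e3" uses the
   three-operand form "add e3,e2,e1".  */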
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}

/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      if (!reg_renumber)
	return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
	return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}
rtx
mn10300_legitimize_reload_address (rtx x,
				   enum machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      break;
    }

  return true;
}
/* Undo pic address legitimization for the benefit of debug info.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;
    }
  else
    return orig_x;

  x = XEXP (x, 1);
  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (speed)
	return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
	return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, MEM, 0, speed);
    }
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  if (scratch == NO_REGS)
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      if (!TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Given lack of the form of the address, this must be speed-relative,
   though we should never be less expensive than a size-relative register
   move cost above.  This is not a problem.  */

static int
mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		   int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 0;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  case 1:
	  case 2:
	    total = 1;
	    goto alldone;
	  case 3:
	  case 4:
	    total = 2;
	    goto alldone;
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      break;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
				    MEM_ADDR_SPACE (x), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

  *ptotal = total;
  return true;

 alldone:
  *ptotal = total;
  return true;
}
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;

  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0x28 0x00			add 0,d0
	0xfc 0xdd			mov chain,a1
        <chain>
	0xf8 0xed 0x00			btst 0,d1
	0xdc				jmp fnaddr
	<disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  fputc ('\n', file);
}
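/* As an illustration (assuming the first argument register is d0), a
   thunk with DELTA == 4 and VCALL_OFFSET == 0 for a target function
   "foo" comes out as:

	# Thunk Entry Point:
	add 4, d0
	jmp foo
*/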
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}
bool
mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    return true;

  if (REGNO_REG_CLASS (regno) == DATA_REGS
      || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
      || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return GET_MODE_SIZE (mode) <= 4;

  return false;
}
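/* One consequence of the checks above: an 8-byte (e.g. DImode) value
   may only start in an even-numbered register, while any 4-byte value
   may live in any register of a suitable class.  */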
bool
mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  if (TARGET_AM33
      || mode1 == mode2
      || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
    return true;

  return false;
}
static int
cc_flags_for_mode (enum machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}
static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:	/* Z */
    case NE:	/* ~Z */
      return CC_FLAG_Z;

    case LT:	/* N */
    case GE:	/* ~N */
      return CC_FLAG_N;

    case GT:	/* ~(Z|(N^V)) */
    case LE:	/* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:	/* ~C */
    case LTU:	/* C */
      return CC_FLAG_C;

    case GTU:	/* ~(C | Z) */
    case LEU:	/* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    case ORDERED:
    case UNORDERED:
    case LTGT:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return -1;

    default:
      gcc_unreachable ();
    }
}
enum machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
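/* For example, EQ, NE, LT and GE need only the Z and N flags and so
   map onto CCZNmode, unsigned comparisons such as GTU add C and get
   CCZNCmode, and signed GT and LE need V and select the full CCmode.
   Tracking flags at this granularity matters because many mn10300
   insns update only a subset of the flags.  */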
static inline bool
is_load_insn (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  return MEM_P (SET_SRC (PATTERN (insn)));
}

static inline bool
is_store_insn (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  return MEM_P (SET_DEST (PATTERN (insn)));
}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
{
  int timings = get_attr_timings (insn);

  if (!TARGET_AM33)
    return 1;

  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);

  if (GET_CODE (dep) == PARALLEL)
    dep = XVECEXP (dep, 0, 0);

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && is_load_insn (dep)
      && is_store_insn (insn))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! is_store_insn (insn)
	   && ! JUMP_P (insn)
	   && GET_CODE (PATTERN (dep)) == SET
	   && GET_CODE (PATTERN (insn)) == SET
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual
     where it says:

       "When the preceding instruction is a CPU load or
	store instruction, a following FPU instruction
	cannot be executed until the CPU completes the
	latency period even though there are no register
	or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU
     instruction.  */
  if (GET_CODE (PATTERN (dep)) != SET)
    return cost;

  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! is_load_insn (insn) && ! is_store_insn (insn))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  return timings < 100 ? (timings % 10) : (timings % 100);
}
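/* The encoding used by the final return statement: the timings
   attribute stores the latency in its low decimal digit for values
   below 100 (e.g. 23 -> latency 3) and in its low two digits
   otherwise (e.g. 211 -> latency 11).  */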
static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
	   i <= LAST_EXTENDED_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
	   i <= LAST_FP_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_CLOBBERS.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
			 tree inputs ATTRIBUTE_UNUSED,
			 tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
			clobbers);
  return clobbers;
}
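/* The effect is that a plain statement such as

	asm ("nop");

   is handled as if it had been written

	asm ("nop" : : : "EPSW");

   i.e. flag values are never assumed to survive an inline asm, just
   as under the old cc0-based port.  */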
/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (VOIDmode, flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (VOIDmode, pc_rtx, x);
  emit_jump_insn (x);
}
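/* Schematically, given a CMP_OP such as (lt (reg:SI d0) (reg:SI d1))
   and a label L, the two insns built above are:

	(set (reg:CC CC_REG) (compare:CC (reg:SI d0) (reg:SI d1)))
	(set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
				(label_ref L) (pc)))
*/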
/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
{
  rtx op1, flags;
  enum machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int)))
     (set (reg) (shift (reg) (int)))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
	return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
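/* Two worked examples: an AND with 0xfffffffc clears the low two bits,
   so the function returns 2 and the split is "lsr 2" then "asl 2";
   an AND with 0x3fffffff keeps only the low 30 bits, so it returns -2
   and the split is "asl 2" then "lsr 2".  */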
struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};

/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL_RTX)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
	 arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  pliw2->src = pliw1->src;
	  return true;
	}
      return false;
    }

  /* Everything else is OK.  */
  return true;
}
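/* For example, "mov d0,d1 ; add d1,d2" appears to overlap because the
   first insn writes d1 and the second reads it, but since the first op
   is a register-to-register move the code above forwards its source,
   so the pair can still be bundled as "mov d0,d1" with "add d0,d2".  */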
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx r;

  for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
    {
      rtx insn1, insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      if (liw1.op == LIW_OP_CMP)
	insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
			     GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
			     GEN_INT (liw1.op));
      else
	insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			 GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
#define DUMP(reason, insn)			\
  do						\
    {						\
      if (dump_file)				\
	{					\
	  fprintf (dump_file, reason "\n");	\
	  if (insn != NULL_RTX)			\
	    print_rtl_single (dump_file, insn);	\
	  fprintf (dump_file, "\n");		\
	}					\
    }						\
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  lcc = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
  JUMP_LABEL (lcc) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", lcc);
  delete_insn (branch);
}
static bool
mn10300_block_contains_call (basic_block block)
{
  rtx insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}
static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
	result = true;
	break;
      }

  free (bbs);
  return result;
}
static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
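/* The motivation, as described in the AM33 architecture documents:
   SETLB latches the address of the loop top in the dedicated loop
   registers, letting the Lcc/FLcc insn at the loop bottom branch back
   without the normal refetch penalty; hence the effort above to
   rewrite small single-block loops into that shape.  */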
static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
	mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
	mn10300_bundle_liw ();
    }
}
/* Initialize the GCC target structure.  */

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers

#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;