/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2018 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
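/* For example, "-mtune=am34" selects PROCESSOR_AM34 above.  Note that
   -mtune only steers tuning decisions (costs, scheduling choices); it
   does not by itself enable AM33/AM34-only instructions.  */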
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }
    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'b')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  /* ... */

	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;

	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;

	  /* ... */
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
	break;
      }
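    /* In effect: when the mode of the compared value records the V flag,
       %b/%B emit the usual signed suffixes "ge"/"lt"; for flag modes
       without V they fall back to "nc"/"ns".  The assert above guarantees
       the chosen condition never needs a flag the mode cannot supply.  */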
    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    case 'L':
      /* These are the least significant word in a 64bit value.  */
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case E_DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[0]);
		break;

	      case E_SFmode:
		REAL_VALUE_TO_TARGET_SINGLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;

	      case E_VOIDmode:
	      case E_DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;

	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;

	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
    case 'H':
      /* Similarly, but for the most significant word.  */
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];

	    switch (GET_MODE (x))
	      {
	      case E_DFmode:
		REAL_VALUE_TO_TARGET_DOUBLE
		  (*CONST_DOUBLE_REAL_VALUE (x), val);
		fprintf (file, "0x%lx", val[1]);
		break;

	      case E_VOIDmode:
	      case E_DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;

	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;

	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (VOIDmode, gen_rtx_PLUS (SImode,
						XEXP (x, 0), const0_rtx));
      else
	output_address (VOIDmode, XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */
    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (GET_MODE (x), XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (VOIDmode, x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;

	    REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    }
}
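/* A sketch of how %L/%H pair up for a 64-bit operand: %L prints the
   low word (the register itself, or the memory operand as given) and
   %H the high word (register number + 1, or the address adjusted by
   4), so a 64-bit move splits into two 32-bit moves.  */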
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;

    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }

    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;

    default:
      output_addr_const (file, addr);
      break;
    }
}
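/* Informally, the syntax produced here: a bare register for REG, an
   "index,base" pair for PLUS (index printed first, as above), a
   trailing "+" for post-increment, and a plain constant expression
   for symbolic addresses.  */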
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;

	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;

	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;

	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;

	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;

	default:
	  return false;
	}
      return true;
    }

  return false;
}
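/* So a PIC reference to a global symbol "foo" prints as "foo@GOT", a
   local one as "foo@GOTOFF", and a call target as "foo@PLT" -- the
   suffixes the assembler turns into the matching relocations.  */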
/* Count the number of FP registers that have to be saved.  */

static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

static void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names[i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
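/* For example, a mask containing d2, d3 and the four callee-saved
   extended registers prints as "[d2,d3,exreg1]"; the extended
   registers only ever appear as the collective name "exreg1".  */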
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
	 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -N*4)))
	      (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  unsigned int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that...  */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
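/* A concrete instance (a sketch): for a MASK covering just d2 and d3,
   the PARALLEL holds three SETs -- sp = sp - 8 plus one frame-related
   store per register, at the slots dictated by store_order above --
   and later matches a single "movm [d2,d3],(sp)".  */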
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      mask &= ~ (mask & - mask);
    }
  return count;
}
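/* The loop clears the lowest set bit on each iteration (mask & -mask
   isolates it), so it runs once per set bit -- Kernighan's popcount.  */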
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      unsigned int xsize;
      rtx reg;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
		    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
				     (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
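      /* Worked examples of the size macros: SIZE_ADD_SP (-100) is 3
	 bytes (imm8 form), SIZE_ADD_SP (-300) is 4 (imm16), and
	 SIZE_ADD_SP (-40000) is 6 (imm32); SIZE_ADD_AX is one byte
	 cheaper in the imm8 case.  */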
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}
      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  /* Insn: add 128-size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (128 - size);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}
      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}
      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  addr = gen_rtx_PLUS (SImode,
				       stack_pointer_rtx,
				       GEN_INT (xsize));
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 register.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;
	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }
	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			    + reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }
	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs[FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 byte.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }
	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }
  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)) */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
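/* This predicate is the inverse of mn10300_gen_multiple_store above:
   it accepts exactly the PARALLEL sketched there -- one SP decrement
   creating COUNT-1 slots plus one word-sized push per slot -- and
   anything else (wrong slot offsets, a partial extended-register
   group) yields 0 so the movm pattern fails to match.  */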
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if ((REG_P (x)
	    && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);

  return rclass;
}
/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
/* Compute the size of the stack frame for the current function.  */

int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}
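/* Example: a function with 16 bytes of locals that makes calls
   needing 8 bytes of outgoing arguments gets 16 + 8 + 4 = 28 bytes;
   the final 4 is the return-pointer slot counted above.  */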
int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}
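/* E.g. a 12-byte struct or any BLKmode aggregate comes back via a
   hidden pointer, while scalars up to 8 bytes (say, a DImode
   long long) are returned in registers.  */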
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */

static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
/* Return an RTX to represent where an argument with mode MODE will be
   passed to a function.  If the result is NULL_RTX, the argument is
   pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
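/* In effect the first two argument words go in d0 and d1
   (FIRST_ARGUMENT_REGNUM onward) and the rest is pushed; e.g. for
   f (int a, int b, int c), a lands in d0, b in d1, c on the stack.  */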
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}
/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));
  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}
/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}
/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
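/* Sketch of the generated assembly: for d0 = d1 + e2 when not
   optimizing for speed, the tail above emits "mov e2,d0" then
   "add d1,d0" -- copy the extended register to the destination and
   use the shorter two-address add.  */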
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;
  rtx_insn *insn;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      insn = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      insn = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (insn, REG_EQUAL, orig);
  return reg;
}
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (REG_P (base) && REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
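/* Summary of the accepted forms: constant addresses (non-PIC, or
   PIC-protected), a base register, POST_INC/POST_MODIFY on AM33 for
   16- and 32-bit modes, reg+reg on AM33 only, and base plus a 32-bit
   constant displacement.  */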
static bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      if (!reg_renumber)
	return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
	return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}
rtx
mn10300_legitimize_reload_address (rtx x,
				   machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      break;
    }

  return true;
}
/* Undo pic address legitimization for the benefit of debug info.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;
    }
  else
    return orig_x;

  x = XEXP (x, 1);
  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (speed)
	return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
	return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, Pmode, MEM, 0, speed);
    }
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  if (scratch == NO_REGS)
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      if (!TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Given lack of the form of the address, this must be speed-relative,
   though we should never be less expensive than a size-relative register
   move cost above.  This is not a problem.  */

static int
mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
		   int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 0;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  /* ... */
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      break;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), mode,
				    MEM_ADDR_SPACE (x), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

 alldone:
  *ptotal = total;
  return true;
}
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;

  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}

/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0x28 0x00			add 0,d0
	0xfc 0xdd			mov chain,a1
        <chain>
	0xf8 0xed 0x00			btst 0,d1
	0xdc				jmp fnaddr
	<disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}

/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  fputc ('\n', file);
}

/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree    function ATTRIBUTE_UNUSED)
{
  return true;
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    return true;

  if (REGNO_REG_CLASS (regno) == DATA_REGS
      || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
      || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return GET_MODE_SIZE (mode) <= 4;

  return false;
}

/* Implement TARGET_MODES_TIEABLE_P.  */

static bool
mn10300_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  return (GET_MODE_SIZE (mode1) == GET_MODE_SIZE (mode2)
	  || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4));
}

static int
cc_flags_for_mode (machine_mode mode)
{
  switch (mode)
    {
    case E_CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case E_CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case E_CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case E_CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}

static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:	/* Z */
    case NE:	/* ~Z */
      return CC_FLAG_Z;

    case LT:	/* N^V */
    case GE:	/* ~(N^V) */
      return CC_FLAG_N | CC_FLAG_V;

    case GT:	/* ~(Z|(N^V)) */
    case LE:	/* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:	/* ~C */
    case LTU:	/* C */
      return CC_FLAG_C;

    case GTU:	/* ~(C | Z) */
    case LEU:	/* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    default:
      /* The floating point comparisons require CC_FLOATmode and are
	 handled before this function is reached.  */
      return -1;
    }
}

machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}

static inline bool
set_is_load_p (rtx set)
{
  return MEM_P (SET_SRC (set));
}

static inline bool
set_is_store_p (rtx set)
{
  return MEM_P (SET_DEST (set));
}

/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
			   int cost, unsigned int)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  if (!TARGET_AM33)
    return 1;

  /* We are only interested in pairs of SET.  */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! set_is_store_p (insn_set)
	   && ! JUMP_P (insn)
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /*  Resolve the conflict described in section 1-7-4 of
      Chapter 3 of the MN103E Series Instruction Manual
      where it says:

	"When the preceding instruction is a CPU load or
	 store instruction, a following FPU instruction
	 cannot be executed until the CPU completes the
	 latency period even though there are no register
	 or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (dep_type == 0)
    return cost;

  /* Check that the instruction about to scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}

static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
	   i <= LAST_EXTENDED_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
	   i <= LAST_FP_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}

/* Worker function for TARGET_MD_ASM_ADJUST.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		       vec<const char *> &/*constraints*/,
		       vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
  SET_HARD_REG_BIT (clobbered_regs, CC_REG);
  return NULL;
}

/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (pc_rtx, x);
  emit_jump_insn (x);
}

/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 0);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}

/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int)))
     (set (reg) (shift (reg) (int)))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
	return 0;
      /* This is only size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}

struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};

/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;

    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;

    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
	 arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}

/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  pliw2->src = pliw1->src;
	  return true;
	}

      return false;
    }

  /* Everything else is OK.  */
  return true;
}
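
/* For instance "mov d0,d1 ; add d1,d2" can be bundled: the MOV
   exception above rewrites the ADD's source to d0, and executing
   "add d0,d2" in parallel with the copy gives the same result as the
   original sequence.  */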

/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      rtx insn2_pat;
      if (liw1.op == LIW_OP_CMP)
	insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
				 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
				 GEN_INT (liw1.op));
      else
	insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			     GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}

#define DUMP(reason, insn)				\
  do							\
    {							\
      if (dump_file)					\
	{						\
	  fprintf (dump_file, reason "\n");		\
	  if (insn != NULL_RTX)				\
	    print_rtl_single (dump_file, insn);		\
	  fprintf (dump_file, "\n");			\
	}						\
    }							\
  while (0)

/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx_insn *label, rtx_insn *branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}

static bool
mn10300_block_contains_call (basic_block block)
{
  rtx_insn *insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}

static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
	result = true;
	break;
      }

  free (bbs);
  return result;
}

static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx_insn *branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx_insn *label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}

static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
	mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
	mn10300_bundle_liw ();
    }
}

/* Initialize the GCC target structure.  */

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mn10300_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mn10300_modes_tieable_p

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

struct gcc_target targetm = TARGET_INITIALIZER;