1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
32 #include "insn-attr.h"
36 #include "diagnostic-core.h"
41 #include "double-int.h"
48 #include "fold-const.h"
49 #include "stor-layout.h"
54 #include "statistics.h"
56 #include "fixed-value.h"
63 #include "insn-codes.h"
68 #include "target-def.h"
70 #include "langhooks.h"
71 #include "hash-table.h"
73 #include "dominance.h"
79 #include "cfgcleanup.h"
80 #include "basic-block.h"
81 #include "tree-ssa-alias.h"
82 #include "internal-fn.h"
83 #include "gimple-fold.h"
85 #include "gimple-expr.h"
89 #include "tm-constrs.h"
94 /* Used by m32c_pushm_popm. */
102 static bool m32c_function_needs_enter (void);
103 static tree
interrupt_handler (tree
*, tree
, tree
, int, bool *);
104 static tree
function_vector_handler (tree
*, tree
, tree
, int, bool *);
105 static int interrupt_p (tree node
);
106 static int bank_switch_p (tree node
);
107 static int fast_interrupt_p (tree node
);
108 static int interrupt_p (tree node
);
109 static bool m32c_asm_integer (rtx
, unsigned int, int);
110 static int m32c_comp_type_attributes (const_tree
, const_tree
);
111 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
112 static struct machine_function
*m32c_init_machine_status (void);
113 static void m32c_insert_attributes (tree
, tree
*);
114 static bool m32c_legitimate_address_p (machine_mode
, rtx
, bool);
115 static bool m32c_addr_space_legitimate_address_p (machine_mode
, rtx
, bool, addr_space_t
);
116 static rtx
m32c_function_arg (cumulative_args_t
, machine_mode
,
118 static bool m32c_pass_by_reference (cumulative_args_t
, machine_mode
,
120 static void m32c_function_arg_advance (cumulative_args_t
, machine_mode
,
122 static unsigned int m32c_function_arg_boundary (machine_mode
, const_tree
);
123 static int m32c_pushm_popm (Push_Pop_Type
);
124 static bool m32c_strict_argument_naming (cumulative_args_t
);
125 static rtx
m32c_struct_value_rtx (tree
, int);
126 static rtx
m32c_subreg (machine_mode
, rtx
, machine_mode
, int);
127 static int need_to_save (int);
128 static rtx
m32c_function_value (const_tree
, const_tree
, bool);
129 static rtx
m32c_libcall_value (machine_mode
, const_rtx
);
131 /* Returns true if an address is specified, else false. */
132 static bool m32c_get_pragma_address (const char *varname
, unsigned *addr
);
134 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
136 #define streq(a,b) (strcmp ((a), (b)) == 0)
138 /* Internal support routines */
140 /* Debugging statements are tagged with DEBUG0 only so that they can
141 be easily enabled individually, by replacing the '0' with '1' as
147 #include "print-tree.h"
148 /* This is needed by some of the commented-out debug statements
150 static char const *class_names
[LIM_REG_CLASSES
] = REG_CLASS_NAMES
;
152 static int class_contents
[LIM_REG_CLASSES
][1] = REG_CLASS_CONTENTS
;
154 /* These are all to support encode_pattern(). */
155 static char pattern
[30], *patternp
;
156 static GTY(()) rtx patternr
[30];
157 #define RTX_IS(x) (streq (pattern, x))
159 /* Some macros to simplify the logic throughout this file. */
/* Nonzero if REGNO names one of the memory pseudo-registers mem0..mem7.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
/* Nonzero if X is a REG rtx for a memory pseudo-register.  */
#define IS_MEM_REG(x) (GET_CODE (x) == REG && IS_MEM_REGNO (REGNO (x)))

/* Nonzero if REGNO names one of the control registers sb..pc.  */
#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
/* Nonzero if X is a REG rtx for a control register.  */
#define IS_CR_REG(x) (GET_CODE (x) == REG && IS_CR_REGNO (REGNO (x)))
167 far_addr_space_p (rtx x
)
169 if (GET_CODE (x
) != MEM
)
172 fprintf(stderr
, "\033[35mfar_addr_space: "); debug_rtx(x
);
173 fprintf(stderr
, " = %d\033[0m\n", MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
);
175 return MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
;
178 /* We do most RTX matching by converting the RTX into a string, and
179 using string compares. This vastly simplifies the logic in many of
180 the functions in this file.
182 On exit, pattern[] has the encoded string (use RTX_IS("...") to
183 compare it) and patternr[] has pointers to the nodes in the RTX
184 corresponding to each character in the encoded string. The latter
185 is mostly used by print_operand().
187 Unrecognized patterns have '?' in them; this shows up when the
188 assembler complains about syntax errors.
192 encode_pattern_1 (rtx x
)
196 if (patternp
== pattern
+ sizeof (pattern
) - 2)
202 patternr
[patternp
- pattern
] = x
;
204 switch (GET_CODE (x
))
210 if (GET_MODE_SIZE (GET_MODE (x
)) !=
211 GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))))
213 if (GET_MODE (x
) == PSImode
214 && GET_CODE (XEXP (x
, 0)) == REG
)
216 encode_pattern_1 (XEXP (x
, 0));
221 encode_pattern_1 (XEXP (x
, 0));
226 encode_pattern_1 (XEXP (x
, 0));
231 encode_pattern_1 (XEXP (x
, 0));
235 encode_pattern_1 (XEXP (x
, 0));
236 encode_pattern_1 (XEXP (x
, 1));
240 encode_pattern_1 (XEXP (x
, 0));
244 encode_pattern_1 (XEXP (x
, 0));
248 encode_pattern_1 (XEXP (x
, 0));
249 encode_pattern_1 (XEXP (x
, 1));
253 encode_pattern_1 (XEXP (x
, 0));
270 *patternp
++ = '0' + XCINT (x
, 1, UNSPEC
);
271 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
272 encode_pattern_1 (XVECEXP (x
, 0, i
));
279 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
280 encode_pattern_1 (XVECEXP (x
, 0, i
));
284 encode_pattern_1 (XEXP (x
, 0));
286 encode_pattern_1 (XEXP (x
, 1));
291 fprintf (stderr
, "can't encode pattern %s\n",
292 GET_RTX_NAME (GET_CODE (x
)));
300 encode_pattern (rtx x
)
303 encode_pattern_1 (x
);
307 /* Since register names indicate the mode they're used in, we need a
308 way to determine which name to refer to the register with. Called
309 by print_operand(). */
312 reg_name_with_mode (int regno
, machine_mode mode
)
314 int mlen
= GET_MODE_SIZE (mode
);
315 if (regno
== R0_REGNO
&& mlen
== 1)
317 if (regno
== R0_REGNO
&& (mlen
== 3 || mlen
== 4))
319 if (regno
== R0_REGNO
&& mlen
== 6)
321 if (regno
== R0_REGNO
&& mlen
== 8)
323 if (regno
== R1_REGNO
&& mlen
== 1)
325 if (regno
== R1_REGNO
&& (mlen
== 3 || mlen
== 4))
327 if (regno
== A0_REGNO
&& TARGET_A16
&& (mlen
== 3 || mlen
== 4))
329 return reg_names
[regno
];
332 /* How many bytes a register uses on stack when it's pushed. We need
333 to know this because the push opcode needs to explicitly indicate
334 the size of the register, even though the name of the register
335 already tells it that. Used by m32c_output_reg_{push,pop}, which
336 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
339 reg_push_size (int regno
)
364 /* Given two register classes, find the largest intersection between
365 them. If there is no intersection, return RETURNED_IF_EMPTY
368 reduce_class (reg_class_t original_class
, reg_class_t limiting_class
,
369 reg_class_t returned_if_empty
)
373 reg_class_t best
= NO_REGS
;
374 unsigned int best_size
= 0;
376 if (original_class
== limiting_class
)
377 return original_class
;
379 cc
= reg_class_contents
[original_class
];
380 AND_HARD_REG_SET (cc
, reg_class_contents
[limiting_class
]);
382 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
384 if (hard_reg_set_subset_p (reg_class_contents
[i
], cc
))
385 if (best_size
< reg_class_size
[i
])
387 best
= (reg_class_t
) i
;
388 best_size
= reg_class_size
[i
];
393 return returned_if_empty
;
397 /* Used by m32c_register_move_cost to determine if a move is
398 impossibly expensive. */
400 class_can_hold_mode (reg_class_t rclass
, machine_mode mode
)
402 /* Cache the results: 0=untested 1=no 2=yes */
403 static char results
[LIM_REG_CLASSES
][MAX_MACHINE_MODE
];
405 if (results
[(int) rclass
][mode
] == 0)
408 results
[rclass
][mode
] = 1;
409 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
410 if (in_hard_reg_set_p (reg_class_contents
[(int) rclass
], mode
, r
)
411 && HARD_REGNO_MODE_OK (r
, mode
))
413 results
[rclass
][mode
] = 2;
419 fprintf (stderr
, "class %s can hold %s? %s\n",
420 class_names
[(int) rclass
], mode_name
[mode
],
421 (results
[rclass
][mode
] == 2) ? "yes" : "no");
423 return results
[(int) rclass
][mode
] == 2;
426 /* Run-time Target Specification. */
428 /* Memregs are memory locations that gcc treats like general
429 registers, as there are a limited number of true registers and the
430 m32c families can use memory in most places that registers can be
433 However, since memory accesses are more expensive than registers,
434 we allow the user to limit the number of memregs available, in
435 order to try to persuade gcc to try harder to use real registers.
437 Memregs are provided by lib1funcs.S.
440 int ok_to_change_target_memregs
= TRUE
;
442 /* Implements TARGET_OPTION_OVERRIDE. */
444 #undef TARGET_OPTION_OVERRIDE
445 #define TARGET_OPTION_OVERRIDE m32c_option_override
448 m32c_option_override (void)
450 /* We limit memregs to 0..16, and provide a default. */
451 if (global_options_set
.x_target_memregs
)
453 if (target_memregs
< 0 || target_memregs
> 16)
454 error ("invalid target memregs value '%d'", target_memregs
);
462 /* This target defaults to strict volatile bitfields. */
463 if (flag_strict_volatile_bitfields
< 0 && abi_version_at_least(2))
464 flag_strict_volatile_bitfields
= 1;
466 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
467 This is always worse than an absolute call. */
469 flag_no_function_cse
= 1;
471 /* This wants to put insns between compares and their jumps. */
472 /* FIXME: The right solution is to properly trace the flags register
473 values, but that is too much work for stage 4. */
474 flag_combine_stack_adjustments
= 0;
477 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
478 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
481 m32c_override_options_after_change (void)
484 flag_no_function_cse
= 1;
487 /* Defining data structures for per-function information */
489 /* The usual; we set up our machine_function data. */
490 static struct machine_function
*
491 m32c_init_machine_status (void)
493 return ggc_cleared_alloc
<machine_function
> ();
496 /* Implements INIT_EXPANDERS. We just set up to call the above
499 m32c_init_expanders (void)
501 init_machine_status
= m32c_init_machine_status
;
506 /* Register Basics */
508 /* Basic Characteristics of Registers */
510 /* Whether a mode fits in a register is complex enough to warrant a
519 } nregs_table
[FIRST_PSEUDO_REGISTER
] =
521 { 1, 1, 2, 2, 4 }, /* r0 */
522 { 0, 1, 0, 0, 0 }, /* r2 */
523 { 1, 1, 2, 2, 0 }, /* r1 */
524 { 0, 1, 0, 0, 0 }, /* r3 */
525 { 0, 1, 1, 0, 0 }, /* a0 */
526 { 0, 1, 1, 0, 0 }, /* a1 */
527 { 0, 1, 1, 0, 0 }, /* sb */
528 { 0, 1, 1, 0, 0 }, /* fb */
529 { 0, 1, 1, 0, 0 }, /* sp */
530 { 1, 1, 1, 0, 0 }, /* pc */
531 { 0, 0, 0, 0, 0 }, /* fl */
532 { 1, 1, 1, 0, 0 }, /* ap */
533 { 1, 1, 2, 2, 4 }, /* mem0 */
534 { 1, 1, 2, 2, 4 }, /* mem1 */
535 { 1, 1, 2, 2, 4 }, /* mem2 */
536 { 1, 1, 2, 2, 4 }, /* mem3 */
537 { 1, 1, 2, 2, 4 }, /* mem4 */
538 { 1, 1, 2, 2, 0 }, /* mem5 */
539 { 1, 1, 2, 2, 0 }, /* mem6 */
540 { 1, 1, 0, 0, 0 }, /* mem7 */
543 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
544 of available memregs, and select which registers need to be preserved
545 across calls based on the chip family. */
547 #undef TARGET_CONDITIONAL_REGISTER_USAGE
548 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
550 m32c_conditional_register_usage (void)
554 if (0 <= target_memregs
&& target_memregs
<= 16)
556 /* The command line option is bytes, but our "registers" are
558 for (i
= (target_memregs
+1)/2; i
< 8; i
++)
560 fixed_regs
[MEM0_REGNO
+ i
] = 1;
561 CLEAR_HARD_REG_BIT (reg_class_contents
[MEM_REGS
], MEM0_REGNO
+ i
);
565 /* M32CM and M32C preserve more registers across function calls. */
568 call_used_regs
[R1_REGNO
] = 0;
569 call_used_regs
[R2_REGNO
] = 0;
570 call_used_regs
[R3_REGNO
] = 0;
571 call_used_regs
[A0_REGNO
] = 0;
572 call_used_regs
[A1_REGNO
] = 0;
576 /* How Values Fit in Registers */
578 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
579 different registers are different sizes from each other, *and* may
580 be different sizes in different chip families. */
582 m32c_hard_regno_nregs_1 (int regno
, machine_mode mode
)
584 if (regno
== FLG_REGNO
&& mode
== CCmode
)
586 if (regno
>= FIRST_PSEUDO_REGISTER
)
587 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
589 if (regno
>= MEM0_REGNO
&& regno
<= MEM7_REGNO
)
590 return (GET_MODE_SIZE (mode
) + 1) / 2;
592 if (GET_MODE_SIZE (mode
) <= 1)
593 return nregs_table
[regno
].qi_regs
;
594 if (GET_MODE_SIZE (mode
) <= 2)
595 return nregs_table
[regno
].hi_regs
;
596 if (regno
== A0_REGNO
&& mode
== SImode
&& TARGET_A16
)
598 if ((GET_MODE_SIZE (mode
) <= 3 || mode
== PSImode
) && TARGET_A24
)
599 return nregs_table
[regno
].pi_regs
;
600 if (GET_MODE_SIZE (mode
) <= 4)
601 return nregs_table
[regno
].si_regs
;
602 if (GET_MODE_SIZE (mode
) <= 8)
603 return nregs_table
[regno
].di_regs
;
608 m32c_hard_regno_nregs (int regno
, machine_mode mode
)
610 int rv
= m32c_hard_regno_nregs_1 (regno
, mode
);
614 /* Implements HARD_REGNO_MODE_OK. The above function does the work
615 already; just test its return value. */
617 m32c_hard_regno_ok (int regno
, machine_mode mode
)
619 return m32c_hard_regno_nregs_1 (regno
, mode
) != 0;
622 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
623 registers are all different sizes. However, since most modes are
624 bigger than our registers anyway, it's easier to implement this
625 function that way, leaving QImode as the only unique case. */
627 m32c_modes_tieable_p (machine_mode m1
, machine_mode m2
)
629 if (GET_MODE_SIZE (m1
) == GET_MODE_SIZE (m2
))
633 if (m1
== QImode
|| m2
== QImode
)
640 /* Register Classes */
642 /* Implements REGNO_REG_CLASS. */
644 m32c_regno_reg_class (int regno
)
669 if (IS_MEM_REGNO (regno
))
675 /* Implements REGNO_OK_FOR_BASE_P. */
677 m32c_regno_ok_for_base_p (int regno
)
679 if (regno
== A0_REGNO
680 || regno
== A1_REGNO
|| regno
>= FIRST_PSEUDO_REGISTER
)
685 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
686 registers of the appropriate size. */
688 #undef TARGET_PREFERRED_RELOAD_CLASS
689 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
692 m32c_preferred_reload_class (rtx x
, reg_class_t rclass
)
694 reg_class_t newclass
= rclass
;
697 fprintf (stderr
, "\npreferred_reload_class for %s is ",
698 class_names
[rclass
]);
700 if (rclass
== NO_REGS
)
701 rclass
= GET_MODE (x
) == QImode
? HL_REGS
: R03_REGS
;
703 if (reg_classes_intersect_p (rclass
, CR_REGS
))
705 switch (GET_MODE (x
))
711 /* newclass = HI_REGS; */
716 else if (newclass
== QI_REGS
&& GET_MODE_SIZE (GET_MODE (x
)) > 2)
718 else if (GET_MODE_SIZE (GET_MODE (x
)) > 4
719 && ! reg_class_subset_p (R03_REGS
, rclass
))
722 rclass
= reduce_class (rclass
, newclass
, rclass
);
724 if (GET_MODE (x
) == QImode
)
725 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
728 fprintf (stderr
, "%s\n", class_names
[rclass
]);
731 if (GET_CODE (x
) == MEM
732 && GET_CODE (XEXP (x
, 0)) == PLUS
733 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
734 fprintf (stderr
, "Glorm!\n");
739 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
741 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
742 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
745 m32c_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
747 return m32c_preferred_reload_class (x
, rclass
);
750 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
751 address registers for reloads since they're needed for address
754 m32c_limit_reload_class (machine_mode mode
, int rclass
)
757 fprintf (stderr
, "limit_reload_class for %s: %s ->",
758 mode_name
[mode
], class_names
[rclass
]);
762 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
763 else if (mode
== HImode
)
764 rclass
= reduce_class (rclass
, HI_REGS
, rclass
);
765 else if (mode
== SImode
)
766 rclass
= reduce_class (rclass
, SI_REGS
, rclass
);
768 if (rclass
!= A_REGS
)
769 rclass
= reduce_class (rclass
, DI_REGS
, rclass
);
772 fprintf (stderr
, " %s\n", class_names
[rclass
]);
777 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
778 r0 or r1, as those are the only real QImode registers. CR regs get
779 reloaded through appropriately sized general or address
782 m32c_secondary_reload_class (int rclass
, machine_mode mode
, rtx x
)
784 int cc
= class_contents
[rclass
][0];
786 fprintf (stderr
, "\nsecondary reload class %s %s\n",
787 class_names
[rclass
], mode_name
[mode
]);
791 && GET_CODE (x
) == MEM
&& (cc
& ~class_contents
[R23_REGS
][0]) == 0)
793 if (reg_classes_intersect_p (rclass
, CR_REGS
)
794 && GET_CODE (x
) == REG
795 && REGNO (x
) >= SB_REGNO
&& REGNO (x
) <= SP_REGNO
)
796 return (TARGET_A16
|| mode
== HImode
) ? HI_REGS
: A_REGS
;
800 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
803 #undef TARGET_CLASS_LIKELY_SPILLED_P
804 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
807 m32c_class_likely_spilled_p (reg_class_t regclass
)
809 if (regclass
== A_REGS
)
812 return (reg_class_size
[(int) regclass
] == 1);
815 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
816 documented meaning, to avoid potential inconsistencies with actual
817 class definitions. */
819 #undef TARGET_CLASS_MAX_NREGS
820 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
823 m32c_class_max_nregs (reg_class_t regclass
, machine_mode mode
)
826 unsigned char max
= 0;
828 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
829 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) regclass
], rn
))
831 unsigned char n
= m32c_hard_regno_nregs (rn
, mode
);
838 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
839 QI (r0l, r1l) because the chip doesn't support QI ops on other
840 registers (well, it does on a0/a1 but if we let gcc do that, reload
841 suffers). Otherwise, we allow changes to larger modes. */
843 m32c_cannot_change_mode_class (machine_mode from
,
844 machine_mode to
, int rclass
)
848 fprintf (stderr
, "cannot change from %s to %s in %s\n",
849 mode_name
[from
], mode_name
[to
], class_names
[rclass
]);
852 /* If the larger mode isn't allowed in any of these registers, we
853 can't allow the change. */
854 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
855 if (class_contents
[rclass
][0] & (1 << rn
))
856 if (! m32c_hard_regno_ok (rn
, to
))
860 return (class_contents
[rclass
][0] & 0x1ffa);
862 if (class_contents
[rclass
][0] & 0x0005 /* r0, r1 */
863 && GET_MODE_SIZE (from
) > 1)
865 if (GET_MODE_SIZE (from
) > 2) /* all other regs */
871 /* Helpers for the rest of the file. */
/* TRUE if X is a REG rtx for the given hard register number REGNO.  */
#define IS_REG(x,regno) (GET_CODE (x) == REG \
			 && REGNO (x) == (regno))
/* TRUE if X is a pseudo - specifically, one we can use as a base
   register in address calculations (hence the "strict" argument:
   under strict checking, pseudos are not acceptable bases, so this
   always yields false when STRICT is nonzero).  */
#define IS_PSEUDO(x,strict) (!(strict) && GET_CODE (x) == REG \
			     && (REGNO (x) == AP_REGNO \
				 || REGNO (x) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is either the a0 address register or a pseudo.  */
#define A0_OR_PSEUDO(x) (IS_REG (x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
884 /* Implements matching for constraints (see next function too). 'S' is
885 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
886 call return values. */
888 m32c_matches_constraint_p (rtx value
, int constraint
)
890 encode_pattern (value
);
892 switch (constraint
) {
894 return (far_addr_space_p (value
)
896 && A0_OR_PSEUDO (patternr
[1])
897 && GET_MODE (patternr
[1]) == SImode
)
898 || (RTX_IS ("m+^Sri")
899 && A0_OR_PSEUDO (patternr
[4])
900 && GET_MODE (patternr
[4]) == HImode
)
901 || (RTX_IS ("m+^Srs")
902 && A0_OR_PSEUDO (patternr
[4])
903 && GET_MODE (patternr
[4]) == HImode
)
904 || (RTX_IS ("m+^S+ris")
905 && A0_OR_PSEUDO (patternr
[5])
906 && GET_MODE (patternr
[5]) == HImode
)
910 /* This is the common "src/dest" address */
912 if (GET_CODE (value
) == MEM
&& CONSTANT_P (XEXP (value
, 0)))
914 if (RTX_IS ("ms") || RTX_IS ("m+si"))
916 if (RTX_IS ("m++rii"))
918 if (REGNO (patternr
[3]) == FB_REGNO
919 && INTVAL (patternr
[4]) == 0)
924 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
928 if (REGNO (r
) == SP_REGNO
)
930 return m32c_legitimate_address_p (GET_MODE (value
), XEXP (value
, 0), 1);
937 else if (RTX_IS ("m+ri"))
941 return (IS_REG (r
, A0_REGNO
) || IS_REG (r
, A1_REGNO
));
944 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
946 return ((RTX_IS ("mr")
947 && (IS_REG (patternr
[1], SP_REGNO
)))
948 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SP_REGNO
))));
950 return ((RTX_IS ("mr")
951 && (IS_REG (patternr
[1], FB_REGNO
)))
952 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], FB_REGNO
))));
954 return ((RTX_IS ("mr")
955 && (IS_REG (patternr
[1], SB_REGNO
)))
956 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SB_REGNO
))));
958 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
959 return (RTX_IS ("mi")
960 && !(INTVAL (patternr
[1]) & ~0x1fff));
962 return r1h_operand (value
, QImode
);
964 return GET_CODE (value
) == PARALLEL
;
970 /* STACK AND CALLING */
974 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
975 (yes, THREE bytes) onto the stack for the return address, but we
976 don't support pointers bigger than 16 bits on those chips. This
977 will likely wreak havoc with exception unwinding. FIXME. */
979 m32c_return_addr_rtx (int count
)
991 /* It's four bytes */
997 /* FIXME: it's really 3 bytes */
1003 gen_rtx_MEM (mode
, plus_constant (Pmode
, gen_rtx_REG (Pmode
, FP_REGNO
),
1005 return copy_to_mode_reg (mode
, ra_mem
);
1008 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1010 m32c_incoming_return_addr_rtx (void)
1013 return gen_rtx_MEM (PSImode
, gen_rtx_REG (PSImode
, SP_REGNO
));
1016 /* Exception Handling Support */
1018 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1021 m32c_eh_return_data_regno (int n
)
1028 return MEM0_REGNO
+4;
1030 return INVALID_REGNUM
;
1034 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1035 m32c_emit_eh_epilogue. */
1037 m32c_eh_return_stackadj_rtx (void)
1039 if (!cfun
->machine
->eh_stack_adjust
)
1043 sa
= gen_rtx_REG (Pmode
, R0_REGNO
);
1044 cfun
->machine
->eh_stack_adjust
= sa
;
1046 return cfun
->machine
->eh_stack_adjust
;
1049 /* Registers That Address the Stack Frame */
1051 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1052 the original spec called for dwarf numbers to vary with register
1053 width as well, for example, r0l, r0, and r2r0 would each have
1054 different dwarf numbers. GCC doesn't support this, and we don't do
1055 it, and gdb seems to like it this way anyway. */
1057 m32c_dwarf_frame_regnum (int n
)
1083 return DWARF_FRAME_REGISTERS
+ 1;
1087 /* The frame looks like this:
1089 ap -> +------------------------------
1090 | Return address (3 or 4 bytes)
1091 | Saved FB (2 or 4 bytes)
1092 fb -> +------------------------------
1095 | through r0 as needed
1096 sp -> +------------------------------
1099 /* We use this to wrap all emitted insns in the prologue. */
1103 RTX_FRAME_RELATED_P (x
) = 1;
1107 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1108 how much the stack pointer moves for each, for each cpu family. */
1117 /* These are in reverse push (nearest-to-sp) order. */
1118 { R0_REGNO
, 0x80, 2, 2 },
1119 { R1_REGNO
, 0x40, 2, 2 },
1120 { R2_REGNO
, 0x20, 2, 2 },
1121 { R3_REGNO
, 0x10, 2, 2 },
1122 { A0_REGNO
, 0x08, 2, 4 },
1123 { A1_REGNO
, 0x04, 2, 4 },
1124 { SB_REGNO
, 0x02, 2, 4 },
1125 { FB_REGNO
, 0x01, 2, 4 }
1128 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1130 /* Returns TRUE if we need to save/restore the given register. We
1131 save everything for exception handlers, so that any register can be
1132 unwound. For interrupt handlers, we save everything if the handler
1133 calls something else (because we don't know what *that* function
1134 might do), but try to be a bit smarter if the handler is a leaf
1135 function. We always save $a0, though, because we use that in the
1136 epilogue to copy $fb to $sp. */
1138 need_to_save (int regno
)
1140 if (fixed_regs
[regno
])
1142 if (crtl
->calls_eh_return
)
1144 if (regno
== FP_REGNO
)
1146 if (cfun
->machine
->is_interrupt
1147 && (!cfun
->machine
->is_leaf
1148 || (regno
== A0_REGNO
1149 && m32c_function_needs_enter ())
1152 if (df_regs_ever_live_p (regno
)
1153 && (!call_used_regs
[regno
] || cfun
->machine
->is_interrupt
))
1158 /* This function contains all the intelligence about saving and
1159 restoring registers. It always figures out the register save set.
1160 When called with PP_justcount, it merely returns the size of the
1161 save set (for eliminating the frame pointer, for example). When
1162 called with PP_pushm or PP_popm, it emits the appropriate
1163 instructions for saving (pushm) or restoring (popm) the
1166 m32c_pushm_popm (Push_Pop_Type ppt
)
1169 int byte_count
= 0, bytes
;
1171 rtx dwarf_set
[PUSHM_N
];
1173 int nosave_mask
= 0;
1175 if (crtl
->return_rtx
1176 && GET_CODE (crtl
->return_rtx
) == PARALLEL
1177 && !(crtl
->calls_eh_return
|| cfun
->machine
->is_interrupt
))
1179 rtx exp
= XVECEXP (crtl
->return_rtx
, 0, 0);
1180 rtx rv
= XEXP (exp
, 0);
1181 int rv_bytes
= GET_MODE_SIZE (GET_MODE (rv
));
1184 nosave_mask
|= 0x20; /* PSI, SI */
1186 nosave_mask
|= 0xf0; /* DF */
1188 nosave_mask
|= 0x50; /* DI */
1191 for (i
= 0; i
< (int) PUSHM_N
; i
++)
1193 /* Skip if neither register needs saving. */
1194 if (!need_to_save (pushm_info
[i
].reg1
))
1197 if (pushm_info
[i
].bit
& nosave_mask
)
1200 reg_mask
|= pushm_info
[i
].bit
;
1201 bytes
= TARGET_A16
? pushm_info
[i
].a16_bytes
: pushm_info
[i
].a24_bytes
;
1203 if (ppt
== PP_pushm
)
1205 machine_mode mode
= (bytes
== 2) ? HImode
: SImode
;
1208 /* Always use stack_pointer_rtx instead of calling
1209 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1210 that there is a single rtx representing the stack pointer,
1211 namely stack_pointer_rtx, and uses == to recognize it. */
1212 addr
= stack_pointer_rtx
;
1214 if (byte_count
!= 0)
1215 addr
= gen_rtx_PLUS (GET_MODE (addr
), addr
, GEN_INT (byte_count
));
1217 dwarf_set
[n_dwarfs
++] =
1218 gen_rtx_SET (gen_rtx_MEM (mode
, addr
),
1219 gen_rtx_REG (mode
, pushm_info
[i
].reg1
));
1220 F (dwarf_set
[n_dwarfs
- 1]);
1223 byte_count
+= bytes
;
1226 if (cfun
->machine
->is_interrupt
)
1228 cfun
->machine
->intr_pushm
= reg_mask
& 0xfe;
1233 if (cfun
->machine
->is_interrupt
)
1234 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1235 if (need_to_save (i
))
1238 cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
] = 1;
1241 if (ppt
== PP_pushm
&& byte_count
)
1243 rtx note
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (n_dwarfs
+ 1));
1248 XVECEXP (note
, 0, 0)
1249 = gen_rtx_SET (stack_pointer_rtx
,
1250 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1252 GEN_INT (-byte_count
)));
1253 F (XVECEXP (note
, 0, 0));
1255 for (i
= 0; i
< n_dwarfs
; i
++)
1256 XVECEXP (note
, 0, i
+ 1) = dwarf_set
[i
];
1258 pushm
= F (emit_insn (gen_pushm (GEN_INT (reg_mask
))));
1260 add_reg_note (pushm
, REG_FRAME_RELATED_EXPR
, note
);
1263 if (cfun
->machine
->is_interrupt
)
1264 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1265 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1268 pushm
= emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode
, i
)));
1270 pushm
= emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode
, i
)));
1274 if (ppt
== PP_popm
&& byte_count
)
1276 if (cfun
->machine
->is_interrupt
)
1277 for (i
= MEM7_REGNO
; i
>= MEM0_REGNO
; i
--)
1278 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1281 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, i
)));
1283 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode
, i
)));
1286 emit_insn (gen_popm (GEN_INT (reg_mask
)));
1292 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1293 diagrams our call frame. */
1295 m32c_initial_elimination_offset (int from
, int to
)
1299 if (from
== AP_REGNO
)
1309 ofs
+= m32c_pushm_popm (PP_justcount
);
1310 ofs
+= get_frame_size ();
1313 /* Account for push rounding. */
1315 ofs
= (ofs
+ 1) & ~1;
1317 fprintf (stderr
, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from
,
1323 /* Passing Function Arguments on the Stack */
1325 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1326 M32C has word stacks. */
1328 m32c_push_rounding (int n
)
1330 if (TARGET_R8C
|| TARGET_M16C
)
1332 return (n
+ 1) & ~1;
1335 /* Passing Arguments in Registers */
1337 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1338 registers, partly on stack. If our function returns a struct, a
1339 pointer to a buffer for it is at the top of the stack (last thing
1340 pushed). The first few real arguments may be in registers as
1343 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1344 arg2 in r2 if it's HI (else pushed on stack)
1346 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1349 Structs are not passed in registers, even if they fit. Only
1350 integer and pointer types are passed in registers.
1352 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1354 #undef TARGET_FUNCTION_ARG
1355 #define TARGET_FUNCTION_ARG m32c_function_arg
1357 m32c_function_arg (cumulative_args_t ca_v
,
1358 machine_mode mode
, const_tree type
, bool named
)
1360 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1362 /* Can return a reg, parallel, or 0 for stack */
1365 fprintf (stderr
, "func_arg %d (%s, %d)\n",
1366 ca
->parm_num
, mode_name
[mode
], named
);
1367 debug_tree ((tree
)type
);
1370 if (mode
== VOIDmode
)
1373 if (ca
->force_mem
|| !named
)
1376 fprintf (stderr
, "func arg: force %d named %d, mem\n", ca
->force_mem
,
1382 if (type
&& INTEGRAL_TYPE_P (type
) && POINTER_TYPE_P (type
))
1385 if (type
&& AGGREGATE_TYPE_P (type
))
1388 switch (ca
->parm_num
)
1391 if (GET_MODE_SIZE (mode
) == 1 || GET_MODE_SIZE (mode
) == 2)
1392 rv
= gen_rtx_REG (mode
, TARGET_A16
? R1_REGNO
: R0_REGNO
);
1396 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 2)
1397 rv
= gen_rtx_REG (mode
, R2_REGNO
);
1407 #undef TARGET_PASS_BY_REFERENCE
1408 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1410 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED
,
1411 machine_mode mode ATTRIBUTE_UNUSED
,
1412 const_tree type ATTRIBUTE_UNUSED
,
1413 bool named ATTRIBUTE_UNUSED
)
1418 /* Implements INIT_CUMULATIVE_ARGS. */
1420 m32c_init_cumulative_args (CUMULATIVE_ARGS
* ca
,
1422 rtx libname ATTRIBUTE_UNUSED
,
1424 int n_named_args ATTRIBUTE_UNUSED
)
1426 if (fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fndecl
))
1433 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1434 functions returning structures, so we always reset that. Otherwise,
1435 we only need to know the sequence number of the argument to know what
1437 #undef TARGET_FUNCTION_ARG_ADVANCE
1438 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1440 m32c_function_arg_advance (cumulative_args_t ca_v
,
1441 machine_mode mode ATTRIBUTE_UNUSED
,
1442 const_tree type ATTRIBUTE_UNUSED
,
1443 bool named ATTRIBUTE_UNUSED
)
1445 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1453 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1454 #undef TARGET_FUNCTION_ARG_BOUNDARY
1455 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1457 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED
,
1458 const_tree type ATTRIBUTE_UNUSED
)
1460 return (TARGET_A16
? 8 : 16);
1463 /* Implements FUNCTION_ARG_REGNO_P. */
1465 m32c_function_arg_regno_p (int r
)
1468 return (r
== R0_REGNO
);
1469 return (r
== R1_REGNO
|| r
== R2_REGNO
);
1472 /* HImode and PSImode are the two "native" modes as far as GCC is
1473 concerned, but the chips also support a 32-bit mode which is used
1474 for some opcodes in R8C/M16C and for reset vectors and such. */
1475 #undef TARGET_VALID_POINTER_MODE
1476 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1478 m32c_valid_pointer_mode (machine_mode mode
)
1488 /* How Scalar Function Values Are Returned */
1490 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1491 combination of registers starting there (r2r0 for longs, r3r1r2r0
1492 for long long, r3r2r1r0 for doubles), except that that ABI
1493 currently doesn't work because it ends up using all available
1494 general registers and gcc often can't compile it. So, instead, we
1495 return anything bigger than 16 bits in "mem0" (effectively, a
1496 memory location). */
1498 #undef TARGET_LIBCALL_VALUE
1499 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1502 m32c_libcall_value (machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
1504 /* return reg or parallel */
1506 /* FIXME: GCC has difficulty returning large values in registers,
1507 because that ties up most of the general registers and gives the
1508 register allocator little to work with. Until we can resolve
1509 this, large values are returned in memory. */
1514 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (4));
1515 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1516 gen_rtx_REG (HImode
,
1519 XVECEXP (rv
, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode
,
1520 gen_rtx_REG (HImode
,
1523 XVECEXP (rv
, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode
,
1524 gen_rtx_REG (HImode
,
1527 XVECEXP (rv
, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode
,
1528 gen_rtx_REG (HImode
,
1534 if (TARGET_A24
&& GET_MODE_SIZE (mode
) > 2)
1538 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
1539 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1547 if (GET_MODE_SIZE (mode
) > 2)
1548 return gen_rtx_REG (mode
, MEM0_REGNO
);
1549 return gen_rtx_REG (mode
, R0_REGNO
);
1552 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1555 #undef TARGET_FUNCTION_VALUE
1556 #define TARGET_FUNCTION_VALUE m32c_function_value
1559 m32c_function_value (const_tree valtype
,
1560 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1561 bool outgoing ATTRIBUTE_UNUSED
)
1563 /* return reg or parallel */
1564 const machine_mode mode
= TYPE_MODE (valtype
);
1565 return m32c_libcall_value (mode
, NULL_RTX
);
1568 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1570 #undef TARGET_FUNCTION_VALUE_REGNO_P
1571 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1574 m32c_function_value_regno_p (const unsigned int regno
)
1576 return (regno
== R0_REGNO
|| regno
== MEM0_REGNO
);
1579 /* How Large Values Are Returned */
1581 /* We return structures by pushing the address on the stack, even if
1582 we use registers for the first few "real" arguments. */
1583 #undef TARGET_STRUCT_VALUE_RTX
1584 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1586 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED
,
1587 int incoming ATTRIBUTE_UNUSED
)
1592 /* Function Entry and Exit */
1594 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1596 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
1598 if (cfun
->machine
->is_interrupt
)
1603 /* Implementing the Varargs Macros */
1605 #undef TARGET_STRICT_ARGUMENT_NAMING
1606 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1608 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1613 /* Trampolines for Nested Functions */
1617 1 0000 75C43412 mov.w #0x1234,a0
1618 2 0004 FC000000 jmp.a label
1621 1 0000 BC563412 mov.l:s #0x123456,a0
1622 2 0004 CC000000 jmp.a label
1625 /* Implements TRAMPOLINE_SIZE. */
1627 m32c_trampoline_size (void)
1629 /* Allocate extra space so we can avoid the messy shifts when we
1630 initialize the trampoline; we just write past the end of the
1632 return TARGET_A16
? 8 : 10;
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines only need 16-bit
   (word) alignment on this target.  */
static int
m32c_trampoline_alignment (void)
{
  return 2;
}
1642 /* Implements TARGET_TRAMPOLINE_INIT. */
1644 #undef TARGET_TRAMPOLINE_INIT
1645 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1647 m32c_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chainval
)
1649 rtx function
= XEXP (DECL_RTL (fndecl
), 0);
1651 #define A0(m,i) adjust_address (m_tramp, m, i)
1654 /* Note: we subtract a "word" because the moves want signed
1655 constants, not unsigned constants. */
1656 emit_move_insn (A0 (HImode
, 0), GEN_INT (0xc475 - 0x10000));
1657 emit_move_insn (A0 (HImode
, 2), chainval
);
1658 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xfc - 0x100));
1659 /* We use 16-bit addresses here, but store the zero to turn it
1660 into a 24-bit offset. */
1661 emit_move_insn (A0 (HImode
, 5), function
);
1662 emit_move_insn (A0 (QImode
, 7), GEN_INT (0x00));
1666 /* Note that the PSI moves actually write 4 bytes. Make sure we
1667 write stuff out in the right order, and leave room for the
1668 extra byte at the end. */
1669 emit_move_insn (A0 (QImode
, 0), GEN_INT (0xbc - 0x100));
1670 emit_move_insn (A0 (PSImode
, 1), chainval
);
1671 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xcc - 0x100));
1672 emit_move_insn (A0 (PSImode
, 5), function
);
1677 /* Addressing Modes */
1679 /* The r8c/m32c family supports a wide range of non-orthogonal
1680 addressing modes, including the ability to double-indirect on *some*
1681 of them. Not all insns support all modes, either, but we rely on
1682 predicates and constraints to deal with that. */
1683 #undef TARGET_LEGITIMATE_ADDRESS_P
1684 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1686 m32c_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1692 if (TARGET_A16
&& GET_MODE (x
) != HImode
&& GET_MODE (x
) != SImode
)
1694 if (TARGET_A24
&& GET_MODE (x
) != PSImode
)
1697 /* Wide references to memory will be split after reload, so we must
1698 ensure that all parts of such splits remain legitimate
1700 mode_adjust
= GET_MODE_SIZE (mode
) - 1;
1702 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1703 if (GET_CODE (x
) == PRE_DEC
1704 || GET_CODE (x
) == POST_INC
|| GET_CODE (x
) == PRE_MODIFY
)
1706 return (GET_CODE (XEXP (x
, 0)) == REG
1707 && REGNO (XEXP (x
, 0)) == SP_REGNO
);
1711 /* This is the double indirection detection, but it currently
1712 doesn't work as cleanly as this code implies, so until we've had
1713 a chance to debug it, leave it disabled. */
1714 if (TARGET_A24
&& GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) != PLUS
)
1717 fprintf (stderr
, "double indirect\n");
1726 /* Most indexable registers can be used without displacements,
1727 although some of them will be emitted with an explicit zero
1728 to please the assembler. */
1729 switch (REGNO (patternr
[0]))
1735 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1741 if (IS_PSEUDO (patternr
[0], strict
))
1747 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1752 /* This is more interesting, because different base registers
1753 allow for different displacements - both range and signedness
1754 - and it differs from chip series to chip series too. */
1755 int rn
= REGNO (patternr
[1]);
1756 HOST_WIDE_INT offs
= INTVAL (patternr
[2]);
1762 /* The syntax only allows positive offsets, but when the
1763 offsets span the entire memory range, we can simulate
1764 negative offsets by wrapping. */
1766 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1768 return (offs
>= 0 && offs
<= 65535 - mode_adjust
);
1770 return (offs
>= -16777216 && offs
<= 16777215);
1774 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1775 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1778 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1781 if (IS_PSEUDO (patternr
[1], strict
))
1786 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1788 rtx reg
= patternr
[1];
1790 /* We don't know where the symbol is, so only allow base
1791 registers which support displacements spanning the whole
1793 switch (REGNO (reg
))
1797 /* $sb needs a secondary reload, but since it's involved in
1798 memory address reloads too, we don't deal with it very
1800 /* case SB_REGNO: */
1803 if (GET_CODE (reg
) == SUBREG
)
1805 if (IS_PSEUDO (reg
, strict
))
1813 /* Implements REG_OK_FOR_BASE_P. */
1815 m32c_reg_ok_for_base_p (rtx x
, int strict
)
1817 if (GET_CODE (x
) != REG
)
1828 if (IS_PSEUDO (x
, strict
))
1834 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1835 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1837 EB 4B FF mova -128[$fb],$a0
1838 D8 0C FF FF mov.w:Q #0,-1[$a0]
1840 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1843 77 54 00 01 sub #256,$a0
1844 D8 08 01 mov.w:Q #0,1[$a0]
1846 If we don't offset (i.e. offset by zero), we end up with:
1848 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1850 We have to subtract *something* so that we have a PLUS rtx to mark
1851 that we've done this reload. The -128 offset will never result in
1852 an 8-bit aN offset, and the payoff for the second case is five
1853 loads *if* those loads are within 256 bytes of the other end of the
1854 frame, so the third case seems best. Note that we subtract the
1855 zero, but detect that in the addhi3 pattern. */
1857 #define BIG_FB_ADJ 0
1859 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1860 worry about is frame base offsets, as $fb has a limited
1861 displacement range. We deal with this by attempting to reload $fb
1862 itself into an address register; that seems to result in the best
1864 #undef TARGET_LEGITIMIZE_ADDRESS
1865 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1867 m32c_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1871 fprintf (stderr
, "m32c_legitimize_address for mode %s\n", mode_name
[mode
]);
1873 fprintf (stderr
, "\n");
1876 if (GET_CODE (x
) == PLUS
1877 && GET_CODE (XEXP (x
, 0)) == REG
1878 && REGNO (XEXP (x
, 0)) == FB_REGNO
1879 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1880 && (INTVAL (XEXP (x
, 1)) < -128
1881 || INTVAL (XEXP (x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1883 /* reload FB to A_REGS */
1884 rtx temp
= gen_reg_rtx (Pmode
);
1886 emit_insn (gen_rtx_SET (temp
, XEXP (x
, 0)));
1893 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1895 m32c_legitimize_reload_address (rtx
* x
,
1898 int type
, int ind_levels ATTRIBUTE_UNUSED
)
1901 fprintf (stderr
, "\nm32c_legitimize_reload_address for mode %s\n",
1906 /* At one point, this function tried to get $fb copied to an address
1907 register, which in theory would maximize sharing, but gcc was
1908 *also* still trying to reload the whole address, and we'd run out
1909 of address registers. So we let gcc do the naive (but safe)
1910 reload instead, when the above function doesn't handle it for
1913 The code below is a second attempt at the above. */
1915 if (GET_CODE (*x
) == PLUS
1916 && GET_CODE (XEXP (*x
, 0)) == REG
1917 && REGNO (XEXP (*x
, 0)) == FB_REGNO
1918 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1919 && (INTVAL (XEXP (*x
, 1)) < -128
1920 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1923 int offset
= INTVAL (XEXP (*x
, 1));
1924 int adjustment
= -BIG_FB_ADJ
;
1926 sum
= gen_rtx_PLUS (Pmode
, XEXP (*x
, 0),
1927 GEN_INT (adjustment
));
1928 *x
= gen_rtx_PLUS (Pmode
, sum
, GEN_INT (offset
- adjustment
));
1929 if (type
== RELOAD_OTHER
)
1930 type
= RELOAD_FOR_OTHER_ADDRESS
;
1931 push_reload (sum
, NULL_RTX
, &XEXP (*x
, 0), NULL
,
1932 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1933 (enum reload_type
) type
);
1937 if (GET_CODE (*x
) == PLUS
1938 && GET_CODE (XEXP (*x
, 0)) == PLUS
1939 && GET_CODE (XEXP (XEXP (*x
, 0), 0)) == REG
1940 && REGNO (XEXP (XEXP (*x
, 0), 0)) == FB_REGNO
1941 && GET_CODE (XEXP (XEXP (*x
, 0), 1)) == CONST_INT
1942 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1945 if (type
== RELOAD_OTHER
)
1946 type
= RELOAD_FOR_OTHER_ADDRESS
;
1947 push_reload (XEXP (*x
, 0), NULL_RTX
, &XEXP (*x
, 0), NULL
,
1948 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1949 (enum reload_type
) type
);
1953 if (TARGET_A24
&& GET_MODE (*x
) == PSImode
)
1955 push_reload (*x
, NULL_RTX
, x
, NULL
,
1956 A_REGS
, PSImode
, VOIDmode
, 0, 0, opnum
,
1957 (enum reload_type
) type
);
1964 /* Return the appropriate mode for a named address pointer. */
1965 #undef TARGET_ADDR_SPACE_POINTER_MODE
1966 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1968 m32c_addr_space_pointer_mode (addr_space_t addrspace
)
1972 case ADDR_SPACE_GENERIC
:
1973 return TARGET_A24
? PSImode
: HImode
;
1974 case ADDR_SPACE_FAR
:
1981 /* Return the appropriate mode for a named address address. */
1982 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1983 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1985 m32c_addr_space_address_mode (addr_space_t addrspace
)
1989 case ADDR_SPACE_GENERIC
:
1990 return TARGET_A24
? PSImode
: HImode
;
1991 case ADDR_SPACE_FAR
:
1998 /* Like m32c_legitimate_address_p, except with named addresses. */
1999 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
2000 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
2001 m32c_addr_space_legitimate_address_p
2003 m32c_addr_space_legitimate_address_p (machine_mode mode
, rtx x
,
2004 bool strict
, addr_space_t as
)
2006 if (as
== ADDR_SPACE_FAR
)
2013 if (GET_MODE (x
) != SImode
)
2015 switch (REGNO (patternr
[0]))
2021 if (IS_PSEUDO (patternr
[0], strict
))
2026 if (RTX_IS ("+^Sri"))
2028 int rn
= REGNO (patternr
[3]);
2029 HOST_WIDE_INT offs
= INTVAL (patternr
[4]);
2030 if (GET_MODE (patternr
[3]) != HImode
)
2035 return (offs
>= 0 && offs
<= 0xfffff);
2038 if (IS_PSEUDO (patternr
[3], strict
))
2043 if (RTX_IS ("+^Srs"))
2045 int rn
= REGNO (patternr
[3]);
2046 if (GET_MODE (patternr
[3]) != HImode
)
2054 if (IS_PSEUDO (patternr
[3], strict
))
2059 if (RTX_IS ("+^S+ris"))
2061 int rn
= REGNO (patternr
[4]);
2062 if (GET_MODE (patternr
[4]) != HImode
)
2070 if (IS_PSEUDO (patternr
[4], strict
))
2082 else if (as
!= ADDR_SPACE_GENERIC
)
2085 return m32c_legitimate_address_p (mode
, x
, strict
);
2088 /* Like m32c_legitimate_address, except with named address support. */
2089 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2090 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2092 m32c_addr_space_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
,
2095 if (as
!= ADDR_SPACE_GENERIC
)
2098 fprintf (stderr
, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name
[mode
]);
2100 fprintf (stderr
, "\n");
2103 if (GET_CODE (x
) != REG
)
2105 x
= force_reg (SImode
, x
);
2110 return m32c_legitimize_address (x
, oldx
, mode
);
2113 /* Determine if one named address space is a subset of another. */
2114 #undef TARGET_ADDR_SPACE_SUBSET_P
2115 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2117 m32c_addr_space_subset_p (addr_space_t subset
, addr_space_t superset
)
2119 gcc_assert (subset
== ADDR_SPACE_GENERIC
|| subset
== ADDR_SPACE_FAR
);
2120 gcc_assert (superset
== ADDR_SPACE_GENERIC
|| superset
== ADDR_SPACE_FAR
);
2122 if (subset
== superset
)
2126 return (subset
== ADDR_SPACE_GENERIC
&& superset
== ADDR_SPACE_FAR
);
2129 #undef TARGET_ADDR_SPACE_CONVERT
2130 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2131 /* Convert from one address space to another. */
2133 m32c_addr_space_convert (rtx op
, tree from_type
, tree to_type
)
2135 addr_space_t from_as
= TYPE_ADDR_SPACE (TREE_TYPE (from_type
));
2136 addr_space_t to_as
= TYPE_ADDR_SPACE (TREE_TYPE (to_type
));
2139 gcc_assert (from_as
== ADDR_SPACE_GENERIC
|| from_as
== ADDR_SPACE_FAR
);
2140 gcc_assert (to_as
== ADDR_SPACE_GENERIC
|| to_as
== ADDR_SPACE_FAR
);
2142 if (to_as
== ADDR_SPACE_GENERIC
&& from_as
== ADDR_SPACE_FAR
)
2144 /* This is unpredictable, as we're truncating off usable address
2147 result
= gen_reg_rtx (HImode
);
2148 emit_move_insn (result
, simplify_subreg (HImode
, op
, SImode
, 0));
2151 else if (to_as
== ADDR_SPACE_FAR
&& from_as
== ADDR_SPACE_GENERIC
)
2153 /* This always works. */
2154 result
= gen_reg_rtx (SImode
);
2155 emit_insn (gen_zero_extendhisi2 (result
, op
));
2162 /* Condition Code Status */
2164 #undef TARGET_FIXED_CONDITION_CODE_REGS
2165 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2167 m32c_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
2170 *p2
= INVALID_REGNUM
;
2174 /* Describing Relative Costs of Operations */
2176 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2177 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2178 no opcodes to do that). We also discourage use of mem* registers
2179 since they're really memory. */
2181 #undef TARGET_REGISTER_MOVE_COST
2182 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2185 m32c_register_move_cost (machine_mode mode
, reg_class_t from
,
2188 int cost
= COSTS_N_INSNS (3);
2191 /* FIXME: pick real values, but not 2 for now. */
2192 COPY_HARD_REG_SET (cc
, reg_class_contents
[(int) from
]);
2193 IOR_HARD_REG_SET (cc
, reg_class_contents
[(int) to
]);
2196 && hard_reg_set_intersect_p (cc
, reg_class_contents
[R23_REGS
]))
2198 if (hard_reg_set_subset_p (cc
, reg_class_contents
[R23_REGS
]))
2199 cost
= COSTS_N_INSNS (1000);
2201 cost
= COSTS_N_INSNS (80);
2204 if (!class_can_hold_mode (from
, mode
) || !class_can_hold_mode (to
, mode
))
2205 cost
= COSTS_N_INSNS (1000);
2207 if (reg_classes_intersect_p (from
, CR_REGS
))
2208 cost
+= COSTS_N_INSNS (5);
2210 if (reg_classes_intersect_p (to
, CR_REGS
))
2211 cost
+= COSTS_N_INSNS (5);
2213 if (from
== MEM_REGS
|| to
== MEM_REGS
)
2214 cost
+= COSTS_N_INSNS (50);
2215 else if (reg_classes_intersect_p (from
, MEM_REGS
)
2216 || reg_classes_intersect_p (to
, MEM_REGS
))
2217 cost
+= COSTS_N_INSNS (10);
2220 fprintf (stderr
, "register_move_cost %s from %s to %s = %d\n",
2221 mode_name
[mode
], class_names
[(int) from
], class_names
[(int) to
],
2227 /* Implements TARGET_MEMORY_MOVE_COST. */
2229 #undef TARGET_MEMORY_MOVE_COST
2230 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2233 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2234 reg_class_t rclass ATTRIBUTE_UNUSED
,
2235 bool in ATTRIBUTE_UNUSED
)
2237 /* FIXME: pick real values. */
2238 return COSTS_N_INSNS (10);
2241 /* Here we try to describe when we use multiple opcodes for one RTX so
2242 that gcc knows when to use them. */
2243 #undef TARGET_RTX_COSTS
2244 #define TARGET_RTX_COSTS m32c_rtx_costs
2246 m32c_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2247 int *total
, bool speed ATTRIBUTE_UNUSED
)
2252 if (REGNO (x
) >= MEM0_REGNO
&& REGNO (x
) <= MEM7_REGNO
)
2253 *total
+= COSTS_N_INSNS (500);
2255 *total
+= COSTS_N_INSNS (1);
2261 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
2263 /* mov.b r1l, r1h */
2264 *total
+= COSTS_N_INSNS (1);
2267 if (INTVAL (XEXP (x
, 1)) > 8
2268 || INTVAL (XEXP (x
, 1)) < -8)
2271 /* mov.b r1l, r1h */
2272 *total
+= COSTS_N_INSNS (2);
2287 if (outer_code
== SET
)
2289 *total
+= COSTS_N_INSNS (2);
2296 rtx dest
= XEXP (x
, 0);
2297 rtx addr
= XEXP (dest
, 0);
2298 switch (GET_CODE (addr
))
2301 *total
+= COSTS_N_INSNS (1);
2304 *total
+= COSTS_N_INSNS (3);
2307 *total
+= COSTS_N_INSNS (2);
2315 /* Reasonable default. */
2316 if (TARGET_A16
&& GET_MODE(x
) == SImode
)
2317 *total
+= COSTS_N_INSNS (2);
2323 #undef TARGET_ADDRESS_COST
2324 #define TARGET_ADDRESS_COST m32c_address_cost
2326 m32c_address_cost (rtx addr
, machine_mode mode ATTRIBUTE_UNUSED
,
2327 addr_space_t as ATTRIBUTE_UNUSED
,
2328 bool speed ATTRIBUTE_UNUSED
)
2331 /* fprintf(stderr, "\naddress_cost\n");
2333 switch (GET_CODE (addr
))
2338 return COSTS_N_INSNS(1);
2339 if (0 < i
&& i
<= 255)
2340 return COSTS_N_INSNS(2);
2341 if (0 < i
&& i
<= 65535)
2342 return COSTS_N_INSNS(3);
2343 return COSTS_N_INSNS(4);
2345 return COSTS_N_INSNS(4);
2347 return COSTS_N_INSNS(1);
2349 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
2351 i
= INTVAL (XEXP (addr
, 1));
2353 return COSTS_N_INSNS(1);
2354 if (0 < i
&& i
<= 255)
2355 return COSTS_N_INSNS(2);
2356 if (0 < i
&& i
<= 65535)
2357 return COSTS_N_INSNS(3);
2359 return COSTS_N_INSNS(4);
2365 /* Defining the Output Assembler Language */
2367 /* Output of Data */
2369 /* We may have 24 bit sizes, which is the native address size.
2370 Currently unused, but provided for completeness. */
2371 #undef TARGET_ASM_INTEGER
2372 #define TARGET_ASM_INTEGER m32c_asm_integer
2374 m32c_asm_integer (rtx x
, unsigned int size
, int aligned_p
)
2379 fprintf (asm_out_file
, "\t.3byte\t");
2380 output_addr_const (asm_out_file
, x
);
2381 fputc ('\n', asm_out_file
);
2384 if (GET_CODE (x
) == SYMBOL_REF
)
2386 fprintf (asm_out_file
, "\t.long\t");
2387 output_addr_const (asm_out_file
, x
);
2388 fputc ('\n', asm_out_file
);
2393 return default_assemble_integer (x
, size
, aligned_p
);
2396 /* Output of Assembler Instructions */
2398 /* We use a lookup table because the addressing modes are non-orthogonal. */
2403 char const *pattern
;
2406 const conversions
[] = {
2409 { 0, "mr", "z[1]" },
2410 { 0, "m+ri", "3[2]" },
2411 { 0, "m+rs", "3[2]" },
2412 { 0, "m+^Zrs", "5[4]" },
2413 { 0, "m+^Zri", "5[4]" },
2414 { 0, "m+^Z+ris", "7+6[5]" },
2415 { 0, "m+^Srs", "5[4]" },
2416 { 0, "m+^Sri", "5[4]" },
2417 { 0, "m+^S+ris", "7+6[5]" },
2418 { 0, "m+r+si", "4+5[2]" },
2421 { 0, "m+si", "2+3" },
2423 { 0, "mmr", "[z[2]]" },
2424 { 0, "mm+ri", "[4[3]]" },
2425 { 0, "mm+rs", "[4[3]]" },
2426 { 0, "mm+r+si", "[5+6[3]]" },
2427 { 0, "mms", "[[2]]" },
2428 { 0, "mmi", "[[2]]" },
2429 { 0, "mm+si", "[4[3]]" },
2433 { 0, "+si", "#1+2" },
2439 { 'd', "+si", "1+2" },
2442 { 'D', "+si", "1+2" },
2453 /* This is in order according to the bitfield that pushm/popm use. */
2454 static char const *pushm_regs
[] = {
2455 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2458 /* Implements TARGET_PRINT_OPERAND. */
2460 #undef TARGET_PRINT_OPERAND
2461 #define TARGET_PRINT_OPERAND m32c_print_operand
2464 m32c_print_operand (FILE * file
, rtx x
, int code
)
2469 int unsigned_const
= 0;
2472 /* Multiplies; constants are converted to sign-extended format but
2473 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2485 /* This one is only for debugging; you can put it in a pattern to
2486 force this error. */
2489 fprintf (stderr
, "dj: unreviewed pattern:");
2490 if (current_output_insn
)
2491 debug_rtx (current_output_insn
);
2494 /* PSImode operations are either .w or .l depending on the target. */
2498 fprintf (file
, "w");
2500 fprintf (file
, "l");
2503 /* Inverted conditionals. */
2506 switch (GET_CODE (x
))
2512 fputs ("gtu", file
);
2518 fputs ("geu", file
);
2524 fputs ("leu", file
);
2530 fputs ("ltu", file
);
2543 /* Regular conditionals. */
2546 switch (GET_CODE (x
))
2552 fputs ("leu", file
);
2558 fputs ("ltu", file
);
2564 fputs ("gtu", file
);
2570 fputs ("geu", file
);
2583 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2585 if (code
== 'h' && GET_MODE (x
) == SImode
)
2587 x
= m32c_subreg (HImode
, x
, SImode
, 0);
2590 if (code
== 'H' && GET_MODE (x
) == SImode
)
2592 x
= m32c_subreg (HImode
, x
, SImode
, 2);
2595 if (code
== 'h' && GET_MODE (x
) == HImode
)
2597 x
= m32c_subreg (QImode
, x
, HImode
, 0);
2600 if (code
== 'H' && GET_MODE (x
) == HImode
)
2602 /* We can't actually represent this as an rtx. Do it here. */
2603 if (GET_CODE (x
) == REG
)
2608 fputs ("r0h", file
);
2611 fputs ("r1h", file
);
2617 /* This should be a MEM. */
2618 x
= m32c_subreg (QImode
, x
, HImode
, 1);
2621 /* This is for BMcond, which always wants word register names. */
2622 if (code
== 'h' && GET_MODE (x
) == QImode
)
2624 if (GET_CODE (x
) == REG
)
2625 x
= gen_rtx_REG (HImode
, REGNO (x
));
2628 /* 'x' and 'X' need to be ignored for non-immediates. */
2629 if ((code
== 'x' || code
== 'X') && GET_CODE (x
) != CONST_INT
)
2634 for (i
= 0; conversions
[i
].pattern
; i
++)
2635 if (conversions
[i
].code
== code
2636 && streq (conversions
[i
].pattern
, pattern
))
2638 for (j
= 0; conversions
[i
].format
[j
]; j
++)
2639 /* backslash quotes the next character in the output pattern. */
2640 if (conversions
[i
].format
[j
] == '\\')
2642 fputc (conversions
[i
].format
[j
+ 1], file
);
2645 /* Digits in the output pattern indicate that the
2646 corresponding RTX is to be output at that point. */
2647 else if (ISDIGIT (conversions
[i
].format
[j
]))
2649 rtx r
= patternr
[conversions
[i
].format
[j
] - '0'];
2650 switch (GET_CODE (r
))
2653 fprintf (file
, "%s",
2654 reg_name_with_mode (REGNO (r
), GET_MODE (r
)));
2663 int i
= (int) exact_log2 (v
);
2665 i
= (int) exact_log2 ((v
^ 0xffff) & 0xffff);
2667 i
= (int) exact_log2 ((v
^ 0xff) & 0xff);
2669 fprintf (file
, "%d", i
);
2673 /* Unsigned byte. */
2674 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2678 /* Unsigned word. */
2679 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2680 INTVAL (r
) & 0xffff);
2683 /* pushm and popm encode a register set into a single byte. */
2685 for (b
= 7; b
>= 0; b
--)
2686 if (INTVAL (r
) & (1 << b
))
2688 fprintf (file
, "%s%s", comma
, pushm_regs
[b
]);
2693 /* "Minus". Output -X */
2694 ival
= (-INTVAL (r
) & 0xffff);
2696 ival
= ival
- 0x10000;
2697 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2701 if (conversions
[i
].format
[j
+ 1] == '[' && ival
< 0)
2703 /* We can simulate negative displacements by
2704 taking advantage of address space
2705 wrapping when the offset can span the
2706 entire address range. */
2708 patternr
[conversions
[i
].format
[j
+ 2] - '0'];
2709 if (GET_CODE (base
) == REG
)
2710 switch (REGNO (base
))
2715 ival
= 0x1000000 + ival
;
2717 ival
= 0x10000 + ival
;
2721 ival
= 0x10000 + ival
;
2725 else if (code
== 'd' && ival
< 0 && j
== 0)
2726 /* The "mova" opcode is used to do addition by
2727 computing displacements, but again, we need
2728 displacements to be unsigned *if* they're
2729 the only component of the displacement
2730 (i.e. no "symbol-4" type displacement). */
2731 ival
= (TARGET_A24
? 0x1000000 : 0x10000) + ival
;
2733 if (conversions
[i
].format
[j
] == '0')
2735 /* More conversions to unsigned. */
2736 if (unsigned_const
== 2)
2738 if (unsigned_const
== 1)
2741 if (streq (conversions
[i
].pattern
, "mi")
2742 || streq (conversions
[i
].pattern
, "mmi"))
2744 /* Integers used as addresses are unsigned. */
2745 ival
&= (TARGET_A24
? 0xffffff : 0xffff);
2747 if (force_sign
&& ival
>= 0)
2749 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2754 /* We don't have const_double constants. If it
2755 happens, make it obvious. */
2756 fprintf (file
, "[const_double 0x%lx]",
2757 (unsigned long) CONST_DOUBLE_HIGH (r
));
2760 assemble_name (file
, XSTR (r
, 0));
2763 output_asm_label (r
);
2766 fprintf (stderr
, "don't know how to print this operand:");
2773 if (conversions
[i
].format
[j
] == 'z')
2775 /* Some addressing modes *must* have a displacement,
2776 so insert a zero here if needed. */
2778 for (k
= j
+ 1; conversions
[i
].format
[k
]; k
++)
2779 if (ISDIGIT (conversions
[i
].format
[k
]))
2781 rtx reg
= patternr
[conversions
[i
].format
[k
] - '0'];
2782 if (GET_CODE (reg
) == REG
2783 && (REGNO (reg
) == SB_REGNO
2784 || REGNO (reg
) == FB_REGNO
2785 || REGNO (reg
) == SP_REGNO
))
2790 /* Signed displacements off symbols need to have signs
2792 if (conversions
[i
].format
[j
] == '+'
2793 && (!code
|| code
== 'D' || code
== 'd')
2794 && ISDIGIT (conversions
[i
].format
[j
+ 1])
2795 && (GET_CODE (patternr
[conversions
[i
].format
[j
+ 1] - '0'])
2801 fputc (conversions
[i
].format
[j
], file
);
2805 if (!conversions
[i
].pattern
)
2807 fprintf (stderr
, "unconvertible operand %c `%s'", code
? code
: '-',
2810 fprintf (file
, "[%c.%s]", code
? code
: '-', pattern
);
2816 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2818 See m32c_print_operand above for descriptions of what these do. */
2820 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2821 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2824 m32c_print_operand_punct_valid_p (unsigned char c
)
2826 if (c
== '&' || c
== '!')
2832 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2834 #undef TARGET_PRINT_OPERAND_ADDRESS
2835 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2838 m32c_print_operand_address (FILE * stream
, rtx address
)
2840 if (GET_CODE (address
) == MEM
)
2841 address
= XEXP (address
, 0);
2843 /* cf: gcc.dg/asm-4.c. */
2844 gcc_assert (GET_CODE (address
) == REG
);
2846 m32c_print_operand (stream
, address
, 0);
2849 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2850 differently than general registers. */
2852 m32c_output_reg_push (FILE * s
, int regno
)
2854 if (regno
== FLG_REGNO
)
2855 fprintf (s
, "\tpushc\tflg\n");
2857 fprintf (s
, "\tpush.%c\t%s\n",
2858 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2861 /* Likewise for ASM_OUTPUT_REG_POP. */
2863 m32c_output_reg_pop (FILE * s
, int regno
)
2865 if (regno
== FLG_REGNO
)
2866 fprintf (s
, "\tpopc\tflg\n");
2868 fprintf (s
, "\tpop.%c\t%s\n",
2869 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2872 /* Defining target-specific uses of `__attribute__' */
2874 /* Used to simplify the logic below. Find the attributes wherever
2876 #define M32C_ATTRIBUTES(decl) \
2877 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2878 : DECL_ATTRIBUTES (decl) \
2879 ? (DECL_ATTRIBUTES (decl)) \
2880 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2882 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2884 interrupt_p (tree node ATTRIBUTE_UNUSED
)
2886 tree list
= M32C_ATTRIBUTES (node
);
2889 if (is_attribute_p ("interrupt", TREE_PURPOSE (list
)))
2891 list
= TREE_CHAIN (list
);
2893 return fast_interrupt_p (node
);
2896 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2898 bank_switch_p (tree node ATTRIBUTE_UNUSED
)
2900 tree list
= M32C_ATTRIBUTES (node
);
2903 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list
)))
2905 list
= TREE_CHAIN (list
);
2910 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2912 fast_interrupt_p (tree node ATTRIBUTE_UNUSED
)
2914 tree list
= M32C_ATTRIBUTES (node
);
2917 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list
)))
2919 list
= TREE_CHAIN (list
);
2925 interrupt_handler (tree
* node ATTRIBUTE_UNUSED
,
2926 tree name ATTRIBUTE_UNUSED
,
2927 tree args ATTRIBUTE_UNUSED
,
2928 int flags ATTRIBUTE_UNUSED
,
2929 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2934 /* Returns TRUE if given tree has the "function_vector" attribute. */
2936 m32c_special_page_vector_p (tree func
)
2940 if (TREE_CODE (func
) != FUNCTION_DECL
)
2943 list
= M32C_ATTRIBUTES (func
);
2946 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2948 list
= TREE_CHAIN (list
);
2954 function_vector_handler (tree
* node ATTRIBUTE_UNUSED
,
2955 tree name ATTRIBUTE_UNUSED
,
2956 tree args ATTRIBUTE_UNUSED
,
2957 int flags ATTRIBUTE_UNUSED
,
2958 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2962 /* The attribute is not supported for R8C target. */
2963 warning (OPT_Wattributes
,
2964 "%qE attribute is not supported for R8C target",
2966 *no_add_attrs
= true;
2968 else if (TREE_CODE (*node
) != FUNCTION_DECL
)
2970 /* The attribute must be applied to functions only. */
2971 warning (OPT_Wattributes
,
2972 "%qE attribute applies only to functions",
2974 *no_add_attrs
= true;
2976 else if (TREE_CODE (TREE_VALUE (args
)) != INTEGER_CST
)
2978 /* The argument must be a constant integer. */
2979 warning (OPT_Wattributes
,
2980 "%qE attribute argument not an integer constant",
2982 *no_add_attrs
= true;
2984 else if (TREE_INT_CST_LOW (TREE_VALUE (args
)) < 18
2985 || TREE_INT_CST_LOW (TREE_VALUE (args
)) > 255)
2987 /* The argument value must be between 18 to 255. */
2988 warning (OPT_Wattributes
,
2989 "%qE attribute argument should be between 18 to 255",
2991 *no_add_attrs
= true;
2996 /* If the function is assigned the attribute 'function_vector', it
2997 returns the function vector number, otherwise returns zero. */
2999 current_function_special_page_vector (rtx x
)
3003 if ((GET_CODE(x
) == SYMBOL_REF
)
3004 && (SYMBOL_REF_FLAGS (x
) & SYMBOL_FLAG_FUNCVEC_FUNCTION
))
3007 tree t
= SYMBOL_REF_DECL (x
);
3009 if (TREE_CODE (t
) != FUNCTION_DECL
)
3012 list
= M32C_ATTRIBUTES (t
);
3015 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
3017 num
= TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list
)));
3021 list
= TREE_CHAIN (list
);
3030 #undef TARGET_ATTRIBUTE_TABLE
3031 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3032 static const struct attribute_spec m32c_attribute_table
[] = {
3033 {"interrupt", 0, 0, false, false, false, interrupt_handler
, false},
3034 {"bank_switch", 0, 0, false, false, false, interrupt_handler
, false},
3035 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler
, false},
3036 {"function_vector", 1, 1, true, false, false, function_vector_handler
,
3038 {0, 0, 0, 0, 0, 0, 0, false}
3041 #undef TARGET_COMP_TYPE_ATTRIBUTES
3042 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3044 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED
,
3045 const_tree type2 ATTRIBUTE_UNUSED
)
3047 /* 0=incompatible 1=compatible 2=warning */
3051 #undef TARGET_INSERT_ATTRIBUTES
3052 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3054 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED
,
3055 tree
* attr_ptr ATTRIBUTE_UNUSED
)
3058 /* See if we need to make #pragma address variables volatile. */
3060 if (TREE_CODE (node
) == VAR_DECL
)
3062 const char *name
= IDENTIFIER_POINTER (DECL_NAME (node
));
3063 if (m32c_get_pragma_address (name
, &addr
))
3065 TREE_THIS_VOLATILE (node
) = true;
3071 struct pragma_traits
: default_hashmap_traits
3073 static hashval_t
hash (const char *str
) { return htab_hash_string (str
); }
3075 equal_keys (const char *a
, const char *b
)
3077 return !strcmp (a
, b
);
3081 /* Hash table of pragma info. */
3082 static GTY(()) hash_map
<const char *, unsigned, pragma_traits
> *pragma_htab
;
3085 m32c_note_pragma_address (const char *varname
, unsigned address
)
3089 = hash_map
<const char *, unsigned, pragma_traits
>::create_ggc (31);
3091 const char *name
= ggc_strdup (varname
);
3092 unsigned int *slot
= &pragma_htab
->get_or_insert (name
);
3097 m32c_get_pragma_address (const char *varname
, unsigned *address
)
3102 unsigned int *slot
= pragma_htab
->get (varname
);
3112 m32c_output_aligned_common (FILE *stream
, tree decl ATTRIBUTE_UNUSED
,
3114 int size
, int align
, int global
)
3118 if (m32c_get_pragma_address (name
, &address
))
3120 /* We never output these as global. */
3121 assemble_name (stream
, name
);
3122 fprintf (stream
, " = 0x%04x\n", address
);
3127 fprintf (stream
, "\t.local\t");
3128 assemble_name (stream
, name
);
3129 fprintf (stream
, "\n");
3131 fprintf (stream
, "\t.comm\t");
3132 assemble_name (stream
, name
);
3133 fprintf (stream
, ",%u,%u\n", size
, align
/ BITS_PER_UNIT
);
3138 /* This is a list of legal subregs of hard regs. */
3139 static const struct {
3140 unsigned char outer_mode_size
;
3141 unsigned char inner_mode_size
;
3142 unsigned char byte_mask
;
3143 unsigned char legal_when
;
3145 } legal_subregs
[] = {
3146 {1, 2, 0x03, 1, R0_REGNO
}, /* r0h r0l */
3147 {1, 2, 0x03, 1, R1_REGNO
}, /* r1h r1l */
3148 {1, 2, 0x01, 1, A0_REGNO
},
3149 {1, 2, 0x01, 1, A1_REGNO
},
3151 {1, 4, 0x01, 1, A0_REGNO
},
3152 {1, 4, 0x01, 1, A1_REGNO
},
3154 {2, 4, 0x05, 1, R0_REGNO
}, /* r2 r0 */
3155 {2, 4, 0x05, 1, R1_REGNO
}, /* r3 r1 */
3156 {2, 4, 0x05, 16, A0_REGNO
}, /* a1 a0 */
3157 {2, 4, 0x01, 24, A0_REGNO
}, /* a1 a0 */
3158 {2, 4, 0x01, 24, A1_REGNO
}, /* a1 a0 */
3160 {4, 8, 0x55, 1, R0_REGNO
}, /* r3 r1 r2 r0 */
3163 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3164 support. We also bail on MEMs with illegal addresses. */
3166 m32c_illegal_subreg_p (rtx op
)
3170 machine_mode src_mode
, dest_mode
;
3172 if (GET_CODE (op
) == MEM
3173 && ! m32c_legitimate_address_p (Pmode
, XEXP (op
, 0), false))
3178 if (GET_CODE (op
) != SUBREG
)
3181 dest_mode
= GET_MODE (op
);
3182 offset
= SUBREG_BYTE (op
);
3183 op
= SUBREG_REG (op
);
3184 src_mode
= GET_MODE (op
);
3186 if (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (src_mode
))
3188 if (GET_CODE (op
) != REG
)
3190 if (REGNO (op
) >= MEM0_REGNO
)
3193 offset
= (1 << offset
);
3195 for (i
= 0; i
< ARRAY_SIZE (legal_subregs
); i
++)
3196 if (legal_subregs
[i
].outer_mode_size
== GET_MODE_SIZE (dest_mode
)
3197 && legal_subregs
[i
].regno
== REGNO (op
)
3198 && legal_subregs
[i
].inner_mode_size
== GET_MODE_SIZE (src_mode
)
3199 && legal_subregs
[i
].byte_mask
& offset
)
3201 switch (legal_subregs
[i
].legal_when
)
3218 /* Returns TRUE if we support a move between the first two operands.
3219 At the moment, we just want to discourage mem to mem moves until
3220 after reload, because reload has a hard time with our limited
3221 number of address registers, and we can get into a situation where
3222 we need three of them when we only have two. */
3224 m32c_mov_ok (rtx
* operands
, machine_mode mode ATTRIBUTE_UNUSED
)
3226 rtx op0
= operands
[0];
3227 rtx op1
= operands
[1];
3232 #define DEBUG_MOV_OK 0
3234 fprintf (stderr
, "m32c_mov_ok %s\n", mode_name
[mode
]);
3239 if (GET_CODE (op0
) == SUBREG
)
3240 op0
= XEXP (op0
, 0);
3241 if (GET_CODE (op1
) == SUBREG
)
3242 op1
= XEXP (op1
, 0);
3244 if (GET_CODE (op0
) == MEM
3245 && GET_CODE (op1
) == MEM
3246 && ! reload_completed
)
3249 fprintf (stderr
, " - no, mem to mem\n");
3255 fprintf (stderr
, " - ok\n");
3260 /* Returns TRUE if two consecutive HImode mov instructions, generated
3261 for moving an immediate double data to a double data type variable
3262 location, can be combined into single SImode mov instruction. */
3264 m32c_immd_dbl_mov (rtx
* operands ATTRIBUTE_UNUSED
,
3265 machine_mode mode ATTRIBUTE_UNUSED
)
3267 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3274 /* Subregs are non-orthogonal for us, because our registers are all
3277 m32c_subreg (machine_mode outer
,
3278 rtx x
, machine_mode inner
, int byte
)
3282 /* Converting MEMs to different types that are the same size, we
3283 just rewrite them. */
3284 if (GET_CODE (x
) == SUBREG
3285 && SUBREG_BYTE (x
) == 0
3286 && GET_CODE (SUBREG_REG (x
)) == MEM
3287 && (GET_MODE_SIZE (GET_MODE (x
))
3288 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
3291 x
= gen_rtx_MEM (GET_MODE (x
), XEXP (SUBREG_REG (x
), 0));
3292 MEM_COPY_ATTRIBUTES (x
, SUBREG_REG (oldx
));
3295 /* Push/pop get done as smaller push/pops. */
3296 if (GET_CODE (x
) == MEM
3297 && (GET_CODE (XEXP (x
, 0)) == PRE_DEC
3298 || GET_CODE (XEXP (x
, 0)) == POST_INC
))
3299 return gen_rtx_MEM (outer
, XEXP (x
, 0));
3300 if (GET_CODE (x
) == SUBREG
3301 && GET_CODE (XEXP (x
, 0)) == MEM
3302 && (GET_CODE (XEXP (XEXP (x
, 0), 0)) == PRE_DEC
3303 || GET_CODE (XEXP (XEXP (x
, 0), 0)) == POST_INC
))
3304 return gen_rtx_MEM (outer
, XEXP (XEXP (x
, 0), 0));
3306 if (GET_CODE (x
) != REG
)
3308 rtx r
= simplify_gen_subreg (outer
, x
, inner
, byte
);
3309 if (GET_CODE (r
) == SUBREG
3310 && GET_CODE (x
) == MEM
3311 && MEM_VOLATILE_P (x
))
3313 /* Volatile MEMs don't get simplified, but we need them to
3314 be. We are little endian, so the subreg byte is the
3316 r
= adjust_address_nv (x
, outer
, byte
);
3322 if (r
>= FIRST_PSEUDO_REGISTER
|| r
== AP_REGNO
)
3323 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3325 if (IS_MEM_REGNO (r
))
3326 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3328 /* This is where the complexities of our register layout are
3332 else if (outer
== HImode
)
3334 if (r
== R0_REGNO
&& byte
== 2)
3336 else if (r
== R0_REGNO
&& byte
== 4)
3338 else if (r
== R0_REGNO
&& byte
== 6)
3340 else if (r
== R1_REGNO
&& byte
== 2)
3342 else if (r
== A0_REGNO
&& byte
== 2)
3345 else if (outer
== SImode
)
3347 if (r
== R0_REGNO
&& byte
== 0)
3349 else if (r
== R0_REGNO
&& byte
== 4)
3354 fprintf (stderr
, "m32c_subreg %s %s %d\n",
3355 mode_name
[outer
], mode_name
[inner
], byte
);
3359 return gen_rtx_REG (outer
, nr
);
3362 /* Used to emit move instructions. We split some moves,
3363 and avoid mem-mem moves. */
3365 m32c_prepare_move (rtx
* operands
, machine_mode mode
)
3367 if (far_addr_space_p (operands
[0])
3368 && CONSTANT_P (operands
[1]))
3370 operands
[1] = force_reg (GET_MODE (operands
[0]), operands
[1]);
3372 if (TARGET_A16
&& mode
== PSImode
)
3373 return m32c_split_move (operands
, mode
, 1);
3374 if ((GET_CODE (operands
[0]) == MEM
)
3375 && (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
))
3377 rtx pmv
= XEXP (operands
[0], 0);
3378 rtx dest_reg
= XEXP (pmv
, 0);
3379 rtx dest_mod
= XEXP (pmv
, 1);
3381 emit_insn (gen_rtx_SET (dest_reg
, dest_mod
));
3382 operands
[0] = gen_rtx_MEM (mode
, dest_reg
);
3384 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3385 operands
[1] = copy_to_mode_reg (mode
, operands
[1]);
3389 #define DEBUG_SPLIT 0
3391 /* Returns TRUE if the given PSImode move should be split. We split
3392 for all r8c/m16c moves, since it doesn't support them, and for
3393 POP.L as we can only *push* SImode. */
3395 m32c_split_psi_p (rtx
* operands
)
3398 fprintf (stderr
, "\nm32c_split_psi_p\n");
3399 debug_rtx (operands
[0]);
3400 debug_rtx (operands
[1]);
3405 fprintf (stderr
, "yes, A16\n");
3409 if (GET_CODE (operands
[1]) == MEM
3410 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3413 fprintf (stderr
, "yes, pop.l\n");
3418 fprintf (stderr
, "no, default\n");
3423 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3424 (define_expand), 1 if it is not optional (define_insn_and_split),
3425 and 3 for define_split (alternate api). */
3427 m32c_split_move (rtx
* operands
, machine_mode mode
, int split_all
)
3430 int parts
, si
, di
, rev
= 0;
3431 int rv
= 0, opi
= 2;
3432 machine_mode submode
= HImode
;
3433 rtx
*ops
, local_ops
[10];
3435 /* define_split modifies the existing operands, but the other two
3436 emit new insns. OPS is where we store the operand pairs, which
3447 /* Before splitting mem-mem moves, force one operand into a
3449 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3452 fprintf (stderr
, "force_reg...\n");
3453 debug_rtx (operands
[1]);
3455 operands
[1] = force_reg (mode
, operands
[1]);
3457 debug_rtx (operands
[1]);
3464 fprintf (stderr
, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3466 debug_rtx (operands
[0]);
3467 debug_rtx (operands
[1]);
3470 /* Note that split_all is not used to select the api after this
3471 point, so it's safe to set it to 3 even with define_insn. */
3472 /* None of the chips can move SI operands to sp-relative addresses,
3473 so we always split those. */
3474 if (satisfies_constraint_Ss (operands
[0]))
3478 && (far_addr_space_p (operands
[0])
3479 || far_addr_space_p (operands
[1])))
3482 /* We don't need to split these. */
3485 && (mode
== SImode
|| mode
== PSImode
)
3486 && !(GET_CODE (operands
[1]) == MEM
3487 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
))
3490 /* First, enumerate the subregs we'll be dealing with. */
3491 for (si
= 0; si
< parts
; si
++)
3494 m32c_subreg (submode
, operands
[0], mode
,
3495 si
* GET_MODE_SIZE (submode
));
3497 m32c_subreg (submode
, operands
[1], mode
,
3498 si
* GET_MODE_SIZE (submode
));
3501 /* Split pushes by emitting a sequence of smaller pushes. */
3502 if (GET_CODE (d
[0]) == MEM
&& GET_CODE (XEXP (d
[0], 0)) == PRE_DEC
)
3504 for (si
= parts
- 1; si
>= 0; si
--)
3506 ops
[opi
++] = gen_rtx_MEM (submode
,
3507 gen_rtx_PRE_DEC (Pmode
,
3515 /* Likewise for pops. */
3516 else if (GET_CODE (s
[0]) == MEM
&& GET_CODE (XEXP (s
[0], 0)) == POST_INC
)
3518 for (di
= 0; di
< parts
; di
++)
3521 ops
[opi
++] = gen_rtx_MEM (submode
,
3522 gen_rtx_POST_INC (Pmode
,
3530 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3531 for (di
= 0; di
< parts
- 1; di
++)
3532 for (si
= di
+ 1; si
< parts
; si
++)
3533 if (reg_mentioned_p (d
[di
], s
[si
]))
3537 for (si
= 0; si
< parts
; si
++)
3543 for (si
= parts
- 1; si
>= 0; si
--)
3550 /* Now emit any moves we may have accumulated. */
3551 if (rv
&& split_all
!= 3)
3554 for (i
= 2; i
< opi
; i
+= 2)
3555 emit_move_insn (ops
[i
], ops
[i
+ 1]);
3560 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3561 the like. For the R8C they expect one of the addresses to be in
3562 R1L:An so we need to arrange for that. Otherwise, it's just a
3563 matter of picking out the operands we want and emitting the right
3564 pattern for them. All these expanders, which correspond to
3565 patterns in blkmov.md, must return nonzero if they expand the insn,
3566 or zero if they should FAIL. */
3568 /* This is a memset() opcode. All operands are implied, so we need to
3569 arrange for them to be in the right registers. The opcode wants
3570 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3571 the count (HI), and $2 the value (QI). */
3573 m32c_expand_setmemhi(rtx
*operands
)
3575 rtx desta
, count
, val
;
3578 desta
= XEXP (operands
[0], 0);
3579 count
= operands
[1];
3582 desto
= gen_reg_rtx (Pmode
);
3583 counto
= gen_reg_rtx (HImode
);
3585 if (GET_CODE (desta
) != REG
3586 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3587 desta
= copy_to_mode_reg (Pmode
, desta
);
3589 /* This looks like an arbitrary restriction, but this is by far the
3590 most common case. For counts 8..14 this actually results in
3591 smaller code with no speed penalty because the half-sized
3592 constant can be loaded with a shorter opcode. */
3593 if (GET_CODE (count
) == CONST_INT
3594 && GET_CODE (val
) == CONST_INT
3595 && ! (INTVAL (count
) & 1)
3596 && (INTVAL (count
) > 1)
3597 && (INTVAL (val
) <= 7 && INTVAL (val
) >= -8))
3599 unsigned v
= INTVAL (val
) & 0xff;
3601 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3602 val
= copy_to_mode_reg (HImode
, GEN_INT (v
));
3604 emit_insn (gen_setmemhi_whi_op (desto
, counto
, val
, desta
, count
));
3606 emit_insn (gen_setmemhi_wpsi_op (desto
, counto
, val
, desta
, count
));
3610 /* This is the generalized memset() case. */
3611 if (GET_CODE (val
) != REG
3612 || REGNO (val
) < FIRST_PSEUDO_REGISTER
)
3613 val
= copy_to_mode_reg (QImode
, val
);
3615 if (GET_CODE (count
) != REG
3616 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3617 count
= copy_to_mode_reg (HImode
, count
);
3620 emit_insn (gen_setmemhi_bhi_op (desto
, counto
, val
, desta
, count
));
3622 emit_insn (gen_setmemhi_bpsi_op (desto
, counto
, val
, desta
, count
));
3627 /* This is a memcpy() opcode. All operands are implied, so we need to
3628 arrange for them to be in the right registers. The opcode wants
3629 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3630 is the source (MEM:BLK), and $2 the count (HI). */
3632 m32c_expand_movmemhi(rtx
*operands
)
3634 rtx desta
, srca
, count
;
3635 rtx desto
, srco
, counto
;
3637 desta
= XEXP (operands
[0], 0);
3638 srca
= XEXP (operands
[1], 0);
3639 count
= operands
[2];
3641 desto
= gen_reg_rtx (Pmode
);
3642 srco
= gen_reg_rtx (Pmode
);
3643 counto
= gen_reg_rtx (HImode
);
3645 if (GET_CODE (desta
) != REG
3646 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3647 desta
= copy_to_mode_reg (Pmode
, desta
);
3649 if (GET_CODE (srca
) != REG
3650 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3651 srca
= copy_to_mode_reg (Pmode
, srca
);
3653 /* Similar to setmem, but we don't need to check the value. */
3654 if (GET_CODE (count
) == CONST_INT
3655 && ! (INTVAL (count
) & 1)
3656 && (INTVAL (count
) > 1))
3658 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3660 emit_insn (gen_movmemhi_whi_op (desto
, srco
, counto
, desta
, srca
, count
));
3662 emit_insn (gen_movmemhi_wpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3666 /* This is the generalized memset() case. */
3667 if (GET_CODE (count
) != REG
3668 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3669 count
= copy_to_mode_reg (HImode
, count
);
3672 emit_insn (gen_movmemhi_bhi_op (desto
, srco
, counto
, desta
, srca
, count
));
3674 emit_insn (gen_movmemhi_bpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3679 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3680 the copy, which should point to the NUL at the end of the string,
3681 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3682 Since our opcode leaves the destination pointing *after* the NUL,
3683 we must emit an adjustment. */
3685 m32c_expand_movstr(rtx
*operands
)
3690 desta
= XEXP (operands
[1], 0);
3691 srca
= XEXP (operands
[2], 0);
3693 desto
= gen_reg_rtx (Pmode
);
3694 srco
= gen_reg_rtx (Pmode
);
3696 if (GET_CODE (desta
) != REG
3697 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3698 desta
= copy_to_mode_reg (Pmode
, desta
);
3700 if (GET_CODE (srca
) != REG
3701 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3702 srca
= copy_to_mode_reg (Pmode
, srca
);
3704 emit_insn (gen_movstr_op (desto
, srco
, desta
, srca
));
3705 /* desto ends up being a1, which allows this type of add through MOVA. */
3706 emit_insn (gen_addpsi3 (operands
[0], desto
, GEN_INT (-1)));
3711 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3712 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3713 $2 is the other (MEM:BLK). We must do the comparison, and then
3714 convert the flags to a signed integer result. */
3716 m32c_expand_cmpstr(rtx
*operands
)
3720 src1a
= XEXP (operands
[1], 0);
3721 src2a
= XEXP (operands
[2], 0);
3723 if (GET_CODE (src1a
) != REG
3724 || REGNO (src1a
) < FIRST_PSEUDO_REGISTER
)
3725 src1a
= copy_to_mode_reg (Pmode
, src1a
);
3727 if (GET_CODE (src2a
) != REG
3728 || REGNO (src2a
) < FIRST_PSEUDO_REGISTER
)
3729 src2a
= copy_to_mode_reg (Pmode
, src2a
);
3731 emit_insn (gen_cmpstrhi_op (src1a
, src2a
, src1a
, src2a
));
3732 emit_insn (gen_cond_to_int (operands
[0]));
3738 typedef rtx (*shift_gen_func
)(rtx
, rtx
, rtx
);
3740 static shift_gen_func
3741 shift_gen_func_for (int mode
, int code
)
3743 #define GFF(m,c,f) if (mode == m && code == c) return f
3744 GFF(QImode
, ASHIFT
, gen_ashlqi3_i
);
3745 GFF(QImode
, ASHIFTRT
, gen_ashrqi3_i
);
3746 GFF(QImode
, LSHIFTRT
, gen_lshrqi3_i
);
3747 GFF(HImode
, ASHIFT
, gen_ashlhi3_i
);
3748 GFF(HImode
, ASHIFTRT
, gen_ashrhi3_i
);
3749 GFF(HImode
, LSHIFTRT
, gen_lshrhi3_i
);
3750 GFF(PSImode
, ASHIFT
, gen_ashlpsi3_i
);
3751 GFF(PSImode
, ASHIFTRT
, gen_ashrpsi3_i
);
3752 GFF(PSImode
, LSHIFTRT
, gen_lshrpsi3_i
);
3753 GFF(SImode
, ASHIFT
, TARGET_A16
? gen_ashlsi3_16
: gen_ashlsi3_24
);
3754 GFF(SImode
, ASHIFTRT
, TARGET_A16
? gen_ashrsi3_16
: gen_ashrsi3_24
);
3755 GFF(SImode
, LSHIFTRT
, TARGET_A16
? gen_lshrsi3_16
: gen_lshrsi3_24
);
3760 /* The m32c only has one shift, but it takes a signed count. GCC
3761 doesn't want this, so we fake it by negating any shift count when
3762 we're pretending to shift the other way. Also, the shift count is
3763 limited to -8..8. It's slightly better to use two shifts for 9..15
3764 than to load the count into r1h, so we do that too. */
3766 m32c_prepare_shift (rtx
* operands
, int scale
, int shift_code
)
3768 machine_mode mode
= GET_MODE (operands
[0]);
3769 shift_gen_func func
= shift_gen_func_for (mode
, shift_code
);
3772 if (GET_CODE (operands
[2]) == CONST_INT
)
3774 int maxc
= TARGET_A24
&& (mode
== PSImode
|| mode
== SImode
) ? 32 : 8;
3775 int count
= INTVAL (operands
[2]) * scale
;
3777 while (count
> maxc
)
3779 temp
= gen_reg_rtx (mode
);
3780 emit_insn (func (temp
, operands
[1], GEN_INT (maxc
)));
3784 while (count
< -maxc
)
3786 temp
= gen_reg_rtx (mode
);
3787 emit_insn (func (temp
, operands
[1], GEN_INT (-maxc
)));
3791 emit_insn (func (operands
[0], operands
[1], GEN_INT (count
)));
3795 temp
= gen_reg_rtx (QImode
);
3797 /* The pattern has a NEG that corresponds to this. */
3798 emit_move_insn (temp
, gen_rtx_NEG (QImode
, operands
[2]));
3799 else if (TARGET_A16
&& mode
== SImode
)
3800 /* We do this because the code below may modify this, we don't
3801 want to modify the origin of this value. */
3802 emit_move_insn (temp
, operands
[2]);
3804 /* We'll only use it for the shift, no point emitting a move. */
3807 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 4)
3809 /* The m16c has a limit of -16..16 for SI shifts, even when the
3810 shift count is in a register. Since there are so many targets
3811 of these shifts, it's better to expand the RTL here than to
3812 call a helper function.
3814 The resulting code looks something like this:
3826 We take advantage of the fact that "negative" shifts are
3827 undefined to skip one of the comparisons. */
3833 emit_move_insn (operands
[0], operands
[1]);
3836 label
= gen_label_rtx ();
3837 LABEL_NUSES (label
) ++;
3839 tempvar
= gen_reg_rtx (mode
);
3841 if (shift_code
== ASHIFT
)
3843 /* This is a left shift. We only need check positive counts. */
3844 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode
, 0, 0),
3845 count
, GEN_INT (16), label
));
3846 emit_insn (func (tempvar
, operands
[0], GEN_INT (8)));
3847 emit_insn (func (operands
[0], tempvar
, GEN_INT (8)));
3848 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (-16)));
3849 emit_label_after (label
, insn
);
3853 /* This is a right shift. We only need check negative counts. */
3854 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode
, 0, 0),
3855 count
, GEN_INT (-16), label
));
3856 emit_insn (func (tempvar
, operands
[0], GEN_INT (-8)));
3857 emit_insn (func (operands
[0], tempvar
, GEN_INT (-8)));
3858 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (16)));
3859 emit_label_after (label
, insn
);
3861 operands
[1] = operands
[0];
3862 emit_insn (func (operands
[0], operands
[0], count
));
3870 /* The m32c has a limited range of operations that work on PSImode
3871 values; we have to expand to SI, do the math, and truncate back to
3872 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3875 m32c_expand_neg_mulpsi3 (rtx
* operands
)
3877 /* operands: a = b * i */
3878 rtx temp1
; /* b as SI */
3879 rtx scale
/* i as SI */;
3880 rtx temp2
; /* a*b as SI */
3882 temp1
= gen_reg_rtx (SImode
);
3883 temp2
= gen_reg_rtx (SImode
);
3884 if (GET_CODE (operands
[2]) != CONST_INT
)
3886 scale
= gen_reg_rtx (SImode
);
3887 emit_insn (gen_zero_extendpsisi2 (scale
, operands
[2]));
3890 scale
= copy_to_mode_reg (SImode
, operands
[2]);
3892 emit_insn (gen_zero_extendpsisi2 (temp1
, operands
[1]));
3893 temp2
= expand_simple_binop (SImode
, MULT
, temp1
, scale
, temp2
, 1, OPTAB_LIB
);
3894 emit_insn (gen_truncsipsi2 (operands
[0], temp2
));
3897 /* Pattern Output Functions */
3900 m32c_expand_movcc (rtx
*operands
)
3902 rtx rel
= operands
[1];
3904 if (GET_CODE (rel
) != EQ
&& GET_CODE (rel
) != NE
)
3906 if (GET_CODE (operands
[2]) != CONST_INT
3907 || GET_CODE (operands
[3]) != CONST_INT
)
3909 if (GET_CODE (rel
) == NE
)
3911 rtx tmp
= operands
[2];
3912 operands
[2] = operands
[3];
3914 rel
= gen_rtx_EQ (GET_MODE (rel
), XEXP (rel
, 0), XEXP (rel
, 1));
3917 emit_move_insn (operands
[0],
3918 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3925 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3927 m32c_expand_insv (rtx
*operands
)
3932 if (INTVAL (operands
[1]) != 1)
3935 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3936 if (GET_CODE (operands
[3]) != CONST_INT
)
3938 if (INTVAL (operands
[3]) != 0
3939 && INTVAL (operands
[3]) != 1
3940 && INTVAL (operands
[3]) != -1)
3943 mask
= 1 << INTVAL (operands
[2]);
3946 if (GET_CODE (op0
) == SUBREG
3947 && SUBREG_BYTE (op0
) == 0)
3949 rtx sub
= SUBREG_REG (op0
);
3950 if (GET_MODE (sub
) == HImode
|| GET_MODE (sub
) == QImode
)
3954 if (!can_create_pseudo_p ()
3955 || (GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
)))
3959 src0
= gen_reg_rtx (GET_MODE (op0
));
3960 emit_move_insn (src0
, op0
);
3963 if (GET_MODE (op0
) == HImode
3964 && INTVAL (operands
[2]) >= 8
3965 && GET_CODE (op0
) == MEM
)
3967 /* We are little endian. */
3968 rtx new_mem
= gen_rtx_MEM (QImode
, plus_constant (Pmode
,
3970 MEM_COPY_ATTRIBUTES (new_mem
, op0
);
3974 /* First, we generate a mask with the correct polarity. If we are
3975 storing a zero, we want an AND mask, so invert it. */
3976 if (INTVAL (operands
[3]) == 0)
3978 /* Storing a zero, use an AND mask */
3979 if (GET_MODE (op0
) == HImode
)
3984 /* Now we need to properly sign-extend the mask in case we need to
3985 fall back to an AND or OR opcode. */
3986 if (GET_MODE (op0
) == HImode
)
3997 switch ( (INTVAL (operands
[3]) ? 4 : 0)
3998 + ((GET_MODE (op0
) == HImode
) ? 2 : 0)
3999 + (TARGET_A24
? 1 : 0))
4001 case 0: p
= gen_andqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
4002 case 1: p
= gen_andqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4003 case 2: p
= gen_andhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
4004 case 3: p
= gen_andhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4005 case 4: p
= gen_iorqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
4006 case 5: p
= gen_iorqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4007 case 6: p
= gen_iorhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
4008 case 7: p
= gen_iorhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4009 default: p
= NULL_RTX
; break; /* Not reached, but silences a warning. */
4017 m32c_scc_pattern(rtx
*operands
, RTX_CODE code
)
4019 static char buf
[30];
4020 if (GET_CODE (operands
[0]) == REG
4021 && REGNO (operands
[0]) == R0_REGNO
)
4024 return "stzx\t#1,#0,r0l";
4026 return "stzx\t#0,#1,r0l";
4028 sprintf(buf
, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code
));
4032 /* Encode symbol attributes of a SYMBOL_REF into its
4033 SYMBOL_REF_FLAGS. */
4035 m32c_encode_section_info (tree decl
, rtx rtl
, int first
)
4037 int extra_flags
= 0;
4039 default_encode_section_info (decl
, rtl
, first
);
4040 if (TREE_CODE (decl
) == FUNCTION_DECL
4041 && m32c_special_page_vector_p (decl
))
4043 extra_flags
= SYMBOL_FLAG_FUNCVEC_FUNCTION
;
4046 SYMBOL_REF_FLAGS (XEXP (rtl
, 0)) |= extra_flags
;
4049 /* Returns TRUE if the current function is a leaf, and thus we can
4050 determine which registers an interrupt function really needs to
4051 save. The logic below is mostly about finding the insn sequence
4052 that's the function, versus any sequence that might be open for the
4055 m32c_leaf_function_p (void)
4059 push_topmost_sequence ();
4060 rv
= leaf_function_p ();
4061 pop_topmost_sequence ();
4065 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4066 opcodes. If the function doesn't need the frame base or stack
4067 pointer, it can use the simpler RTS opcode. */
4069 m32c_function_needs_enter (void)
4072 rtx sp
= gen_rtx_REG (Pmode
, SP_REGNO
);
4073 rtx fb
= gen_rtx_REG (Pmode
, FB_REGNO
);
4075 for (insn
= get_topmost_sequence ()->first
; insn
; insn
= NEXT_INSN (insn
))
4076 if (NONDEBUG_INSN_P (insn
))
4078 if (reg_mentioned_p (sp
, insn
))
4080 if (reg_mentioned_p (fb
, insn
))
4086 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4087 frame-related. Return PAR.
4089 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4090 PARALLEL rtx other than the first if they do not have the
4091 FRAME_RELATED flag set on them. So this function is handy for
4092 marking up 'enter' instructions. */
4094 m32c_all_frame_related (rtx par
)
4096 int len
= XVECLEN (par
, 0);
4099 for (i
= 0; i
< len
; i
++)
4100 F (XVECEXP (par
, 0, i
));
4105 /* Emits the prologue. See the frame layout comment earlier in this
4106 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4107 that we manually update sp. */
4109 m32c_emit_prologue (void)
4111 int frame_size
, extra_frame_size
= 0, reg_save_size
;
4112 int complex_prologue
= 0;
4114 cfun
->machine
->is_leaf
= m32c_leaf_function_p ();
4115 if (interrupt_p (cfun
->decl
))
4117 cfun
->machine
->is_interrupt
= 1;
4118 complex_prologue
= 1;
4120 else if (bank_switch_p (cfun
->decl
))
4121 warning (OPT_Wattributes
,
4122 "%<bank_switch%> has no effect on non-interrupt functions");
4124 reg_save_size
= m32c_pushm_popm (PP_justcount
);
4126 if (interrupt_p (cfun
->decl
))
4128 if (bank_switch_p (cfun
->decl
))
4129 emit_insn (gen_fset_b ());
4130 else if (cfun
->machine
->intr_pushm
)
4131 emit_insn (gen_pushm (GEN_INT (cfun
->machine
->intr_pushm
)));
4135 m32c_initial_elimination_offset (FB_REGNO
, SP_REGNO
) - reg_save_size
;
4137 && !m32c_function_needs_enter ())
4138 cfun
->machine
->use_rts
= 1;
4140 if (frame_size
> 254)
4142 extra_frame_size
= frame_size
- 254;
4145 if (cfun
->machine
->use_rts
== 0)
4146 F (emit_insn (m32c_all_frame_related
4148 ? gen_prologue_enter_16 (GEN_INT (frame_size
+ 2))
4149 : gen_prologue_enter_24 (GEN_INT (frame_size
+ 4)))));
4151 if (extra_frame_size
)
4153 complex_prologue
= 1;
4155 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode
, SP_REGNO
),
4156 gen_rtx_REG (HImode
, SP_REGNO
),
4157 GEN_INT (-extra_frame_size
))));
4159 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode
, SP_REGNO
),
4160 gen_rtx_REG (PSImode
, SP_REGNO
),
4161 GEN_INT (-extra_frame_size
))));
4164 complex_prologue
+= m32c_pushm_popm (PP_pushm
);
4166 /* This just emits a comment into the .s file for debugging. */
4167 if (complex_prologue
)
4168 emit_insn (gen_prologue_end ());
4171 /* Likewise, for the epilogue. The only exception is that, for
4172 interrupts, we must manually unwind the frame as the REIT opcode
4175 m32c_emit_epilogue (void)
4177 int popm_count
= m32c_pushm_popm (PP_justcount
);
4179 /* This just emits a comment into the .s file for debugging. */
4180 if (popm_count
> 0 || cfun
->machine
->is_interrupt
)
4181 emit_insn (gen_epilogue_start ());
4184 m32c_pushm_popm (PP_popm
);
4186 if (cfun
->machine
->is_interrupt
)
4188 machine_mode spmode
= TARGET_A16
? HImode
: PSImode
;
4190 /* REIT clears B flag and restores $fp for us, but we still
4191 have to fix up the stack. USE_RTS just means we didn't
4193 if (!cfun
->machine
->use_rts
)
4195 emit_move_insn (gen_rtx_REG (spmode
, A0_REGNO
),
4196 gen_rtx_REG (spmode
, FP_REGNO
));
4197 emit_move_insn (gen_rtx_REG (spmode
, SP_REGNO
),
4198 gen_rtx_REG (spmode
, A0_REGNO
));
4199 /* We can't just add this to the POPM because it would be in
4200 the wrong order, and wouldn't fix the stack if we're bank
4203 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, FP_REGNO
)));
4205 emit_insn (gen_poppsi (gen_rtx_REG (PSImode
, FP_REGNO
)));
4207 if (!bank_switch_p (cfun
->decl
) && cfun
->machine
->intr_pushm
)
4208 emit_insn (gen_popm (GEN_INT (cfun
->machine
->intr_pushm
)));
4210 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4211 generated only for M32C/M32CM targets (generate the REIT
4212 instruction otherwise). */
4213 if (fast_interrupt_p (cfun
->decl
))
4215 /* Check if fast_attribute is set for M32C or M32CM. */
4218 emit_jump_insn (gen_epilogue_freit ());
4220 /* If fast_interrupt attribute is set for an R8C or M16C
4221 target ignore this attribute and generated REIT
4225 warning (OPT_Wattributes
,
4226 "%<fast_interrupt%> attribute directive ignored");
4227 emit_jump_insn (gen_epilogue_reit_16 ());
4230 else if (TARGET_A16
)
4231 emit_jump_insn (gen_epilogue_reit_16 ());
4233 emit_jump_insn (gen_epilogue_reit_24 ());
4235 else if (cfun
->machine
->use_rts
)
4236 emit_jump_insn (gen_epilogue_rts ());
4237 else if (TARGET_A16
)
4238 emit_jump_insn (gen_epilogue_exitd_16 ());
4240 emit_jump_insn (gen_epilogue_exitd_24 ());
4244 m32c_emit_eh_epilogue (rtx ret_addr
)
4246 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4247 return to. We have to fudge the stack, pop everything, pop SP
4248 (fudged), and return (fudged). This is actually easier to do in
4249 assembler, so punt to libgcc. */
4250 emit_jump_insn (gen_eh_epilogue (ret_addr
, cfun
->machine
->eh_stack_adjust
));
4251 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
/* Indicate which flags must be properly set for a given conditional.
   COND is the RTX comparison code consuming the flags.  Returns a
   FLAGS_* mask: signed orderings need O and S (plus Z for the
   non-strict forms), unsigned orderings need C (plus Z for the
   non-strict forms), equality needs only Z.  FLAGS_N means COND is
   not a recognized conditional at all.
   NOTE(review): the case list below was reconstructed from the
   M16C/M32C FLG semantics — confirm against the repository copy.  */
static int
flags_needed_for_conditional (rtx cond)
{
  switch (GET_CODE (cond))
    {
    case LE:
    case GT:
      /* Signed <= / > : sign, overflow, and zero all matter.  */
      return FLAGS_OSZ;
    case LEU:
    case GTU:
      /* Unsigned <= / > : carry and zero.  */
      return FLAGS_ZC;
    case LT:
    case GE:
      /* Signed < / >= : sign xor overflow.  */
      return FLAGS_OS;
    case LTU:
    case GEU:
      /* Unsigned < / >= : carry alone.  */
      return FLAGS_C;
    case EQ:
    case NE:
      /* Equality: zero flag only.  */
      return FLAGS_Z;
    default:
      /* Not a conditional we understand.  */
      return FLAGS_N;
    }
}
4282 /* Returns true if a compare insn is redundant because it would only
4283 set flags that are already set correctly. */
4285 m32c_compare_redundant (rtx_insn
*cmp
, rtx
*operands
)
4300 fprintf(stderr
, "\n\033[32mm32c_compare_redundant\033[0m\n");
4304 fprintf(stderr
, "operands[%d] = ", i
);
4305 debug_rtx(operands
[i
]);
4309 next
= next_nonnote_insn (cmp
);
4310 if (!next
|| !INSN_P (next
))
4313 fprintf(stderr
, "compare not followed by insn\n");
4318 if (GET_CODE (PATTERN (next
)) == SET
4319 && GET_CODE (XEXP ( PATTERN (next
), 1)) == IF_THEN_ELSE
)
4321 next
= XEXP (XEXP (PATTERN (next
), 1), 0);
4323 else if (GET_CODE (PATTERN (next
)) == SET
)
4325 /* If this is a conditional, flags_needed will be something
4326 other than FLAGS_N, which we test below. */
4327 next
= XEXP (PATTERN (next
), 1);
4332 fprintf(stderr
, "compare not followed by conditional\n");
4338 fprintf(stderr
, "conditional is: ");
4342 flags_needed
= flags_needed_for_conditional (next
);
4343 if (flags_needed
== FLAGS_N
)
4346 fprintf(stderr
, "compare not followed by conditional\n");
4352 /* Compare doesn't set overflow and carry the same way that
4353 arithmetic instructions do, so we can't replace those. */
4354 if (flags_needed
& FLAGS_OC
)
4359 prev
= prev_nonnote_insn (prev
);
4363 fprintf(stderr
, "No previous insn.\n");
4370 fprintf(stderr
, "Previous insn is a non-insn.\n");
4374 pp
= PATTERN (prev
);
4375 if (GET_CODE (pp
) != SET
)
4378 fprintf(stderr
, "Previous insn is not a SET.\n");
4382 pflags
= get_attr_flags (prev
);
4384 /* Looking up attributes of previous insns corrupted the recog
4386 INSN_UID (cmp
) = -1;
4387 recog (PATTERN (cmp
), cmp
, 0);
4389 if (pflags
== FLAGS_N
4390 && reg_mentioned_p (op0
, pp
))
4393 fprintf(stderr
, "intermediate non-flags insn uses op:\n");
4399 /* Check for comparisons against memory - between volatiles and
4400 aliases, we just can't risk this one. */
4401 if (GET_CODE (operands
[0]) == MEM
4402 || GET_CODE (operands
[0]) == MEM
)
4405 fprintf(stderr
, "comparisons with memory:\n");
4411 /* Check for PREV changing a register that's used to compute a
4412 value in CMP, even if it doesn't otherwise change flags. */
4413 if (GET_CODE (operands
[0]) == REG
4414 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[0]))
4417 fprintf(stderr
, "sub-value affected, op0:\n");
4422 if (GET_CODE (operands
[1]) == REG
4423 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[1]))
4426 fprintf(stderr
, "sub-value affected, op1:\n");
4432 } while (pflags
== FLAGS_N
);
4434 fprintf(stderr
, "previous flag-setting insn:\n");
4439 if (GET_CODE (pp
) == SET
4440 && GET_CODE (XEXP (pp
, 0)) == REG
4441 && REGNO (XEXP (pp
, 0)) == FLG_REGNO
4442 && GET_CODE (XEXP (pp
, 1)) == COMPARE
)
4444 /* Adjacent cbranches must have the same operands to be
4446 rtx pop0
= XEXP (XEXP (pp
, 1), 0);
4447 rtx pop1
= XEXP (XEXP (pp
, 1), 1);
4449 fprintf(stderr
, "adjacent cbranches\n");
4453 if (rtx_equal_p (op0
, pop0
)
4454 && rtx_equal_p (op1
, pop1
))
4457 fprintf(stderr
, "prev cmp not same\n");
4462 /* Else the previous insn must be a SET, with either the source or
4463 dest equal to operands[0], and operands[1] must be zero. */
4465 if (!rtx_equal_p (op1
, const0_rtx
))
4468 fprintf(stderr
, "operands[1] not const0_rtx\n");
4472 if (GET_CODE (pp
) != SET
)
4475 fprintf (stderr
, "pp not set\n");
4479 if (!rtx_equal_p (op0
, SET_SRC (pp
))
4480 && !rtx_equal_p (op0
, SET_DEST (pp
)))
4483 fprintf(stderr
, "operands[0] not found in set\n");
4489 fprintf(stderr
, "cmp flags %x prev flags %x\n", flags_needed
, pflags
);
4491 if ((pflags
& flags_needed
) == flags_needed
)
/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
const char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  /* Mutable template: index 5 is the size-suffix slot (the 'b' in
     ";cmp.b").  The leading ';' makes the whole line an assembler
     comment when the compare is elided.  Static storage is fine:
     the compiler emits one insn at a time, single-threaded.  */
  static char templ[] = ";cmp.b\t%1,%0";
  /*                         ^ 5  */

  /* Choose the suffix from the operand size: 1 -> 'b', 2 -> 'w',
     3 or 4 -> 'l'.  (Index 0, size 0, cannot occur here.)  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      /* Keep the ';' — emit the compare as a comment only.  */
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  /* Skip the ';' so a real compare instruction is emitted.  */
  return templ + 1;
}
4521 #undef TARGET_ENCODE_SECTION_INFO
4522 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4524 /* If the frame pointer isn't used, we detect it manually. But the
4525 stack pointer doesn't have as flexible addressing as the frame
4526 pointer, so we always assume we have it. */
4528 #undef TARGET_FRAME_POINTER_REQUIRED
4529 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4531 /* The Global `targetm' Variable. */
/* The single object through which the rest of the compiler reaches
   this back end's hooks; TARGET_INITIALIZER picks up all the
   TARGET_* macro overrides defined above.  */
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"