1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
32 #include "insn-attr.h"
36 #include "diagnostic-core.h"
39 #include "stor-layout.h"
43 #include "insn-codes.h"
54 #include "target-def.h"
56 #include "langhooks.h"
57 #include "hash-table.h"
59 #include "dominance.h"
65 #include "cfgcleanup.h"
66 #include "basic-block.h"
67 #include "tree-ssa-alias.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
71 #include "gimple-expr.h"
75 #include "tm-constrs.h"
80 /* Used by m32c_pushm_popm. */
88 static bool m32c_function_needs_enter (void);
89 static tree
interrupt_decl_handler (tree
*, tree
, tree
, int, bool *);
90 static tree
interrupt_type_handler (tree
*, tree
, tree
, int, bool *);
91 static tree
function_vector_handler (tree
*, tree
, tree
, int, bool *);
92 static int interrupt_p (tree node
);
93 static int bank_switch_p (tree node
);
94 static int fast_interrupt_p (tree node
);
95 static int interrupt_p (tree node
);
96 static bool m32c_asm_integer (rtx
, unsigned int, int);
97 static int m32c_comp_type_attributes (const_tree
, const_tree
);
98 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
99 static struct machine_function
*m32c_init_machine_status (void);
100 static void m32c_insert_attributes (tree
, tree
*);
101 static bool m32c_legitimate_address_p (machine_mode
, rtx
, bool);
102 static bool m32c_addr_space_legitimate_address_p (machine_mode
, rtx
, bool, addr_space_t
);
103 static rtx
m32c_function_arg (cumulative_args_t
, machine_mode
,
105 static bool m32c_pass_by_reference (cumulative_args_t
, machine_mode
,
107 static void m32c_function_arg_advance (cumulative_args_t
, machine_mode
,
109 static unsigned int m32c_function_arg_boundary (machine_mode
, const_tree
);
110 static int m32c_pushm_popm (Push_Pop_Type
);
111 static bool m32c_strict_argument_naming (cumulative_args_t
);
112 static rtx
m32c_struct_value_rtx (tree
, int);
113 static rtx
m32c_subreg (machine_mode
, rtx
, machine_mode
, int);
114 static int need_to_save (int);
115 static rtx
m32c_function_value (const_tree
, const_tree
, bool);
116 static rtx
m32c_libcall_value (machine_mode
, const_rtx
);
118 /* Returns true if an address is specified, else false. */
119 static bool m32c_get_pragma_address (const char *varname
, unsigned *addr
);
121 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
123 #define streq(a,b) (strcmp ((a), (b)) == 0)
125 /* Internal support routines */
127 /* Debugging statements are tagged with DEBUG0 only so that they can
128 be easily enabled individually, by replacing the '0' with '1' as
134 /* This is needed by some of the commented-out debug statements
136 static char const *class_names
[LIM_REG_CLASSES
] = REG_CLASS_NAMES
;
138 static int class_contents
[LIM_REG_CLASSES
][1] = REG_CLASS_CONTENTS
;
140 /* These are all to support encode_pattern(). */
141 static char pattern
[30], *patternp
;
142 static GTY(()) rtx patternr
[30];
143 #define RTX_IS(x) (streq (pattern, x))
145 /* Some macros to simplify the logic throughout this file. */
146 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
147 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
149 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
150 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
/* far_addr_space_p: predicate on rtx X.
   NOTE(review): this file is a damaged extraction -- the return type,
   braces, the early-return body for the non-MEM case, and (presumably)
   the #if DEBUG0 guards around the fprintf/debug_rtx dump below were
   dropped.  Restore them from the original before compiling.  */
153 far_addr_space_p (rtx x
)
/* Only a MEM carries an address space; non-MEM rtxes take the (lost)
   early-return branch here.  */
155 if (GET_CODE (x
) != MEM
)
/* Debug dump of X and the test result to stderr (ANSI-colored);
   presumably compiled only under a DEBUG flag -- TODO confirm.  */
158 fprintf(stderr
, "\033[35mfar_addr_space: "); debug_rtx(x
);
159 fprintf(stderr
, " = %d\033[0m\n", MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
);
/* Result: true iff the MEM's address space is ADDR_SPACE_FAR.  */
161 return MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
;
164 /* We do most RTX matching by converting the RTX into a string, and
165 using string compares. This vastly simplifies the logic in many of
166 the functions in this file.
168 On exit, pattern[] has the encoded string (use RTX_IS("...") to
169 compare it) and patternr[] has pointers to the nodes in the RTX
170 corresponding to each character in the encoded string. The latter
171 is mostly used by print_operand().
173 Unrecognized patterns have '?' in them; this shows up when the
174 assembler complains about syntax errors.
178 encode_pattern_1 (rtx x
)
182 if (patternp
== pattern
+ sizeof (pattern
) - 2)
188 patternr
[patternp
- pattern
] = x
;
190 switch (GET_CODE (x
))
196 if (GET_MODE_SIZE (GET_MODE (x
)) !=
197 GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))))
199 if (GET_MODE (x
) == PSImode
200 && GET_CODE (XEXP (x
, 0)) == REG
)
202 encode_pattern_1 (XEXP (x
, 0));
207 encode_pattern_1 (XEXP (x
, 0));
212 encode_pattern_1 (XEXP (x
, 0));
217 encode_pattern_1 (XEXP (x
, 0));
221 encode_pattern_1 (XEXP (x
, 0));
222 encode_pattern_1 (XEXP (x
, 1));
226 encode_pattern_1 (XEXP (x
, 0));
230 encode_pattern_1 (XEXP (x
, 0));
234 encode_pattern_1 (XEXP (x
, 0));
235 encode_pattern_1 (XEXP (x
, 1));
239 encode_pattern_1 (XEXP (x
, 0));
256 *patternp
++ = '0' + XCINT (x
, 1, UNSPEC
);
257 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
258 encode_pattern_1 (XVECEXP (x
, 0, i
));
265 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
266 encode_pattern_1 (XVECEXP (x
, 0, i
));
270 encode_pattern_1 (XEXP (x
, 0));
272 encode_pattern_1 (XEXP (x
, 1));
277 fprintf (stderr
, "can't encode pattern %s\n",
278 GET_RTX_NAME (GET_CODE (x
)));
287 encode_pattern (rtx x
)
290 encode_pattern_1 (x
);
294 /* Since register names indicate the mode they're used in, we need a
295 way to determine which name to refer to the register with. Called
296 by print_operand(). */
/* reg_name_with_mode: choose the assembler name for REGNO when it is
   accessed in MODE.  Per the comment above, register names on this
   target encode the access width, so print_operand() needs the mode
   to pick the right spelling.
   NOTE(review): the string returned by each `if' below was on a
   following line the extraction dropped; only the width conditions
   survive.  The final statement falls back to the generic
   reg_names[] table.  */
299 reg_name_with_mode (int regno
, machine_mode mode
)
/* Access width in bytes.  */
301 int mlen
= GET_MODE_SIZE (mode
);
/* r0 at widths 1, 3-4, 6, 8 (lost returns).  */
302 if (regno
== R0_REGNO
&& mlen
== 1)
304 if (regno
== R0_REGNO
&& (mlen
== 3 || mlen
== 4))
306 if (regno
== R0_REGNO
&& mlen
== 6)
308 if (regno
== R0_REGNO
&& mlen
== 8)
/* r1 at widths 1 and 3-4 (lost returns).  */
310 if (regno
== R1_REGNO
&& mlen
== 1)
312 if (regno
== R1_REGNO
&& (mlen
== 3 || mlen
== 4))
/* a0 at widths 3-4, only on 16-bit-address parts (lost return).  */
314 if (regno
== A0_REGNO
&& TARGET_A16
&& (mlen
== 3 || mlen
== 4))
/* Default: the mode-independent name from the backend's table.  */
316 return reg_names
[regno
];
319 /* How many bytes a register uses on stack when it's pushed. We need
320 to know this because the push opcode needs to explicitly indicate
321 the size of the register, even though the name of the register
322 already tells it that. Used by m32c_output_reg_{push,pop}, which
323 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
326 reg_push_size (int regno
)
351 /* Given two register classes, find the largest intersection between
352 them. If there is no intersection, return RETURNED_IF_EMPTY
355 reduce_class (reg_class_t original_class
, reg_class_t limiting_class
,
356 reg_class_t returned_if_empty
)
360 reg_class_t best
= NO_REGS
;
361 unsigned int best_size
= 0;
363 if (original_class
== limiting_class
)
364 return original_class
;
366 cc
= reg_class_contents
[original_class
];
367 AND_HARD_REG_SET (cc
, reg_class_contents
[limiting_class
]);
369 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
371 if (hard_reg_set_subset_p (reg_class_contents
[i
], cc
))
372 if (best_size
< reg_class_size
[i
])
374 best
= (reg_class_t
) i
;
375 best_size
= reg_class_size
[i
];
380 return returned_if_empty
;
384 /* Used by m32c_register_move_cost to determine if a move is
385 impossibly expensive. */
387 class_can_hold_mode (reg_class_t rclass
, machine_mode mode
)
389 /* Cache the results: 0=untested 1=no 2=yes */
390 static char results
[LIM_REG_CLASSES
][MAX_MACHINE_MODE
];
392 if (results
[(int) rclass
][mode
] == 0)
395 results
[rclass
][mode
] = 1;
396 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
397 if (in_hard_reg_set_p (reg_class_contents
[(int) rclass
], mode
, r
)
398 && HARD_REGNO_MODE_OK (r
, mode
))
400 results
[rclass
][mode
] = 2;
406 fprintf (stderr
, "class %s can hold %s? %s\n",
407 class_names
[(int) rclass
], mode_name
[mode
],
408 (results
[rclass
][mode
] == 2) ? "yes" : "no");
410 return results
[(int) rclass
][mode
] == 2;
413 /* Run-time Target Specification. */
415 /* Memregs are memory locations that gcc treats like general
416 registers, as there are a limited number of true registers and the
417 m32c families can use memory in most places that registers can be
420 However, since memory accesses are more expensive than registers,
421 we allow the user to limit the number of memregs available, in
422 order to try to persuade gcc to try harder to use real registers.
424 Memregs are provided by lib1funcs.S.
427 int ok_to_change_target_memregs
= TRUE
;
429 /* Implements TARGET_OPTION_OVERRIDE. */
431 #undef TARGET_OPTION_OVERRIDE
432 #define TARGET_OPTION_OVERRIDE m32c_option_override
435 m32c_option_override (void)
437 /* We limit memregs to 0..16, and provide a default. */
438 if (global_options_set
.x_target_memregs
)
440 if (target_memregs
< 0 || target_memregs
> 16)
441 error ("invalid target memregs value '%d'", target_memregs
);
449 /* This target defaults to strict volatile bitfields. */
450 if (flag_strict_volatile_bitfields
< 0 && abi_version_at_least(2))
451 flag_strict_volatile_bitfields
= 1;
453 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
454 This is always worse than an absolute call. */
456 flag_no_function_cse
= 1;
458 /* This wants to put insns between compares and their jumps. */
459 /* FIXME: The right solution is to properly trace the flags register
460 values, but that is too much work for stage 4. */
461 flag_combine_stack_adjustments
= 0;
464 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
465 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
468 m32c_override_options_after_change (void)
471 flag_no_function_cse
= 1;
474 /* Defining data structures for per-function information */
476 /* The usual; we set up our machine_function data. */
477 static struct machine_function
*
478 m32c_init_machine_status (void)
480 return ggc_cleared_alloc
<machine_function
> ();
483 /* Implements INIT_EXPANDERS. We just set up to call the above
486 m32c_init_expanders (void)
488 init_machine_status
= m32c_init_machine_status
;
493 /* Register Basics */
495 /* Basic Characteristics of Registers */
497 /* Whether a mode fits in a register is complex enough to warrant a
506 } nregs_table
[FIRST_PSEUDO_REGISTER
] =
508 { 1, 1, 2, 2, 4 }, /* r0 */
509 { 0, 1, 0, 0, 0 }, /* r2 */
510 { 1, 1, 2, 2, 0 }, /* r1 */
511 { 0, 1, 0, 0, 0 }, /* r3 */
512 { 0, 1, 1, 0, 0 }, /* a0 */
513 { 0, 1, 1, 0, 0 }, /* a1 */
514 { 0, 1, 1, 0, 0 }, /* sb */
515 { 0, 1, 1, 0, 0 }, /* fb */
516 { 0, 1, 1, 0, 0 }, /* sp */
517 { 1, 1, 1, 0, 0 }, /* pc */
518 { 0, 0, 0, 0, 0 }, /* fl */
519 { 1, 1, 1, 0, 0 }, /* ap */
520 { 1, 1, 2, 2, 4 }, /* mem0 */
521 { 1, 1, 2, 2, 4 }, /* mem1 */
522 { 1, 1, 2, 2, 4 }, /* mem2 */
523 { 1, 1, 2, 2, 4 }, /* mem3 */
524 { 1, 1, 2, 2, 4 }, /* mem4 */
525 { 1, 1, 2, 2, 0 }, /* mem5 */
526 { 1, 1, 2, 2, 0 }, /* mem6 */
527 { 1, 1, 0, 0, 0 }, /* mem7 */
530 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
531 of available memregs, and select which registers need to be preserved
532 across calls based on the chip family. */
534 #undef TARGET_CONDITIONAL_REGISTER_USAGE
535 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
537 m32c_conditional_register_usage (void)
541 if (0 <= target_memregs
&& target_memregs
<= 16)
543 /* The command line option is bytes, but our "registers" are
545 for (i
= (target_memregs
+1)/2; i
< 8; i
++)
547 fixed_regs
[MEM0_REGNO
+ i
] = 1;
548 CLEAR_HARD_REG_BIT (reg_class_contents
[MEM_REGS
], MEM0_REGNO
+ i
);
552 /* M32CM and M32C preserve more registers across function calls. */
555 call_used_regs
[R1_REGNO
] = 0;
556 call_used_regs
[R2_REGNO
] = 0;
557 call_used_regs
[R3_REGNO
] = 0;
558 call_used_regs
[A0_REGNO
] = 0;
559 call_used_regs
[A1_REGNO
] = 0;
563 /* How Values Fit in Registers */
565 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
566 different registers are different sizes from each other, *and* may
567 be different sizes in different chip families. */
/* m32c_hard_regno_nregs_1: core of HARD_REGNO_NREGS -- how many hard
   registers starting at REGNO are needed to hold MODE.  A result of 0
   (used by m32c_hard_regno_ok below) means the pairing is invalid.
   Register widths differ per register and per chip family, hence the
   nregs_table lookups.
   NOTE(review): the function braces, return type, and the results of
   two branches (FLG/CCmode and a0/SImode/TARGET_A16) were lost in
   extraction; confirm against the original m32c.c.  */
569 m32c_hard_regno_nregs_1 (int regno
, machine_mode mode
)
/* The flags register paired with CCmode is a special case
   (lost result).  */
571 if (regno
== FLG_REGNO
&& mode
== CCmode
)
/* Pseudos: generic round-up-to-words computation.  */
573 if (regno
>= FIRST_PSEUDO_REGISTER
)
574 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
/* Memregs are counted in 2-byte units.  */
576 if (regno
>= MEM0_REGNO
&& regno
<= MEM7_REGNO
)
577 return (GET_MODE_SIZE (mode
) + 1) / 2;
/* True hard registers: consult nregs_table by access width.  */
579 if (GET_MODE_SIZE (mode
) <= 1)
580 return nregs_table
[regno
].qi_regs
;
581 if (GET_MODE_SIZE (mode
) <= 2)
582 return nregs_table
[regno
].hi_regs
;
/* a0 in SImode on TARGET_A16 takes a (lost) special-case result.  */
583 if (regno
== A0_REGNO
&& mode
== SImode
&& TARGET_A16
)
/* Pointer-sized modes on 24-bit-address parts use the pi entry.  */
585 if ((GET_MODE_SIZE (mode
) <= 3 || mode
== PSImode
) && TARGET_A24
)
586 return nregs_table
[regno
].pi_regs
;
587 if (GET_MODE_SIZE (mode
) <= 4)
588 return nregs_table
[regno
].si_regs
;
589 if (GET_MODE_SIZE (mode
) <= 8)
590 return nregs_table
[regno
].di_regs
;
595 m32c_hard_regno_nregs (int regno
, machine_mode mode
)
597 int rv
= m32c_hard_regno_nregs_1 (regno
, mode
);
601 /* Implements HARD_REGNO_MODE_OK. The above function does the work
602 already; just test its return value. */
/* m32c_hard_regno_ok: per the comment above, implements
   HARD_REGNO_MODE_OK -- REGNO can hold MODE iff the nregs helper
   reports a nonzero register count for the pair.
   NOTE(review): return type and braces lost in extraction.  */
604 m32c_hard_regno_ok (int regno
, machine_mode mode
)
606 return m32c_hard_regno_nregs_1 (regno
, mode
) != 0;
609 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
610 registers are all different sizes. However, since most modes are
611 bigger than our registers anyway, it's easier to implement this
612 function that way, leaving QImode as the only unique case. */
/* m32c_modes_tieable_p: per the comment above, implements
   MODES_TIEABLE_P.  Equal-size modes are tieable; QImode is the only
   special case beyond that.
   NOTE(review): the return statements following each condition were
   dropped by the extraction; from the surviving comment the intent is
   "tieable unless one of the modes is QImode" -- confirm against the
   original m32c.c.  */
614 m32c_modes_tieable_p (machine_mode m1
, machine_mode m2
)
616 if (GET_MODE_SIZE (m1
) == GET_MODE_SIZE (m2
))
620 if (m1
== QImode
|| m2
== QImode
)
627 /* Register Classes */
629 /* Implements REGNO_REG_CLASS. */
631 m32c_regno_reg_class (int regno
)
656 if (IS_MEM_REGNO (regno
))
662 /* Implements REGNO_OK_FOR_BASE_P. */
664 m32c_regno_ok_for_base_p (int regno
)
666 if (regno
== A0_REGNO
667 || regno
== A1_REGNO
|| regno
>= FIRST_PSEUDO_REGISTER
)
672 #define DEBUG_RELOAD 0
674 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
675 registers of the appropriate size. */
677 #undef TARGET_PREFERRED_RELOAD_CLASS
678 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
681 m32c_preferred_reload_class (rtx x
, reg_class_t rclass
)
683 reg_class_t newclass
= rclass
;
686 fprintf (stderr
, "\npreferred_reload_class for %s is ",
687 class_names
[rclass
]);
689 if (rclass
== NO_REGS
)
690 rclass
= GET_MODE (x
) == QImode
? HL_REGS
: R03_REGS
;
692 if (reg_classes_intersect_p (rclass
, CR_REGS
))
694 switch (GET_MODE (x
))
700 /* newclass = HI_REGS; */
705 else if (newclass
== QI_REGS
&& GET_MODE_SIZE (GET_MODE (x
)) > 2)
707 else if (GET_MODE_SIZE (GET_MODE (x
)) > 4
708 && ! reg_class_subset_p (R03_REGS
, rclass
))
711 rclass
= reduce_class (rclass
, newclass
, rclass
);
713 if (GET_MODE (x
) == QImode
)
714 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
717 fprintf (stderr
, "%s\n", class_names
[rclass
]);
720 if (GET_CODE (x
) == MEM
721 && GET_CODE (XEXP (x
, 0)) == PLUS
722 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
723 fprintf (stderr
, "Glorm!\n");
728 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
730 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
731 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
734 m32c_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
736 return m32c_preferred_reload_class (x
, rclass
);
739 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
740 address registers for reloads since they're needed for address
743 m32c_limit_reload_class (machine_mode mode
, int rclass
)
746 fprintf (stderr
, "limit_reload_class for %s: %s ->",
747 mode_name
[mode
], class_names
[rclass
]);
751 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
752 else if (mode
== HImode
)
753 rclass
= reduce_class (rclass
, HI_REGS
, rclass
);
754 else if (mode
== SImode
)
755 rclass
= reduce_class (rclass
, SI_REGS
, rclass
);
757 if (rclass
!= A_REGS
)
758 rclass
= reduce_class (rclass
, DI_REGS
, rclass
);
761 fprintf (stderr
, " %s\n", class_names
[rclass
]);
766 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
767 r0 or r1, as those are the only real QImode registers. CR regs get
768 reloaded through appropriately sized general or address
771 m32c_secondary_reload_class (int rclass
, machine_mode mode
, rtx x
)
773 int cc
= class_contents
[rclass
][0];
775 fprintf (stderr
, "\nsecondary reload class %s %s\n",
776 class_names
[rclass
], mode_name
[mode
]);
780 && GET_CODE (x
) == MEM
&& (cc
& ~class_contents
[R23_REGS
][0]) == 0)
782 if (reg_classes_intersect_p (rclass
, CR_REGS
)
783 && GET_CODE (x
) == REG
784 && REGNO (x
) >= SB_REGNO
&& REGNO (x
) <= SP_REGNO
)
785 return (TARGET_A16
|| mode
== HImode
) ? HI_REGS
: A_REGS
;
789 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
792 #undef TARGET_CLASS_LIKELY_SPILLED_P
793 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
/* m32c_class_likely_spilled_p: implements TARGET_CLASS_LIKELY_SPILLED_P
   (see #define above).  A_REGS takes a special (lost) branch -- the
   comment above notes it is needed for address reloads; any other
   class is likely spilled exactly when it contains a single register.
   NOTE(review): braces and the A_REGS branch's return were dropped by
   the extraction.  */
796 m32c_class_likely_spilled_p (reg_class_t regclass
)
798 if (regclass
== A_REGS
)
801 return (reg_class_size
[(int) regclass
] == 1);
804 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
805 documented meaning, to avoid potential inconsistencies with actual
806 class definitions. */
808 #undef TARGET_CLASS_MAX_NREGS
809 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
812 m32c_class_max_nregs (reg_class_t regclass
, machine_mode mode
)
815 unsigned char max
= 0;
817 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
818 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) regclass
], rn
))
820 unsigned char n
= m32c_hard_regno_nregs (rn
, mode
);
827 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
828 QI (r0l, r1l) because the chip doesn't support QI ops on other
829 registers (well, it does on a0/a1 but if we let gcc do that, reload
830 suffers). Otherwise, we allow changes to larger modes. */
832 m32c_cannot_change_mode_class (machine_mode from
,
833 machine_mode to
, int rclass
)
837 fprintf (stderr
, "cannot change from %s to %s in %s\n",
838 mode_name
[from
], mode_name
[to
], class_names
[rclass
]);
841 /* If the larger mode isn't allowed in any of these registers, we
842 can't allow the change. */
843 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
844 if (class_contents
[rclass
][0] & (1 << rn
))
845 if (! m32c_hard_regno_ok (rn
, to
))
849 return (class_contents
[rclass
][0] & 0x1ffa);
851 if (class_contents
[rclass
][0] & 0x0005 /* r0, r1 */
852 && GET_MODE_SIZE (from
) > 1)
854 if (GET_MODE_SIZE (from
) > 2) /* all other regs */
860 /* Helpers for the rest of the file. */
861 /* TRUE if the rtx is a REG rtx for the given register. */
862 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
863 && REGNO (rtx) == regno)
864 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
865 base register in address calculations (hence the "strict"
867 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
868 && (REGNO (rtx) == AP_REGNO \
869 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
871 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
873 /* Implements matching for constraints (see next function too). 'S' is
874 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
875 call return values. */
877 m32c_matches_constraint_p (rtx value
, int constraint
)
879 encode_pattern (value
);
881 switch (constraint
) {
883 return (far_addr_space_p (value
)
885 && A0_OR_PSEUDO (patternr
[1])
886 && GET_MODE (patternr
[1]) == SImode
)
887 || (RTX_IS ("m+^Sri")
888 && A0_OR_PSEUDO (patternr
[4])
889 && GET_MODE (patternr
[4]) == HImode
)
890 || (RTX_IS ("m+^Srs")
891 && A0_OR_PSEUDO (patternr
[4])
892 && GET_MODE (patternr
[4]) == HImode
)
893 || (RTX_IS ("m+^S+ris")
894 && A0_OR_PSEUDO (patternr
[5])
895 && GET_MODE (patternr
[5]) == HImode
)
899 /* This is the common "src/dest" address */
901 if (GET_CODE (value
) == MEM
&& CONSTANT_P (XEXP (value
, 0)))
903 if (RTX_IS ("ms") || RTX_IS ("m+si"))
905 if (RTX_IS ("m++rii"))
907 if (REGNO (patternr
[3]) == FB_REGNO
908 && INTVAL (patternr
[4]) == 0)
913 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
917 if (REGNO (r
) == SP_REGNO
)
919 return m32c_legitimate_address_p (GET_MODE (value
), XEXP (value
, 0), 1);
926 else if (RTX_IS ("m+ri"))
930 return (IS_REG (r
, A0_REGNO
) || IS_REG (r
, A1_REGNO
));
933 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
935 return ((RTX_IS ("mr")
936 && (IS_REG (patternr
[1], SP_REGNO
)))
937 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SP_REGNO
))));
939 return ((RTX_IS ("mr")
940 && (IS_REG (patternr
[1], FB_REGNO
)))
941 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], FB_REGNO
))));
943 return ((RTX_IS ("mr")
944 && (IS_REG (patternr
[1], SB_REGNO
)))
945 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SB_REGNO
))));
947 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
948 return (RTX_IS ("mi")
949 && !(INTVAL (patternr
[1]) & ~0x1fff));
951 return r1h_operand (value
, QImode
);
953 return GET_CODE (value
) == PARALLEL
;
959 /* STACK AND CALLING */
963 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
964 (yes, THREE bytes) onto the stack for the return address, but we
965 don't support pointers bigger than 16 bits on those chips. This
966 will likely wreak havoc with exception unwinding. FIXME. */
968 m32c_return_addr_rtx (int count
)
980 /* It's four bytes */
986 /* FIXME: it's really 3 bytes */
992 gen_rtx_MEM (mode
, plus_constant (Pmode
, gen_rtx_REG (Pmode
, FP_REGNO
),
994 return copy_to_mode_reg (mode
, ra_mem
);
997 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
999 m32c_incoming_return_addr_rtx (void)
1002 return gen_rtx_MEM (PSImode
, gen_rtx_REG (PSImode
, SP_REGNO
));
1005 /* Exception Handling Support */
1007 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1010 m32c_eh_return_data_regno (int n
)
1017 return MEM0_REGNO
+4;
1019 return INVALID_REGNUM
;
1023 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1024 m32c_emit_eh_epilogue. */
1026 m32c_eh_return_stackadj_rtx (void)
1028 if (!cfun
->machine
->eh_stack_adjust
)
1032 sa
= gen_rtx_REG (Pmode
, R0_REGNO
);
1033 cfun
->machine
->eh_stack_adjust
= sa
;
1035 return cfun
->machine
->eh_stack_adjust
;
1038 /* Registers That Address the Stack Frame */
1040 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1041 the original spec called for dwarf numbers to vary with register
1042 width as well, for example, r0l, r0, and r2r0 would each have
1043 different dwarf numbers. GCC doesn't support this, and we don't do
1044 it, and gdb seems to like it this way anyway. */
1046 m32c_dwarf_frame_regnum (int n
)
1072 return DWARF_FRAME_REGISTERS
+ 1;
1076 /* The frame looks like this:
1078 ap -> +------------------------------
1079 | Return address (3 or 4 bytes)
1080 | Saved FB (2 or 4 bytes)
1081 fb -> +------------------------------
1084 | through r0 as needed
1085 sp -> +------------------------------
1088 /* We use this to wrap all emitted insns in the prologue. */
1092 RTX_FRAME_RELATED_P (x
) = 1;
1096 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1097 how much the stack pointer moves for each, for each cpu family. */
1106 /* These are in reverse push (nearest-to-sp) order. */
1107 { R0_REGNO
, 0x80, 2, 2 },
1108 { R1_REGNO
, 0x40, 2, 2 },
1109 { R2_REGNO
, 0x20, 2, 2 },
1110 { R3_REGNO
, 0x10, 2, 2 },
1111 { A0_REGNO
, 0x08, 2, 4 },
1112 { A1_REGNO
, 0x04, 2, 4 },
1113 { SB_REGNO
, 0x02, 2, 4 },
1114 { FB_REGNO
, 0x01, 2, 4 }
1117 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1119 /* Returns TRUE if we need to save/restore the given register. We
1120 save everything for exception handlers, so that any register can be
1121 unwound. For interrupt handlers, we save everything if the handler
1122 calls something else (because we don't know what *that* function
1123 might do), but try to be a bit smarter if the handler is a leaf
1124 function. We always save $a0, though, because we use that in the
1125 epilogue to copy $fb to $sp. */
/* need_to_save: decide whether REGNO must be saved/restored by the
   prologue/epilogue (policy described in the long comment above).
   NOTE(review): each condition's return statement was lost in the
   extraction, so the polarity of every branch below must be confirmed
   against the original m32c.c; the condition structure itself
   survives.  */
1127 need_to_save (int regno
)
/* Fixed registers are never saved.  */
1129 if (fixed_regs
[regno
])
/* eh_return handlers: everything is saved so any register unwinds.  */
1131 if (crtl
->calls_eh_return
)
/* The frame pointer is handled separately.  */
1133 if (regno
== FP_REGNO
)
/* Interrupt handlers: non-leaf handlers save everything; $a0 is also
   forced when the function needs an `enter' (it is used in the
   epilogue to copy $fb to $sp, per the comment above).  */
1135 if (cfun
->machine
->is_interrupt
1136 && (!cfun
->machine
->is_leaf
1137 || (regno
== A0_REGNO
1138 && m32c_function_needs_enter ())
/* General case: save a register that is live and is either call-saved
   or used inside an interrupt handler.  */
1141 if (df_regs_ever_live_p (regno
)
1142 && (!call_used_regs
[regno
] || cfun
->machine
->is_interrupt
))
1147 /* This function contains all the intelligence about saving and
1148 restoring registers. It always figures out the register save set.
1149 When called with PP_justcount, it merely returns the size of the
1150 save set (for eliminating the frame pointer, for example). When
1151 called with PP_pushm or PP_popm, it emits the appropriate
1152 instructions for saving (pushm) or restoring (popm) the
1155 m32c_pushm_popm (Push_Pop_Type ppt
)
1158 int byte_count
= 0, bytes
;
1160 rtx dwarf_set
[PUSHM_N
];
1162 int nosave_mask
= 0;
1164 if (crtl
->return_rtx
1165 && GET_CODE (crtl
->return_rtx
) == PARALLEL
1166 && !(crtl
->calls_eh_return
|| cfun
->machine
->is_interrupt
))
1168 rtx exp
= XVECEXP (crtl
->return_rtx
, 0, 0);
1169 rtx rv
= XEXP (exp
, 0);
1170 int rv_bytes
= GET_MODE_SIZE (GET_MODE (rv
));
1173 nosave_mask
|= 0x20; /* PSI, SI */
1175 nosave_mask
|= 0xf0; /* DF */
1177 nosave_mask
|= 0x50; /* DI */
1180 for (i
= 0; i
< (int) PUSHM_N
; i
++)
1182 /* Skip if neither register needs saving. */
1183 if (!need_to_save (pushm_info
[i
].reg1
))
1186 if (pushm_info
[i
].bit
& nosave_mask
)
1189 reg_mask
|= pushm_info
[i
].bit
;
1190 bytes
= TARGET_A16
? pushm_info
[i
].a16_bytes
: pushm_info
[i
].a24_bytes
;
1192 if (ppt
== PP_pushm
)
1194 machine_mode mode
= (bytes
== 2) ? HImode
: SImode
;
1197 /* Always use stack_pointer_rtx instead of calling
1198 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1199 that there is a single rtx representing the stack pointer,
1200 namely stack_pointer_rtx, and uses == to recognize it. */
1201 addr
= stack_pointer_rtx
;
1203 if (byte_count
!= 0)
1204 addr
= gen_rtx_PLUS (GET_MODE (addr
), addr
, GEN_INT (byte_count
));
1206 dwarf_set
[n_dwarfs
++] =
1207 gen_rtx_SET (VOIDmode
,
1208 gen_rtx_MEM (mode
, addr
),
1209 gen_rtx_REG (mode
, pushm_info
[i
].reg1
));
1210 F (dwarf_set
[n_dwarfs
- 1]);
1213 byte_count
+= bytes
;
1216 if (cfun
->machine
->is_interrupt
)
1218 cfun
->machine
->intr_pushm
= reg_mask
& 0xfe;
1223 if (cfun
->machine
->is_interrupt
)
1224 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1225 if (need_to_save (i
))
1228 cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
] = 1;
1231 if (ppt
== PP_pushm
&& byte_count
)
1233 rtx note
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (n_dwarfs
+ 1));
1238 XVECEXP (note
, 0, 0)
1239 = gen_rtx_SET (VOIDmode
,
1241 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1243 GEN_INT (-byte_count
)));
1244 F (XVECEXP (note
, 0, 0));
1246 for (i
= 0; i
< n_dwarfs
; i
++)
1247 XVECEXP (note
, 0, i
+ 1) = dwarf_set
[i
];
1249 pushm
= F (emit_insn (gen_pushm (GEN_INT (reg_mask
))));
1251 add_reg_note (pushm
, REG_FRAME_RELATED_EXPR
, note
);
1254 if (cfun
->machine
->is_interrupt
)
1255 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1256 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1259 pushm
= emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode
, i
)));
1261 pushm
= emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode
, i
)));
1265 if (ppt
== PP_popm
&& byte_count
)
1267 if (cfun
->machine
->is_interrupt
)
1268 for (i
= MEM7_REGNO
; i
>= MEM0_REGNO
; i
--)
1269 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1272 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, i
)));
1274 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode
, i
)));
1277 emit_insn (gen_popm (GEN_INT (reg_mask
)));
1283 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1284 diagrams our call frame. */
1286 m32c_initial_elimination_offset (int from
, int to
)
1290 if (from
== AP_REGNO
)
1300 ofs
+= m32c_pushm_popm (PP_justcount
);
1301 ofs
+= get_frame_size ();
1304 /* Account for push rounding. */
1306 ofs
= (ofs
+ 1) & ~1;
1308 fprintf (stderr
, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from
,
1314 /* Passing Function Arguments on the Stack */
1316 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1317 M32C has word stacks. */
/* m32c_push_rounding: implements PUSH_ROUNDING (see the comment
   above: R8C/M16C have byte stacks, M32C has word stacks).  The
   R8C/M16C branch's result was lost in extraction -- presumably it
   returns N unrounded, matching the byte-stack note; confirm against
   the original.  Otherwise N is rounded up to an even byte count for
   the word stack.  */
1319 m32c_push_rounding (int n
)
1321 if (TARGET_R8C
|| TARGET_M16C
)
1323 return (n
+ 1) & ~1;
1326 /* Passing Arguments in Registers */
1328 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1329 registers, partly on stack. If our function returns a struct, a
1330 pointer to a buffer for it is at the top of the stack (last thing
1331 pushed). The first few real arguments may be in registers as
1334 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1335 arg2 in r2 if it's HI (else pushed on stack)
1337 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1340 Structs are not passed in registers, even if they fit. Only
1341 integer and pointer types are passed in registers.
1343 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1345 #undef TARGET_FUNCTION_ARG
1346 #define TARGET_FUNCTION_ARG m32c_function_arg
1348 m32c_function_arg (cumulative_args_t ca_v
,
1349 machine_mode mode
, const_tree type
, bool named
)
1351 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1353 /* Can return a reg, parallel, or 0 for stack */
1356 fprintf (stderr
, "func_arg %d (%s, %d)\n",
1357 ca
->parm_num
, mode_name
[mode
], named
);
1361 if (mode
== VOIDmode
)
1364 if (ca
->force_mem
|| !named
)
1367 fprintf (stderr
, "func arg: force %d named %d, mem\n", ca
->force_mem
,
1373 if (type
&& INTEGRAL_TYPE_P (type
) && POINTER_TYPE_P (type
))
1376 if (type
&& AGGREGATE_TYPE_P (type
))
1379 switch (ca
->parm_num
)
1382 if (GET_MODE_SIZE (mode
) == 1 || GET_MODE_SIZE (mode
) == 2)
1383 rv
= gen_rtx_REG (mode
, TARGET_A16
? R1_REGNO
: R0_REGNO
);
1387 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 2)
1388 rv
= gen_rtx_REG (mode
, R2_REGNO
);
1398 #undef TARGET_PASS_BY_REFERENCE
1399 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1401 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED
,
1402 machine_mode mode ATTRIBUTE_UNUSED
,
1403 const_tree type ATTRIBUTE_UNUSED
,
1404 bool named ATTRIBUTE_UNUSED
)
1409 /* Implements INIT_CUMULATIVE_ARGS. */
1411 m32c_init_cumulative_args (CUMULATIVE_ARGS
* ca
,
1413 rtx libname ATTRIBUTE_UNUSED
,
1415 int n_named_args ATTRIBUTE_UNUSED
)
1417 if (fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fndecl
))
1424 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1425 functions returning structures, so we always reset that. Otherwise,
1426 we only need to know the sequence number of the argument to know what
1428 #undef TARGET_FUNCTION_ARG_ADVANCE
1429 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1431 m32c_function_arg_advance (cumulative_args_t ca_v
,
1432 machine_mode mode ATTRIBUTE_UNUSED
,
1433 const_tree type ATTRIBUTE_UNUSED
,
1434 bool named ATTRIBUTE_UNUSED
)
1436 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1444 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1445 #undef TARGET_FUNCTION_ARG_BOUNDARY
1446 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1448 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED
,
1449 const_tree type ATTRIBUTE_UNUSED
)
1451 return (TARGET_A16
? 8 : 16);
1454 /* Implements FUNCTION_ARG_REGNO_P. */
1456 m32c_function_arg_regno_p (int r
)
1459 return (r
== R0_REGNO
);
1460 return (r
== R1_REGNO
|| r
== R2_REGNO
);
1463 /* HImode and PSImode are the two "native" modes as far as GCC is
1464 concerned, but the chips also support a 32-bit mode which is used
1465 for some opcodes in R8C/M16C and for reset vectors and such. */
1466 #undef TARGET_VALID_POINTER_MODE
1467 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1469 m32c_valid_pointer_mode (machine_mode mode
)
1479 /* How Scalar Function Values Are Returned */
1481 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1482 combination of registers starting there (r2r0 for longs, r3r1r2r0
1483 for long long, r3r2r1r0 for doubles), except that that ABI
1484 currently doesn't work because it ends up using all available
1485 general registers and gcc often can't compile it. So, instead, we
1486 return anything bigger than 16 bits in "mem0" (effectively, a
1487 memory location). */
1489 #undef TARGET_LIBCALL_VALUE
1490 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1493 m32c_libcall_value (machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
1495 /* return reg or parallel */
1497 /* FIXME: GCC has difficulty returning large values in registers,
1498 because that ties up most of the general registers and gives the
1499 register allocator little to work with. Until we can resolve
1500 this, large values are returned in memory. */
1505 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (4));
1506 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1507 gen_rtx_REG (HImode
,
1510 XVECEXP (rv
, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode
,
1511 gen_rtx_REG (HImode
,
1514 XVECEXP (rv
, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode
,
1515 gen_rtx_REG (HImode
,
1518 XVECEXP (rv
, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode
,
1519 gen_rtx_REG (HImode
,
1525 if (TARGET_A24
&& GET_MODE_SIZE (mode
) > 2)
1529 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
1530 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1538 if (GET_MODE_SIZE (mode
) > 2)
1539 return gen_rtx_REG (mode
, MEM0_REGNO
);
1540 return gen_rtx_REG (mode
, R0_REGNO
);
1543 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1546 #undef TARGET_FUNCTION_VALUE
1547 #define TARGET_FUNCTION_VALUE m32c_function_value
1550 m32c_function_value (const_tree valtype
,
1551 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1552 bool outgoing ATTRIBUTE_UNUSED
)
1554 /* return reg or parallel */
1555 const machine_mode mode
= TYPE_MODE (valtype
);
1556 return m32c_libcall_value (mode
, NULL_RTX
);
1559 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1561 #undef TARGET_FUNCTION_VALUE_REGNO_P
1562 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1565 m32c_function_value_regno_p (const unsigned int regno
)
1567 return (regno
== R0_REGNO
|| regno
== MEM0_REGNO
);
1570 /* How Large Values Are Returned */
1572 /* We return structures by pushing the address on the stack, even if
1573 we use registers for the first few "real" arguments. */
1574 #undef TARGET_STRUCT_VALUE_RTX
1575 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1577 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED
,
1578 int incoming ATTRIBUTE_UNUSED
)
1583 /* Function Entry and Exit */
1585 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1587 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
1589 if (cfun
->machine
->is_interrupt
)
1594 /* Implementing the Varargs Macros */
1596 #undef TARGET_STRICT_ARGUMENT_NAMING
1597 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1599 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1604 /* Trampolines for Nested Functions */
1608 1 0000 75C43412 mov.w #0x1234,a0
1609 2 0004 FC000000 jmp.a label
1612 1 0000 BC563412 mov.l:s #0x123456,a0
1613 2 0004 CC000000 jmp.a label
1616 /* Implements TRAMPOLINE_SIZE. */
1618 m32c_trampoline_size (void)
1620 /* Allocate extra space so we can avoid the messy shifts when we
1621 initialize the trampoline; we just write past the end of the
1623 return TARGET_A16
? 8 : 10;
1626 /* Implements TRAMPOLINE_ALIGNMENT. */
1628 m32c_trampoline_alignment (void)
1633 /* Implements TARGET_TRAMPOLINE_INIT. */
1635 #undef TARGET_TRAMPOLINE_INIT
1636 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1638 m32c_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chainval
)
1640 rtx function
= XEXP (DECL_RTL (fndecl
), 0);
1642 #define A0(m,i) adjust_address (m_tramp, m, i)
1645 /* Note: we subtract a "word" because the moves want signed
1646 constants, not unsigned constants. */
1647 emit_move_insn (A0 (HImode
, 0), GEN_INT (0xc475 - 0x10000));
1648 emit_move_insn (A0 (HImode
, 2), chainval
);
1649 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xfc - 0x100));
1650 /* We use 16-bit addresses here, but store the zero to turn it
1651 into a 24-bit offset. */
1652 emit_move_insn (A0 (HImode
, 5), function
);
1653 emit_move_insn (A0 (QImode
, 7), GEN_INT (0x00));
1657 /* Note that the PSI moves actually write 4 bytes. Make sure we
1658 write stuff out in the right order, and leave room for the
1659 extra byte at the end. */
1660 emit_move_insn (A0 (QImode
, 0), GEN_INT (0xbc - 0x100));
1661 emit_move_insn (A0 (PSImode
, 1), chainval
);
1662 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xcc - 0x100));
1663 emit_move_insn (A0 (PSImode
, 5), function
);
1668 /* Addressing Modes */
1670 /* The r8c/m32c family supports a wide range of non-orthogonal
1671 addressing modes, including the ability to double-indirect on *some*
1672 of them. Not all insns support all modes, either, but we rely on
1673 predicates and constraints to deal with that. */
1674 #undef TARGET_LEGITIMATE_ADDRESS_P
1675 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1677 m32c_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1683 if (TARGET_A16
&& GET_MODE (x
) != HImode
&& GET_MODE (x
) != SImode
)
1685 if (TARGET_A24
&& GET_MODE (x
) != PSImode
)
1688 /* Wide references to memory will be split after reload, so we must
1689 ensure that all parts of such splits remain legitimate
1691 mode_adjust
= GET_MODE_SIZE (mode
) - 1;
1693 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1694 if (GET_CODE (x
) == PRE_DEC
1695 || GET_CODE (x
) == POST_INC
|| GET_CODE (x
) == PRE_MODIFY
)
1697 return (GET_CODE (XEXP (x
, 0)) == REG
1698 && REGNO (XEXP (x
, 0)) == SP_REGNO
);
1702 /* This is the double indirection detection, but it currently
1703 doesn't work as cleanly as this code implies, so until we've had
1704 a chance to debug it, leave it disabled. */
1705 if (TARGET_A24
&& GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) != PLUS
)
1708 fprintf (stderr
, "double indirect\n");
1717 /* Most indexable registers can be used without displacements,
1718 although some of them will be emitted with an explicit zero
1719 to please the assembler. */
1720 switch (REGNO (patternr
[0]))
1726 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1732 if (IS_PSEUDO (patternr
[0], strict
))
1738 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1743 /* This is more interesting, because different base registers
1744 allow for different displacements - both range and signedness
1745 - and it differs from chip series to chip series too. */
1746 int rn
= REGNO (patternr
[1]);
1747 HOST_WIDE_INT offs
= INTVAL (patternr
[2]);
1753 /* The syntax only allows positive offsets, but when the
1754 offsets span the entire memory range, we can simulate
1755 negative offsets by wrapping. */
1757 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1759 return (offs
>= 0 && offs
<= 65535 - mode_adjust
);
1761 return (offs
>= -16777216 && offs
<= 16777215);
1765 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1766 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1769 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1772 if (IS_PSEUDO (patternr
[1], strict
))
1777 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1779 rtx reg
= patternr
[1];
1781 /* We don't know where the symbol is, so only allow base
1782 registers which support displacements spanning the whole
1784 switch (REGNO (reg
))
1788 /* $sb needs a secondary reload, but since it's involved in
1789 memory address reloads too, we don't deal with it very
1791 /* case SB_REGNO: */
1794 if (GET_CODE (reg
) == SUBREG
)
1796 if (IS_PSEUDO (reg
, strict
))
1804 /* Implements REG_OK_FOR_BASE_P. */
1806 m32c_reg_ok_for_base_p (rtx x
, int strict
)
1808 if (GET_CODE (x
) != REG
)
1819 if (IS_PSEUDO (x
, strict
))
1825 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1826 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1828 EB 4B FF mova -128[$fb],$a0
1829 D8 0C FF FF mov.w:Q #0,-1[$a0]
1831 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1834 77 54 00 01 sub #256,$a0
1835 D8 08 01 mov.w:Q #0,1[$a0]
1837 If we don't offset (i.e. offset by zero), we end up with:
1839 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1841 We have to subtract *something* so that we have a PLUS rtx to mark
1842 that we've done this reload. The -128 offset will never result in
1843 an 8-bit aN offset, and the payoff for the second case is five
1844 loads *if* those loads are within 256 bytes of the other end of the
1845 frame, so the third case seems best. Note that we subtract the
1846 zero, but detect that in the addhi3 pattern. */
1848 #define BIG_FB_ADJ 0
1850 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1851 worry about is frame base offsets, as $fb has a limited
1852 displacement range. We deal with this by attempting to reload $fb
1853 itself into an address register; that seems to result in the best
1855 #undef TARGET_LEGITIMIZE_ADDRESS
1856 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1858 m32c_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1862 fprintf (stderr
, "m32c_legitimize_address for mode %s\n", mode_name
[mode
]);
1864 fprintf (stderr
, "\n");
1867 if (GET_CODE (x
) == PLUS
1868 && GET_CODE (XEXP (x
, 0)) == REG
1869 && REGNO (XEXP (x
, 0)) == FB_REGNO
1870 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1871 && (INTVAL (XEXP (x
, 1)) < -128
1872 || INTVAL (XEXP (x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1874 /* reload FB to A_REGS */
1875 rtx temp
= gen_reg_rtx (Pmode
);
1877 emit_insn (gen_rtx_SET (VOIDmode
, temp
, XEXP (x
, 0)));
1884 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1886 m32c_legitimize_reload_address (rtx
* x
,
1889 int type
, int ind_levels ATTRIBUTE_UNUSED
)
1892 fprintf (stderr
, "\nm32c_legitimize_reload_address for mode %s\n",
1897 /* At one point, this function tried to get $fb copied to an address
1898 register, which in theory would maximize sharing, but gcc was
1899 *also* still trying to reload the whole address, and we'd run out
1900 of address registers. So we let gcc do the naive (but safe)
1901 reload instead, when the above function doesn't handle it for
1904 The code below is a second attempt at the above. */
1906 if (GET_CODE (*x
) == PLUS
1907 && GET_CODE (XEXP (*x
, 0)) == REG
1908 && REGNO (XEXP (*x
, 0)) == FB_REGNO
1909 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1910 && (INTVAL (XEXP (*x
, 1)) < -128
1911 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1914 int offset
= INTVAL (XEXP (*x
, 1));
1915 int adjustment
= -BIG_FB_ADJ
;
1917 sum
= gen_rtx_PLUS (Pmode
, XEXP (*x
, 0),
1918 GEN_INT (adjustment
));
1919 *x
= gen_rtx_PLUS (Pmode
, sum
, GEN_INT (offset
- adjustment
));
1920 if (type
== RELOAD_OTHER
)
1921 type
= RELOAD_FOR_OTHER_ADDRESS
;
1922 push_reload (sum
, NULL_RTX
, &XEXP (*x
, 0), NULL
,
1923 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1924 (enum reload_type
) type
);
1928 if (GET_CODE (*x
) == PLUS
1929 && GET_CODE (XEXP (*x
, 0)) == PLUS
1930 && GET_CODE (XEXP (XEXP (*x
, 0), 0)) == REG
1931 && REGNO (XEXP (XEXP (*x
, 0), 0)) == FB_REGNO
1932 && GET_CODE (XEXP (XEXP (*x
, 0), 1)) == CONST_INT
1933 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1936 if (type
== RELOAD_OTHER
)
1937 type
= RELOAD_FOR_OTHER_ADDRESS
;
1938 push_reload (XEXP (*x
, 0), NULL_RTX
, &XEXP (*x
, 0), NULL
,
1939 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1940 (enum reload_type
) type
);
1947 /* Return the appropriate mode for a named address pointer. */
1948 #undef TARGET_ADDR_SPACE_POINTER_MODE
1949 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1951 m32c_addr_space_pointer_mode (addr_space_t addrspace
)
1955 case ADDR_SPACE_GENERIC
:
1956 return TARGET_A24
? PSImode
: HImode
;
1957 case ADDR_SPACE_FAR
:
1964 /* Return the appropriate mode for a named address address. */
1965 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1966 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1968 m32c_addr_space_address_mode (addr_space_t addrspace
)
1972 case ADDR_SPACE_GENERIC
:
1973 return TARGET_A24
? PSImode
: HImode
;
1974 case ADDR_SPACE_FAR
:
1981 /* Like m32c_legitimate_address_p, except with named addresses. */
1982 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1983 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1984 m32c_addr_space_legitimate_address_p
1986 m32c_addr_space_legitimate_address_p (machine_mode mode
, rtx x
,
1987 bool strict
, addr_space_t as
)
1989 if (as
== ADDR_SPACE_FAR
)
1996 if (GET_MODE (x
) != SImode
)
1998 switch (REGNO (patternr
[0]))
2004 if (IS_PSEUDO (patternr
[0], strict
))
2009 if (RTX_IS ("+^Sri"))
2011 int rn
= REGNO (patternr
[3]);
2012 HOST_WIDE_INT offs
= INTVAL (patternr
[4]);
2013 if (GET_MODE (patternr
[3]) != HImode
)
2018 return (offs
>= 0 && offs
<= 0xfffff);
2021 if (IS_PSEUDO (patternr
[3], strict
))
2026 if (RTX_IS ("+^Srs"))
2028 int rn
= REGNO (patternr
[3]);
2029 if (GET_MODE (patternr
[3]) != HImode
)
2037 if (IS_PSEUDO (patternr
[3], strict
))
2042 if (RTX_IS ("+^S+ris"))
2044 int rn
= REGNO (patternr
[4]);
2045 if (GET_MODE (patternr
[4]) != HImode
)
2053 if (IS_PSEUDO (patternr
[4], strict
))
2065 else if (as
!= ADDR_SPACE_GENERIC
)
2068 return m32c_legitimate_address_p (mode
, x
, strict
);
2071 /* Like m32c_legitimate_address, except with named address support. */
2072 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2073 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2075 m32c_addr_space_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
,
2078 if (as
!= ADDR_SPACE_GENERIC
)
2081 fprintf (stderr
, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name
[mode
]);
2083 fprintf (stderr
, "\n");
2086 if (GET_CODE (x
) != REG
)
2088 x
= force_reg (SImode
, x
);
2093 return m32c_legitimize_address (x
, oldx
, mode
);
2096 /* Determine if one named address space is a subset of another. */
2097 #undef TARGET_ADDR_SPACE_SUBSET_P
2098 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2100 m32c_addr_space_subset_p (addr_space_t subset
, addr_space_t superset
)
2102 gcc_assert (subset
== ADDR_SPACE_GENERIC
|| subset
== ADDR_SPACE_FAR
);
2103 gcc_assert (superset
== ADDR_SPACE_GENERIC
|| superset
== ADDR_SPACE_FAR
);
2105 if (subset
== superset
)
2109 return (subset
== ADDR_SPACE_GENERIC
&& superset
== ADDR_SPACE_FAR
);
2112 #undef TARGET_ADDR_SPACE_CONVERT
2113 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2114 /* Convert from one address space to another. */
2116 m32c_addr_space_convert (rtx op
, tree from_type
, tree to_type
)
2118 addr_space_t from_as
= TYPE_ADDR_SPACE (TREE_TYPE (from_type
));
2119 addr_space_t to_as
= TYPE_ADDR_SPACE (TREE_TYPE (to_type
));
2122 gcc_assert (from_as
== ADDR_SPACE_GENERIC
|| from_as
== ADDR_SPACE_FAR
);
2123 gcc_assert (to_as
== ADDR_SPACE_GENERIC
|| to_as
== ADDR_SPACE_FAR
);
2125 if (to_as
== ADDR_SPACE_GENERIC
&& from_as
== ADDR_SPACE_FAR
)
2127 /* This is unpredictable, as we're truncating off usable address
2130 result
= gen_reg_rtx (HImode
);
2131 emit_move_insn (result
, simplify_subreg (HImode
, op
, SImode
, 0));
2134 else if (to_as
== ADDR_SPACE_FAR
&& from_as
== ADDR_SPACE_GENERIC
)
2136 /* This always works. */
2137 result
= gen_reg_rtx (SImode
);
2138 emit_insn (gen_zero_extendhisi2 (result
, op
));
2145 /* Condition Code Status */
2147 #undef TARGET_FIXED_CONDITION_CODE_REGS
2148 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2150 m32c_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
2153 *p2
= INVALID_REGNUM
;
2157 /* Describing Relative Costs of Operations */
2159 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2160 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2161 no opcodes to do that). We also discourage use of mem* registers
2162 since they're really memory. */
2164 #undef TARGET_REGISTER_MOVE_COST
2165 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2168 m32c_register_move_cost (machine_mode mode
, reg_class_t from
,
2171 int cost
= COSTS_N_INSNS (3);
2174 /* FIXME: pick real values, but not 2 for now. */
2175 COPY_HARD_REG_SET (cc
, reg_class_contents
[(int) from
]);
2176 IOR_HARD_REG_SET (cc
, reg_class_contents
[(int) to
]);
2179 && hard_reg_set_intersect_p (cc
, reg_class_contents
[R23_REGS
]))
2181 if (hard_reg_set_subset_p (cc
, reg_class_contents
[R23_REGS
]))
2182 cost
= COSTS_N_INSNS (1000);
2184 cost
= COSTS_N_INSNS (80);
2187 if (!class_can_hold_mode (from
, mode
) || !class_can_hold_mode (to
, mode
))
2188 cost
= COSTS_N_INSNS (1000);
2190 if (reg_classes_intersect_p (from
, CR_REGS
))
2191 cost
+= COSTS_N_INSNS (5);
2193 if (reg_classes_intersect_p (to
, CR_REGS
))
2194 cost
+= COSTS_N_INSNS (5);
2196 if (from
== MEM_REGS
|| to
== MEM_REGS
)
2197 cost
+= COSTS_N_INSNS (50);
2198 else if (reg_classes_intersect_p (from
, MEM_REGS
)
2199 || reg_classes_intersect_p (to
, MEM_REGS
))
2200 cost
+= COSTS_N_INSNS (10);
2203 fprintf (stderr
, "register_move_cost %s from %s to %s = %d\n",
2204 mode_name
[mode
], class_names
[(int) from
], class_names
[(int) to
],
2210 /* Implements TARGET_MEMORY_MOVE_COST. */
2212 #undef TARGET_MEMORY_MOVE_COST
2213 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2216 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2217 reg_class_t rclass ATTRIBUTE_UNUSED
,
2218 bool in ATTRIBUTE_UNUSED
)
2220 /* FIXME: pick real values. */
2221 return COSTS_N_INSNS (10);
2224 /* Here we try to describe when we use multiple opcodes for one RTX so
2225 that gcc knows when to use them. */
2226 #undef TARGET_RTX_COSTS
2227 #define TARGET_RTX_COSTS m32c_rtx_costs
2229 m32c_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2230 int *total
, bool speed ATTRIBUTE_UNUSED
)
2235 if (REGNO (x
) >= MEM0_REGNO
&& REGNO (x
) <= MEM7_REGNO
)
2236 *total
+= COSTS_N_INSNS (500);
2238 *total
+= COSTS_N_INSNS (1);
2244 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
2246 /* mov.b r1l, r1h */
2247 *total
+= COSTS_N_INSNS (1);
2250 if (INTVAL (XEXP (x
, 1)) > 8
2251 || INTVAL (XEXP (x
, 1)) < -8)
2254 /* mov.b r1l, r1h */
2255 *total
+= COSTS_N_INSNS (2);
2270 if (outer_code
== SET
)
2272 *total
+= COSTS_N_INSNS (2);
2279 rtx dest
= XEXP (x
, 0);
2280 rtx addr
= XEXP (dest
, 0);
2281 switch (GET_CODE (addr
))
2284 *total
+= COSTS_N_INSNS (1);
2287 *total
+= COSTS_N_INSNS (3);
2290 *total
+= COSTS_N_INSNS (2);
2298 /* Reasonable default. */
2299 if (TARGET_A16
&& GET_MODE(x
) == SImode
)
2300 *total
+= COSTS_N_INSNS (2);
2306 #undef TARGET_ADDRESS_COST
2307 #define TARGET_ADDRESS_COST m32c_address_cost
2309 m32c_address_cost (rtx addr
, machine_mode mode ATTRIBUTE_UNUSED
,
2310 addr_space_t as ATTRIBUTE_UNUSED
,
2311 bool speed ATTRIBUTE_UNUSED
)
2314 /* fprintf(stderr, "\naddress_cost\n");
2316 switch (GET_CODE (addr
))
2321 return COSTS_N_INSNS(1);
2322 if (0 < i
&& i
<= 255)
2323 return COSTS_N_INSNS(2);
2324 if (0 < i
&& i
<= 65535)
2325 return COSTS_N_INSNS(3);
2326 return COSTS_N_INSNS(4);
2328 return COSTS_N_INSNS(4);
2330 return COSTS_N_INSNS(1);
2332 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
2334 i
= INTVAL (XEXP (addr
, 1));
2336 return COSTS_N_INSNS(1);
2337 if (0 < i
&& i
<= 255)
2338 return COSTS_N_INSNS(2);
2339 if (0 < i
&& i
<= 65535)
2340 return COSTS_N_INSNS(3);
2342 return COSTS_N_INSNS(4);
2348 /* Defining the Output Assembler Language */
2350 /* Output of Data */
2352 /* We may have 24 bit sizes, which is the native address size.
2353 Currently unused, but provided for completeness. */
2354 #undef TARGET_ASM_INTEGER
2355 #define TARGET_ASM_INTEGER m32c_asm_integer
2357 m32c_asm_integer (rtx x
, unsigned int size
, int aligned_p
)
2362 fprintf (asm_out_file
, "\t.3byte\t");
2363 output_addr_const (asm_out_file
, x
);
2364 fputc ('\n', asm_out_file
);
2367 if (GET_CODE (x
) == SYMBOL_REF
)
2369 fprintf (asm_out_file
, "\t.long\t");
2370 output_addr_const (asm_out_file
, x
);
2371 fputc ('\n', asm_out_file
);
2376 return default_assemble_integer (x
, size
, aligned_p
);
2379 /* Output of Assembler Instructions */
2381 /* We use a lookup table because the addressing modes are non-orthogonal. */
2386 char const *pattern
;
2389 const conversions
[] = {
2392 { 0, "mr", "z[1]" },
2393 { 0, "m+ri", "3[2]" },
2394 { 0, "m+rs", "3[2]" },
2395 { 0, "m+^Zrs", "5[4]" },
2396 { 0, "m+^Zri", "5[4]" },
2397 { 0, "m+^Z+ris", "7+6[5]" },
2398 { 0, "m+^Srs", "5[4]" },
2399 { 0, "m+^Sri", "5[4]" },
2400 { 0, "m+^S+ris", "7+6[5]" },
2401 { 0, "m+r+si", "4+5[2]" },
2404 { 0, "m+si", "2+3" },
2406 { 0, "mmr", "[z[2]]" },
2407 { 0, "mm+ri", "[4[3]]" },
2408 { 0, "mm+rs", "[4[3]]" },
2409 { 0, "mm+r+si", "[5+6[3]]" },
2410 { 0, "mms", "[[2]]" },
2411 { 0, "mmi", "[[2]]" },
2412 { 0, "mm+si", "[4[3]]" },
2416 { 0, "+si", "#1+2" },
2422 { 'd', "+si", "1+2" },
2425 { 'D', "+si", "1+2" },
2436 /* This is in order according to the bitfield that pushm/popm use. */
2437 static char const *pushm_regs
[] = {
2438 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2441 /* Implements TARGET_PRINT_OPERAND. */
2443 #undef TARGET_PRINT_OPERAND
2444 #define TARGET_PRINT_OPERAND m32c_print_operand
2447 m32c_print_operand (FILE * file
, rtx x
, int code
)
2452 int unsigned_const
= 0;
2455 /* Multiplies; constants are converted to sign-extended format but
2456 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2468 /* This one is only for debugging; you can put it in a pattern to
2469 force this error. */
2472 fprintf (stderr
, "dj: unreviewed pattern:");
2473 if (current_output_insn
)
2474 debug_rtx (current_output_insn
);
2477 /* PSImode operations are either .w or .l depending on the target. */
2481 fprintf (file
, "w");
2483 fprintf (file
, "l");
2486 /* Inverted conditionals. */
2489 switch (GET_CODE (x
))
2495 fputs ("gtu", file
);
2501 fputs ("geu", file
);
2507 fputs ("leu", file
);
2513 fputs ("ltu", file
);
2526 /* Regular conditionals. */
2529 switch (GET_CODE (x
))
2535 fputs ("leu", file
);
2541 fputs ("ltu", file
);
2547 fputs ("gtu", file
);
2553 fputs ("geu", file
);
2566 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2568 if (code
== 'h' && GET_MODE (x
) == SImode
)
2570 x
= m32c_subreg (HImode
, x
, SImode
, 0);
2573 if (code
== 'H' && GET_MODE (x
) == SImode
)
2575 x
= m32c_subreg (HImode
, x
, SImode
, 2);
2578 if (code
== 'h' && GET_MODE (x
) == HImode
)
2580 x
= m32c_subreg (QImode
, x
, HImode
, 0);
2583 if (code
== 'H' && GET_MODE (x
) == HImode
)
2585 /* We can't actually represent this as an rtx. Do it here. */
2586 if (GET_CODE (x
) == REG
)
2591 fputs ("r0h", file
);
2594 fputs ("r1h", file
);
2600 /* This should be a MEM. */
2601 x
= m32c_subreg (QImode
, x
, HImode
, 1);
2604 /* This is for BMcond, which always wants word register names. */
2605 if (code
== 'h' && GET_MODE (x
) == QImode
)
2607 if (GET_CODE (x
) == REG
)
2608 x
= gen_rtx_REG (HImode
, REGNO (x
));
2611 /* 'x' and 'X' need to be ignored for non-immediates. */
2612 if ((code
== 'x' || code
== 'X') && GET_CODE (x
) != CONST_INT
)
2617 for (i
= 0; conversions
[i
].pattern
; i
++)
2618 if (conversions
[i
].code
== code
2619 && streq (conversions
[i
].pattern
, pattern
))
2621 for (j
= 0; conversions
[i
].format
[j
]; j
++)
2622 /* backslash quotes the next character in the output pattern. */
2623 if (conversions
[i
].format
[j
] == '\\')
2625 fputc (conversions
[i
].format
[j
+ 1], file
);
2628 /* Digits in the output pattern indicate that the
2629 corresponding RTX is to be output at that point. */
2630 else if (ISDIGIT (conversions
[i
].format
[j
]))
2632 rtx r
= patternr
[conversions
[i
].format
[j
] - '0'];
2633 switch (GET_CODE (r
))
2636 fprintf (file
, "%s",
2637 reg_name_with_mode (REGNO (r
), GET_MODE (r
)));
2646 int i
= (int) exact_log2 (v
);
2648 i
= (int) exact_log2 ((v
^ 0xffff) & 0xffff);
2650 i
= (int) exact_log2 ((v
^ 0xff) & 0xff);
2652 fprintf (file
, "%d", i
);
2656 /* Unsigned byte. */
2657 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2661 /* Unsigned word. */
2662 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2663 INTVAL (r
) & 0xffff);
2666 /* pushm and popm encode a register set into a single byte. */
2668 for (b
= 7; b
>= 0; b
--)
2669 if (INTVAL (r
) & (1 << b
))
2671 fprintf (file
, "%s%s", comma
, pushm_regs
[b
]);
2676 /* "Minus". Output -X */
2677 ival
= (-INTVAL (r
) & 0xffff);
2679 ival
= ival
- 0x10000;
2680 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2684 if (conversions
[i
].format
[j
+ 1] == '[' && ival
< 0)
2686 /* We can simulate negative displacements by
2687 taking advantage of address space
2688 wrapping when the offset can span the
2689 entire address range. */
2691 patternr
[conversions
[i
].format
[j
+ 2] - '0'];
2692 if (GET_CODE (base
) == REG
)
2693 switch (REGNO (base
))
2698 ival
= 0x1000000 + ival
;
2700 ival
= 0x10000 + ival
;
2704 ival
= 0x10000 + ival
;
2708 else if (code
== 'd' && ival
< 0 && j
== 0)
2709 /* The "mova" opcode is used to do addition by
2710 computing displacements, but again, we need
2711 displacements to be unsigned *if* they're
2712 the only component of the displacement
2713 (i.e. no "symbol-4" type displacement). */
2714 ival
= (TARGET_A24
? 0x1000000 : 0x10000) + ival
;
2716 if (conversions
[i
].format
[j
] == '0')
2718 /* More conversions to unsigned. */
2719 if (unsigned_const
== 2)
2721 if (unsigned_const
== 1)
2724 if (streq (conversions
[i
].pattern
, "mi")
2725 || streq (conversions
[i
].pattern
, "mmi"))
2727 /* Integers used as addresses are unsigned. */
2728 ival
&= (TARGET_A24
? 0xffffff : 0xffff);
2730 if (force_sign
&& ival
>= 0)
2732 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2737 /* We don't have const_double constants. If it
2738 happens, make it obvious. */
2739 fprintf (file
, "[const_double 0x%lx]",
2740 (unsigned long) CONST_DOUBLE_HIGH (r
));
2743 assemble_name (file
, XSTR (r
, 0));
2746 output_asm_label (r
);
2749 fprintf (stderr
, "don't know how to print this operand:");
2756 if (conversions
[i
].format
[j
] == 'z')
2758 /* Some addressing modes *must* have a displacement,
2759 so insert a zero here if needed. */
2761 for (k
= j
+ 1; conversions
[i
].format
[k
]; k
++)
2762 if (ISDIGIT (conversions
[i
].format
[k
]))
2764 rtx reg
= patternr
[conversions
[i
].format
[k
] - '0'];
2765 if (GET_CODE (reg
) == REG
2766 && (REGNO (reg
) == SB_REGNO
2767 || REGNO (reg
) == FB_REGNO
2768 || REGNO (reg
) == SP_REGNO
))
2773 /* Signed displacements off symbols need to have signs
2775 if (conversions
[i
].format
[j
] == '+'
2776 && (!code
|| code
== 'D' || code
== 'd')
2777 && ISDIGIT (conversions
[i
].format
[j
+ 1])
2778 && (GET_CODE (patternr
[conversions
[i
].format
[j
+ 1] - '0'])
2784 fputc (conversions
[i
].format
[j
], file
);
2788 if (!conversions
[i
].pattern
)
2790 fprintf (stderr
, "unconvertible operand %c `%s'", code
? code
: '-',
2793 fprintf (file
, "[%c.%s]", code
? code
: '-', pattern
);
2799 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2801 See m32c_print_operand above for descriptions of what these do. */
2803 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2804 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2807 m32c_print_operand_punct_valid_p (unsigned char c
)
2809 if (c
== '&' || c
== '!')
2815 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2817 #undef TARGET_PRINT_OPERAND_ADDRESS
2818 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2821 m32c_print_operand_address (FILE * stream
, rtx address
)
2823 if (GET_CODE (address
) == MEM
)
2824 address
= XEXP (address
, 0);
2826 /* cf: gcc.dg/asm-4.c. */
2827 gcc_assert (GET_CODE (address
) == REG
);
2829 m32c_print_operand (stream
, address
, 0);
2832 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2833 differently than general registers. */
2835 m32c_output_reg_push (FILE * s
, int regno
)
2837 if (regno
== FLG_REGNO
)
2838 fprintf (s
, "\tpushc\tflg\n");
2840 fprintf (s
, "\tpush.%c\t%s\n",
2841 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2844 /* Likewise for ASM_OUTPUT_REG_POP. */
2846 m32c_output_reg_pop (FILE * s
, int regno
)
2848 if (regno
== FLG_REGNO
)
2849 fprintf (s
, "\tpopc\tflg\n");
2851 fprintf (s
, "\tpop.%c\t%s\n",
2852 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2855 /* Defining target-specific uses of `__attribute__' */
2857 /* Used to simplify the logic below. Find the attributes wherever
2859 #define M32C_ATTRIBUTES(decl) \
2860 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2861 : DECL_ATTRIBUTES (decl) \
2862 ? (DECL_ATTRIBUTES (decl)) \
2863 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2865 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2867 interrupt_p (tree node ATTRIBUTE_UNUSED
)
2869 tree list
= M32C_ATTRIBUTES (node
);
2872 if (is_attribute_p ("interrupt", TREE_PURPOSE (list
)))
2874 list
= TREE_CHAIN (list
);
2876 return fast_interrupt_p (node
);
2879 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2881 bank_switch_p (tree node ATTRIBUTE_UNUSED
)
2883 tree list
= M32C_ATTRIBUTES (node
);
2886 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list
)))
2888 list
= TREE_CHAIN (list
);
2893 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2895 fast_interrupt_p (tree node ATTRIBUTE_UNUSED
)
2897 tree list
= M32C_ATTRIBUTES (node
);
2900 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list
)))
2902 list
= TREE_CHAIN (list
);
2908 interrupt_decl_handler (tree
* node ATTRIBUTE_UNUSED
,
2909 tree name ATTRIBUTE_UNUSED
,
2910 tree args ATTRIBUTE_UNUSED
,
2911 int flags ATTRIBUTE_UNUSED
,
2912 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2918 interrupt_type_handler (tree
* node ATTRIBUTE_UNUSED
,
2919 tree name ATTRIBUTE_UNUSED
,
2920 tree args ATTRIBUTE_UNUSED
,
2921 int flags ATTRIBUTE_UNUSED
,
2922 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2926 /* Returns TRUE if given tree has the "function_vector" attribute. */
2928 m32c_special_page_vector_p (tree func
)
2932 if (TREE_CODE (func
) != FUNCTION_DECL
)
2935 list
= M32C_ATTRIBUTES (func
);
2938 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2940 list
= TREE_CHAIN (list
);
2946 function_vector_handler (tree
* node ATTRIBUTE_UNUSED
,
2947 tree name ATTRIBUTE_UNUSED
,
2948 tree args ATTRIBUTE_UNUSED
,
2949 int flags ATTRIBUTE_UNUSED
,
2950 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2954 /* The attribute is not supported for R8C target. */
2955 warning (OPT_Wattributes
,
2956 "%qE attribute is not supported for R8C target",
2958 *no_add_attrs
= true;
2960 else if (TREE_CODE (*node
) != FUNCTION_DECL
)
2962 /* The attribute must be applied to functions only. */
2963 warning (OPT_Wattributes
,
2964 "%qE attribute applies only to functions",
2966 *no_add_attrs
= true;
2968 else if (TREE_CODE (TREE_VALUE (args
)) != INTEGER_CST
)
2970 /* The argument must be a constant integer. */
2971 warning (OPT_Wattributes
,
2972 "%qE attribute argument not an integer constant",
2974 *no_add_attrs
= true;
2976 else if (TREE_INT_CST_LOW (TREE_VALUE (args
)) < 18
2977 || TREE_INT_CST_LOW (TREE_VALUE (args
)) > 255)
2979 /* The argument value must be between 18 to 255. */
2980 warning (OPT_Wattributes
,
2981 "%qE attribute argument should be between 18 to 255",
2983 *no_add_attrs
= true;
2988 /* If the function is assigned the attribute 'function_vector', it
2989 returns the function vector number, otherwise returns zero. */
2991 current_function_special_page_vector (rtx x
)
2995 if ((GET_CODE(x
) == SYMBOL_REF
)
2996 && (SYMBOL_REF_FLAGS (x
) & SYMBOL_FLAG_FUNCVEC_FUNCTION
))
2999 tree t
= SYMBOL_REF_DECL (x
);
3001 if (TREE_CODE (t
) != FUNCTION_DECL
)
3004 list
= M32C_ATTRIBUTES (t
);
3007 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
3009 num
= TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list
)));
3013 list
= TREE_CHAIN (list
);
3022 #undef TARGET_ATTRIBUTE_TABLE
3023 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3024 static const struct attribute_spec m32c_attribute_table
[] = {
3025 {"interrupt", 0, 0, false, false, false, interrupt_decl_handler
,
3026 interrupt_type_handler
, false},
3027 {"bank_switch", 0, 0, false, false, false, interrupt_decl_handler
,
3028 interrupt_type_handler
, false},
3029 {"fast_interrupt", 0, 0, false, false, false, interrupt_decl_handler
,
3030 interrupt_type_handler
, false},
3031 {"function_vector", 1, 1, true, false, false,function_vector_handler
, NULL
,
3033 {0, 0, 0, 0, 0, 0, 0, 0, false}
3036 #undef TARGET_COMP_TYPE_ATTRIBUTES
3037 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3039 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED
,
3040 const_tree type2 ATTRIBUTE_UNUSED
)
3042 /* 0=incompatible 1=compatible 2=warning */
3046 #undef TARGET_INSERT_ATTRIBUTES
3047 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3049 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED
,
3050 tree
* attr_ptr ATTRIBUTE_UNUSED
)
3053 /* See if we need to make #pragma address variables volatile. */
3055 if (TREE_CODE (node
) == VAR_DECL
)
3057 const char *name
= IDENTIFIER_POINTER (DECL_NAME (node
));
3058 if (m32c_get_pragma_address (name
, &addr
))
3060 TREE_THIS_VOLATILE (node
) = true;
3066 struct pragma_traits
: default_hashmap_traits
3068 static hashval_t
hash (const char *str
) { return htab_hash_string (str
); }
3070 equal_keys (const char *a
, const char *b
)
3072 return !strcmp (a
, b
);
3076 /* Hash table of pragma info. */
3077 static GTY(()) hash_map
<const char *, unsigned, pragma_traits
> *pragma_htab
;
3080 m32c_note_pragma_address (const char *varname
, unsigned address
)
3084 = hash_map
<const char *, unsigned, pragma_traits
>::create_ggc (31);
3086 const char *name
= ggc_strdup (varname
);
3087 unsigned int *slot
= &pragma_htab
->get_or_insert (name
);
3092 m32c_get_pragma_address (const char *varname
, unsigned *address
)
3097 unsigned int *slot
= pragma_htab
->get (varname
);
3107 m32c_output_aligned_common (FILE *stream
, tree decl ATTRIBUTE_UNUSED
,
3109 int size
, int align
, int global
)
3113 if (m32c_get_pragma_address (name
, &address
))
3115 /* We never output these as global. */
3116 assemble_name (stream
, name
);
3117 fprintf (stream
, " = 0x%04x\n", address
);
3122 fprintf (stream
, "\t.local\t");
3123 assemble_name (stream
, name
);
3124 fprintf (stream
, "\n");
3126 fprintf (stream
, "\t.comm\t");
3127 assemble_name (stream
, name
);
3128 fprintf (stream
, ",%u,%u\n", size
, align
/ BITS_PER_UNIT
);
3133 /* This is a list of legal subregs of hard regs. */
3134 static const struct {
3135 unsigned char outer_mode_size
;
3136 unsigned char inner_mode_size
;
3137 unsigned char byte_mask
;
3138 unsigned char legal_when
;
3140 } legal_subregs
[] = {
3141 {1, 2, 0x03, 1, R0_REGNO
}, /* r0h r0l */
3142 {1, 2, 0x03, 1, R1_REGNO
}, /* r1h r1l */
3143 {1, 2, 0x01, 1, A0_REGNO
},
3144 {1, 2, 0x01, 1, A1_REGNO
},
3146 {1, 4, 0x01, 1, A0_REGNO
},
3147 {1, 4, 0x01, 1, A1_REGNO
},
3149 {2, 4, 0x05, 1, R0_REGNO
}, /* r2 r0 */
3150 {2, 4, 0x05, 1, R1_REGNO
}, /* r3 r1 */
3151 {2, 4, 0x05, 16, A0_REGNO
}, /* a1 a0 */
3152 {2, 4, 0x01, 24, A0_REGNO
}, /* a1 a0 */
3153 {2, 4, 0x01, 24, A1_REGNO
}, /* a1 a0 */
3155 {4, 8, 0x55, 1, R0_REGNO
}, /* r3 r1 r2 r0 */
3158 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3159 support. We also bail on MEMs with illegal addresses. */
3161 m32c_illegal_subreg_p (rtx op
)
3165 machine_mode src_mode
, dest_mode
;
3167 if (GET_CODE (op
) == MEM
3168 && ! m32c_legitimate_address_p (Pmode
, XEXP (op
, 0), false))
3173 if (GET_CODE (op
) != SUBREG
)
3176 dest_mode
= GET_MODE (op
);
3177 offset
= SUBREG_BYTE (op
);
3178 op
= SUBREG_REG (op
);
3179 src_mode
= GET_MODE (op
);
3181 if (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (src_mode
))
3183 if (GET_CODE (op
) != REG
)
3185 if (REGNO (op
) >= MEM0_REGNO
)
3188 offset
= (1 << offset
);
3190 for (i
= 0; i
< ARRAY_SIZE (legal_subregs
); i
++)
3191 if (legal_subregs
[i
].outer_mode_size
== GET_MODE_SIZE (dest_mode
)
3192 && legal_subregs
[i
].regno
== REGNO (op
)
3193 && legal_subregs
[i
].inner_mode_size
== GET_MODE_SIZE (src_mode
)
3194 && legal_subregs
[i
].byte_mask
& offset
)
3196 switch (legal_subregs
[i
].legal_when
)
3213 /* Returns TRUE if we support a move between the first two operands.
3214 At the moment, we just want to discourage mem to mem moves until
3215 after reload, because reload has a hard time with our limited
3216 number of address registers, and we can get into a situation where
3217 we need three of them when we only have two. */
3219 m32c_mov_ok (rtx
* operands
, machine_mode mode ATTRIBUTE_UNUSED
)
3221 rtx op0
= operands
[0];
3222 rtx op1
= operands
[1];
3227 #define DEBUG_MOV_OK 0
3229 fprintf (stderr
, "m32c_mov_ok %s\n", mode_name
[mode
]);
3234 if (GET_CODE (op0
) == SUBREG
)
3235 op0
= XEXP (op0
, 0);
3236 if (GET_CODE (op1
) == SUBREG
)
3237 op1
= XEXP (op1
, 0);
3239 if (GET_CODE (op0
) == MEM
3240 && GET_CODE (op1
) == MEM
3241 && ! reload_completed
)
3244 fprintf (stderr
, " - no, mem to mem\n");
3250 fprintf (stderr
, " - ok\n");
3255 /* Returns TRUE if two consecutive HImode mov instructions, generated
3256 for moving an immediate double data to a double data type variable
3257 location, can be combined into single SImode mov instruction. */
3259 m32c_immd_dbl_mov (rtx
* operands ATTRIBUTE_UNUSED
,
3260 machine_mode mode ATTRIBUTE_UNUSED
)
3262 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3269 /* Subregs are non-orthogonal for us, because our registers are all
3272 m32c_subreg (machine_mode outer
,
3273 rtx x
, machine_mode inner
, int byte
)
3277 /* Converting MEMs to different types that are the same size, we
3278 just rewrite them. */
3279 if (GET_CODE (x
) == SUBREG
3280 && SUBREG_BYTE (x
) == 0
3281 && GET_CODE (SUBREG_REG (x
)) == MEM
3282 && (GET_MODE_SIZE (GET_MODE (x
))
3283 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
3286 x
= gen_rtx_MEM (GET_MODE (x
), XEXP (SUBREG_REG (x
), 0));
3287 MEM_COPY_ATTRIBUTES (x
, SUBREG_REG (oldx
));
3290 /* Push/pop get done as smaller push/pops. */
3291 if (GET_CODE (x
) == MEM
3292 && (GET_CODE (XEXP (x
, 0)) == PRE_DEC
3293 || GET_CODE (XEXP (x
, 0)) == POST_INC
))
3294 return gen_rtx_MEM (outer
, XEXP (x
, 0));
3295 if (GET_CODE (x
) == SUBREG
3296 && GET_CODE (XEXP (x
, 0)) == MEM
3297 && (GET_CODE (XEXP (XEXP (x
, 0), 0)) == PRE_DEC
3298 || GET_CODE (XEXP (XEXP (x
, 0), 0)) == POST_INC
))
3299 return gen_rtx_MEM (outer
, XEXP (XEXP (x
, 0), 0));
3301 if (GET_CODE (x
) != REG
)
3303 rtx r
= simplify_gen_subreg (outer
, x
, inner
, byte
);
3304 if (GET_CODE (r
) == SUBREG
3305 && GET_CODE (x
) == MEM
3306 && MEM_VOLATILE_P (x
))
3308 /* Volatile MEMs don't get simplified, but we need them to
3309 be. We are little endian, so the subreg byte is the
3311 r
= adjust_address_nv (x
, outer
, byte
);
3317 if (r
>= FIRST_PSEUDO_REGISTER
|| r
== AP_REGNO
)
3318 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3320 if (IS_MEM_REGNO (r
))
3321 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3323 /* This is where the complexities of our register layout are
3327 else if (outer
== HImode
)
3329 if (r
== R0_REGNO
&& byte
== 2)
3331 else if (r
== R0_REGNO
&& byte
== 4)
3333 else if (r
== R0_REGNO
&& byte
== 6)
3335 else if (r
== R1_REGNO
&& byte
== 2)
3337 else if (r
== A0_REGNO
&& byte
== 2)
3340 else if (outer
== SImode
)
3342 if (r
== R0_REGNO
&& byte
== 0)
3344 else if (r
== R0_REGNO
&& byte
== 4)
3349 fprintf (stderr
, "m32c_subreg %s %s %d\n",
3350 mode_name
[outer
], mode_name
[inner
], byte
);
3354 return gen_rtx_REG (outer
, nr
);
3357 /* Used to emit move instructions. We split some moves,
3358 and avoid mem-mem moves. */
3360 m32c_prepare_move (rtx
* operands
, machine_mode mode
)
3362 if (far_addr_space_p (operands
[0])
3363 && CONSTANT_P (operands
[1]))
3365 operands
[1] = force_reg (GET_MODE (operands
[0]), operands
[1]);
3367 if (TARGET_A16
&& mode
== PSImode
)
3368 return m32c_split_move (operands
, mode
, 1);
3369 if ((GET_CODE (operands
[0]) == MEM
)
3370 && (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
))
3372 rtx pmv
= XEXP (operands
[0], 0);
3373 rtx dest_reg
= XEXP (pmv
, 0);
3374 rtx dest_mod
= XEXP (pmv
, 1);
3376 emit_insn (gen_rtx_SET (Pmode
, dest_reg
, dest_mod
));
3377 operands
[0] = gen_rtx_MEM (mode
, dest_reg
);
3379 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3380 operands
[1] = copy_to_mode_reg (mode
, operands
[1]);
3384 #define DEBUG_SPLIT 0
3386 /* Returns TRUE if the given PSImode move should be split. We split
3387 for all r8c/m16c moves, since it doesn't support them, and for
3388 POP.L as we can only *push* SImode. */
3390 m32c_split_psi_p (rtx
* operands
)
3393 fprintf (stderr
, "\nm32c_split_psi_p\n");
3394 debug_rtx (operands
[0]);
3395 debug_rtx (operands
[1]);
3400 fprintf (stderr
, "yes, A16\n");
3404 if (GET_CODE (operands
[1]) == MEM
3405 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3408 fprintf (stderr
, "yes, pop.l\n");
3413 fprintf (stderr
, "no, default\n");
3418 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3419 (define_expand), 1 if it is not optional (define_insn_and_split),
3420 and 3 for define_split (alternate api). */
3422 m32c_split_move (rtx
* operands
, machine_mode mode
, int split_all
)
3425 int parts
, si
, di
, rev
= 0;
3426 int rv
= 0, opi
= 2;
3427 machine_mode submode
= HImode
;
3428 rtx
*ops
, local_ops
[10];
3430 /* define_split modifies the existing operands, but the other two
3431 emit new insns. OPS is where we store the operand pairs, which
3442 /* Before splitting mem-mem moves, force one operand into a
3444 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3447 fprintf (stderr
, "force_reg...\n");
3448 debug_rtx (operands
[1]);
3450 operands
[1] = force_reg (mode
, operands
[1]);
3452 debug_rtx (operands
[1]);
3459 fprintf (stderr
, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3461 debug_rtx (operands
[0]);
3462 debug_rtx (operands
[1]);
3465 /* Note that split_all is not used to select the api after this
3466 point, so it's safe to set it to 3 even with define_insn. */
3467 /* None of the chips can move SI operands to sp-relative addresses,
3468 so we always split those. */
3469 if (satisfies_constraint_Ss (operands
[0]))
3473 && (far_addr_space_p (operands
[0])
3474 || far_addr_space_p (operands
[1])))
3477 /* We don't need to split these. */
3480 && (mode
== SImode
|| mode
== PSImode
)
3481 && !(GET_CODE (operands
[1]) == MEM
3482 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
))
3485 /* First, enumerate the subregs we'll be dealing with. */
3486 for (si
= 0; si
< parts
; si
++)
3489 m32c_subreg (submode
, operands
[0], mode
,
3490 si
* GET_MODE_SIZE (submode
));
3492 m32c_subreg (submode
, operands
[1], mode
,
3493 si
* GET_MODE_SIZE (submode
));
3496 /* Split pushes by emitting a sequence of smaller pushes. */
3497 if (GET_CODE (d
[0]) == MEM
&& GET_CODE (XEXP (d
[0], 0)) == PRE_DEC
)
3499 for (si
= parts
- 1; si
>= 0; si
--)
3501 ops
[opi
++] = gen_rtx_MEM (submode
,
3502 gen_rtx_PRE_DEC (Pmode
,
3510 /* Likewise for pops. */
3511 else if (GET_CODE (s
[0]) == MEM
&& GET_CODE (XEXP (s
[0], 0)) == POST_INC
)
3513 for (di
= 0; di
< parts
; di
++)
3516 ops
[opi
++] = gen_rtx_MEM (submode
,
3517 gen_rtx_POST_INC (Pmode
,
3525 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3526 for (di
= 0; di
< parts
- 1; di
++)
3527 for (si
= di
+ 1; si
< parts
; si
++)
3528 if (reg_mentioned_p (d
[di
], s
[si
]))
3532 for (si
= 0; si
< parts
; si
++)
3538 for (si
= parts
- 1; si
>= 0; si
--)
3545 /* Now emit any moves we may have accumulated. */
3546 if (rv
&& split_all
!= 3)
3549 for (i
= 2; i
< opi
; i
+= 2)
3550 emit_move_insn (ops
[i
], ops
[i
+ 1]);
3555 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3556 the like. For the R8C they expect one of the addresses to be in
3557 R1L:An so we need to arrange for that. Otherwise, it's just a
3558 matter of picking out the operands we want and emitting the right
3559 pattern for them. All these expanders, which correspond to
3560 patterns in blkmov.md, must return nonzero if they expand the insn,
3561 or zero if they should FAIL. */
3563 /* This is a memset() opcode. All operands are implied, so we need to
3564 arrange for them to be in the right registers. The opcode wants
3565 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3566 the count (HI), and $2 the value (QI). */
3568 m32c_expand_setmemhi(rtx
*operands
)
3570 rtx desta
, count
, val
;
3573 desta
= XEXP (operands
[0], 0);
3574 count
= operands
[1];
3577 desto
= gen_reg_rtx (Pmode
);
3578 counto
= gen_reg_rtx (HImode
);
3580 if (GET_CODE (desta
) != REG
3581 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3582 desta
= copy_to_mode_reg (Pmode
, desta
);
3584 /* This looks like an arbitrary restriction, but this is by far the
3585 most common case. For counts 8..14 this actually results in
3586 smaller code with no speed penalty because the half-sized
3587 constant can be loaded with a shorter opcode. */
3588 if (GET_CODE (count
) == CONST_INT
3589 && GET_CODE (val
) == CONST_INT
3590 && ! (INTVAL (count
) & 1)
3591 && (INTVAL (count
) > 1)
3592 && (INTVAL (val
) <= 7 && INTVAL (val
) >= -8))
3594 unsigned v
= INTVAL (val
) & 0xff;
3596 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3597 val
= copy_to_mode_reg (HImode
, GEN_INT (v
));
3599 emit_insn (gen_setmemhi_whi_op (desto
, counto
, val
, desta
, count
));
3601 emit_insn (gen_setmemhi_wpsi_op (desto
, counto
, val
, desta
, count
));
3605 /* This is the generalized memset() case. */
3606 if (GET_CODE (val
) != REG
3607 || REGNO (val
) < FIRST_PSEUDO_REGISTER
)
3608 val
= copy_to_mode_reg (QImode
, val
);
3610 if (GET_CODE (count
) != REG
3611 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3612 count
= copy_to_mode_reg (HImode
, count
);
3615 emit_insn (gen_setmemhi_bhi_op (desto
, counto
, val
, desta
, count
));
3617 emit_insn (gen_setmemhi_bpsi_op (desto
, counto
, val
, desta
, count
));
3622 /* This is a memcpy() opcode. All operands are implied, so we need to
3623 arrange for them to be in the right registers. The opcode wants
3624 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3625 is the source (MEM:BLK), and $2 the count (HI). */
3627 m32c_expand_movmemhi(rtx
*operands
)
3629 rtx desta
, srca
, count
;
3630 rtx desto
, srco
, counto
;
3632 desta
= XEXP (operands
[0], 0);
3633 srca
= XEXP (operands
[1], 0);
3634 count
= operands
[2];
3636 desto
= gen_reg_rtx (Pmode
);
3637 srco
= gen_reg_rtx (Pmode
);
3638 counto
= gen_reg_rtx (HImode
);
3640 if (GET_CODE (desta
) != REG
3641 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3642 desta
= copy_to_mode_reg (Pmode
, desta
);
3644 if (GET_CODE (srca
) != REG
3645 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3646 srca
= copy_to_mode_reg (Pmode
, srca
);
3648 /* Similar to setmem, but we don't need to check the value. */
3649 if (GET_CODE (count
) == CONST_INT
3650 && ! (INTVAL (count
) & 1)
3651 && (INTVAL (count
) > 1))
3653 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3655 emit_insn (gen_movmemhi_whi_op (desto
, srco
, counto
, desta
, srca
, count
));
3657 emit_insn (gen_movmemhi_wpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3661 /* This is the generalized memset() case. */
3662 if (GET_CODE (count
) != REG
3663 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3664 count
= copy_to_mode_reg (HImode
, count
);
3667 emit_insn (gen_movmemhi_bhi_op (desto
, srco
, counto
, desta
, srca
, count
));
3669 emit_insn (gen_movmemhi_bpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3674 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3675 the copy, which should point to the NUL at the end of the string,
3676 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3677 Since our opcode leaves the destination pointing *after* the NUL,
3678 we must emit an adjustment. */
3680 m32c_expand_movstr(rtx
*operands
)
3685 desta
= XEXP (operands
[1], 0);
3686 srca
= XEXP (operands
[2], 0);
3688 desto
= gen_reg_rtx (Pmode
);
3689 srco
= gen_reg_rtx (Pmode
);
3691 if (GET_CODE (desta
) != REG
3692 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3693 desta
= copy_to_mode_reg (Pmode
, desta
);
3695 if (GET_CODE (srca
) != REG
3696 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3697 srca
= copy_to_mode_reg (Pmode
, srca
);
3699 emit_insn (gen_movstr_op (desto
, srco
, desta
, srca
));
3700 /* desto ends up being a1, which allows this type of add through MOVA. */
3701 emit_insn (gen_addpsi3 (operands
[0], desto
, GEN_INT (-1)));
3706 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3707 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3708 $2 is the other (MEM:BLK). We must do the comparison, and then
3709 convert the flags to a signed integer result. */
3711 m32c_expand_cmpstr(rtx
*operands
)
3715 src1a
= XEXP (operands
[1], 0);
3716 src2a
= XEXP (operands
[2], 0);
3718 if (GET_CODE (src1a
) != REG
3719 || REGNO (src1a
) < FIRST_PSEUDO_REGISTER
)
3720 src1a
= copy_to_mode_reg (Pmode
, src1a
);
3722 if (GET_CODE (src2a
) != REG
3723 || REGNO (src2a
) < FIRST_PSEUDO_REGISTER
)
3724 src2a
= copy_to_mode_reg (Pmode
, src2a
);
3726 emit_insn (gen_cmpstrhi_op (src1a
, src2a
, src1a
, src2a
));
3727 emit_insn (gen_cond_to_int (operands
[0]));
3733 typedef rtx (*shift_gen_func
)(rtx
, rtx
, rtx
);
3735 static shift_gen_func
3736 shift_gen_func_for (int mode
, int code
)
3738 #define GFF(m,c,f) if (mode == m && code == c) return f
3739 GFF(QImode
, ASHIFT
, gen_ashlqi3_i
);
3740 GFF(QImode
, ASHIFTRT
, gen_ashrqi3_i
);
3741 GFF(QImode
, LSHIFTRT
, gen_lshrqi3_i
);
3742 GFF(HImode
, ASHIFT
, gen_ashlhi3_i
);
3743 GFF(HImode
, ASHIFTRT
, gen_ashrhi3_i
);
3744 GFF(HImode
, LSHIFTRT
, gen_lshrhi3_i
);
3745 GFF(PSImode
, ASHIFT
, gen_ashlpsi3_i
);
3746 GFF(PSImode
, ASHIFTRT
, gen_ashrpsi3_i
);
3747 GFF(PSImode
, LSHIFTRT
, gen_lshrpsi3_i
);
3748 GFF(SImode
, ASHIFT
, TARGET_A16
? gen_ashlsi3_16
: gen_ashlsi3_24
);
3749 GFF(SImode
, ASHIFTRT
, TARGET_A16
? gen_ashrsi3_16
: gen_ashrsi3_24
);
3750 GFF(SImode
, LSHIFTRT
, TARGET_A16
? gen_lshrsi3_16
: gen_lshrsi3_24
);
3755 /* The m32c only has one shift, but it takes a signed count. GCC
3756 doesn't want this, so we fake it by negating any shift count when
3757 we're pretending to shift the other way. Also, the shift count is
3758 limited to -8..8. It's slightly better to use two shifts for 9..15
3759 than to load the count into r1h, so we do that too. */
3761 m32c_prepare_shift (rtx
* operands
, int scale
, int shift_code
)
3763 machine_mode mode
= GET_MODE (operands
[0]);
3764 shift_gen_func func
= shift_gen_func_for (mode
, shift_code
);
3767 if (GET_CODE (operands
[2]) == CONST_INT
)
3769 int maxc
= TARGET_A24
&& (mode
== PSImode
|| mode
== SImode
) ? 32 : 8;
3770 int count
= INTVAL (operands
[2]) * scale
;
3772 while (count
> maxc
)
3774 temp
= gen_reg_rtx (mode
);
3775 emit_insn (func (temp
, operands
[1], GEN_INT (maxc
)));
3779 while (count
< -maxc
)
3781 temp
= gen_reg_rtx (mode
);
3782 emit_insn (func (temp
, operands
[1], GEN_INT (-maxc
)));
3786 emit_insn (func (operands
[0], operands
[1], GEN_INT (count
)));
3790 temp
= gen_reg_rtx (QImode
);
3792 /* The pattern has a NEG that corresponds to this. */
3793 emit_move_insn (temp
, gen_rtx_NEG (QImode
, operands
[2]));
3794 else if (TARGET_A16
&& mode
== SImode
)
3795 /* We do this because the code below may modify this, we don't
3796 want to modify the origin of this value. */
3797 emit_move_insn (temp
, operands
[2]);
3799 /* We'll only use it for the shift, no point emitting a move. */
3802 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 4)
3804 /* The m16c has a limit of -16..16 for SI shifts, even when the
3805 shift count is in a register. Since there are so many targets
3806 of these shifts, it's better to expand the RTL here than to
3807 call a helper function.
3809 The resulting code looks something like this:
3821 We take advantage of the fact that "negative" shifts are
3822 undefined to skip one of the comparisons. */
3828 emit_move_insn (operands
[0], operands
[1]);
3831 label
= gen_label_rtx ();
3832 LABEL_NUSES (label
) ++;
3834 tempvar
= gen_reg_rtx (mode
);
3836 if (shift_code
== ASHIFT
)
3838 /* This is a left shift. We only need check positive counts. */
3839 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode
, 0, 0),
3840 count
, GEN_INT (16), label
));
3841 emit_insn (func (tempvar
, operands
[0], GEN_INT (8)));
3842 emit_insn (func (operands
[0], tempvar
, GEN_INT (8)));
3843 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (-16)));
3844 emit_label_after (label
, insn
);
3848 /* This is a right shift. We only need check negative counts. */
3849 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode
, 0, 0),
3850 count
, GEN_INT (-16), label
));
3851 emit_insn (func (tempvar
, operands
[0], GEN_INT (-8)));
3852 emit_insn (func (operands
[0], tempvar
, GEN_INT (-8)));
3853 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (16)));
3854 emit_label_after (label
, insn
);
3856 operands
[1] = operands
[0];
3857 emit_insn (func (operands
[0], operands
[0], count
));
3865 /* The m32c has a limited range of operations that work on PSImode
3866 values; we have to expand to SI, do the math, and truncate back to
3867 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3870 m32c_expand_neg_mulpsi3 (rtx
* operands
)
3872 /* operands: a = b * i */
3873 rtx temp1
; /* b as SI */
3874 rtx scale
/* i as SI */;
3875 rtx temp2
; /* a*b as SI */
3877 temp1
= gen_reg_rtx (SImode
);
3878 temp2
= gen_reg_rtx (SImode
);
3879 if (GET_CODE (operands
[2]) != CONST_INT
)
3881 scale
= gen_reg_rtx (SImode
);
3882 emit_insn (gen_zero_extendpsisi2 (scale
, operands
[2]));
3885 scale
= copy_to_mode_reg (SImode
, operands
[2]);
3887 emit_insn (gen_zero_extendpsisi2 (temp1
, operands
[1]));
3888 temp2
= expand_simple_binop (SImode
, MULT
, temp1
, scale
, temp2
, 1, OPTAB_LIB
);
3889 emit_insn (gen_truncsipsi2 (operands
[0], temp2
));
3892 /* Pattern Output Functions */
3895 m32c_expand_movcc (rtx
*operands
)
3897 rtx rel
= operands
[1];
3899 if (GET_CODE (rel
) != EQ
&& GET_CODE (rel
) != NE
)
3901 if (GET_CODE (operands
[2]) != CONST_INT
3902 || GET_CODE (operands
[3]) != CONST_INT
)
3904 if (GET_CODE (rel
) == NE
)
3906 rtx tmp
= operands
[2];
3907 operands
[2] = operands
[3];
3909 rel
= gen_rtx_EQ (GET_MODE (rel
), XEXP (rel
, 0), XEXP (rel
, 1));
3912 emit_move_insn (operands
[0],
3913 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3920 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3922 m32c_expand_insv (rtx
*operands
)
3927 if (INTVAL (operands
[1]) != 1)
3930 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3931 if (GET_CODE (operands
[3]) != CONST_INT
)
3933 if (INTVAL (operands
[3]) != 0
3934 && INTVAL (operands
[3]) != 1
3935 && INTVAL (operands
[3]) != -1)
3938 mask
= 1 << INTVAL (operands
[2]);
3941 if (GET_CODE (op0
) == SUBREG
3942 && SUBREG_BYTE (op0
) == 0)
3944 rtx sub
= SUBREG_REG (op0
);
3945 if (GET_MODE (sub
) == HImode
|| GET_MODE (sub
) == QImode
)
3949 if (!can_create_pseudo_p ()
3950 || (GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
)))
3954 src0
= gen_reg_rtx (GET_MODE (op0
));
3955 emit_move_insn (src0
, op0
);
3958 if (GET_MODE (op0
) == HImode
3959 && INTVAL (operands
[2]) >= 8
3960 && GET_CODE (op0
) == MEM
)
3962 /* We are little endian. */
3963 rtx new_mem
= gen_rtx_MEM (QImode
, plus_constant (Pmode
,
3965 MEM_COPY_ATTRIBUTES (new_mem
, op0
);
3969 /* First, we generate a mask with the correct polarity. If we are
3970 storing a zero, we want an AND mask, so invert it. */
3971 if (INTVAL (operands
[3]) == 0)
3973 /* Storing a zero, use an AND mask */
3974 if (GET_MODE (op0
) == HImode
)
3979 /* Now we need to properly sign-extend the mask in case we need to
3980 fall back to an AND or OR opcode. */
3981 if (GET_MODE (op0
) == HImode
)
3992 switch ( (INTVAL (operands
[3]) ? 4 : 0)
3993 + ((GET_MODE (op0
) == HImode
) ? 2 : 0)
3994 + (TARGET_A24
? 1 : 0))
3996 case 0: p
= gen_andqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3997 case 1: p
= gen_andqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3998 case 2: p
= gen_andhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3999 case 3: p
= gen_andhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4000 case 4: p
= gen_iorqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
4001 case 5: p
= gen_iorqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4002 case 6: p
= gen_iorhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
4003 case 7: p
= gen_iorhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4004 default: p
= NULL_RTX
; break; /* Not reached, but silences a warning. */
4012 m32c_scc_pattern(rtx
*operands
, RTX_CODE code
)
4014 static char buf
[30];
4015 if (GET_CODE (operands
[0]) == REG
4016 && REGNO (operands
[0]) == R0_REGNO
)
4019 return "stzx\t#1,#0,r0l";
4021 return "stzx\t#0,#1,r0l";
4023 sprintf(buf
, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code
));
4027 /* Encode symbol attributes of a SYMBOL_REF into its
4028 SYMBOL_REF_FLAGS. */
4030 m32c_encode_section_info (tree decl
, rtx rtl
, int first
)
4032 int extra_flags
= 0;
4034 default_encode_section_info (decl
, rtl
, first
);
4035 if (TREE_CODE (decl
) == FUNCTION_DECL
4036 && m32c_special_page_vector_p (decl
))
4038 extra_flags
= SYMBOL_FLAG_FUNCVEC_FUNCTION
;
4041 SYMBOL_REF_FLAGS (XEXP (rtl
, 0)) |= extra_flags
;
4044 /* Returns TRUE if the current function is a leaf, and thus we can
4045 determine which registers an interrupt function really needs to
4046 save. The logic below is mostly about finding the insn sequence
4047 that's the function, versus any sequence that might be open for the
4050 m32c_leaf_function_p (void)
4052 rtx_insn
*saved_first
, *saved_last
;
4053 struct sequence_stack
*seq
;
4056 saved_first
= crtl
->emit
.x_first_insn
;
4057 saved_last
= crtl
->emit
.x_last_insn
;
4058 for (seq
= crtl
->emit
.sequence_stack
; seq
&& seq
->next
; seq
= seq
->next
)
4062 crtl
->emit
.x_first_insn
= seq
->first
;
4063 crtl
->emit
.x_last_insn
= seq
->last
;
4066 rv
= leaf_function_p ();
4068 crtl
->emit
.x_first_insn
= saved_first
;
4069 crtl
->emit
.x_last_insn
= saved_last
;
4073 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4074 opcodes. If the function doesn't need the frame base or stack
4075 pointer, it can use the simpler RTS opcode. */
4077 m32c_function_needs_enter (void)
4080 struct sequence_stack
*seq
;
4081 rtx sp
= gen_rtx_REG (Pmode
, SP_REGNO
);
4082 rtx fb
= gen_rtx_REG (Pmode
, FB_REGNO
);
4084 insn
= get_insns ();
4085 for (seq
= crtl
->emit
.sequence_stack
;
4087 insn
= seq
->first
, seq
= seq
->next
);
4091 if (reg_mentioned_p (sp
, insn
))
4093 if (reg_mentioned_p (fb
, insn
))
4095 insn
= NEXT_INSN (insn
);
4100 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4101 frame-related. Return PAR.
4103 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4104 PARALLEL rtx other than the first if they do not have the
4105 FRAME_RELATED flag set on them. So this function is handy for
4106 marking up 'enter' instructions. */
4108 m32c_all_frame_related (rtx par
)
4110 int len
= XVECLEN (par
, 0);
4113 for (i
= 0; i
< len
; i
++)
4114 F (XVECEXP (par
, 0, i
));
4119 /* Emits the prologue. See the frame layout comment earlier in this
4120 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4121 that we manually update sp. */
4123 m32c_emit_prologue (void)
4125 int frame_size
, extra_frame_size
= 0, reg_save_size
;
4126 int complex_prologue
= 0;
4128 cfun
->machine
->is_leaf
= m32c_leaf_function_p ();
4129 if (interrupt_p (cfun
->decl
))
4131 cfun
->machine
->is_interrupt
= 1;
4132 complex_prologue
= 1;
4134 else if (bank_switch_p (cfun
->decl
))
4135 warning (OPT_Wattributes
,
4136 "%<bank_switch%> has no effect on non-interrupt functions");
4138 reg_save_size
= m32c_pushm_popm (PP_justcount
);
4140 if (interrupt_p (cfun
->decl
))
4142 if (bank_switch_p (cfun
->decl
))
4143 emit_insn (gen_fset_b ());
4144 else if (cfun
->machine
->intr_pushm
)
4145 emit_insn (gen_pushm (GEN_INT (cfun
->machine
->intr_pushm
)));
4149 m32c_initial_elimination_offset (FB_REGNO
, SP_REGNO
) - reg_save_size
;
4151 && !m32c_function_needs_enter ())
4152 cfun
->machine
->use_rts
= 1;
4154 if (frame_size
> 254)
4156 extra_frame_size
= frame_size
- 254;
4159 if (cfun
->machine
->use_rts
== 0)
4160 F (emit_insn (m32c_all_frame_related
4162 ? gen_prologue_enter_16 (GEN_INT (frame_size
+ 2))
4163 : gen_prologue_enter_24 (GEN_INT (frame_size
+ 4)))));
4165 if (extra_frame_size
)
4167 complex_prologue
= 1;
4169 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode
, SP_REGNO
),
4170 gen_rtx_REG (HImode
, SP_REGNO
),
4171 GEN_INT (-extra_frame_size
))));
4173 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode
, SP_REGNO
),
4174 gen_rtx_REG (PSImode
, SP_REGNO
),
4175 GEN_INT (-extra_frame_size
))));
4178 complex_prologue
+= m32c_pushm_popm (PP_pushm
);
4180 /* This just emits a comment into the .s file for debugging. */
4181 if (complex_prologue
)
4182 emit_insn (gen_prologue_end ());
4185 /* Likewise, for the epilogue. The only exception is that, for
4186 interrupts, we must manually unwind the frame as the REIT opcode
4189 m32c_emit_epilogue (void)
4191 int popm_count
= m32c_pushm_popm (PP_justcount
);
4193 /* This just emits a comment into the .s file for debugging. */
4194 if (popm_count
> 0 || cfun
->machine
->is_interrupt
)
4195 emit_insn (gen_epilogue_start ());
4198 m32c_pushm_popm (PP_popm
);
4200 if (cfun
->machine
->is_interrupt
)
4202 machine_mode spmode
= TARGET_A16
? HImode
: PSImode
;
4204 /* REIT clears B flag and restores $fp for us, but we still
4205 have to fix up the stack. USE_RTS just means we didn't
4207 if (!cfun
->machine
->use_rts
)
4209 emit_move_insn (gen_rtx_REG (spmode
, A0_REGNO
),
4210 gen_rtx_REG (spmode
, FP_REGNO
));
4211 emit_move_insn (gen_rtx_REG (spmode
, SP_REGNO
),
4212 gen_rtx_REG (spmode
, A0_REGNO
));
4213 /* We can't just add this to the POPM because it would be in
4214 the wrong order, and wouldn't fix the stack if we're bank
4217 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, FP_REGNO
)));
4219 emit_insn (gen_poppsi (gen_rtx_REG (PSImode
, FP_REGNO
)));
4221 if (!bank_switch_p (cfun
->decl
) && cfun
->machine
->intr_pushm
)
4222 emit_insn (gen_popm (GEN_INT (cfun
->machine
->intr_pushm
)));
4224 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4225 generated only for M32C/M32CM targets (generate the REIT
4226 instruction otherwise). */
4227 if (fast_interrupt_p (cfun
->decl
))
4229 /* Check if fast_attribute is set for M32C or M32CM. */
4232 emit_jump_insn (gen_epilogue_freit ());
4234 /* If fast_interrupt attribute is set for an R8C or M16C
4235 target ignore this attribute and generated REIT
4239 warning (OPT_Wattributes
,
4240 "%<fast_interrupt%> attribute directive ignored");
4241 emit_jump_insn (gen_epilogue_reit_16 ());
4244 else if (TARGET_A16
)
4245 emit_jump_insn (gen_epilogue_reit_16 ());
4247 emit_jump_insn (gen_epilogue_reit_24 ());
4249 else if (cfun
->machine
->use_rts
)
4250 emit_jump_insn (gen_epilogue_rts ());
4251 else if (TARGET_A16
)
4252 emit_jump_insn (gen_epilogue_exitd_16 ());
4254 emit_jump_insn (gen_epilogue_exitd_24 ());
4258 m32c_emit_eh_epilogue (rtx ret_addr
)
4260 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4261 return to. We have to fudge the stack, pop everything, pop SP
4262 (fudged), and return (fudged). This is actually easier to do in
4263 assembler, so punt to libgcc. */
4264 emit_jump_insn (gen_eh_epilogue (ret_addr
, cfun
->machine
->eh_stack_adjust
));
4265 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4268 /* Indicate which flags must be properly set for a given conditional. */
4270 flags_needed_for_conditional (rtx cond
)
4272 switch (GET_CODE (cond
))
4296 /* Returns true if a compare insn is redundant because it would only
4297 set flags that are already set correctly. */
4299 m32c_compare_redundant (rtx_insn
*cmp
, rtx
*operands
)
4314 fprintf(stderr
, "\n\033[32mm32c_compare_redundant\033[0m\n");
4318 fprintf(stderr
, "operands[%d] = ", i
);
4319 debug_rtx(operands
[i
]);
4323 next
= next_nonnote_insn (cmp
);
4324 if (!next
|| !INSN_P (next
))
4327 fprintf(stderr
, "compare not followed by insn\n");
4332 if (GET_CODE (PATTERN (next
)) == SET
4333 && GET_CODE (XEXP ( PATTERN (next
), 1)) == IF_THEN_ELSE
)
4335 next
= XEXP (XEXP (PATTERN (next
), 1), 0);
4337 else if (GET_CODE (PATTERN (next
)) == SET
)
4339 /* If this is a conditional, flags_needed will be something
4340 other than FLAGS_N, which we test below. */
4341 next
= XEXP (PATTERN (next
), 1);
4346 fprintf(stderr
, "compare not followed by conditional\n");
4352 fprintf(stderr
, "conditional is: ");
4356 flags_needed
= flags_needed_for_conditional (next
);
4357 if (flags_needed
== FLAGS_N
)
4360 fprintf(stderr
, "compare not followed by conditional\n");
4366 /* Compare doesn't set overflow and carry the same way that
4367 arithmetic instructions do, so we can't replace those. */
4368 if (flags_needed
& FLAGS_OC
)
4373 prev
= prev_nonnote_insn (prev
);
4377 fprintf(stderr
, "No previous insn.\n");
4384 fprintf(stderr
, "Previous insn is a non-insn.\n");
4388 pp
= PATTERN (prev
);
4389 if (GET_CODE (pp
) != SET
)
4392 fprintf(stderr
, "Previous insn is not a SET.\n");
4396 pflags
= get_attr_flags (prev
);
4398 /* Looking up attributes of previous insns corrupted the recog
4400 INSN_UID (cmp
) = -1;
4401 recog (PATTERN (cmp
), cmp
, 0);
4403 if (pflags
== FLAGS_N
4404 && reg_mentioned_p (op0
, pp
))
4407 fprintf(stderr
, "intermediate non-flags insn uses op:\n");
4413 /* Check for comparisons against memory - between volatiles and
4414 aliases, we just can't risk this one. */
4415 if (GET_CODE (operands
[0]) == MEM
4416 || GET_CODE (operands
[0]) == MEM
)
4419 fprintf(stderr
, "comparisons with memory:\n");
4425 /* Check for PREV changing a register that's used to compute a
4426 value in CMP, even if it doesn't otherwise change flags. */
4427 if (GET_CODE (operands
[0]) == REG
4428 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[0]))
4431 fprintf(stderr
, "sub-value affected, op0:\n");
4436 if (GET_CODE (operands
[1]) == REG
4437 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[1]))
4440 fprintf(stderr
, "sub-value affected, op1:\n");
4446 } while (pflags
== FLAGS_N
);
4448 fprintf(stderr
, "previous flag-setting insn:\n");
4453 if (GET_CODE (pp
) == SET
4454 && GET_CODE (XEXP (pp
, 0)) == REG
4455 && REGNO (XEXP (pp
, 0)) == FLG_REGNO
4456 && GET_CODE (XEXP (pp
, 1)) == COMPARE
)
4458 /* Adjacent cbranches must have the same operands to be
4460 rtx pop0
= XEXP (XEXP (pp
, 1), 0);
4461 rtx pop1
= XEXP (XEXP (pp
, 1), 1);
4463 fprintf(stderr
, "adjacent cbranches\n");
4467 if (rtx_equal_p (op0
, pop0
)
4468 && rtx_equal_p (op1
, pop1
))
4471 fprintf(stderr
, "prev cmp not same\n");
4476 /* Else the previous insn must be a SET, with either the source or
4477 dest equal to operands[0], and operands[1] must be zero. */
4479 if (!rtx_equal_p (op1
, const0_rtx
))
4482 fprintf(stderr
, "operands[1] not const0_rtx\n");
4486 if (GET_CODE (pp
) != SET
)
4489 fprintf (stderr
, "pp not set\n");
4493 if (!rtx_equal_p (op0
, SET_SRC (pp
))
4494 && !rtx_equal_p (op0
, SET_DEST (pp
)))
4497 fprintf(stderr
, "operands[0] not found in set\n");
4503 fprintf(stderr
, "cmp flags %x prev flags %x\n", flags_needed
, pflags
);
4505 if ((pflags
& flags_needed
) == flags_needed
)
4511 /* Return the pattern for a compare. This will be commented out if
4512 the compare is redundant, else a normal pattern is returned. Thus,
4513 the assembler output says where the compare would have been. */
4515 m32c_output_compare (rtx_insn
*insn
, rtx
*operands
)
4517 static char templ
[] = ";cmp.b\t%1,%0";
4520 templ
[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands
[0]))];
4521 if (m32c_compare_redundant (insn
, operands
))
4524 fprintf(stderr
, "cbranch: cmp not needed\n");
4530 fprintf(stderr
, "cbranch: cmp needed: `%s'\n", templ
+ 1);
4535 #undef TARGET_ENCODE_SECTION_INFO
4536 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4538 /* If the frame pointer isn't used, we detect it manually. But the
4539 stack pointer doesn't have as flexible addressing as the frame
4540 pointer, so we always assume we have it. */
4542 #undef TARGET_FRAME_POINTER_REQUIRED
4543 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4545 /* The Global `targetm' Variable. */
4547 struct gcc_target targetm
= TARGET_INITIALIZER
;
4549 #include "gt-m32c.h"