1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
32 #include "insn-attr.h"
36 #include "diagnostic-core.h"
39 #include "stor-layout.h"
48 #include "target-def.h"
50 #include "langhooks.h"
51 #include "pointer-set.h"
52 #include "hash-table.h"
54 #include "basic-block.h"
55 #include "tree-ssa-alias.h"
56 #include "internal-fn.h"
57 #include "gimple-fold.h"
59 #include "gimple-expr.h"
63 #include "tm-constrs.h"
67 /* Used by m32c_pushm_popm. */
75 static bool m32c_function_needs_enter (void);
76 static tree
interrupt_handler (tree
*, tree
, tree
, int, bool *);
77 static tree
function_vector_handler (tree
*, tree
, tree
, int, bool *);
78 static int interrupt_p (tree node
);
79 static int bank_switch_p (tree node
);
80 static int fast_interrupt_p (tree node
);
81 static int interrupt_p (tree node
);
82 static bool m32c_asm_integer (rtx
, unsigned int, int);
83 static int m32c_comp_type_attributes (const_tree
, const_tree
);
84 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
85 static struct machine_function
*m32c_init_machine_status (void);
86 static void m32c_insert_attributes (tree
, tree
*);
87 static bool m32c_legitimate_address_p (enum machine_mode
, rtx
, bool);
88 static bool m32c_addr_space_legitimate_address_p (enum machine_mode
, rtx
, bool, addr_space_t
);
89 static rtx
m32c_function_arg (cumulative_args_t
, enum machine_mode
,
91 static bool m32c_pass_by_reference (cumulative_args_t
, enum machine_mode
,
93 static void m32c_function_arg_advance (cumulative_args_t
, enum machine_mode
,
95 static unsigned int m32c_function_arg_boundary (enum machine_mode
, const_tree
);
96 static int m32c_pushm_popm (Push_Pop_Type
);
97 static bool m32c_strict_argument_naming (cumulative_args_t
);
98 static rtx
m32c_struct_value_rtx (tree
, int);
99 static rtx
m32c_subreg (enum machine_mode
, rtx
, enum machine_mode
, int);
100 static int need_to_save (int);
101 static rtx
m32c_function_value (const_tree
, const_tree
, bool);
102 static rtx
m32c_libcall_value (enum machine_mode
, const_rtx
);
104 /* Returns true if an address is specified, else false. */
105 static bool m32c_get_pragma_address (const char *varname
, unsigned *addr
);
107 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
109 #define streq(a,b) (strcmp ((a), (b)) == 0)
111 /* Internal support routines */
113 /* Debugging statements are tagged with DEBUG0 only so that they can
114 be easily enabled individually, by replacing the '0' with '1' as
120 /* This is needed by some of the commented-out debug statements
122 static char const *class_names
[LIM_REG_CLASSES
] = REG_CLASS_NAMES
;
124 static int class_contents
[LIM_REG_CLASSES
][1] = REG_CLASS_CONTENTS
;
126 /* These are all to support encode_pattern(). */
127 static char pattern
[30], *patternp
;
128 static GTY(()) rtx patternr
[30];
129 #define RTX_IS(x) (streq (pattern, x))
131 /* Some macros to simplify the logic throughout this file. */
132 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
133 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
135 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
136 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
139 far_addr_space_p (rtx x
)
141 if (GET_CODE (x
) != MEM
)
144 fprintf(stderr
, "\033[35mfar_addr_space: "); debug_rtx(x
);
145 fprintf(stderr
, " = %d\033[0m\n", MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
);
147 return MEM_ADDR_SPACE (x
) == ADDR_SPACE_FAR
;
150 /* We do most RTX matching by converting the RTX into a string, and
151 using string compares. This vastly simplifies the logic in many of
152 the functions in this file.
154 On exit, pattern[] has the encoded string (use RTX_IS("...") to
155 compare it) and patternr[] has pointers to the nodes in the RTX
156 corresponding to each character in the encoded string. The latter
157 is mostly used by print_operand().
159 Unrecognized patterns have '?' in them; this shows up when the
160 assembler complains about syntax errors.
164 encode_pattern_1 (rtx x
)
168 if (patternp
== pattern
+ sizeof (pattern
) - 2)
174 patternr
[patternp
- pattern
] = x
;
176 switch (GET_CODE (x
))
182 if (GET_MODE_SIZE (GET_MODE (x
)) !=
183 GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))))
185 encode_pattern_1 (XEXP (x
, 0));
190 encode_pattern_1 (XEXP (x
, 0));
195 encode_pattern_1 (XEXP (x
, 0));
200 encode_pattern_1 (XEXP (x
, 0));
204 encode_pattern_1 (XEXP (x
, 0));
205 encode_pattern_1 (XEXP (x
, 1));
209 encode_pattern_1 (XEXP (x
, 0));
213 encode_pattern_1 (XEXP (x
, 0));
217 encode_pattern_1 (XEXP (x
, 0));
218 encode_pattern_1 (XEXP (x
, 1));
222 encode_pattern_1 (XEXP (x
, 0));
239 *patternp
++ = '0' + XCINT (x
, 1, UNSPEC
);
240 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
241 encode_pattern_1 (XVECEXP (x
, 0, i
));
248 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
249 encode_pattern_1 (XVECEXP (x
, 0, i
));
253 encode_pattern_1 (XEXP (x
, 0));
255 encode_pattern_1 (XEXP (x
, 1));
260 fprintf (stderr
, "can't encode pattern %s\n",
261 GET_RTX_NAME (GET_CODE (x
)));
270 encode_pattern (rtx x
)
273 encode_pattern_1 (x
);
277 /* Since register names indicate the mode they're used in, we need a
278 way to determine which name to refer to the register with. Called
279 by print_operand(). */
282 reg_name_with_mode (int regno
, enum machine_mode mode
)
284 int mlen
= GET_MODE_SIZE (mode
);
285 if (regno
== R0_REGNO
&& mlen
== 1)
287 if (regno
== R0_REGNO
&& (mlen
== 3 || mlen
== 4))
289 if (regno
== R0_REGNO
&& mlen
== 6)
291 if (regno
== R0_REGNO
&& mlen
== 8)
293 if (regno
== R1_REGNO
&& mlen
== 1)
295 if (regno
== R1_REGNO
&& (mlen
== 3 || mlen
== 4))
297 if (regno
== A0_REGNO
&& TARGET_A16
&& (mlen
== 3 || mlen
== 4))
299 return reg_names
[regno
];
302 /* How many bytes a register uses on stack when it's pushed. We need
303 to know this because the push opcode needs to explicitly indicate
304 the size of the register, even though the name of the register
305 already tells it that. Used by m32c_output_reg_{push,pop}, which
306 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
309 reg_push_size (int regno
)
334 /* Given two register classes, find the largest intersection between
335 them. If there is no intersection, return RETURNED_IF_EMPTY
338 reduce_class (reg_class_t original_class
, reg_class_t limiting_class
,
339 reg_class_t returned_if_empty
)
343 reg_class_t best
= NO_REGS
;
344 unsigned int best_size
= 0;
346 if (original_class
== limiting_class
)
347 return original_class
;
349 cc
= reg_class_contents
[original_class
];
350 AND_HARD_REG_SET (cc
, reg_class_contents
[limiting_class
]);
352 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
354 if (hard_reg_set_subset_p (reg_class_contents
[i
], cc
))
355 if (best_size
< reg_class_size
[i
])
357 best
= (reg_class_t
) i
;
358 best_size
= reg_class_size
[i
];
363 return returned_if_empty
;
367 /* Used by m32c_register_move_cost to determine if a move is
368 impossibly expensive. */
370 class_can_hold_mode (reg_class_t rclass
, enum machine_mode mode
)
372 /* Cache the results: 0=untested 1=no 2=yes */
373 static char results
[LIM_REG_CLASSES
][MAX_MACHINE_MODE
];
375 if (results
[(int) rclass
][mode
] == 0)
378 results
[rclass
][mode
] = 1;
379 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
380 if (in_hard_reg_set_p (reg_class_contents
[(int) rclass
], mode
, r
)
381 && HARD_REGNO_MODE_OK (r
, mode
))
383 results
[rclass
][mode
] = 2;
389 fprintf (stderr
, "class %s can hold %s? %s\n",
390 class_names
[(int) rclass
], mode_name
[mode
],
391 (results
[rclass
][mode
] == 2) ? "yes" : "no");
393 return results
[(int) rclass
][mode
] == 2;
396 /* Run-time Target Specification. */
398 /* Memregs are memory locations that gcc treats like general
399 registers, as there are a limited number of true registers and the
400 m32c families can use memory in most places that registers can be
403 However, since memory accesses are more expensive than registers,
404 we allow the user to limit the number of memregs available, in
405 order to try to persuade gcc to try harder to use real registers.
407 Memregs are provided by lib1funcs.S.
410 int ok_to_change_target_memregs
= TRUE
;
412 /* Implements TARGET_OPTION_OVERRIDE. */
414 #undef TARGET_OPTION_OVERRIDE
415 #define TARGET_OPTION_OVERRIDE m32c_option_override
418 m32c_option_override (void)
420 /* We limit memregs to 0..16, and provide a default. */
421 if (global_options_set
.x_target_memregs
)
423 if (target_memregs
< 0 || target_memregs
> 16)
424 error ("invalid target memregs value '%d'", target_memregs
);
432 /* This target defaults to strict volatile bitfields. */
433 if (flag_strict_volatile_bitfields
< 0 && abi_version_at_least(2))
434 flag_strict_volatile_bitfields
= 1;
436 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
437 This is always worse than an absolute call. */
439 flag_no_function_cse
= 1;
441 /* This wants to put insns between compares and their jumps. */
442 /* FIXME: The right solution is to properly trace the flags register
443 values, but that is too much work for stage 4. */
444 flag_combine_stack_adjustments
= 0;
447 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
448 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
451 m32c_override_options_after_change (void)
454 flag_no_function_cse
= 1;
457 /* Defining data structures for per-function information */
459 /* The usual; we set up our machine_function data. */
static struct machine_function *
m32c_init_machine_status (void)
{
  /* GC-allocated and zero-initialized; fields are filled in later.  */
  return ggc_alloc_cleared_machine_function ();
}
466 /* Implements INIT_EXPANDERS. We just set up to call the above
469 m32c_init_expanders (void)
471 init_machine_status
= m32c_init_machine_status
;
476 /* Register Basics */
478 /* Basic Characteristics of Registers */
480 /* Whether a mode fits in a register is complex enough to warrant a
489 } nregs_table
[FIRST_PSEUDO_REGISTER
] =
491 { 1, 1, 2, 2, 4 }, /* r0 */
492 { 0, 1, 0, 0, 0 }, /* r2 */
493 { 1, 1, 2, 2, 0 }, /* r1 */
494 { 0, 1, 0, 0, 0 }, /* r3 */
495 { 0, 1, 1, 0, 0 }, /* a0 */
496 { 0, 1, 1, 0, 0 }, /* a1 */
497 { 0, 1, 1, 0, 0 }, /* sb */
498 { 0, 1, 1, 0, 0 }, /* fb */
499 { 0, 1, 1, 0, 0 }, /* sp */
500 { 1, 1, 1, 0, 0 }, /* pc */
501 { 0, 0, 0, 0, 0 }, /* fl */
502 { 1, 1, 1, 0, 0 }, /* ap */
503 { 1, 1, 2, 2, 4 }, /* mem0 */
504 { 1, 1, 2, 2, 4 }, /* mem1 */
505 { 1, 1, 2, 2, 4 }, /* mem2 */
506 { 1, 1, 2, 2, 4 }, /* mem3 */
507 { 1, 1, 2, 2, 4 }, /* mem4 */
508 { 1, 1, 2, 2, 0 }, /* mem5 */
509 { 1, 1, 2, 2, 0 }, /* mem6 */
510 { 1, 1, 0, 0, 0 }, /* mem7 */
513 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
514 of available memregs, and select which registers need to be preserved
515 across calls based on the chip family. */
517 #undef TARGET_CONDITIONAL_REGISTER_USAGE
518 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
520 m32c_conditional_register_usage (void)
524 if (0 <= target_memregs
&& target_memregs
<= 16)
526 /* The command line option is bytes, but our "registers" are
528 for (i
= (target_memregs
+1)/2; i
< 8; i
++)
530 fixed_regs
[MEM0_REGNO
+ i
] = 1;
531 CLEAR_HARD_REG_BIT (reg_class_contents
[MEM_REGS
], MEM0_REGNO
+ i
);
535 /* M32CM and M32C preserve more registers across function calls. */
538 call_used_regs
[R1_REGNO
] = 0;
539 call_used_regs
[R2_REGNO
] = 0;
540 call_used_regs
[R3_REGNO
] = 0;
541 call_used_regs
[A0_REGNO
] = 0;
542 call_used_regs
[A1_REGNO
] = 0;
546 /* How Values Fit in Registers */
548 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
549 different registers are different sizes from each other, *and* may
550 be different sizes in different chip families. */
552 m32c_hard_regno_nregs_1 (int regno
, enum machine_mode mode
)
554 if (regno
== FLG_REGNO
&& mode
== CCmode
)
556 if (regno
>= FIRST_PSEUDO_REGISTER
)
557 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
559 if (regno
>= MEM0_REGNO
&& regno
<= MEM7_REGNO
)
560 return (GET_MODE_SIZE (mode
) + 1) / 2;
562 if (GET_MODE_SIZE (mode
) <= 1)
563 return nregs_table
[regno
].qi_regs
;
564 if (GET_MODE_SIZE (mode
) <= 2)
565 return nregs_table
[regno
].hi_regs
;
566 if (regno
== A0_REGNO
&& mode
== SImode
&& TARGET_A16
)
568 if ((GET_MODE_SIZE (mode
) <= 3 || mode
== PSImode
) && TARGET_A24
)
569 return nregs_table
[regno
].pi_regs
;
570 if (GET_MODE_SIZE (mode
) <= 4)
571 return nregs_table
[regno
].si_regs
;
572 if (GET_MODE_SIZE (mode
) <= 8)
573 return nregs_table
[regno
].di_regs
;
578 m32c_hard_regno_nregs (int regno
, enum machine_mode mode
)
580 int rv
= m32c_hard_regno_nregs_1 (regno
, mode
);
584 /* Implements HARD_REGNO_MODE_OK. The above function does the work
585 already; just test its return value. */
587 m32c_hard_regno_ok (int regno
, enum machine_mode mode
)
589 return m32c_hard_regno_nregs_1 (regno
, mode
) != 0;
592 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
593 registers are all different sizes. However, since most modes are
594 bigger than our registers anyway, it's easier to implement this
595 function that way, leaving QImode as the only unique case. */
597 m32c_modes_tieable_p (enum machine_mode m1
, enum machine_mode m2
)
599 if (GET_MODE_SIZE (m1
) == GET_MODE_SIZE (m2
))
603 if (m1
== QImode
|| m2
== QImode
)
610 /* Register Classes */
612 /* Implements REGNO_REG_CLASS. */
614 m32c_regno_reg_class (int regno
)
639 if (IS_MEM_REGNO (regno
))
645 /* Implements REGNO_OK_FOR_BASE_P. */
647 m32c_regno_ok_for_base_p (int regno
)
649 if (regno
== A0_REGNO
650 || regno
== A1_REGNO
|| regno
>= FIRST_PSEUDO_REGISTER
)
655 #define DEBUG_RELOAD 0
657 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
658 registers of the appropriate size. */
660 #undef TARGET_PREFERRED_RELOAD_CLASS
661 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
664 m32c_preferred_reload_class (rtx x
, reg_class_t rclass
)
666 reg_class_t newclass
= rclass
;
669 fprintf (stderr
, "\npreferred_reload_class for %s is ",
670 class_names
[rclass
]);
672 if (rclass
== NO_REGS
)
673 rclass
= GET_MODE (x
) == QImode
? HL_REGS
: R03_REGS
;
675 if (reg_classes_intersect_p (rclass
, CR_REGS
))
677 switch (GET_MODE (x
))
683 /* newclass = HI_REGS; */
688 else if (newclass
== QI_REGS
&& GET_MODE_SIZE (GET_MODE (x
)) > 2)
690 else if (GET_MODE_SIZE (GET_MODE (x
)) > 4
691 && ! reg_class_subset_p (R03_REGS
, rclass
))
694 rclass
= reduce_class (rclass
, newclass
, rclass
);
696 if (GET_MODE (x
) == QImode
)
697 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
700 fprintf (stderr
, "%s\n", class_names
[rclass
]);
703 if (GET_CODE (x
) == MEM
704 && GET_CODE (XEXP (x
, 0)) == PLUS
705 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
706 fprintf (stderr
, "Glorm!\n");
711 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
713 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
714 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
717 m32c_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
719 return m32c_preferred_reload_class (x
, rclass
);
722 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
723 address registers for reloads since they're needed for address
726 m32c_limit_reload_class (enum machine_mode mode
, int rclass
)
729 fprintf (stderr
, "limit_reload_class for %s: %s ->",
730 mode_name
[mode
], class_names
[rclass
]);
734 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
735 else if (mode
== HImode
)
736 rclass
= reduce_class (rclass
, HI_REGS
, rclass
);
737 else if (mode
== SImode
)
738 rclass
= reduce_class (rclass
, SI_REGS
, rclass
);
740 if (rclass
!= A_REGS
)
741 rclass
= reduce_class (rclass
, DI_REGS
, rclass
);
744 fprintf (stderr
, " %s\n", class_names
[rclass
]);
749 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
750 r0 or r1, as those are the only real QImode registers. CR regs get
751 reloaded through appropriately sized general or address
754 m32c_secondary_reload_class (int rclass
, enum machine_mode mode
, rtx x
)
756 int cc
= class_contents
[rclass
][0];
758 fprintf (stderr
, "\nsecondary reload class %s %s\n",
759 class_names
[rclass
], mode_name
[mode
]);
763 && GET_CODE (x
) == MEM
&& (cc
& ~class_contents
[R23_REGS
][0]) == 0)
765 if (reg_classes_intersect_p (rclass
, CR_REGS
)
766 && GET_CODE (x
) == REG
767 && REGNO (x
) >= SB_REGNO
&& REGNO (x
) <= SP_REGNO
)
768 return (TARGET_A16
|| mode
== HImode
) ? HI_REGS
: A_REGS
;
772 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
775 #undef TARGET_CLASS_LIKELY_SPILLED_P
776 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
779 m32c_class_likely_spilled_p (reg_class_t regclass
)
781 if (regclass
== A_REGS
)
784 return (reg_class_size
[(int) regclass
] == 1);
787 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
788 documented meaning, to avoid potential inconsistencies with actual
789 class definitions. */
791 #undef TARGET_CLASS_MAX_NREGS
792 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
795 m32c_class_max_nregs (reg_class_t regclass
, enum machine_mode mode
)
798 unsigned char max
= 0;
800 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
801 if (TEST_HARD_REG_BIT (reg_class_contents
[(int) regclass
], rn
))
803 unsigned char n
= m32c_hard_regno_nregs (rn
, mode
);
810 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
811 QI (r0l, r1l) because the chip doesn't support QI ops on other
812 registers (well, it does on a0/a1 but if we let gcc do that, reload
813 suffers). Otherwise, we allow changes to larger modes. */
815 m32c_cannot_change_mode_class (enum machine_mode from
,
816 enum machine_mode to
, int rclass
)
820 fprintf (stderr
, "cannot change from %s to %s in %s\n",
821 mode_name
[from
], mode_name
[to
], class_names
[rclass
]);
824 /* If the larger mode isn't allowed in any of these registers, we
825 can't allow the change. */
826 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
827 if (class_contents
[rclass
][0] & (1 << rn
))
828 if (! m32c_hard_regno_ok (rn
, to
))
832 return (class_contents
[rclass
][0] & 0x1ffa);
834 if (class_contents
[rclass
][0] & 0x0005 /* r0, r1 */
835 && GET_MODE_SIZE (from
) > 1)
837 if (GET_MODE_SIZE (from
) > 2) /* all other regs */
843 /* Helpers for the rest of the file. */
844 /* TRUE if the rtx is a REG rtx for the given register. */
845 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
846 && REGNO (rtx) == regno)
847 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
848 base register in address calculations (hence the "strict"
850 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
851 && (REGNO (rtx) == AP_REGNO \
852 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
854 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
856 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
857 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
858 call return values. */
860 m32c_matches_constraint_p (rtx value
, int constraint
)
862 encode_pattern (value
);
864 switch (constraint
) {
866 return (far_addr_space_p (value
)
868 && A0_OR_PSEUDO (patternr
[1])
869 && GET_MODE (patternr
[1]) == SImode
)
870 || (RTX_IS ("m+^Sri")
871 && A0_OR_PSEUDO (patternr
[4])
872 && GET_MODE (patternr
[4]) == HImode
)
873 || (RTX_IS ("m+^Srs")
874 && A0_OR_PSEUDO (patternr
[4])
875 && GET_MODE (patternr
[4]) == HImode
)
876 || (RTX_IS ("m+^S+ris")
877 && A0_OR_PSEUDO (patternr
[5])
878 && GET_MODE (patternr
[5]) == HImode
)
882 /* This is the common "src/dest" address */
884 if (GET_CODE (value
) == MEM
&& CONSTANT_P (XEXP (value
, 0)))
886 if (RTX_IS ("ms") || RTX_IS ("m+si"))
888 if (RTX_IS ("m++rii"))
890 if (REGNO (patternr
[3]) == FB_REGNO
891 && INTVAL (patternr
[4]) == 0)
896 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
900 if (REGNO (r
) == SP_REGNO
)
902 return m32c_legitimate_address_p (GET_MODE (value
), XEXP (value
, 0), 1);
909 else if (RTX_IS ("m+ri"))
913 return (IS_REG (r
, A0_REGNO
) || IS_REG (r
, A1_REGNO
));
916 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
918 return ((RTX_IS ("mr")
919 && (IS_REG (patternr
[1], SP_REGNO
)))
920 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SP_REGNO
))));
922 return ((RTX_IS ("mr")
923 && (IS_REG (patternr
[1], FB_REGNO
)))
924 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], FB_REGNO
))));
926 return ((RTX_IS ("mr")
927 && (IS_REG (patternr
[1], SB_REGNO
)))
928 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SB_REGNO
))));
930 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
931 return (RTX_IS ("mi")
932 && !(INTVAL (patternr
[1]) & ~0x1fff));
934 return r1h_operand (value
, QImode
);
936 return GET_CODE (value
) == PARALLEL
;
942 /* STACK AND CALLING */
946 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
947 (yes, THREE bytes) onto the stack for the return address, but we
948 don't support pointers bigger than 16 bits on those chips. This
949 will likely wreak havoc with exception unwinding. FIXME. */
951 m32c_return_addr_rtx (int count
)
953 enum machine_mode mode
;
963 /* It's four bytes */
969 /* FIXME: it's really 3 bytes */
975 gen_rtx_MEM (mode
, plus_constant (Pmode
, gen_rtx_REG (Pmode
, FP_REGNO
),
977 return copy_to_mode_reg (mode
, ra_mem
);
980 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
982 m32c_incoming_return_addr_rtx (void)
985 return gen_rtx_MEM (PSImode
, gen_rtx_REG (PSImode
, SP_REGNO
));
988 /* Exception Handling Support */
990 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
993 m32c_eh_return_data_regno (int n
)
1005 return INVALID_REGNUM
;
1009 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1010 m32c_emit_eh_epilogue. */
1012 m32c_eh_return_stackadj_rtx (void)
1014 if (!cfun
->machine
->eh_stack_adjust
)
1018 sa
= gen_rtx_REG (Pmode
, R0_REGNO
);
1019 cfun
->machine
->eh_stack_adjust
= sa
;
1021 return cfun
->machine
->eh_stack_adjust
;
1024 /* Registers That Address the Stack Frame */
1026 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1027 the original spec called for dwarf numbers to vary with register
1028 width as well, for example, r0l, r0, and r2r0 would each have
1029 different dwarf numbers. GCC doesn't support this, and we don't do
1030 it, and gdb seems to like it this way anyway. */
1032 m32c_dwarf_frame_regnum (int n
)
1058 return DWARF_FRAME_REGISTERS
+ 1;
1062 /* The frame looks like this:
1064 ap -> +------------------------------
1065 | Return address (3 or 4 bytes)
1066 | Saved FB (2 or 4 bytes)
1067 fb -> +------------------------------
1070 | through r0 as needed
1071 sp -> +------------------------------
1074 /* We use this to wrap all emitted insns in the prologue. */
1078 RTX_FRAME_RELATED_P (x
) = 1;
1082 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1083 how much the stack pointer moves for each, for each cpu family. */
1092 /* These are in reverse push (nearest-to-sp) order. */
1093 { R0_REGNO
, 0x80, 2, 2 },
1094 { R1_REGNO
, 0x40, 2, 2 },
1095 { R2_REGNO
, 0x20, 2, 2 },
1096 { R3_REGNO
, 0x10, 2, 2 },
1097 { A0_REGNO
, 0x08, 2, 4 },
1098 { A1_REGNO
, 0x04, 2, 4 },
1099 { SB_REGNO
, 0x02, 2, 4 },
1100 { FB_REGNO
, 0x01, 2, 4 }
1103 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1105 /* Returns TRUE if we need to save/restore the given register. We
1106 save everything for exception handlers, so that any register can be
1107 unwound. For interrupt handlers, we save everything if the handler
1108 calls something else (because we don't know what *that* function
1109 might do), but try to be a bit smarter if the handler is a leaf
1110 function. We always save $a0, though, because we use that in the
1111 epilogue to copy $fb to $sp. */
1113 need_to_save (int regno
)
1115 if (fixed_regs
[regno
])
1117 if (crtl
->calls_eh_return
)
1119 if (regno
== FP_REGNO
)
1121 if (cfun
->machine
->is_interrupt
1122 && (!cfun
->machine
->is_leaf
1123 || (regno
== A0_REGNO
1124 && m32c_function_needs_enter ())
1127 if (df_regs_ever_live_p (regno
)
1128 && (!call_used_regs
[regno
] || cfun
->machine
->is_interrupt
))
1133 /* This function contains all the intelligence about saving and
1134 restoring registers. It always figures out the register save set.
1135 When called with PP_justcount, it merely returns the size of the
1136 save set (for eliminating the frame pointer, for example). When
1137 called with PP_pushm or PP_popm, it emits the appropriate
1138 instructions for saving (pushm) or restoring (popm) the
1141 m32c_pushm_popm (Push_Pop_Type ppt
)
1144 int byte_count
= 0, bytes
;
1146 rtx dwarf_set
[PUSHM_N
];
1148 int nosave_mask
= 0;
1150 if (crtl
->return_rtx
1151 && GET_CODE (crtl
->return_rtx
) == PARALLEL
1152 && !(crtl
->calls_eh_return
|| cfun
->machine
->is_interrupt
))
1154 rtx exp
= XVECEXP (crtl
->return_rtx
, 0, 0);
1155 rtx rv
= XEXP (exp
, 0);
1156 int rv_bytes
= GET_MODE_SIZE (GET_MODE (rv
));
1159 nosave_mask
|= 0x20; /* PSI, SI */
1161 nosave_mask
|= 0xf0; /* DF */
1163 nosave_mask
|= 0x50; /* DI */
1166 for (i
= 0; i
< (int) PUSHM_N
; i
++)
1168 /* Skip if neither register needs saving. */
1169 if (!need_to_save (pushm_info
[i
].reg1
))
1172 if (pushm_info
[i
].bit
& nosave_mask
)
1175 reg_mask
|= pushm_info
[i
].bit
;
1176 bytes
= TARGET_A16
? pushm_info
[i
].a16_bytes
: pushm_info
[i
].a24_bytes
;
1178 if (ppt
== PP_pushm
)
1180 enum machine_mode mode
= (bytes
== 2) ? HImode
: SImode
;
1183 /* Always use stack_pointer_rtx instead of calling
1184 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1185 that there is a single rtx representing the stack pointer,
1186 namely stack_pointer_rtx, and uses == to recognize it. */
1187 addr
= stack_pointer_rtx
;
1189 if (byte_count
!= 0)
1190 addr
= gen_rtx_PLUS (GET_MODE (addr
), addr
, GEN_INT (byte_count
));
1192 dwarf_set
[n_dwarfs
++] =
1193 gen_rtx_SET (VOIDmode
,
1194 gen_rtx_MEM (mode
, addr
),
1195 gen_rtx_REG (mode
, pushm_info
[i
].reg1
));
1196 F (dwarf_set
[n_dwarfs
- 1]);
1199 byte_count
+= bytes
;
1202 if (cfun
->machine
->is_interrupt
)
1204 cfun
->machine
->intr_pushm
= reg_mask
& 0xfe;
1209 if (cfun
->machine
->is_interrupt
)
1210 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1211 if (need_to_save (i
))
1214 cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
] = 1;
1217 if (ppt
== PP_pushm
&& byte_count
)
1219 rtx note
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (n_dwarfs
+ 1));
1224 XVECEXP (note
, 0, 0)
1225 = gen_rtx_SET (VOIDmode
,
1227 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1229 GEN_INT (-byte_count
)));
1230 F (XVECEXP (note
, 0, 0));
1232 for (i
= 0; i
< n_dwarfs
; i
++)
1233 XVECEXP (note
, 0, i
+ 1) = dwarf_set
[i
];
1235 pushm
= F (emit_insn (gen_pushm (GEN_INT (reg_mask
))));
1237 add_reg_note (pushm
, REG_FRAME_RELATED_EXPR
, note
);
1240 if (cfun
->machine
->is_interrupt
)
1241 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1242 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1245 pushm
= emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode
, i
)));
1247 pushm
= emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode
, i
)));
1251 if (ppt
== PP_popm
&& byte_count
)
1253 if (cfun
->machine
->is_interrupt
)
1254 for (i
= MEM7_REGNO
; i
>= MEM0_REGNO
; i
--)
1255 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1258 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, i
)));
1260 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode
, i
)));
1263 emit_insn (gen_popm (GEN_INT (reg_mask
)));
1269 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1270 diagrams our call frame. */
1272 m32c_initial_elimination_offset (int from
, int to
)
1276 if (from
== AP_REGNO
)
1286 ofs
+= m32c_pushm_popm (PP_justcount
);
1287 ofs
+= get_frame_size ();
1290 /* Account for push rounding. */
1292 ofs
= (ofs
+ 1) & ~1;
1294 fprintf (stderr
, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from
,
1300 /* Passing Function Arguments on the Stack */
1302 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1303 M32C has word stacks. */
1305 m32c_push_rounding (int n
)
1307 if (TARGET_R8C
|| TARGET_M16C
)
1309 return (n
+ 1) & ~1;
1312 /* Passing Arguments in Registers */
1314 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1315 registers, partly on stack. If our function returns a struct, a
1316 pointer to a buffer for it is at the top of the stack (last thing
1317 pushed). The first few real arguments may be in registers as
1320 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1321 arg2 in r2 if it's HI (else pushed on stack)
1323 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1326 Structs are not passed in registers, even if they fit. Only
1327 integer and pointer types are passed in registers.
1329 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1331 #undef TARGET_FUNCTION_ARG
1332 #define TARGET_FUNCTION_ARG m32c_function_arg
1334 m32c_function_arg (cumulative_args_t ca_v
,
1335 enum machine_mode mode
, const_tree type
, bool named
)
1337 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1339 /* Can return a reg, parallel, or 0 for stack */
1342 fprintf (stderr
, "func_arg %d (%s, %d)\n",
1343 ca
->parm_num
, mode_name
[mode
], named
);
1347 if (mode
== VOIDmode
)
1350 if (ca
->force_mem
|| !named
)
1353 fprintf (stderr
, "func arg: force %d named %d, mem\n", ca
->force_mem
,
1359 if (type
&& INTEGRAL_TYPE_P (type
) && POINTER_TYPE_P (type
))
1362 if (type
&& AGGREGATE_TYPE_P (type
))
1365 switch (ca
->parm_num
)
1368 if (GET_MODE_SIZE (mode
) == 1 || GET_MODE_SIZE (mode
) == 2)
1369 rv
= gen_rtx_REG (mode
, TARGET_A16
? R1_REGNO
: R0_REGNO
);
1373 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 2)
1374 rv
= gen_rtx_REG (mode
, R2_REGNO
);
1384 #undef TARGET_PASS_BY_REFERENCE
1385 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1387 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED
,
1388 enum machine_mode mode ATTRIBUTE_UNUSED
,
1389 const_tree type ATTRIBUTE_UNUSED
,
1390 bool named ATTRIBUTE_UNUSED
)
1395 /* Implements INIT_CUMULATIVE_ARGS. */
1397 m32c_init_cumulative_args (CUMULATIVE_ARGS
* ca
,
1399 rtx libname ATTRIBUTE_UNUSED
,
1401 int n_named_args ATTRIBUTE_UNUSED
)
1403 if (fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fndecl
))
1410 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1411 functions returning structures, so we always reset that. Otherwise,
1412 we only need to know the sequence number of the argument to know what
1414 #undef TARGET_FUNCTION_ARG_ADVANCE
1415 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1417 m32c_function_arg_advance (cumulative_args_t ca_v
,
1418 enum machine_mode mode ATTRIBUTE_UNUSED
,
1419 const_tree type ATTRIBUTE_UNUSED
,
1420 bool named ATTRIBUTE_UNUSED
)
1422 CUMULATIVE_ARGS
*ca
= get_cumulative_args (ca_v
);
1430 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1431 #undef TARGET_FUNCTION_ARG_BOUNDARY
1432 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1434 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED
,
1435 const_tree type ATTRIBUTE_UNUSED
)
1437 return (TARGET_A16
? 8 : 16);
1440 /* Implements FUNCTION_ARG_REGNO_P. */
1442 m32c_function_arg_regno_p (int r
)
1445 return (r
== R0_REGNO
);
1446 return (r
== R1_REGNO
|| r
== R2_REGNO
);
1449 /* HImode and PSImode are the two "native" modes as far as GCC is
1450 concerned, but the chips also support a 32-bit mode which is used
1451 for some opcodes in R8C/M16C and for reset vectors and such. */
1452 #undef TARGET_VALID_POINTER_MODE
1453 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1455 m32c_valid_pointer_mode (enum machine_mode mode
)
1465 /* How Scalar Function Values Are Returned */
1467 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1468 combination of registers starting there (r2r0 for longs, r3r1r2r0
1469 for long long, r3r2r1r0 for doubles), except that that ABI
1470 currently doesn't work because it ends up using all available
1471 general registers and gcc often can't compile it. So, instead, we
1472 return anything bigger than 16 bits in "mem0" (effectively, a
1473 memory location). */
1475 #undef TARGET_LIBCALL_VALUE
1476 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1479 m32c_libcall_value (enum machine_mode mode
, const_rtx fun ATTRIBUTE_UNUSED
)
1481 /* return reg or parallel */
1483 /* FIXME: GCC has difficulty returning large values in registers,
1484 because that ties up most of the general registers and gives the
1485 register allocator little to work with. Until we can resolve
1486 this, large values are returned in memory. */
1491 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (4));
1492 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1493 gen_rtx_REG (HImode
,
1496 XVECEXP (rv
, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode
,
1497 gen_rtx_REG (HImode
,
1500 XVECEXP (rv
, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode
,
1501 gen_rtx_REG (HImode
,
1504 XVECEXP (rv
, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode
,
1505 gen_rtx_REG (HImode
,
1511 if (TARGET_A24
&& GET_MODE_SIZE (mode
) > 2)
1515 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
1516 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1524 if (GET_MODE_SIZE (mode
) > 2)
1525 return gen_rtx_REG (mode
, MEM0_REGNO
);
1526 return gen_rtx_REG (mode
, R0_REGNO
);
1529 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1532 #undef TARGET_FUNCTION_VALUE
1533 #define TARGET_FUNCTION_VALUE m32c_function_value
1536 m32c_function_value (const_tree valtype
,
1537 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1538 bool outgoing ATTRIBUTE_UNUSED
)
1540 /* return reg or parallel */
1541 const enum machine_mode mode
= TYPE_MODE (valtype
);
1542 return m32c_libcall_value (mode
, NULL_RTX
);
1545 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1547 #undef TARGET_FUNCTION_VALUE_REGNO_P
1548 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1551 m32c_function_value_regno_p (const unsigned int regno
)
1553 return (regno
== R0_REGNO
|| regno
== MEM0_REGNO
);
1556 /* How Large Values Are Returned */
1558 /* We return structures by pushing the address on the stack, even if
1559 we use registers for the first few "real" arguments. */
1560 #undef TARGET_STRUCT_VALUE_RTX
1561 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1563 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED
,
1564 int incoming ATTRIBUTE_UNUSED
)
1569 /* Function Entry and Exit */
1571 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1573 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
1575 if (cfun
->machine
->is_interrupt
)
1580 /* Implementing the Varargs Macros */
1582 #undef TARGET_STRICT_ARGUMENT_NAMING
1583 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1585 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED
)
1590 /* Trampolines for Nested Functions */
1594 1 0000 75C43412 mov.w #0x1234,a0
1595 2 0004 FC000000 jmp.a label
1598 1 0000 BC563412 mov.l:s #0x123456,a0
1599 2 0004 CC000000 jmp.a label
1602 /* Implements TRAMPOLINE_SIZE. */
1604 m32c_trampoline_size (void)
1606 /* Allocate extra space so we can avoid the messy shifts when we
1607 initialize the trampoline; we just write past the end of the
1609 return TARGET_A16
? 8 : 10;
1612 /* Implements TRAMPOLINE_ALIGNMENT. */
1614 m32c_trampoline_alignment (void)
1619 /* Implements TARGET_TRAMPOLINE_INIT. */
1621 #undef TARGET_TRAMPOLINE_INIT
1622 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1624 m32c_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chainval
)
1626 rtx function
= XEXP (DECL_RTL (fndecl
), 0);
1628 #define A0(m,i) adjust_address (m_tramp, m, i)
1631 /* Note: we subtract a "word" because the moves want signed
1632 constants, not unsigned constants. */
1633 emit_move_insn (A0 (HImode
, 0), GEN_INT (0xc475 - 0x10000));
1634 emit_move_insn (A0 (HImode
, 2), chainval
);
1635 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xfc - 0x100));
1636 /* We use 16-bit addresses here, but store the zero to turn it
1637 into a 24-bit offset. */
1638 emit_move_insn (A0 (HImode
, 5), function
);
1639 emit_move_insn (A0 (QImode
, 7), GEN_INT (0x00));
1643 /* Note that the PSI moves actually write 4 bytes. Make sure we
1644 write stuff out in the right order, and leave room for the
1645 extra byte at the end. */
1646 emit_move_insn (A0 (QImode
, 0), GEN_INT (0xbc - 0x100));
1647 emit_move_insn (A0 (PSImode
, 1), chainval
);
1648 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xcc - 0x100));
1649 emit_move_insn (A0 (PSImode
, 5), function
);
1654 /* Addressing Modes */
1656 /* The r8c/m32c family supports a wide range of non-orthogonal
1657 addressing modes, including the ability to double-indirect on *some*
1658 of them. Not all insns support all modes, either, but we rely on
1659 predicates and constraints to deal with that. */
1660 #undef TARGET_LEGITIMATE_ADDRESS_P
1661 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1663 m32c_legitimate_address_p (enum machine_mode mode
, rtx x
, bool strict
)
1669 if (TARGET_A16
&& GET_MODE (x
) != HImode
&& GET_MODE (x
) != SImode
)
1671 if (TARGET_A24
&& GET_MODE (x
) != PSImode
)
1674 /* Wide references to memory will be split after reload, so we must
1675 ensure that all parts of such splits remain legitimate
1677 mode_adjust
= GET_MODE_SIZE (mode
) - 1;
1679 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1680 if (GET_CODE (x
) == PRE_DEC
1681 || GET_CODE (x
) == POST_INC
|| GET_CODE (x
) == PRE_MODIFY
)
1683 return (GET_CODE (XEXP (x
, 0)) == REG
1684 && REGNO (XEXP (x
, 0)) == SP_REGNO
);
1688 /* This is the double indirection detection, but it currently
1689 doesn't work as cleanly as this code implies, so until we've had
1690 a chance to debug it, leave it disabled. */
1691 if (TARGET_A24
&& GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) != PLUS
)
1694 fprintf (stderr
, "double indirect\n");
1703 /* Most indexable registers can be used without displacements,
1704 although some of them will be emitted with an explicit zero
1705 to please the assembler. */
1706 switch (REGNO (patternr
[0]))
1712 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1718 if (IS_PSEUDO (patternr
[0], strict
))
1724 if (TARGET_A16
&& GET_MODE (x
) == SImode
)
1729 /* This is more interesting, because different base registers
1730 allow for different displacements - both range and signedness
1731 - and it differs from chip series to chip series too. */
1732 int rn
= REGNO (patternr
[1]);
1733 HOST_WIDE_INT offs
= INTVAL (patternr
[2]);
1739 /* The syntax only allows positive offsets, but when the
1740 offsets span the entire memory range, we can simulate
1741 negative offsets by wrapping. */
1743 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1745 return (offs
>= 0 && offs
<= 65535 - mode_adjust
);
1747 return (offs
>= -16777216 && offs
<= 16777215);
1751 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1752 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1755 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1758 if (IS_PSEUDO (patternr
[1], strict
))
1763 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1765 rtx reg
= patternr
[1];
1767 /* We don't know where the symbol is, so only allow base
1768 registers which support displacements spanning the whole
1770 switch (REGNO (reg
))
1774 /* $sb needs a secondary reload, but since it's involved in
1775 memory address reloads too, we don't deal with it very
1777 /* case SB_REGNO: */
1780 if (IS_PSEUDO (reg
, strict
))
1788 /* Implements REG_OK_FOR_BASE_P. */
1790 m32c_reg_ok_for_base_p (rtx x
, int strict
)
1792 if (GET_CODE (x
) != REG
)
1803 if (IS_PSEUDO (x
, strict
))
1809 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1810 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1812 EB 4B FF mova -128[$fb],$a0
1813 D8 0C FF FF mov.w:Q #0,-1[$a0]
1815 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1818 77 54 00 01 sub #256,$a0
1819 D8 08 01 mov.w:Q #0,1[$a0]
1821 If we don't offset (i.e. offset by zero), we end up with:
1823 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1825 We have to subtract *something* so that we have a PLUS rtx to mark
1826 that we've done this reload. The -128 offset will never result in
1827 an 8-bit aN offset, and the payoff for the second case is five
1828 loads *if* those loads are within 256 bytes of the other end of the
1829 frame, so the third case seems best. Note that we subtract the
1830 zero, but detect that in the addhi3 pattern. */
1832 #define BIG_FB_ADJ 0
1834 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1835 worry about is frame base offsets, as $fb has a limited
1836 displacement range. We deal with this by attempting to reload $fb
1837 itself into an address register; that seems to result in the best
1839 #undef TARGET_LEGITIMIZE_ADDRESS
1840 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1842 m32c_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1843 enum machine_mode mode
)
1846 fprintf (stderr
, "m32c_legitimize_address for mode %s\n", mode_name
[mode
]);
1848 fprintf (stderr
, "\n");
1851 if (GET_CODE (x
) == PLUS
1852 && GET_CODE (XEXP (x
, 0)) == REG
1853 && REGNO (XEXP (x
, 0)) == FB_REGNO
1854 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1855 && (INTVAL (XEXP (x
, 1)) < -128
1856 || INTVAL (XEXP (x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1858 /* reload FB to A_REGS */
1859 rtx temp
= gen_reg_rtx (Pmode
);
1861 emit_insn (gen_rtx_SET (VOIDmode
, temp
, XEXP (x
, 0)));
1868 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1870 m32c_legitimize_reload_address (rtx
* x
,
1871 enum machine_mode mode
,
1873 int type
, int ind_levels ATTRIBUTE_UNUSED
)
1876 fprintf (stderr
, "\nm32c_legitimize_reload_address for mode %s\n",
1881 /* At one point, this function tried to get $fb copied to an address
1882 register, which in theory would maximize sharing, but gcc was
1883 *also* still trying to reload the whole address, and we'd run out
1884 of address registers. So we let gcc do the naive (but safe)
1885 reload instead, when the above function doesn't handle it for
1888 The code below is a second attempt at the above. */
1890 if (GET_CODE (*x
) == PLUS
1891 && GET_CODE (XEXP (*x
, 0)) == REG
1892 && REGNO (XEXP (*x
, 0)) == FB_REGNO
1893 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1894 && (INTVAL (XEXP (*x
, 1)) < -128
1895 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1898 int offset
= INTVAL (XEXP (*x
, 1));
1899 int adjustment
= -BIG_FB_ADJ
;
1901 sum
= gen_rtx_PLUS (Pmode
, XEXP (*x
, 0),
1902 GEN_INT (adjustment
));
1903 *x
= gen_rtx_PLUS (Pmode
, sum
, GEN_INT (offset
- adjustment
));
1904 if (type
== RELOAD_OTHER
)
1905 type
= RELOAD_FOR_OTHER_ADDRESS
;
1906 push_reload (sum
, NULL_RTX
, &XEXP (*x
, 0), NULL
,
1907 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1908 (enum reload_type
) type
);
1912 if (GET_CODE (*x
) == PLUS
1913 && GET_CODE (XEXP (*x
, 0)) == PLUS
1914 && GET_CODE (XEXP (XEXP (*x
, 0), 0)) == REG
1915 && REGNO (XEXP (XEXP (*x
, 0), 0)) == FB_REGNO
1916 && GET_CODE (XEXP (XEXP (*x
, 0), 1)) == CONST_INT
1917 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1920 if (type
== RELOAD_OTHER
)
1921 type
= RELOAD_FOR_OTHER_ADDRESS
;
1922 push_reload (XEXP (*x
, 0), NULL_RTX
, &XEXP (*x
, 0), NULL
,
1923 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
1924 (enum reload_type
) type
);
1931 /* Return the appropriate mode for a named address pointer. */
1932 #undef TARGET_ADDR_SPACE_POINTER_MODE
1933 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1934 static enum machine_mode
1935 m32c_addr_space_pointer_mode (addr_space_t addrspace
)
1939 case ADDR_SPACE_GENERIC
:
1940 return TARGET_A24
? PSImode
: HImode
;
1941 case ADDR_SPACE_FAR
:
1948 /* Return the appropriate mode for a named address address. */
1949 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1950 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1951 static enum machine_mode
1952 m32c_addr_space_address_mode (addr_space_t addrspace
)
1956 case ADDR_SPACE_GENERIC
:
1957 return TARGET_A24
? PSImode
: HImode
;
1958 case ADDR_SPACE_FAR
:
1965 /* Like m32c_legitimate_address_p, except with named addresses. */
1966 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1967 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1968 m32c_addr_space_legitimate_address_p
1970 m32c_addr_space_legitimate_address_p (enum machine_mode mode
, rtx x
,
1971 bool strict
, addr_space_t as
)
1973 if (as
== ADDR_SPACE_FAR
)
1980 if (GET_MODE (x
) != SImode
)
1982 switch (REGNO (patternr
[0]))
1988 if (IS_PSEUDO (patternr
[0], strict
))
1993 if (RTX_IS ("+^Sri"))
1995 int rn
= REGNO (patternr
[3]);
1996 HOST_WIDE_INT offs
= INTVAL (patternr
[4]);
1997 if (GET_MODE (patternr
[3]) != HImode
)
2002 return (offs
>= 0 && offs
<= 0xfffff);
2005 if (IS_PSEUDO (patternr
[3], strict
))
2010 if (RTX_IS ("+^Srs"))
2012 int rn
= REGNO (patternr
[3]);
2013 if (GET_MODE (patternr
[3]) != HImode
)
2021 if (IS_PSEUDO (patternr
[3], strict
))
2026 if (RTX_IS ("+^S+ris"))
2028 int rn
= REGNO (patternr
[4]);
2029 if (GET_MODE (patternr
[4]) != HImode
)
2037 if (IS_PSEUDO (patternr
[4], strict
))
2049 else if (as
!= ADDR_SPACE_GENERIC
)
2052 return m32c_legitimate_address_p (mode
, x
, strict
);
2055 /* Like m32c_legitimate_address, except with named address support. */
2056 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2057 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2059 m32c_addr_space_legitimize_address (rtx x
, rtx oldx
, enum machine_mode mode
,
2062 if (as
!= ADDR_SPACE_GENERIC
)
2065 fprintf (stderr
, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name
[mode
]);
2067 fprintf (stderr
, "\n");
2070 if (GET_CODE (x
) != REG
)
2072 x
= force_reg (SImode
, x
);
2077 return m32c_legitimize_address (x
, oldx
, mode
);
2080 /* Determine if one named address space is a subset of another. */
2081 #undef TARGET_ADDR_SPACE_SUBSET_P
2082 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2084 m32c_addr_space_subset_p (addr_space_t subset
, addr_space_t superset
)
2086 gcc_assert (subset
== ADDR_SPACE_GENERIC
|| subset
== ADDR_SPACE_FAR
);
2087 gcc_assert (superset
== ADDR_SPACE_GENERIC
|| superset
== ADDR_SPACE_FAR
);
2089 if (subset
== superset
)
2093 return (subset
== ADDR_SPACE_GENERIC
&& superset
== ADDR_SPACE_FAR
);
2096 #undef TARGET_ADDR_SPACE_CONVERT
2097 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2098 /* Convert from one address space to another. */
2100 m32c_addr_space_convert (rtx op
, tree from_type
, tree to_type
)
2102 addr_space_t from_as
= TYPE_ADDR_SPACE (TREE_TYPE (from_type
));
2103 addr_space_t to_as
= TYPE_ADDR_SPACE (TREE_TYPE (to_type
));
2106 gcc_assert (from_as
== ADDR_SPACE_GENERIC
|| from_as
== ADDR_SPACE_FAR
);
2107 gcc_assert (to_as
== ADDR_SPACE_GENERIC
|| to_as
== ADDR_SPACE_FAR
);
2109 if (to_as
== ADDR_SPACE_GENERIC
&& from_as
== ADDR_SPACE_FAR
)
2111 /* This is unpredictable, as we're truncating off usable address
2114 result
= gen_reg_rtx (HImode
);
2115 emit_move_insn (result
, simplify_subreg (HImode
, op
, SImode
, 0));
2118 else if (to_as
== ADDR_SPACE_FAR
&& from_as
== ADDR_SPACE_GENERIC
)
2120 /* This always works. */
2121 result
= gen_reg_rtx (SImode
);
2122 emit_insn (gen_zero_extendhisi2 (result
, op
));
2129 /* Condition Code Status */
2131 #undef TARGET_FIXED_CONDITION_CODE_REGS
2132 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2134 m32c_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
2137 *p2
= INVALID_REGNUM
;
2141 /* Describing Relative Costs of Operations */
2143 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2144 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2145 no opcodes to do that). We also discourage use of mem* registers
2146 since they're really memory. */
2148 #undef TARGET_REGISTER_MOVE_COST
2149 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2152 m32c_register_move_cost (enum machine_mode mode
, reg_class_t from
,
2155 int cost
= COSTS_N_INSNS (3);
2158 /* FIXME: pick real values, but not 2 for now. */
2159 COPY_HARD_REG_SET (cc
, reg_class_contents
[(int) from
]);
2160 IOR_HARD_REG_SET (cc
, reg_class_contents
[(int) to
]);
2163 && hard_reg_set_intersect_p (cc
, reg_class_contents
[R23_REGS
]))
2165 if (hard_reg_set_subset_p (cc
, reg_class_contents
[R23_REGS
]))
2166 cost
= COSTS_N_INSNS (1000);
2168 cost
= COSTS_N_INSNS (80);
2171 if (!class_can_hold_mode (from
, mode
) || !class_can_hold_mode (to
, mode
))
2172 cost
= COSTS_N_INSNS (1000);
2174 if (reg_classes_intersect_p (from
, CR_REGS
))
2175 cost
+= COSTS_N_INSNS (5);
2177 if (reg_classes_intersect_p (to
, CR_REGS
))
2178 cost
+= COSTS_N_INSNS (5);
2180 if (from
== MEM_REGS
|| to
== MEM_REGS
)
2181 cost
+= COSTS_N_INSNS (50);
2182 else if (reg_classes_intersect_p (from
, MEM_REGS
)
2183 || reg_classes_intersect_p (to
, MEM_REGS
))
2184 cost
+= COSTS_N_INSNS (10);
2187 fprintf (stderr
, "register_move_cost %s from %s to %s = %d\n",
2188 mode_name
[mode
], class_names
[(int) from
], class_names
[(int) to
],
2194 /* Implements TARGET_MEMORY_MOVE_COST. */
2196 #undef TARGET_MEMORY_MOVE_COST
2197 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2200 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED
,
2201 reg_class_t rclass ATTRIBUTE_UNUSED
,
2202 bool in ATTRIBUTE_UNUSED
)
2204 /* FIXME: pick real values. */
2205 return COSTS_N_INSNS (10);
2208 /* Here we try to describe when we use multiple opcodes for one RTX so
2209 that gcc knows when to use them. */
2210 #undef TARGET_RTX_COSTS
2211 #define TARGET_RTX_COSTS m32c_rtx_costs
2213 m32c_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2214 int *total
, bool speed ATTRIBUTE_UNUSED
)
2219 if (REGNO (x
) >= MEM0_REGNO
&& REGNO (x
) <= MEM7_REGNO
)
2220 *total
+= COSTS_N_INSNS (500);
2222 *total
+= COSTS_N_INSNS (1);
2228 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
2230 /* mov.b r1l, r1h */
2231 *total
+= COSTS_N_INSNS (1);
2234 if (INTVAL (XEXP (x
, 1)) > 8
2235 || INTVAL (XEXP (x
, 1)) < -8)
2238 /* mov.b r1l, r1h */
2239 *total
+= COSTS_N_INSNS (2);
2254 if (outer_code
== SET
)
2256 *total
+= COSTS_N_INSNS (2);
2263 rtx dest
= XEXP (x
, 0);
2264 rtx addr
= XEXP (dest
, 0);
2265 switch (GET_CODE (addr
))
2268 *total
+= COSTS_N_INSNS (1);
2271 *total
+= COSTS_N_INSNS (3);
2274 *total
+= COSTS_N_INSNS (2);
2282 /* Reasonable default. */
2283 if (TARGET_A16
&& GET_MODE(x
) == SImode
)
2284 *total
+= COSTS_N_INSNS (2);
2290 #undef TARGET_ADDRESS_COST
2291 #define TARGET_ADDRESS_COST m32c_address_cost
2293 m32c_address_cost (rtx addr
, enum machine_mode mode ATTRIBUTE_UNUSED
,
2294 addr_space_t as ATTRIBUTE_UNUSED
,
2295 bool speed ATTRIBUTE_UNUSED
)
2298 /* fprintf(stderr, "\naddress_cost\n");
2300 switch (GET_CODE (addr
))
2305 return COSTS_N_INSNS(1);
2306 if (0 < i
&& i
<= 255)
2307 return COSTS_N_INSNS(2);
2308 if (0 < i
&& i
<= 65535)
2309 return COSTS_N_INSNS(3);
2310 return COSTS_N_INSNS(4);
2312 return COSTS_N_INSNS(4);
2314 return COSTS_N_INSNS(1);
2316 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
2318 i
= INTVAL (XEXP (addr
, 1));
2320 return COSTS_N_INSNS(1);
2321 if (0 < i
&& i
<= 255)
2322 return COSTS_N_INSNS(2);
2323 if (0 < i
&& i
<= 65535)
2324 return COSTS_N_INSNS(3);
2326 return COSTS_N_INSNS(4);
2332 /* Defining the Output Assembler Language */
2334 /* Output of Data */
2336 /* We may have 24 bit sizes, which is the native address size.
2337 Currently unused, but provided for completeness. */
2338 #undef TARGET_ASM_INTEGER
2339 #define TARGET_ASM_INTEGER m32c_asm_integer
2341 m32c_asm_integer (rtx x
, unsigned int size
, int aligned_p
)
2346 fprintf (asm_out_file
, "\t.3byte\t");
2347 output_addr_const (asm_out_file
, x
);
2348 fputc ('\n', asm_out_file
);
2351 if (GET_CODE (x
) == SYMBOL_REF
)
2353 fprintf (asm_out_file
, "\t.long\t");
2354 output_addr_const (asm_out_file
, x
);
2355 fputc ('\n', asm_out_file
);
2360 return default_assemble_integer (x
, size
, aligned_p
);
2363 /* Output of Assembler Instructions */
2365 /* We use a lookup table because the addressing modes are non-orthogonal. */
2370 char const *pattern
;
2373 const conversions
[] = {
2376 { 0, "mr", "z[1]" },
2377 { 0, "m+ri", "3[2]" },
2378 { 0, "m+rs", "3[2]" },
2379 { 0, "m+^Zrs", "5[4]" },
2380 { 0, "m+^Zri", "5[4]" },
2381 { 0, "m+^Z+ris", "7+6[5]" },
2382 { 0, "m+^Srs", "5[4]" },
2383 { 0, "m+^Sri", "5[4]" },
2384 { 0, "m+^S+ris", "7+6[5]" },
2385 { 0, "m+r+si", "4+5[2]" },
2388 { 0, "m+si", "2+3" },
2390 { 0, "mmr", "[z[2]]" },
2391 { 0, "mm+ri", "[4[3]]" },
2392 { 0, "mm+rs", "[4[3]]" },
2393 { 0, "mm+r+si", "[5+6[3]]" },
2394 { 0, "mms", "[[2]]" },
2395 { 0, "mmi", "[[2]]" },
2396 { 0, "mm+si", "[4[3]]" },
2400 { 0, "+si", "#1+2" },
2406 { 'd', "+si", "1+2" },
2409 { 'D', "+si", "1+2" },
2420 /* This is in order according to the bitfield that pushm/popm use. */
2421 static char const *pushm_regs
[] = {
2422 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2425 /* Implements TARGET_PRINT_OPERAND. */
2427 #undef TARGET_PRINT_OPERAND
2428 #define TARGET_PRINT_OPERAND m32c_print_operand
2431 m32c_print_operand (FILE * file
, rtx x
, int code
)
2436 int unsigned_const
= 0;
2439 /* Multiplies; constants are converted to sign-extended format but
2440 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2452 /* This one is only for debugging; you can put it in a pattern to
2453 force this error. */
2456 fprintf (stderr
, "dj: unreviewed pattern:");
2457 if (current_output_insn
)
2458 debug_rtx (current_output_insn
);
2461 /* PSImode operations are either .w or .l depending on the target. */
2465 fprintf (file
, "w");
2467 fprintf (file
, "l");
2470 /* Inverted conditionals. */
2473 switch (GET_CODE (x
))
2479 fputs ("gtu", file
);
2485 fputs ("geu", file
);
2491 fputs ("leu", file
);
2497 fputs ("ltu", file
);
2510 /* Regular conditionals. */
2513 switch (GET_CODE (x
))
2519 fputs ("leu", file
);
2525 fputs ("ltu", file
);
2531 fputs ("gtu", file
);
2537 fputs ("geu", file
);
2550 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2552 if (code
== 'h' && GET_MODE (x
) == SImode
)
2554 x
= m32c_subreg (HImode
, x
, SImode
, 0);
2557 if (code
== 'H' && GET_MODE (x
) == SImode
)
2559 x
= m32c_subreg (HImode
, x
, SImode
, 2);
2562 if (code
== 'h' && GET_MODE (x
) == HImode
)
2564 x
= m32c_subreg (QImode
, x
, HImode
, 0);
2567 if (code
== 'H' && GET_MODE (x
) == HImode
)
2569 /* We can't actually represent this as an rtx. Do it here. */
2570 if (GET_CODE (x
) == REG
)
2575 fputs ("r0h", file
);
2578 fputs ("r1h", file
);
2584 /* This should be a MEM. */
2585 x
= m32c_subreg (QImode
, x
, HImode
, 1);
2588 /* This is for BMcond, which always wants word register names. */
2589 if (code
== 'h' && GET_MODE (x
) == QImode
)
2591 if (GET_CODE (x
) == REG
)
2592 x
= gen_rtx_REG (HImode
, REGNO (x
));
2595 /* 'x' and 'X' need to be ignored for non-immediates. */
2596 if ((code
== 'x' || code
== 'X') && GET_CODE (x
) != CONST_INT
)
2601 for (i
= 0; conversions
[i
].pattern
; i
++)
2602 if (conversions
[i
].code
== code
2603 && streq (conversions
[i
].pattern
, pattern
))
2605 for (j
= 0; conversions
[i
].format
[j
]; j
++)
2606 /* backslash quotes the next character in the output pattern. */
2607 if (conversions
[i
].format
[j
] == '\\')
2609 fputc (conversions
[i
].format
[j
+ 1], file
);
2612 /* Digits in the output pattern indicate that the
2613 corresponding RTX is to be output at that point. */
2614 else if (ISDIGIT (conversions
[i
].format
[j
]))
2616 rtx r
= patternr
[conversions
[i
].format
[j
] - '0'];
2617 switch (GET_CODE (r
))
2620 fprintf (file
, "%s",
2621 reg_name_with_mode (REGNO (r
), GET_MODE (r
)));
2630 int i
= (int) exact_log2 (v
);
2632 i
= (int) exact_log2 ((v
^ 0xffff) & 0xffff);
2634 i
= (int) exact_log2 ((v
^ 0xff) & 0xff);
2636 fprintf (file
, "%d", i
);
2640 /* Unsigned byte. */
2641 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2645 /* Unsigned word. */
2646 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2647 INTVAL (r
) & 0xffff);
2650 /* pushm and popm encode a register set into a single byte. */
2652 for (b
= 7; b
>= 0; b
--)
2653 if (INTVAL (r
) & (1 << b
))
2655 fprintf (file
, "%s%s", comma
, pushm_regs
[b
]);
2660 /* "Minus". Output -X */
2661 ival
= (-INTVAL (r
) & 0xffff);
2663 ival
= ival
- 0x10000;
2664 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2668 if (conversions
[i
].format
[j
+ 1] == '[' && ival
< 0)
2670 /* We can simulate negative displacements by
2671 taking advantage of address space
2672 wrapping when the offset can span the
2673 entire address range. */
2675 patternr
[conversions
[i
].format
[j
+ 2] - '0'];
2676 if (GET_CODE (base
) == REG
)
2677 switch (REGNO (base
))
2682 ival
= 0x1000000 + ival
;
2684 ival
= 0x10000 + ival
;
2688 ival
= 0x10000 + ival
;
2692 else if (code
== 'd' && ival
< 0 && j
== 0)
2693 /* The "mova" opcode is used to do addition by
2694 computing displacements, but again, we need
2695 displacements to be unsigned *if* they're
2696 the only component of the displacement
2697 (i.e. no "symbol-4" type displacement). */
2698 ival
= (TARGET_A24
? 0x1000000 : 0x10000) + ival
;
2700 if (conversions
[i
].format
[j
] == '0')
2702 /* More conversions to unsigned. */
2703 if (unsigned_const
== 2)
2705 if (unsigned_const
== 1)
2708 if (streq (conversions
[i
].pattern
, "mi")
2709 || streq (conversions
[i
].pattern
, "mmi"))
2711 /* Integers used as addresses are unsigned. */
2712 ival
&= (TARGET_A24
? 0xffffff : 0xffff);
2714 if (force_sign
&& ival
>= 0)
2716 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2721 /* We don't have const_double constants. If it
2722 happens, make it obvious. */
2723 fprintf (file
, "[const_double 0x%lx]",
2724 (unsigned long) CONST_DOUBLE_HIGH (r
));
2727 assemble_name (file
, XSTR (r
, 0));
2730 output_asm_label (r
);
2733 fprintf (stderr
, "don't know how to print this operand:");
2740 if (conversions
[i
].format
[j
] == 'z')
2742 /* Some addressing modes *must* have a displacement,
2743 so insert a zero here if needed. */
2745 for (k
= j
+ 1; conversions
[i
].format
[k
]; k
++)
2746 if (ISDIGIT (conversions
[i
].format
[k
]))
2748 rtx reg
= patternr
[conversions
[i
].format
[k
] - '0'];
2749 if (GET_CODE (reg
) == REG
2750 && (REGNO (reg
) == SB_REGNO
2751 || REGNO (reg
) == FB_REGNO
2752 || REGNO (reg
) == SP_REGNO
))
2757 /* Signed displacements off symbols need to have signs
2759 if (conversions
[i
].format
[j
] == '+'
2760 && (!code
|| code
== 'D' || code
== 'd')
2761 && ISDIGIT (conversions
[i
].format
[j
+ 1])
2762 && (GET_CODE (patternr
[conversions
[i
].format
[j
+ 1] - '0'])
2768 fputc (conversions
[i
].format
[j
], file
);
2772 if (!conversions
[i
].pattern
)
2774 fprintf (stderr
, "unconvertible operand %c `%s'", code
? code
: '-',
2777 fprintf (file
, "[%c.%s]", code
? code
: '-', pattern
);
2783 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2785 See m32c_print_operand above for descriptions of what these do. */
2787 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2788 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2791 m32c_print_operand_punct_valid_p (unsigned char c
)
2793 if (c
== '&' || c
== '!')
2799 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2801 #undef TARGET_PRINT_OPERAND_ADDRESS
2802 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2805 m32c_print_operand_address (FILE * stream
, rtx address
)
2807 if (GET_CODE (address
) == MEM
)
2808 address
= XEXP (address
, 0);
2810 /* cf: gcc.dg/asm-4.c. */
2811 gcc_assert (GET_CODE (address
) == REG
);
2813 m32c_print_operand (stream
, address
, 0);
2816 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2817 differently than general registers. */
2819 m32c_output_reg_push (FILE * s
, int regno
)
2821 if (regno
== FLG_REGNO
)
2822 fprintf (s
, "\tpushc\tflg\n");
2824 fprintf (s
, "\tpush.%c\t%s\n",
2825 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2828 /* Likewise for ASM_OUTPUT_REG_POP. */
2830 m32c_output_reg_pop (FILE * s
, int regno
)
2832 if (regno
== FLG_REGNO
)
2833 fprintf (s
, "\tpopc\tflg\n");
2835 fprintf (s
, "\tpop.%c\t%s\n",
2836 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2839 /* Defining target-specific uses of `__attribute__' */
2841 /* Used to simplify the logic below. Find the attributes wherever
2843 #define M32C_ATTRIBUTES(decl) \
2844 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2845 : DECL_ATTRIBUTES (decl) \
2846 ? (DECL_ATTRIBUTES (decl)) \
2847 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2849 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2851 interrupt_p (tree node ATTRIBUTE_UNUSED
)
2853 tree list
= M32C_ATTRIBUTES (node
);
2856 if (is_attribute_p ("interrupt", TREE_PURPOSE (list
)))
2858 list
= TREE_CHAIN (list
);
2860 return fast_interrupt_p (node
);
2863 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2865 bank_switch_p (tree node ATTRIBUTE_UNUSED
)
2867 tree list
= M32C_ATTRIBUTES (node
);
2870 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list
)))
2872 list
= TREE_CHAIN (list
);
2877 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2879 fast_interrupt_p (tree node ATTRIBUTE_UNUSED
)
2881 tree list
= M32C_ATTRIBUTES (node
);
2884 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list
)))
2886 list
= TREE_CHAIN (list
);
2892 interrupt_handler (tree
* node ATTRIBUTE_UNUSED
,
2893 tree name ATTRIBUTE_UNUSED
,
2894 tree args ATTRIBUTE_UNUSED
,
2895 int flags ATTRIBUTE_UNUSED
,
2896 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2901 /* Returns TRUE if given tree has the "function_vector" attribute. */
2903 m32c_special_page_vector_p (tree func
)
2907 if (TREE_CODE (func
) != FUNCTION_DECL
)
2910 list
= M32C_ATTRIBUTES (func
);
2913 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2915 list
= TREE_CHAIN (list
);
2921 function_vector_handler (tree
* node ATTRIBUTE_UNUSED
,
2922 tree name ATTRIBUTE_UNUSED
,
2923 tree args ATTRIBUTE_UNUSED
,
2924 int flags ATTRIBUTE_UNUSED
,
2925 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2929 /* The attribute is not supported for R8C target. */
2930 warning (OPT_Wattributes
,
2931 "%qE attribute is not supported for R8C target",
2933 *no_add_attrs
= true;
2935 else if (TREE_CODE (*node
) != FUNCTION_DECL
)
2937 /* The attribute must be applied to functions only. */
2938 warning (OPT_Wattributes
,
2939 "%qE attribute applies only to functions",
2941 *no_add_attrs
= true;
2943 else if (TREE_CODE (TREE_VALUE (args
)) != INTEGER_CST
)
2945 /* The argument must be a constant integer. */
2946 warning (OPT_Wattributes
,
2947 "%qE attribute argument not an integer constant",
2949 *no_add_attrs
= true;
2951 else if (TREE_INT_CST_LOW (TREE_VALUE (args
)) < 18
2952 || TREE_INT_CST_LOW (TREE_VALUE (args
)) > 255)
2954 /* The argument value must be between 18 to 255. */
2955 warning (OPT_Wattributes
,
2956 "%qE attribute argument should be between 18 to 255",
2958 *no_add_attrs
= true;
2963 /* If the function is assigned the attribute 'function_vector', it
2964 returns the function vector number, otherwise returns zero. */
2966 current_function_special_page_vector (rtx x
)
2970 if ((GET_CODE(x
) == SYMBOL_REF
)
2971 && (SYMBOL_REF_FLAGS (x
) & SYMBOL_FLAG_FUNCVEC_FUNCTION
))
2974 tree t
= SYMBOL_REF_DECL (x
);
2976 if (TREE_CODE (t
) != FUNCTION_DECL
)
2979 list
= M32C_ATTRIBUTES (t
);
2982 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2984 num
= TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list
)));
2988 list
= TREE_CHAIN (list
);
2997 #undef TARGET_ATTRIBUTE_TABLE
2998 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2999 static const struct attribute_spec m32c_attribute_table
[] = {
3000 {"interrupt", 0, 0, false, false, false, interrupt_handler
, false},
3001 {"bank_switch", 0, 0, false, false, false, interrupt_handler
, false},
3002 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler
, false},
3003 {"function_vector", 1, 1, true, false, false, function_vector_handler
,
3005 {0, 0, 0, 0, 0, 0, 0, false}
3008 #undef TARGET_COMP_TYPE_ATTRIBUTES
3009 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3011 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED
,
3012 const_tree type2 ATTRIBUTE_UNUSED
)
3014 /* 0=incompatible 1=compatible 2=warning */
3018 #undef TARGET_INSERT_ATTRIBUTES
3019 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3021 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED
,
3022 tree
* attr_ptr ATTRIBUTE_UNUSED
)
3025 /* See if we need to make #pragma address variables volatile. */
3027 if (TREE_CODE (node
) == VAR_DECL
)
3029 const char *name
= IDENTIFIER_POINTER (DECL_NAME (node
));
3030 if (m32c_get_pragma_address (name
, &addr
))
3032 TREE_THIS_VOLATILE (node
) = true;
3038 struct GTY(()) pragma_entry
{
3039 const char *varname
;
3042 typedef struct pragma_entry pragma_entry
;
3044 /* Hash table of pragma info. */
3045 static GTY((param_is (pragma_entry
))) htab_t pragma_htab
;
3048 pragma_entry_eq (const void *p1
, const void *p2
)
3050 const pragma_entry
*old
= (const pragma_entry
*) p1
;
3051 const char *new_name
= (const char *) p2
;
3053 return strcmp (old
->varname
, new_name
) == 0;
3057 pragma_entry_hash (const void *p
)
3059 const pragma_entry
*old
= (const pragma_entry
*) p
;
3060 return htab_hash_string (old
->varname
);
3064 m32c_note_pragma_address (const char *varname
, unsigned address
)
3066 pragma_entry
**slot
;
3069 pragma_htab
= htab_create_ggc (31, pragma_entry_hash
,
3070 pragma_entry_eq
, NULL
);
3072 slot
= (pragma_entry
**)
3073 htab_find_slot_with_hash (pragma_htab
, varname
,
3074 htab_hash_string (varname
), INSERT
);
3078 *slot
= ggc_alloc_pragma_entry ();
3079 (*slot
)->varname
= ggc_strdup (varname
);
3081 (*slot
)->address
= address
;
3085 m32c_get_pragma_address (const char *varname
, unsigned *address
)
3087 pragma_entry
**slot
;
3092 slot
= (pragma_entry
**)
3093 htab_find_slot_with_hash (pragma_htab
, varname
,
3094 htab_hash_string (varname
), NO_INSERT
);
3097 *address
= (*slot
)->address
;
3104 m32c_output_aligned_common (FILE *stream
, tree decl ATTRIBUTE_UNUSED
,
3106 int size
, int align
, int global
)
3110 if (m32c_get_pragma_address (name
, &address
))
3112 /* We never output these as global. */
3113 assemble_name (stream
, name
);
3114 fprintf (stream
, " = 0x%04x\n", address
);
3119 fprintf (stream
, "\t.local\t");
3120 assemble_name (stream
, name
);
3121 fprintf (stream
, "\n");
3123 fprintf (stream
, "\t.comm\t");
3124 assemble_name (stream
, name
);
3125 fprintf (stream
, ",%u,%u\n", size
, align
/ BITS_PER_UNIT
);
3130 /* This is a list of legal subregs of hard regs. */
3131 static const struct {
3132 unsigned char outer_mode_size
;
3133 unsigned char inner_mode_size
;
3134 unsigned char byte_mask
;
3135 unsigned char legal_when
;
3137 } legal_subregs
[] = {
3138 {1, 2, 0x03, 1, R0_REGNO
}, /* r0h r0l */
3139 {1, 2, 0x03, 1, R1_REGNO
}, /* r1h r1l */
3140 {1, 2, 0x01, 1, A0_REGNO
},
3141 {1, 2, 0x01, 1, A1_REGNO
},
3143 {1, 4, 0x01, 1, A0_REGNO
},
3144 {1, 4, 0x01, 1, A1_REGNO
},
3146 {2, 4, 0x05, 1, R0_REGNO
}, /* r2 r0 */
3147 {2, 4, 0x05, 1, R1_REGNO
}, /* r3 r1 */
3148 {2, 4, 0x05, 16, A0_REGNO
}, /* a1 a0 */
3149 {2, 4, 0x01, 24, A0_REGNO
}, /* a1 a0 */
3150 {2, 4, 0x01, 24, A1_REGNO
}, /* a1 a0 */
3152 {4, 8, 0x55, 1, R0_REGNO
}, /* r3 r1 r2 r0 */
3155 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3156 support. We also bail on MEMs with illegal addresses. */
3158 m32c_illegal_subreg_p (rtx op
)
3162 int src_mode
, dest_mode
;
3164 if (GET_CODE (op
) == MEM
3165 && ! m32c_legitimate_address_p (Pmode
, XEXP (op
, 0), false))
3170 if (GET_CODE (op
) != SUBREG
)
3173 dest_mode
= GET_MODE (op
);
3174 offset
= SUBREG_BYTE (op
);
3175 op
= SUBREG_REG (op
);
3176 src_mode
= GET_MODE (op
);
3178 if (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (src_mode
))
3180 if (GET_CODE (op
) != REG
)
3182 if (REGNO (op
) >= MEM0_REGNO
)
3185 offset
= (1 << offset
);
3187 for (i
= 0; i
< ARRAY_SIZE (legal_subregs
); i
++)
3188 if (legal_subregs
[i
].outer_mode_size
== GET_MODE_SIZE (dest_mode
)
3189 && legal_subregs
[i
].regno
== REGNO (op
)
3190 && legal_subregs
[i
].inner_mode_size
== GET_MODE_SIZE (src_mode
)
3191 && legal_subregs
[i
].byte_mask
& offset
)
3193 switch (legal_subregs
[i
].legal_when
)
3210 /* Returns TRUE if we support a move between the first two operands.
3211 At the moment, we just want to discourage mem to mem moves until
3212 after reload, because reload has a hard time with our limited
3213 number of address registers, and we can get into a situation where
3214 we need three of them when we only have two. */
3216 m32c_mov_ok (rtx
* operands
, enum machine_mode mode ATTRIBUTE_UNUSED
)
3218 rtx op0
= operands
[0];
3219 rtx op1
= operands
[1];
3224 #define DEBUG_MOV_OK 0
3226 fprintf (stderr
, "m32c_mov_ok %s\n", mode_name
[mode
]);
3231 if (GET_CODE (op0
) == SUBREG
)
3232 op0
= XEXP (op0
, 0);
3233 if (GET_CODE (op1
) == SUBREG
)
3234 op1
= XEXP (op1
, 0);
3236 if (GET_CODE (op0
) == MEM
3237 && GET_CODE (op1
) == MEM
3238 && ! reload_completed
)
3241 fprintf (stderr
, " - no, mem to mem\n");
3247 fprintf (stderr
, " - ok\n");
3252 /* Returns TRUE if two consecutive HImode mov instructions, generated
3253 for moving an immediate double data to a double data type variable
3254 location, can be combined into single SImode mov instruction. */
3256 m32c_immd_dbl_mov (rtx
* operands ATTRIBUTE_UNUSED
,
3257 enum machine_mode mode ATTRIBUTE_UNUSED
)
3259 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3266 /* Subregs are non-orthogonal for us, because our registers are all
3269 m32c_subreg (enum machine_mode outer
,
3270 rtx x
, enum machine_mode inner
, int byte
)
3274 /* Converting MEMs to different types that are the same size, we
3275 just rewrite them. */
3276 if (GET_CODE (x
) == SUBREG
3277 && SUBREG_BYTE (x
) == 0
3278 && GET_CODE (SUBREG_REG (x
)) == MEM
3279 && (GET_MODE_SIZE (GET_MODE (x
))
3280 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
3283 x
= gen_rtx_MEM (GET_MODE (x
), XEXP (SUBREG_REG (x
), 0));
3284 MEM_COPY_ATTRIBUTES (x
, SUBREG_REG (oldx
));
3287 /* Push/pop get done as smaller push/pops. */
3288 if (GET_CODE (x
) == MEM
3289 && (GET_CODE (XEXP (x
, 0)) == PRE_DEC
3290 || GET_CODE (XEXP (x
, 0)) == POST_INC
))
3291 return gen_rtx_MEM (outer
, XEXP (x
, 0));
3292 if (GET_CODE (x
) == SUBREG
3293 && GET_CODE (XEXP (x
, 0)) == MEM
3294 && (GET_CODE (XEXP (XEXP (x
, 0), 0)) == PRE_DEC
3295 || GET_CODE (XEXP (XEXP (x
, 0), 0)) == POST_INC
))
3296 return gen_rtx_MEM (outer
, XEXP (XEXP (x
, 0), 0));
3298 if (GET_CODE (x
) != REG
)
3300 rtx r
= simplify_gen_subreg (outer
, x
, inner
, byte
);
3301 if (GET_CODE (r
) == SUBREG
3302 && GET_CODE (x
) == MEM
3303 && MEM_VOLATILE_P (x
))
3305 /* Volatile MEMs don't get simplified, but we need them to
3306 be. We are little endian, so the subreg byte is the
3308 r
= adjust_address_nv (x
, outer
, byte
);
3314 if (r
>= FIRST_PSEUDO_REGISTER
|| r
== AP_REGNO
)
3315 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3317 if (IS_MEM_REGNO (r
))
3318 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3320 /* This is where the complexities of our register layout are
3324 else if (outer
== HImode
)
3326 if (r
== R0_REGNO
&& byte
== 2)
3328 else if (r
== R0_REGNO
&& byte
== 4)
3330 else if (r
== R0_REGNO
&& byte
== 6)
3332 else if (r
== R1_REGNO
&& byte
== 2)
3334 else if (r
== A0_REGNO
&& byte
== 2)
3337 else if (outer
== SImode
)
3339 if (r
== R0_REGNO
&& byte
== 0)
3341 else if (r
== R0_REGNO
&& byte
== 4)
3346 fprintf (stderr
, "m32c_subreg %s %s %d\n",
3347 mode_name
[outer
], mode_name
[inner
], byte
);
3351 return gen_rtx_REG (outer
, nr
);
3354 /* Used to emit move instructions. We split some moves,
3355 and avoid mem-mem moves. */
3357 m32c_prepare_move (rtx
* operands
, enum machine_mode mode
)
3359 if (far_addr_space_p (operands
[0])
3360 && CONSTANT_P (operands
[1]))
3362 operands
[1] = force_reg (GET_MODE (operands
[0]), operands
[1]);
3364 if (TARGET_A16
&& mode
== PSImode
)
3365 return m32c_split_move (operands
, mode
, 1);
3366 if ((GET_CODE (operands
[0]) == MEM
)
3367 && (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
))
3369 rtx pmv
= XEXP (operands
[0], 0);
3370 rtx dest_reg
= XEXP (pmv
, 0);
3371 rtx dest_mod
= XEXP (pmv
, 1);
3373 emit_insn (gen_rtx_SET (Pmode
, dest_reg
, dest_mod
));
3374 operands
[0] = gen_rtx_MEM (mode
, dest_reg
);
3376 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3377 operands
[1] = copy_to_mode_reg (mode
, operands
[1]);
3381 #define DEBUG_SPLIT 0
3383 /* Returns TRUE if the given PSImode move should be split. We split
3384 for all r8c/m16c moves, since it doesn't support them, and for
3385 POP.L as we can only *push* SImode. */
3387 m32c_split_psi_p (rtx
* operands
)
3390 fprintf (stderr
, "\nm32c_split_psi_p\n");
3391 debug_rtx (operands
[0]);
3392 debug_rtx (operands
[1]);
3397 fprintf (stderr
, "yes, A16\n");
3401 if (GET_CODE (operands
[1]) == MEM
3402 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3405 fprintf (stderr
, "yes, pop.l\n");
3410 fprintf (stderr
, "no, default\n");
3415 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3416 (define_expand), 1 if it is not optional (define_insn_and_split),
3417 and 3 for define_split (alternate api). */
3419 m32c_split_move (rtx
* operands
, enum machine_mode mode
, int split_all
)
3422 int parts
, si
, di
, rev
= 0;
3423 int rv
= 0, opi
= 2;
3424 enum machine_mode submode
= HImode
;
3425 rtx
*ops
, local_ops
[10];
3427 /* define_split modifies the existing operands, but the other two
3428 emit new insns. OPS is where we store the operand pairs, which
3439 /* Before splitting mem-mem moves, force one operand into a
3441 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3444 fprintf (stderr
, "force_reg...\n");
3445 debug_rtx (operands
[1]);
3447 operands
[1] = force_reg (mode
, operands
[1]);
3449 debug_rtx (operands
[1]);
3456 fprintf (stderr
, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3458 debug_rtx (operands
[0]);
3459 debug_rtx (operands
[1]);
3462 /* Note that split_all is not used to select the api after this
3463 point, so it's safe to set it to 3 even with define_insn. */
3464 /* None of the chips can move SI operands to sp-relative addresses,
3465 so we always split those. */
3466 if (satisfies_constraint_Ss (operands
[0]))
3470 && (far_addr_space_p (operands
[0])
3471 || far_addr_space_p (operands
[1])))
3474 /* We don't need to split these. */
3477 && (mode
== SImode
|| mode
== PSImode
)
3478 && !(GET_CODE (operands
[1]) == MEM
3479 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
))
3482 /* First, enumerate the subregs we'll be dealing with. */
3483 for (si
= 0; si
< parts
; si
++)
3486 m32c_subreg (submode
, operands
[0], mode
,
3487 si
* GET_MODE_SIZE (submode
));
3489 m32c_subreg (submode
, operands
[1], mode
,
3490 si
* GET_MODE_SIZE (submode
));
3493 /* Split pushes by emitting a sequence of smaller pushes. */
3494 if (GET_CODE (d
[0]) == MEM
&& GET_CODE (XEXP (d
[0], 0)) == PRE_DEC
)
3496 for (si
= parts
- 1; si
>= 0; si
--)
3498 ops
[opi
++] = gen_rtx_MEM (submode
,
3499 gen_rtx_PRE_DEC (Pmode
,
3507 /* Likewise for pops. */
3508 else if (GET_CODE (s
[0]) == MEM
&& GET_CODE (XEXP (s
[0], 0)) == POST_INC
)
3510 for (di
= 0; di
< parts
; di
++)
3513 ops
[opi
++] = gen_rtx_MEM (submode
,
3514 gen_rtx_POST_INC (Pmode
,
3522 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3523 for (di
= 0; di
< parts
- 1; di
++)
3524 for (si
= di
+ 1; si
< parts
; si
++)
3525 if (reg_mentioned_p (d
[di
], s
[si
]))
3529 for (si
= 0; si
< parts
; si
++)
3535 for (si
= parts
- 1; si
>= 0; si
--)
3542 /* Now emit any moves we may have accumulated. */
3543 if (rv
&& split_all
!= 3)
3546 for (i
= 2; i
< opi
; i
+= 2)
3547 emit_move_insn (ops
[i
], ops
[i
+ 1]);
3552 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3553 the like. For the R8C they expect one of the addresses to be in
3554 R1L:An so we need to arrange for that. Otherwise, it's just a
3555 matter of picking out the operands we want and emitting the right
3556 pattern for them. All these expanders, which correspond to
3557 patterns in blkmov.md, must return nonzero if they expand the insn,
3558 or zero if they should FAIL. */
3560 /* This is a memset() opcode. All operands are implied, so we need to
3561 arrange for them to be in the right registers. The opcode wants
3562 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3563 the count (HI), and $2 the value (QI). */
3565 m32c_expand_setmemhi(rtx
*operands
)
3567 rtx desta
, count
, val
;
3570 desta
= XEXP (operands
[0], 0);
3571 count
= operands
[1];
3574 desto
= gen_reg_rtx (Pmode
);
3575 counto
= gen_reg_rtx (HImode
);
3577 if (GET_CODE (desta
) != REG
3578 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3579 desta
= copy_to_mode_reg (Pmode
, desta
);
3581 /* This looks like an arbitrary restriction, but this is by far the
3582 most common case. For counts 8..14 this actually results in
3583 smaller code with no speed penalty because the half-sized
3584 constant can be loaded with a shorter opcode. */
3585 if (GET_CODE (count
) == CONST_INT
3586 && GET_CODE (val
) == CONST_INT
3587 && ! (INTVAL (count
) & 1)
3588 && (INTVAL (count
) > 1)
3589 && (INTVAL (val
) <= 7 && INTVAL (val
) >= -8))
3591 unsigned v
= INTVAL (val
) & 0xff;
3593 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3594 val
= copy_to_mode_reg (HImode
, GEN_INT (v
));
3596 emit_insn (gen_setmemhi_whi_op (desto
, counto
, val
, desta
, count
));
3598 emit_insn (gen_setmemhi_wpsi_op (desto
, counto
, val
, desta
, count
));
3602 /* This is the generalized memset() case. */
3603 if (GET_CODE (val
) != REG
3604 || REGNO (val
) < FIRST_PSEUDO_REGISTER
)
3605 val
= copy_to_mode_reg (QImode
, val
);
3607 if (GET_CODE (count
) != REG
3608 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3609 count
= copy_to_mode_reg (HImode
, count
);
3612 emit_insn (gen_setmemhi_bhi_op (desto
, counto
, val
, desta
, count
));
3614 emit_insn (gen_setmemhi_bpsi_op (desto
, counto
, val
, desta
, count
));
3619 /* This is a memcpy() opcode. All operands are implied, so we need to
3620 arrange for them to be in the right registers. The opcode wants
3621 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3622 is the source (MEM:BLK), and $2 the count (HI). */
3624 m32c_expand_movmemhi(rtx
*operands
)
3626 rtx desta
, srca
, count
;
3627 rtx desto
, srco
, counto
;
3629 desta
= XEXP (operands
[0], 0);
3630 srca
= XEXP (operands
[1], 0);
3631 count
= operands
[2];
3633 desto
= gen_reg_rtx (Pmode
);
3634 srco
= gen_reg_rtx (Pmode
);
3635 counto
= gen_reg_rtx (HImode
);
3637 if (GET_CODE (desta
) != REG
3638 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3639 desta
= copy_to_mode_reg (Pmode
, desta
);
3641 if (GET_CODE (srca
) != REG
3642 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3643 srca
= copy_to_mode_reg (Pmode
, srca
);
3645 /* Similar to setmem, but we don't need to check the value. */
3646 if (GET_CODE (count
) == CONST_INT
3647 && ! (INTVAL (count
) & 1)
3648 && (INTVAL (count
) > 1))
3650 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3652 emit_insn (gen_movmemhi_whi_op (desto
, srco
, counto
, desta
, srca
, count
));
3654 emit_insn (gen_movmemhi_wpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3658 /* This is the generalized memset() case. */
3659 if (GET_CODE (count
) != REG
3660 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3661 count
= copy_to_mode_reg (HImode
, count
);
3664 emit_insn (gen_movmemhi_bhi_op (desto
, srco
, counto
, desta
, srca
, count
));
3666 emit_insn (gen_movmemhi_bpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3671 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3672 the copy, which should point to the NUL at the end of the string,
3673 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3674 Since our opcode leaves the destination pointing *after* the NUL,
3675 we must emit an adjustment. */
3677 m32c_expand_movstr(rtx
*operands
)
3682 desta
= XEXP (operands
[1], 0);
3683 srca
= XEXP (operands
[2], 0);
3685 desto
= gen_reg_rtx (Pmode
);
3686 srco
= gen_reg_rtx (Pmode
);
3688 if (GET_CODE (desta
) != REG
3689 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3690 desta
= copy_to_mode_reg (Pmode
, desta
);
3692 if (GET_CODE (srca
) != REG
3693 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3694 srca
= copy_to_mode_reg (Pmode
, srca
);
3696 emit_insn (gen_movstr_op (desto
, srco
, desta
, srca
));
3697 /* desto ends up being a1, which allows this type of add through MOVA. */
3698 emit_insn (gen_addpsi3 (operands
[0], desto
, GEN_INT (-1)));
3703 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3704 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3705 $2 is the other (MEM:BLK). We must do the comparison, and then
3706 convert the flags to a signed integer result. */
3708 m32c_expand_cmpstr(rtx
*operands
)
3712 src1a
= XEXP (operands
[1], 0);
3713 src2a
= XEXP (operands
[2], 0);
3715 if (GET_CODE (src1a
) != REG
3716 || REGNO (src1a
) < FIRST_PSEUDO_REGISTER
)
3717 src1a
= copy_to_mode_reg (Pmode
, src1a
);
3719 if (GET_CODE (src2a
) != REG
3720 || REGNO (src2a
) < FIRST_PSEUDO_REGISTER
)
3721 src2a
= copy_to_mode_reg (Pmode
, src2a
);
3723 emit_insn (gen_cmpstrhi_op (src1a
, src2a
, src1a
, src2a
));
3724 emit_insn (gen_cond_to_int (operands
[0]));
3730 typedef rtx (*shift_gen_func
)(rtx
, rtx
, rtx
);
3732 static shift_gen_func
3733 shift_gen_func_for (int mode
, int code
)
3735 #define GFF(m,c,f) if (mode == m && code == c) return f
3736 GFF(QImode
, ASHIFT
, gen_ashlqi3_i
);
3737 GFF(QImode
, ASHIFTRT
, gen_ashrqi3_i
);
3738 GFF(QImode
, LSHIFTRT
, gen_lshrqi3_i
);
3739 GFF(HImode
, ASHIFT
, gen_ashlhi3_i
);
3740 GFF(HImode
, ASHIFTRT
, gen_ashrhi3_i
);
3741 GFF(HImode
, LSHIFTRT
, gen_lshrhi3_i
);
3742 GFF(PSImode
, ASHIFT
, gen_ashlpsi3_i
);
3743 GFF(PSImode
, ASHIFTRT
, gen_ashrpsi3_i
);
3744 GFF(PSImode
, LSHIFTRT
, gen_lshrpsi3_i
);
3745 GFF(SImode
, ASHIFT
, TARGET_A16
? gen_ashlsi3_16
: gen_ashlsi3_24
);
3746 GFF(SImode
, ASHIFTRT
, TARGET_A16
? gen_ashrsi3_16
: gen_ashrsi3_24
);
3747 GFF(SImode
, LSHIFTRT
, TARGET_A16
? gen_lshrsi3_16
: gen_lshrsi3_24
);
3752 /* The m32c only has one shift, but it takes a signed count. GCC
3753 doesn't want this, so we fake it by negating any shift count when
3754 we're pretending to shift the other way. Also, the shift count is
3755 limited to -8..8. It's slightly better to use two shifts for 9..15
3756 than to load the count into r1h, so we do that too. */
3758 m32c_prepare_shift (rtx
* operands
, int scale
, int shift_code
)
3760 enum machine_mode mode
= GET_MODE (operands
[0]);
3761 shift_gen_func func
= shift_gen_func_for (mode
, shift_code
);
3764 if (GET_CODE (operands
[2]) == CONST_INT
)
3766 int maxc
= TARGET_A24
&& (mode
== PSImode
|| mode
== SImode
) ? 32 : 8;
3767 int count
= INTVAL (operands
[2]) * scale
;
3769 while (count
> maxc
)
3771 temp
= gen_reg_rtx (mode
);
3772 emit_insn (func (temp
, operands
[1], GEN_INT (maxc
)));
3776 while (count
< -maxc
)
3778 temp
= gen_reg_rtx (mode
);
3779 emit_insn (func (temp
, operands
[1], GEN_INT (-maxc
)));
3783 emit_insn (func (operands
[0], operands
[1], GEN_INT (count
)));
3787 temp
= gen_reg_rtx (QImode
);
3789 /* The pattern has a NEG that corresponds to this. */
3790 emit_move_insn (temp
, gen_rtx_NEG (QImode
, operands
[2]));
3791 else if (TARGET_A16
&& mode
== SImode
)
3792 /* We do this because the code below may modify this, we don't
3793 want to modify the origin of this value. */
3794 emit_move_insn (temp
, operands
[2]);
3796 /* We'll only use it for the shift, no point emitting a move. */
3799 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 4)
3801 /* The m16c has a limit of -16..16 for SI shifts, even when the
3802 shift count is in a register. Since there are so many targets
3803 of these shifts, it's better to expand the RTL here than to
3804 call a helper function.
3806 The resulting code looks something like this:
3818 We take advantage of the fact that "negative" shifts are
3819 undefined to skip one of the comparisons. */
3822 rtx label
, insn
, tempvar
;
3824 emit_move_insn (operands
[0], operands
[1]);
3827 label
= gen_label_rtx ();
3828 LABEL_NUSES (label
) ++;
3830 tempvar
= gen_reg_rtx (mode
);
3832 if (shift_code
== ASHIFT
)
3834 /* This is a left shift. We only need check positive counts. */
3835 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode
, 0, 0),
3836 count
, GEN_INT (16), label
));
3837 emit_insn (func (tempvar
, operands
[0], GEN_INT (8)));
3838 emit_insn (func (operands
[0], tempvar
, GEN_INT (8)));
3839 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (-16)));
3840 emit_label_after (label
, insn
);
3844 /* This is a right shift. We only need check negative counts. */
3845 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode
, 0, 0),
3846 count
, GEN_INT (-16), label
));
3847 emit_insn (func (tempvar
, operands
[0], GEN_INT (-8)));
3848 emit_insn (func (operands
[0], tempvar
, GEN_INT (-8)));
3849 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (16)));
3850 emit_label_after (label
, insn
);
3852 operands
[1] = operands
[0];
3853 emit_insn (func (operands
[0], operands
[0], count
));
3861 /* The m32c has a limited range of operations that work on PSImode
3862 values; we have to expand to SI, do the math, and truncate back to
3863 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3866 m32c_expand_neg_mulpsi3 (rtx
* operands
)
3868 /* operands: a = b * i */
3869 rtx temp1
; /* b as SI */
3870 rtx scale
/* i as SI */;
3871 rtx temp2
; /* a*b as SI */
3873 temp1
= gen_reg_rtx (SImode
);
3874 temp2
= gen_reg_rtx (SImode
);
3875 if (GET_CODE (operands
[2]) != CONST_INT
)
3877 scale
= gen_reg_rtx (SImode
);
3878 emit_insn (gen_zero_extendpsisi2 (scale
, operands
[2]));
3881 scale
= copy_to_mode_reg (SImode
, operands
[2]);
3883 emit_insn (gen_zero_extendpsisi2 (temp1
, operands
[1]));
3884 temp2
= expand_simple_binop (SImode
, MULT
, temp1
, scale
, temp2
, 1, OPTAB_LIB
);
3885 emit_insn (gen_truncsipsi2 (operands
[0], temp2
));
3888 /* Pattern Output Functions */
3891 m32c_expand_movcc (rtx
*operands
)
3893 rtx rel
= operands
[1];
3895 if (GET_CODE (rel
) != EQ
&& GET_CODE (rel
) != NE
)
3897 if (GET_CODE (operands
[2]) != CONST_INT
3898 || GET_CODE (operands
[3]) != CONST_INT
)
3900 if (GET_CODE (rel
) == NE
)
3902 rtx tmp
= operands
[2];
3903 operands
[2] = operands
[3];
3905 rel
= gen_rtx_EQ (GET_MODE (rel
), XEXP (rel
, 0), XEXP (rel
, 1));
3908 emit_move_insn (operands
[0],
3909 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3916 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3918 m32c_expand_insv (rtx
*operands
)
3923 if (INTVAL (operands
[1]) != 1)
3926 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3927 if (GET_CODE (operands
[3]) != CONST_INT
)
3929 if (INTVAL (operands
[3]) != 0
3930 && INTVAL (operands
[3]) != 1
3931 && INTVAL (operands
[3]) != -1)
3934 mask
= 1 << INTVAL (operands
[2]);
3937 if (GET_CODE (op0
) == SUBREG
3938 && SUBREG_BYTE (op0
) == 0)
3940 rtx sub
= SUBREG_REG (op0
);
3941 if (GET_MODE (sub
) == HImode
|| GET_MODE (sub
) == QImode
)
3945 if (!can_create_pseudo_p ()
3946 || (GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
)))
3950 src0
= gen_reg_rtx (GET_MODE (op0
));
3951 emit_move_insn (src0
, op0
);
3954 if (GET_MODE (op0
) == HImode
3955 && INTVAL (operands
[2]) >= 8
3956 && GET_CODE (op0
) == MEM
)
3958 /* We are little endian. */
3959 rtx new_mem
= gen_rtx_MEM (QImode
, plus_constant (Pmode
,
3961 MEM_COPY_ATTRIBUTES (new_mem
, op0
);
3965 /* First, we generate a mask with the correct polarity. If we are
3966 storing a zero, we want an AND mask, so invert it. */
3967 if (INTVAL (operands
[3]) == 0)
3969 /* Storing a zero, use an AND mask */
3970 if (GET_MODE (op0
) == HImode
)
3975 /* Now we need to properly sign-extend the mask in case we need to
3976 fall back to an AND or OR opcode. */
3977 if (GET_MODE (op0
) == HImode
)
3988 switch ( (INTVAL (operands
[3]) ? 4 : 0)
3989 + ((GET_MODE (op0
) == HImode
) ? 2 : 0)
3990 + (TARGET_A24
? 1 : 0))
3992 case 0: p
= gen_andqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3993 case 1: p
= gen_andqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3994 case 2: p
= gen_andhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3995 case 3: p
= gen_andhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3996 case 4: p
= gen_iorqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3997 case 5: p
= gen_iorqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3998 case 6: p
= gen_iorhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3999 case 7: p
= gen_iorhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
4000 default: p
= NULL_RTX
; break; /* Not reached, but silences a warning. */
4008 m32c_scc_pattern(rtx
*operands
, RTX_CODE code
)
4010 static char buf
[30];
4011 if (GET_CODE (operands
[0]) == REG
4012 && REGNO (operands
[0]) == R0_REGNO
)
4015 return "stzx\t#1,#0,r0l";
4017 return "stzx\t#0,#1,r0l";
4019 sprintf(buf
, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code
));
4023 /* Encode symbol attributes of a SYMBOL_REF into its
4024 SYMBOL_REF_FLAGS. */
4026 m32c_encode_section_info (tree decl
, rtx rtl
, int first
)
4028 int extra_flags
= 0;
4030 default_encode_section_info (decl
, rtl
, first
);
4031 if (TREE_CODE (decl
) == FUNCTION_DECL
4032 && m32c_special_page_vector_p (decl
))
4034 extra_flags
= SYMBOL_FLAG_FUNCVEC_FUNCTION
;
4037 SYMBOL_REF_FLAGS (XEXP (rtl
, 0)) |= extra_flags
;
4040 /* Returns TRUE if the current function is a leaf, and thus we can
4041 determine which registers an interrupt function really needs to
4042 save. The logic below is mostly about finding the insn sequence
4043 that's the function, versus any sequence that might be open for the
4046 m32c_leaf_function_p (void)
4048 rtx saved_first
, saved_last
;
4049 struct sequence_stack
*seq
;
4052 saved_first
= crtl
->emit
.x_first_insn
;
4053 saved_last
= crtl
->emit
.x_last_insn
;
4054 for (seq
= crtl
->emit
.sequence_stack
; seq
&& seq
->next
; seq
= seq
->next
)
4058 crtl
->emit
.x_first_insn
= seq
->first
;
4059 crtl
->emit
.x_last_insn
= seq
->last
;
4062 rv
= leaf_function_p ();
4064 crtl
->emit
.x_first_insn
= saved_first
;
4065 crtl
->emit
.x_last_insn
= saved_last
;
4069 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4070 opcodes. If the function doesn't need the frame base or stack
4071 pointer, it can use the simpler RTS opcode. */
4073 m32c_function_needs_enter (void)
4076 struct sequence_stack
*seq
;
4077 rtx sp
= gen_rtx_REG (Pmode
, SP_REGNO
);
4078 rtx fb
= gen_rtx_REG (Pmode
, FB_REGNO
);
4080 insn
= get_insns ();
4081 for (seq
= crtl
->emit
.sequence_stack
;
4083 insn
= seq
->first
, seq
= seq
->next
);
4087 if (reg_mentioned_p (sp
, insn
))
4089 if (reg_mentioned_p (fb
, insn
))
4091 insn
= NEXT_INSN (insn
);
4096 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4097 frame-related. Return PAR.
4099 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4100 PARALLEL rtx other than the first if they do not have the
4101 FRAME_RELATED flag set on them. So this function is handy for
4102 marking up 'enter' instructions. */
4104 m32c_all_frame_related (rtx par
)
4106 int len
= XVECLEN (par
, 0);
4109 for (i
= 0; i
< len
; i
++)
4110 F (XVECEXP (par
, 0, i
));
4115 /* Emits the prologue. See the frame layout comment earlier in this
4116 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4117 that we manually update sp. */
4119 m32c_emit_prologue (void)
4121 int frame_size
, extra_frame_size
= 0, reg_save_size
;
4122 int complex_prologue
= 0;
4124 cfun
->machine
->is_leaf
= m32c_leaf_function_p ();
4125 if (interrupt_p (cfun
->decl
))
4127 cfun
->machine
->is_interrupt
= 1;
4128 complex_prologue
= 1;
4130 else if (bank_switch_p (cfun
->decl
))
4131 warning (OPT_Wattributes
,
4132 "%<bank_switch%> has no effect on non-interrupt functions");
4134 reg_save_size
= m32c_pushm_popm (PP_justcount
);
4136 if (interrupt_p (cfun
->decl
))
4138 if (bank_switch_p (cfun
->decl
))
4139 emit_insn (gen_fset_b ());
4140 else if (cfun
->machine
->intr_pushm
)
4141 emit_insn (gen_pushm (GEN_INT (cfun
->machine
->intr_pushm
)));
4145 m32c_initial_elimination_offset (FB_REGNO
, SP_REGNO
) - reg_save_size
;
4147 && !m32c_function_needs_enter ())
4148 cfun
->machine
->use_rts
= 1;
4150 if (frame_size
> 254)
4152 extra_frame_size
= frame_size
- 254;
4155 if (cfun
->machine
->use_rts
== 0)
4156 F (emit_insn (m32c_all_frame_related
4158 ? gen_prologue_enter_16 (GEN_INT (frame_size
+ 2))
4159 : gen_prologue_enter_24 (GEN_INT (frame_size
+ 4)))));
4161 if (extra_frame_size
)
4163 complex_prologue
= 1;
4165 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode
, SP_REGNO
),
4166 gen_rtx_REG (HImode
, SP_REGNO
),
4167 GEN_INT (-extra_frame_size
))));
4169 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode
, SP_REGNO
),
4170 gen_rtx_REG (PSImode
, SP_REGNO
),
4171 GEN_INT (-extra_frame_size
))));
4174 complex_prologue
+= m32c_pushm_popm (PP_pushm
);
4176 /* This just emits a comment into the .s file for debugging. */
4177 if (complex_prologue
)
4178 emit_insn (gen_prologue_end ());
4181 /* Likewise, for the epilogue. The only exception is that, for
4182 interrupts, we must manually unwind the frame as the REIT opcode
4185 m32c_emit_epilogue (void)
4187 int popm_count
= m32c_pushm_popm (PP_justcount
);
4189 /* This just emits a comment into the .s file for debugging. */
4190 if (popm_count
> 0 || cfun
->machine
->is_interrupt
)
4191 emit_insn (gen_epilogue_start ());
4194 m32c_pushm_popm (PP_popm
);
4196 if (cfun
->machine
->is_interrupt
)
4198 enum machine_mode spmode
= TARGET_A16
? HImode
: PSImode
;
4200 /* REIT clears B flag and restores $fp for us, but we still
4201 have to fix up the stack. USE_RTS just means we didn't
4203 if (!cfun
->machine
->use_rts
)
4205 emit_move_insn (gen_rtx_REG (spmode
, A0_REGNO
),
4206 gen_rtx_REG (spmode
, FP_REGNO
));
4207 emit_move_insn (gen_rtx_REG (spmode
, SP_REGNO
),
4208 gen_rtx_REG (spmode
, A0_REGNO
));
4209 /* We can't just add this to the POPM because it would be in
4210 the wrong order, and wouldn't fix the stack if we're bank
4213 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, FP_REGNO
)));
4215 emit_insn (gen_poppsi (gen_rtx_REG (PSImode
, FP_REGNO
)));
4217 if (!bank_switch_p (cfun
->decl
) && cfun
->machine
->intr_pushm
)
4218 emit_insn (gen_popm (GEN_INT (cfun
->machine
->intr_pushm
)));
4220 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4221 generated only for M32C/M32CM targets (generate the REIT
4222 instruction otherwise). */
4223 if (fast_interrupt_p (cfun
->decl
))
4225 /* Check if fast_attribute is set for M32C or M32CM. */
4228 emit_jump_insn (gen_epilogue_freit ());
4230 /* If fast_interrupt attribute is set for an R8C or M16C
4231 target ignore this attribute and generated REIT
4235 warning (OPT_Wattributes
,
4236 "%<fast_interrupt%> attribute directive ignored");
4237 emit_jump_insn (gen_epilogue_reit_16 ());
4240 else if (TARGET_A16
)
4241 emit_jump_insn (gen_epilogue_reit_16 ());
4243 emit_jump_insn (gen_epilogue_reit_24 ());
4245 else if (cfun
->machine
->use_rts
)
4246 emit_jump_insn (gen_epilogue_rts ());
4247 else if (TARGET_A16
)
4248 emit_jump_insn (gen_epilogue_exitd_16 ());
4250 emit_jump_insn (gen_epilogue_exitd_24 ());
4254 m32c_emit_eh_epilogue (rtx ret_addr
)
4256 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4257 return to. We have to fudge the stack, pop everything, pop SP
4258 (fudged), and return (fudged). This is actually easier to do in
4259 assembler, so punt to libgcc. */
4260 emit_jump_insn (gen_eh_epilogue (ret_addr
, cfun
->machine
->eh_stack_adjust
));
4261 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
/* Indicate which flags must be properly set for a given conditional.  */
/* NOTE(review): the body of this switch — the per-comparison-code
   FLAGS_* returns (original lines 4269-4291) — is missing from this
   extraction; only the dispatch on the rtx comparison code is visible.
   Presumably each case returns the subset of N/Z/C/O/S flags the
   branch reads, with FLAGS_N for the default — confirm against the
   full source before editing.  */
flags_needed_for_conditional (rtx cond
)
switch (GET_CODE (cond
))
4292 /* Returns true if a compare insn is redundant because it would only
4293 set flags that are already set correctly. */
4295 m32c_compare_redundant (rtx cmp
, rtx
*operands
)
4309 fprintf(stderr
, "\n\033[32mm32c_compare_redundant\033[0m\n");
4313 fprintf(stderr
, "operands[%d] = ", i
);
4314 debug_rtx(operands
[i
]);
4318 next
= next_nonnote_insn (cmp
);
4319 if (!next
|| !INSN_P (next
))
4322 fprintf(stderr
, "compare not followed by insn\n");
4327 if (GET_CODE (PATTERN (next
)) == SET
4328 && GET_CODE (XEXP ( PATTERN (next
), 1)) == IF_THEN_ELSE
)
4330 next
= XEXP (XEXP (PATTERN (next
), 1), 0);
4332 else if (GET_CODE (PATTERN (next
)) == SET
)
4334 /* If this is a conditional, flags_needed will be something
4335 other than FLAGS_N, which we test below. */
4336 next
= XEXP (PATTERN (next
), 1);
4341 fprintf(stderr
, "compare not followed by conditional\n");
4347 fprintf(stderr
, "conditional is: ");
4351 flags_needed
= flags_needed_for_conditional (next
);
4352 if (flags_needed
== FLAGS_N
)
4355 fprintf(stderr
, "compare not followed by conditional\n");
4361 /* Compare doesn't set overflow and carry the same way that
4362 arithmetic instructions do, so we can't replace those. */
4363 if (flags_needed
& FLAGS_OC
)
4368 prev
= prev_nonnote_insn (prev
);
4372 fprintf(stderr
, "No previous insn.\n");
4379 fprintf(stderr
, "Previous insn is a non-insn.\n");
4383 pp
= PATTERN (prev
);
4384 if (GET_CODE (pp
) != SET
)
4387 fprintf(stderr
, "Previous insn is not a SET.\n");
4391 pflags
= get_attr_flags (prev
);
4393 /* Looking up attributes of previous insns corrupted the recog
4395 INSN_UID (cmp
) = -1;
4396 recog (PATTERN (cmp
), cmp
, 0);
4398 if (pflags
== FLAGS_N
4399 && reg_mentioned_p (op0
, pp
))
4402 fprintf(stderr
, "intermediate non-flags insn uses op:\n");
4408 /* Check for comparisons against memory - between volatiles and
4409 aliases, we just can't risk this one. */
4410 if (GET_CODE (operands
[0]) == MEM
4411 || GET_CODE (operands
[0]) == MEM
)
4414 fprintf(stderr
, "comparisons with memory:\n");
4420 /* Check for PREV changing a register that's used to compute a
4421 value in CMP, even if it doesn't otherwise change flags. */
4422 if (GET_CODE (operands
[0]) == REG
4423 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[0]))
4426 fprintf(stderr
, "sub-value affected, op0:\n");
4431 if (GET_CODE (operands
[1]) == REG
4432 && rtx_referenced_p (SET_DEST (PATTERN (prev
)), operands
[1]))
4435 fprintf(stderr
, "sub-value affected, op1:\n");
4441 } while (pflags
== FLAGS_N
);
4443 fprintf(stderr
, "previous flag-setting insn:\n");
4448 if (GET_CODE (pp
) == SET
4449 && GET_CODE (XEXP (pp
, 0)) == REG
4450 && REGNO (XEXP (pp
, 0)) == FLG_REGNO
4451 && GET_CODE (XEXP (pp
, 1)) == COMPARE
)
4453 /* Adjacent cbranches must have the same operands to be
4455 rtx pop0
= XEXP (XEXP (pp
, 1), 0);
4456 rtx pop1
= XEXP (XEXP (pp
, 1), 1);
4458 fprintf(stderr
, "adjacent cbranches\n");
4462 if (rtx_equal_p (op0
, pop0
)
4463 && rtx_equal_p (op1
, pop1
))
4466 fprintf(stderr
, "prev cmp not same\n");
4471 /* Else the previous insn must be a SET, with either the source or
4472 dest equal to operands[0], and operands[1] must be zero. */
4474 if (!rtx_equal_p (op1
, const0_rtx
))
4477 fprintf(stderr
, "operands[1] not const0_rtx\n");
4481 if (GET_CODE (pp
) != SET
)
4484 fprintf (stderr
, "pp not set\n");
4488 if (!rtx_equal_p (op0
, SET_SRC (pp
))
4489 && !rtx_equal_p (op0
, SET_DEST (pp
)))
4492 fprintf(stderr
, "operands[0] not found in set\n");
4498 fprintf(stderr
, "cmp flags %x prev flags %x\n", flags_needed
, pflags
);
4500 if ((pflags
& flags_needed
) == flags_needed
)
4506 /* Return the pattern for a compare. This will be commented out if
4507 the compare is redundant, else a normal pattern is returned. Thus,
4508 the assembler output says where the compare would have been. */
4510 m32c_output_compare (rtx insn
, rtx
*operands
)
4512 static char templ
[] = ";cmp.b\t%1,%0";
4515 templ
[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands
[0]))];
4516 if (m32c_compare_redundant (insn
, operands
))
4519 fprintf(stderr
, "cbranch: cmp not needed\n");
4525 fprintf(stderr
, "cbranch: cmp needed: `%s'\n", templ
+ 1);
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collection roots generated by gengtype for this file.  */
#include "gt-m32c.h"