2014-12-19 Andrew MacLeod <amacleod@redhat.com>
[official-gcc.git] / gcc / config / m32c / m32c.c
blobd5eda945e26205c1b4f33fb165bbd42ed4b2d80e
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "tree.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "expr.h"
43 #include "insn-codes.h"
44 #include "optabs.h"
45 #include "except.h"
46 #include "hashtab.h"
47 #include "hash-set.h"
48 #include "vec.h"
49 #include "machmode.h"
50 #include "input.h"
51 #include "function.h"
52 #include "ggc.h"
53 #include "target.h"
54 #include "target-def.h"
55 #include "tm_p.h"
56 #include "langhooks.h"
57 #include "hash-table.h"
58 #include "predict.h"
59 #include "dominance.h"
60 #include "cfg.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "basic-block.h"
67 #include "tree-ssa-alias.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimple-expr.h"
72 #include "is-a.h"
73 #include "gimple.h"
74 #include "df.h"
75 #include "tm-constrs.h"
76 #include "builtins.h"
78 /* Prototypes */
/* Used by m32c_pushm_popm to select whether to emit a PUSHM insn,
   emit a POPM insn, or merely compute the size of the register save
   area without emitting anything.  */
typedef enum
{
  PP_pushm,	/* emit pushm and dwarf notes */
  PP_popm,	/* emit popm */
  PP_justcount	/* only return the byte count */
} Push_Pop_Type;
88 static bool m32c_function_needs_enter (void);
89 static tree interrupt_decl_handler (tree *, tree, tree, int, bool *);
90 static tree interrupt_type_handler (tree *, tree, tree, int, bool *);
91 static tree function_vector_handler (tree *, tree, tree, int, bool *);
92 static int interrupt_p (tree node);
93 static int bank_switch_p (tree node);
94 static int fast_interrupt_p (tree node);
95 static int interrupt_p (tree node);
96 static bool m32c_asm_integer (rtx, unsigned int, int);
97 static int m32c_comp_type_attributes (const_tree, const_tree);
98 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
99 static struct machine_function *m32c_init_machine_status (void);
100 static void m32c_insert_attributes (tree, tree *);
101 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
102 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
103 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
104 const_tree, bool);
105 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
106 const_tree, bool);
107 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
108 const_tree, bool);
109 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
110 static int m32c_pushm_popm (Push_Pop_Type);
111 static bool m32c_strict_argument_naming (cumulative_args_t);
112 static rtx m32c_struct_value_rtx (tree, int);
113 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
114 static int need_to_save (int);
115 static rtx m32c_function_value (const_tree, const_tree, bool);
116 static rtx m32c_libcall_value (machine_mode, const_rtx);
118 /* Returns true if an address is specified, else false. */
119 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
121 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
123 #define streq(a,b) (strcmp ((a), (b)) == 0)
125 /* Internal support routines */
127 /* Debugging statements are tagged with DEBUG0 only so that they can
128 be easily enabled individually, by replacing the '0' with '1' as
129 needed. */
130 #define DEBUG0 0
131 #define DEBUG1 1
133 #if DEBUG0
134 /* This is needed by some of the commented-out debug statements
135 below. */
136 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
137 #endif
138 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
140 /* These are all to support encode_pattern(). */
141 static char pattern[30], *patternp;
142 static GTY(()) rtx patternr[30];
143 #define RTX_IS(x) (streq (pattern, x))
145 /* Some macros to simplify the logic throughout this file. */
146 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
147 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
149 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
150 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
152 static int
153 far_addr_space_p (rtx x)
155 if (GET_CODE (x) != MEM)
156 return 0;
157 #if DEBUG0
158 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
159 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
160 #endif
161 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
/* We do most RTX matching by converting the RTX into a string, and
   using string compares.  This vastly simplifies the logic in many of
   the functions in this file.

   On exit, pattern[] has the encoded string (use RTX_IS("...") to
   compare it) and patternr[] has pointers to the nodes in the RTX
   corresponding to each character in the encoded string.  The latter
   is mostly used by print_operand().

   Unrecognized patterns have '?' in them; this shows up when the
   assembler complains about syntax errors.  */

/* Recursive worker for encode_pattern(): appends one or more
   characters describing X to pattern[], records X in patternr[] at
   the corresponding index, and recurses into X's operands.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Reserve room for the trailing NUL; overlong encodings are
     truncated and poisoned with '?' so they match nothing.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* A size-changing subreg gets an extra 'S', as does a PSImode
	 subreg of a plain register.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - a MEM's address is encoded the same way as a
	 CONST's operand.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* The unspec number is encoded as a single digit after 'u'.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unknown codes become '?', which no RTX_IS() string will
	 match.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
286 static void
287 encode_pattern (rtx x)
289 patternp = pattern;
290 encode_pattern_1 (x);
291 *patternp = 0;
294 /* Since register names indicate the mode they're used in, we need a
295 way to determine which name to refer to the register with. Called
296 by print_operand(). */
298 static const char *
299 reg_name_with_mode (int regno, machine_mode mode)
301 int mlen = GET_MODE_SIZE (mode);
302 if (regno == R0_REGNO && mlen == 1)
303 return "r0l";
304 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
305 return "r2r0";
306 if (regno == R0_REGNO && mlen == 6)
307 return "r2r1r0";
308 if (regno == R0_REGNO && mlen == 8)
309 return "r3r1r2r0";
310 if (regno == R1_REGNO && mlen == 1)
311 return "r1l";
312 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
313 return "r3r1";
314 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
315 return "a1a0";
316 return reg_names[regno];
319 /* How many bytes a register uses on stack when it's pushed. We need
320 to know this because the push opcode needs to explicitly indicate
321 the size of the register, even though the name of the register
322 already tells it that. Used by m32c_output_reg_{push,pop}, which
323 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
325 static int
326 reg_push_size (int regno)
328 switch (regno)
330 case R0_REGNO:
331 case R1_REGNO:
332 return 2;
333 case R2_REGNO:
334 case R3_REGNO:
335 case FLG_REGNO:
336 return 2;
337 case A0_REGNO:
338 case A1_REGNO:
339 case SB_REGNO:
340 case FB_REGNO:
341 case SP_REGNO:
342 if (TARGET_A16)
343 return 2;
344 else
345 return 3;
346 default:
347 gcc_unreachable ();
/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
	      reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  /* Identical classes trivially intersect to themselves.  */
  if (original_class == limiting_class)
    return original_class;

  /* cc = the hard registers common to both classes.  */
  cc = reg_class_contents[original_class];
  AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);

  /* Pick the largest named register class wholly contained in the
     intersection.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
	if (best_size < reg_class_size[i])
	  {
	    best = (reg_class_t) i;
	    best_size = reg_class_size[i];
	  }
    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Answers whether any register in RCLASS can
   hold a value of MODE.  */
static bool
class_can_hold_mode (reg_class_t rclass, machine_mode mode)
{
  /* Cache the results:  0=untested 1=no 2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a member register that fits is found.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && HARD_REGNO_MODE_OK (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }
#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
413 /* Run-time Target Specification. */
415 /* Memregs are memory locations that gcc treats like general
416 registers, as there are a limited number of true registers and the
417 m32c families can use memory in most places that registers can be
418 used.
420 However, since memory accesses are more expensive than registers,
421 we allow the user to limit the number of memregs available, in
422 order to try to persuade gcc to try harder to use real registers.
424 Memregs are provided by lib1funcs.S.
427 int ok_to_change_target_memregs = TRUE;
/* Implements TARGET_OPTION_OVERRIDE.  Validates -memregs= and pins
   optimization flags this back end cannot cope with.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m32c_option_override

static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  /* NOTE(review): ivopts is disabled on the 24-bit family; presumably
     it produces addressing this port mishandles - confirm before
     removing.  */
  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Re-apply flags that must stay pinned when options change
   per-function; mirrors the TARGET_A16 handling in
   m32c_option_override.  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
474 /* Defining data structures for per-function information */
/* The usual; we set up our machine_function data.  GC-allocated and
   zero-initialized so it lives as long as the function itself.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
491 /* Storage Layout */
493 /* Register Basics */
495 /* Basic Characteristics of Registers */
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each row gives, for one hard register, how many registers
   are needed to hold a value of the given mode class; 0 means the
   mode does not fit in that register at all.  Indexed by hard
   register number; consumed by m32c_hard_regno_nregs_1.  */
static struct
{
  char qi_regs;	/* QImode (1 byte) */
  char hi_regs;	/* HImode (2 bytes) */
  char pi_regs;	/* PSImode (pointer-sized) */
  char si_regs;	/* SImode (4 bytes) */
  char di_regs;	/* DImode (8 bytes) */
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
   of available memregs, and select which registers need to be preserved
   across calls based on the chip family.  */

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
void
m32c_conditional_register_usage (void)
{
  int i;

  if (0 <= target_memregs && target_memregs <= 16)
    {
      /* The command line option is bytes, but our "registers" are
	 16-bit words.  Fix every memreg beyond the requested count
	 and drop it from MEM_REGS.  */
      for (i = (target_memregs+1)/2; i < 8; i++)
	{
	  fixed_regs[MEM0_REGNO + i] = 1;
	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
	}
    }

  /* M32CM and M32C preserve more registers across function calls.  */
  if (TARGET_A24)
    {
      call_used_regs[R1_REGNO] = 0;
      call_used_regs[R2_REGNO] = 0;
      call_used_regs[R3_REGNO] = 0;
      call_used_regs[A0_REGNO] = 0;
      call_used_regs[A1_REGNO] = 0;
    }
}
563 /* How Values Fit in Registers */
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when
   REGNO cannot hold MODE at all.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; wider modes take consecutive pairs.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On the 16-bit family, SImode in a0 uses the a1a0 pair.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
595 m32c_hard_regno_nregs (int regno, machine_mode mode)
597 int rv = m32c_hard_regno_nregs_1 (regno, mode);
598 return rv ? rv : 1;
601 /* Implements HARD_REGNO_MODE_OK. The above function does the work
602 already; just test its return value. */
604 m32c_hard_regno_ok (int regno, machine_mode mode)
606 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
   registers are all different sizes.  However, since most modes are
   bigger than our registers anyway, it's easier to implement this
   function that way, leaving QImode as the only unique case.  */
int
m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
{
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
    return 1;

#if 0
  /* NOTE(review): this QImode special case is disabled, so the
     function currently answers "tieable" for every mode pair.  */
  if (m1 == QImode || m2 == QImode)
    return 0;
#endif

  return 1;
}
627 /* Register Classes */
/* Implements REGNO_REG_CLASS.  Each ordinary hard register maps to
   the singleton class for that register; memregs all share MEM_REGS;
   anything else falls back to ALL_REGS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
	return MEM_REGS;
      return ALL_REGS;
    }
}
662 /* Implements REGNO_OK_FOR_BASE_P. */
664 m32c_regno_ok_for_base_p (int regno)
666 if (regno == A0_REGNO
667 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
668 return 1;
669 return 0;
672 #define DEBUG_RELOAD 0
/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no constraint, prefer byte registers for QImode and the
     general data registers otherwise.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers can only be reloaded through suitably sized
     general registers.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*newclass = HI_REGS; */
	  break;
	}
    }

  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  /* Narrow to the intersection, keeping the original class if the
     intersection is empty.  */
  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads
   use the same class preferences as input reloads.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
   address registers for reloads since they're needed for address
   reloads.  */
int
m32c_limit_reload_class (machine_mode mode, int rclass)
{
#if DEBUG_RELOAD
  fprintf (stderr, "limit_reload_class for %s: %s ->",
	   mode_name[mode], class_names[rclass]);
#endif

  /* Narrow the class to registers that can hold MODE.  */
  if (mode == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);
  else if (mode == HImode)
    rclass = reduce_class (rclass, HI_REGS, rclass);
  else if (mode == SImode)
    rclass = reduce_class (rclass, SI_REGS, rclass);

  /* Unless address registers were explicitly requested, keep them
     free for address reloads.  */
  if (rclass != A_REGS)
    rclass = reduce_class (rclass, DI_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, " %s\n", class_names[rclass]);
#endif
  return rclass;
}
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory must go through r0/r1 when RCLASS has no register
     outside r2/r3.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* sb/fb/sp as sources need an intermediate general (16-bit chips or
     HImode) or address register.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
789 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
790 reloads. */
792 #undef TARGET_CLASS_LIKELY_SPILLED_P
793 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
795 static bool
796 m32c_class_likely_spilled_p (reg_class_t regclass)
798 if (regclass == A_REGS)
799 return true;
801 return (reg_class_size[(int) regclass] == 1);
804 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
805 documented meaning, to avoid potential inconsistencies with actual
806 class definitions. */
808 #undef TARGET_CLASS_MAX_NREGS
809 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
811 static unsigned char
812 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
814 int rn;
815 unsigned char max = 0;
817 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
818 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
820 unsigned char n = m32c_hard_regno_nregs (rn, mode);
821 if (max < n)
822 max = n;
824 return max;
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* Changing to QImode is forbidden for every register in the 0x1ffa
     mask, i.e. everything but r0 and r1.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
860 /* Helpers for the rest of the file. */
861 /* TRUE if the rtx is a REG rtx for the given register. */
862 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
863 && REGNO (rtx) == regno)
864 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
865 base register in address calculations (hence the "strict"
866 argument). */
867 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
868 && (REGNO (rtx) == AP_REGNO \
869 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
871 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Relies on encode_pattern() having filled
   pattern[]/patternr[] with the string form of VALUE.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-address-space memory operands.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* Stack-pointer relative addresses belong to 'Ss', not here.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Memory addressed through an address register.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant (absolute or symbolic) addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* Stack-pointer relative addresses.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* Frame-base relative addresses.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* Static-base relative addresses.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
959 /* STACK AND CALLING */
961 /* Frame Layout */
/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  /* Only the current frame's return address is supported.  */
  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1005 /* Exception Handling Support */
1007 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1008 pointers. */
1010 m32c_eh_return_data_regno (int n)
1012 switch (n)
1014 case 0:
1015 return MEM0_REGNO;
1016 case 1:
1017 return MEM0_REGNO+4;
1018 default:
1019 return INVALID_REGNUM;
1023 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1024 m32c_emit_eh_epilogue. */
1026 m32c_eh_return_stackadj_rtx (void)
1028 if (!cfun->machine->eh_stack_adjust)
1030 rtx sa;
1032 sa = gen_rtx_REG (Pmode, R0_REGNO);
1033 cfun->machine->eh_stack_adjust = sa;
1035 return cfun->machine->eh_stack_adjust;
1038 /* Registers That Address the Stack Frame */
/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
   the original spec called for dwarf numbers to vary with register
   width as well, for example, r0l, r0, and r2r0 would each have
   different dwarf numbers.  GCC doesn't support this, and we don't do
   it, and gdb seems to like it this way anyway.  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      /* Registers with no DWARF mapping get an out-of-range number.  */
      return DWARF_FRAME_REGISTERS + 1;
    }
}
1076 /* The frame looks like this:
1078 ap -> +------------------------------
1079 | Return address (3 or 4 bytes)
1080 | Saved FB (2 or 4 bytes)
1081 fb -> +------------------------------
1082 | local vars
1083 | register saves fb
1084 | through r0 as needed
1085 sp -> +------------------------------
/* We use this to wrap all emitted insns in the prologue, marking them
   frame-related so the DWARF unwind info covers them.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* hard register number */
  int bit;		/* mask bit in the PUSHM/POPM operand */
  int a16_bytes;	/* stack bytes consumed on the 16-bit family */
  int a24_bytes;	/* stack bytes consumed on the 24-bit family */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};
1117 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.  */
static int
need_to_save (int regno)
{
  if (fixed_regs[regno])
    return 0;
  if (crtl->calls_eh_return)
    return 1;
  /* The frame pointer is saved separately by the prologue.  */
  if (regno == FP_REGNO)
    return 0;
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM operand being built.  */
  int byte_count = 0, bytes;	/* Total stack delta, and per-reg delta.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* CFI notes, one per pushed register.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Registers holding the return value.  */

  /* A PARALLEL return value lives in registers; don't clobber it by
     saving/restoring those registers (not needed for EH returns or
     interrupts, which save everything anyway).  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record where each register lands for the CFI note.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);
	}
      byte_count += bytes;
    }

  /* Interrupt handlers save the general registers via the bitfield in
     the "interrupt enter" sequence instead of an explicit pushm; stash
     the mask (minus the FB bit) and start over for the mem regs.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers must also preserve any live mem0..mem7
     pseudo-registers (two bytes each).  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* One SP-adjust set plus one set per saved register.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Mem regs have no pushm bit; push them individually.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }

  if (ppt == PP_popm && byte_count)
    {
      /* Pop in the reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
/* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
   diagrams our call frame.  */
int
m32c_initial_elimination_offset (int from, int to)
{
  int ofs = 0;

  if (from == AP_REGNO)
    {
      /* Distance from the arg pointer to the frame pointer: the saved
	 $fb plus the return address (sizes differ per family —
	 presumably 2+3 on A16 and 4+4 on A24; confirm against the
	 frame diagram above).  */
      if (TARGET_A16)
	ofs += 5;
      else
	ofs += 8;
    }

  if (to == SP_REGNO)
    {
      /* Add the register save area and the local frame.  */
      ofs += m32c_pushm_popm (PP_justcount);
      ofs += get_frame_size ();
    }

  /* Account for push rounding.  */
  if (TARGET_A24)
    ofs = (ofs + 1) & ~1;
#if DEBUG0
  fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
	   to, ofs);
#endif
  return ofs;
}
1314 /* Passing Function Arguments on the Stack */
1316 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1317 M32C has word stacks. */
1318 unsigned int
1319 m32c_push_rounding (int n)
1321 if (TARGET_R8C || TARGET_M16C)
1322 return n;
1323 return (n + 1) & ~1;
1326 /* Passing Arguments in Registers */
1328 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1329 registers, partly on stack. If our function returns a struct, a
1330 pointer to a buffer for it is at the top of the stack (last thing
1331 pushed). The first few real arguments may be in registers as
1332 follows:
1334 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1335 arg2 in r2 if it's HI (else pushed on stack)
1336 rest on stack
1337 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1338 rest on stack
1340 Structs are not passed in registers, even if they fit. Only
1341 integer and pointer types are passed in registers.
1343 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1344 r2 if it fits. */
1345 #undef TARGET_FUNCTION_ARG
1346 #define TARGET_FUNCTION_ARG m32c_function_arg
1347 static rtx
1348 m32c_function_arg (cumulative_args_t ca_v,
1349 machine_mode mode, const_tree type, bool named)
1351 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1353 /* Can return a reg, parallel, or 0 for stack */
1354 rtx rv = NULL_RTX;
1355 #if DEBUG0
1356 fprintf (stderr, "func_arg %d (%s, %d)\n",
1357 ca->parm_num, mode_name[mode], named);
1358 debug_tree (type);
1359 #endif
1361 if (mode == VOIDmode)
1362 return GEN_INT (0);
1364 if (ca->force_mem || !named)
1366 #if DEBUG0
1367 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1368 named);
1369 #endif
1370 return NULL_RTX;
1373 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1374 return NULL_RTX;
1376 if (type && AGGREGATE_TYPE_P (type))
1377 return NULL_RTX;
1379 switch (ca->parm_num)
1381 case 1:
1382 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1383 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1384 break;
1386 case 2:
1387 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1388 rv = gen_rtx_REG (mode, R2_REGNO);
1389 break;
1392 #if DEBUG0
1393 debug_rtx (rv);
1394 #endif
1395 return rv;
1398 #undef TARGET_PASS_BY_REFERENCE
1399 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1400 static bool
1401 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1402 machine_mode mode ATTRIBUTE_UNUSED,
1403 const_tree type ATTRIBUTE_UNUSED,
1404 bool named ATTRIBUTE_UNUSED)
1406 return 0;
1409 /* Implements INIT_CUMULATIVE_ARGS. */
1410 void
1411 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1412 tree fntype,
1413 rtx libname ATTRIBUTE_UNUSED,
1414 tree fndecl,
1415 int n_named_args ATTRIBUTE_UNUSED)
1417 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1418 ca->force_mem = 1;
1419 else
1420 ca->force_mem = 0;
1421 ca->parm_num = 1;
1424 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1425 functions returning structures, so we always reset that. Otherwise,
1426 we only need to know the sequence number of the argument to know what
1427 to do with it. */
1428 #undef TARGET_FUNCTION_ARG_ADVANCE
1429 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1430 static void
1431 m32c_function_arg_advance (cumulative_args_t ca_v,
1432 machine_mode mode ATTRIBUTE_UNUSED,
1433 const_tree type ATTRIBUTE_UNUSED,
1434 bool named ATTRIBUTE_UNUSED)
1436 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1438 if (ca->force_mem)
1439 ca->force_mem = 0;
1440 else
1441 ca->parm_num++;
1444 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1445 #undef TARGET_FUNCTION_ARG_BOUNDARY
1446 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1447 static unsigned int
1448 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1449 const_tree type ATTRIBUTE_UNUSED)
1451 return (TARGET_A16 ? 8 : 16);
1454 /* Implements FUNCTION_ARG_REGNO_P. */
1456 m32c_function_arg_regno_p (int r)
1458 if (TARGET_A24)
1459 return (r == R0_REGNO);
1460 return (r == R1_REGNO || r == R2_REGNO);
1463 /* HImode and PSImode are the two "native" modes as far as GCC is
1464 concerned, but the chips also support a 32-bit mode which is used
1465 for some opcodes in R8C/M16C and for reset vectors and such. */
1466 #undef TARGET_VALID_POINTER_MODE
1467 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1468 static bool
1469 m32c_valid_pointer_mode (machine_mode mode)
1471 if (mode == HImode
1472 || mode == PSImode
1473 || mode == SImode
1475 return 1;
1476 return 0;
/* How Scalar Function Values Are Returned */

/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than 16 bits comes back in mem0; 8- and 16-bit
     values come back in r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1543 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1544 conventions. */
1546 #undef TARGET_FUNCTION_VALUE
1547 #define TARGET_FUNCTION_VALUE m32c_function_value
1549 static rtx
1550 m32c_function_value (const_tree valtype,
1551 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1552 bool outgoing ATTRIBUTE_UNUSED)
1554 /* return reg or parallel */
1555 const machine_mode mode = TYPE_MODE (valtype);
1556 return m32c_libcall_value (mode, NULL_RTX);
1559 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1561 #undef TARGET_FUNCTION_VALUE_REGNO_P
1562 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1564 static bool
1565 m32c_function_value_regno_p (const unsigned int regno)
1567 return (regno == R0_REGNO || regno == MEM0_REGNO);
1570 /* How Large Values Are Returned */
1572 /* We return structures by pushing the address on the stack, even if
1573 we use registers for the first few "real" arguments. */
1574 #undef TARGET_STRUCT_VALUE_RTX
1575 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1576 static rtx
1577 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1578 int incoming ATTRIBUTE_UNUSED)
1580 return 0;
1583 /* Function Entry and Exit */
1585 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1587 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1589 if (cfun->machine->is_interrupt)
1590 return 1;
1591 return 0;
1594 /* Implementing the Varargs Macros */
1596 #undef TARGET_STRICT_ARGUMENT_NAMING
1597 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1598 static bool
1599 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1601 return 1;
/* Trampolines for Nested Functions */

/* Trampoline code layout (assembler listings):

	m16c:
	1	0000	75C43412	mov.w	#0x1234,a0
	2	0004	FC000000	jmp.a	label

	m32c:
	1	0000	BC563412	mov.l:s	#0x123456,a0
	2	0004	CC000000	jmp.a	label
*/
/* Implements TRAMPOLINE_SIZE.  */
int
m32c_trampoline_size (void)
{
  /* Allocate extra space so we can avoid the messy shifts when we
     initialize the trampoline; we just write past the end of the
     opcode.  */
  return TARGET_A16 ? 8 : 10;
}
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines are word-aligned.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
/* Implements TARGET_TRAMPOLINE_INIT.  Writes the instruction bytes
   shown in the listing above into M_TRAMP, patching in the static
   chain value and the target function address.  */
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Shorthand for "the trampoline memory at offset I, in mode M".  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
/* Addressing Modes */

/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Addresses must be in the native pointer mode for the family.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify addressing is only valid on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* fall through */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }

  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /* case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }

  return 0;
}
/* Implements REG_OK_FOR_BASE_P.  Only the address, static-base,
   frame, and stack registers (or, non-strictly, pseudos) may serve
   as a base register.  */
int
m32c_reg_ok_for_base_p (rtx x, int strict)
{
  if (GET_CODE (x) != REG)
    return 0;
  switch (REGNO (x))
    {
    case A0_REGNO:
    case A1_REGNO:
    case SB_REGNO:
    case FB_REGNO:
    case SP_REGNO:
      return 1;
    default:
      if (IS_PSEUDO (x, strict))
	return 1;
      return 0;
    }
}
1825 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1826 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1827 like this:
1828 EB 4B FF mova -128[$fb],$a0
1829 D8 0C FF FF mov.w:Q #0,-1[$a0]
1831 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1832 displacements:
1833 7B F4 stc $fb,$a0
1834 77 54 00 01 sub #256,$a0
1835 D8 08 01 mov.w:Q #0,1[$a0]
1837 If we don't offset (i.e. offset by zero), we end up with:
1838 7B F4 stc $fb,$a0
1839 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1841 We have to subtract *something* so that we have a PLUS rtx to mark
1842 that we've done this reload. The -128 offset will never result in
1843 an 8-bit aN offset, and the payoff for the second case is five
1844 loads *if* those loads are within 256 bytes of the other end of the
1845 frame, so the third case seems best. Note that we subtract the
1846 zero, but detect that in the addhi3 pattern. */
1848 #define BIG_FB_ADJ 0
/* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
   worry about is frame base offsets, as $fb has a limited
   displacement range.  We deal with this by attempting to reload $fb
   itself into an address register; that seems to result in the best
   code.  */
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
static rtx
m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
#if DEBUG0
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
  debug_rtx (x);
  fprintf (stderr, "\n");
#endif

  /* Catch $fb+const addresses whose displacement falls outside the
     signed 8-bit range $fb supports (see the range checks in
     m32c_legitimate_address_p).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == FB_REGNO
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) < -128
	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      /* reload FB to A_REGS */
      rtx temp = gen_reg_rtx (Pmode);
      /* Copy before mutating: X may be shared.  */
      x = copy_rtx (x);
      emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
      XEXP (x, 0) = temp;
    }

  return x;
}
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: $fb+const with an out-of-range displacement.  Rewrite as
     ($fb+adjustment)+(offset-adjustment) and reload the inner sum
     into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: an already-split (($fb+const)+const) form; reload the
     inner $fb+const into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1947 /* Return the appropriate mode for a named address pointer. */
1948 #undef TARGET_ADDR_SPACE_POINTER_MODE
1949 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1950 static machine_mode
1951 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1953 switch (addrspace)
1955 case ADDR_SPACE_GENERIC:
1956 return TARGET_A24 ? PSImode : HImode;
1957 case ADDR_SPACE_FAR:
1958 return SImode;
1959 default:
1960 gcc_unreachable ();
1964 /* Return the appropriate mode for a named address address. */
1965 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1966 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1967 static machine_mode
1968 m32c_addr_space_address_mode (addr_space_t addrspace)
1970 switch (addrspace)
1972 case ADDR_SPACE_GENERIC:
1973 return TARGET_A24 ? PSImode : HImode;
1974 case ADDR_SPACE_FAR:
1975 return SImode;
1976 default:
1977 gcc_unreachable ();
/* Like m32c_legitimate_address_p, except with named addresses.  For
   the far space on 16-bit parts, only $a0-based forms (and pseudos
   that may become $a0) are valid.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      if (RTX_IS ("r"))
	{
	  /* A bare register: must be 32-bit and $a0 (or a pseudo).  */
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("+^Sri"))
	{
	  /* Register plus constant displacement.  */
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      /* The far space spans 20 bits of address.  */
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("+^Srs"))
	{
	  /* Register plus symbol.  */
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("+^S+ris"))
	{
	  /* Register plus constant plus symbol.  */
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: use the normal rules.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
/* Like m32c_legitimate_address, except with named address support.
   Far addresses are simply forced into a register; generic addresses
   use the normal legitimizer.  */
#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
static rtx
m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
				    addr_space_t as)
{
  if (as != ADDR_SPACE_GENERIC)
    {
#if DEBUG0
      fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
      debug_rtx (x);
      fprintf (stderr, "\n");
#endif

      if (GET_CODE (x) != REG)
	{
	  x = force_reg (SImode, x);
	}
      return x;
    }

  return m32c_legitimize_address (x, oldx, mode);
}
2096 /* Determine if one named address space is a subset of another. */
2097 #undef TARGET_ADDR_SPACE_SUBSET_P
2098 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2099 static bool
2100 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2102 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2103 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2105 if (subset == superset)
2106 return true;
2108 else
2109 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
#undef TARGET_ADDR_SPACE_CONVERT
#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
/* Convert from one address space to another.  Far->generic truncates
   to 16 bits; generic->far zero-extends to 32 bits.  */
static rtx
m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
{
  addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
  addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
  rtx result;

  gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
  gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);

  if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
    {
      /* This is unpredictable, as we're truncating off usable address
	 bits.  */

      result = gen_reg_rtx (HImode);
      emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
      return result;
    }
  else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
    {
      /* This always works.  */
      result = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendhisi2 (result, op));
      return result;
    }
  else
    gcc_unreachable ();
}
/* Condition Code Status */

/* Implements TARGET_FIXED_CONDITION_CODE_REGS.  The flags register
   ($flg) is the only condition-code register; there is no second
   one.  */
#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
static bool
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLG_REGNO;
  *p2 = INVALID_REGNUM;
  return true;
}
/* Describing Relative Costs of Operations */

/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

  /* FIXME: pick real values, but not 2 for now.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode moves involving r2/r3 have no opcodes; make them near
     impossible (or merely painful if another register is also an
     option).  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  /* A class that cannot hold the mode at all is effectively
     unusable.  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Control registers are slow to access.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* The mem0..mem7 "registers" are really memory; discourage them.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
/* Implements TARGET_MEMORY_MOVE_COST.  A flat placeholder cost for
   all memory moves, regardless of mode, class, or direction.  */

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost

static int
m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  /* FIXME: pick real values.  */
  return COSTS_N_INSNS (10);
}
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* mem0..mem7 are really memory; strongly penalize them.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a comparison result costs extra.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit operations: cost depends on the addressing mode of the
	   operand's address.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST m32c_address_cost
/* Implements TARGET_ADDRESS_COST.  Cost scales with the number of
   bytes needed to encode the displacement/address.  */
static int
m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  int i;
  /*  fprintf(stderr, "\naddress_cost\n");
      debug_rtx(addr);*/
  switch (GET_CODE (addr))
    {
    case CONST_INT:
      /* Absolute addresses: wider constants take more bytes.  */
      i = INTVAL (addr);
      if (i == 0)
	return COSTS_N_INSNS(1);
      if (0 < i && i <= 255)
	return COSTS_N_INSNS(2);
      if (0 < i && i <= 65535)
	return COSTS_N_INSNS(3);
      return COSTS_N_INSNS(4);
    case SYMBOL_REF:
      return COSTS_N_INSNS(4);
    case REG:
      return COSTS_N_INSNS(1);
    case PLUS:
      /* Base+displacement: cost follows displacement width.  */
      if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  i = INTVAL (XEXP (addr, 1));
	  if (i == 0)
	    return COSTS_N_INSNS(1);
	  if (0 < i && i <= 255)
	    return COSTS_N_INSNS(2);
	  if (0 < i && i <= 65535)
	    return COSTS_N_INSNS(3);
	}
      return COSTS_N_INSNS(4);
    default:
      return 0;
    }
}
2348 /* Defining the Output Assembler Language */
2350 /* Output of Data */
2352 /* We may have 24 bit sizes, which is the native address size.
2353 Currently unused, but provided for completeness. */
2354 #undef TARGET_ASM_INTEGER
2355 #define TARGET_ASM_INTEGER m32c_asm_integer
2356 static bool
2357 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2359 switch (size)
2361 case 3:
2362 fprintf (asm_out_file, "\t.3byte\t");
2363 output_addr_const (asm_out_file, x);
2364 fputc ('\n', asm_out_file);
2365 return true;
2366 case 4:
2367 if (GET_CODE (x) == SYMBOL_REF)
2369 fprintf (asm_out_file, "\t.long\t");
2370 output_addr_const (asm_out_file, x);
2371 fputc ('\n', asm_out_file);
2372 return true;
2374 break;
2376 return default_assemble_integer (x, size, aligned_p);
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each row maps one operand shape to an output template:
   - code:    the print_operand code letter this row applies to
	      (0 matches "no code letter");
   - pattern: the operand-shape string produced by encode_pattern ()
	      (defined earlier in this file);
   - format:  the text to emit.  Digits name the RTX stored at that
	      index of patternr[]; 'z' inserts a zero displacement when
	      the addressing mode requires one; '+' asks for sign
	      blending of a following constant; backslash quotes the
	      next character.  See m32c_print_operand below.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
/* This is in order according to the bitfield that pushm/popm use:
   bit N of the immediate selects pushm_regs[N], so bit 0 is "fb" and
   bit 7 is "r0" (m32c_print_operand's 'p' code walks bits 7..0).  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
/* Implements TARGET_PRINT_OPERAND.  Emit operand X to FILE, modified
   by the operand-code letter CODE from the insn template.  Letters
   handled here:
     'u'/'U'  print a constant as 16-/8-bit unsigned (multiplies);
     '!'      debugging aid - aborts so unreviewed patterns are caught;
     '&'      emit "w" or "l" depending on the target's address size;
     'C'/'c'  inverted / normal condition-code suffix;
     'h'/'H'  low / high part of a wider operand;
     'b'/'B'  bit position of a constant mask;
     'x'/'X'  constant as unsigned byte / word in hex;
     'p'      pushm/popm register-set list;
     'm'      negated constant;
     'd'/'D'  displacement forms (kept unsigned where required).
   Anything not special-cased is matched against the conversions[]
   table, keyed on CODE and the shape string from encode_pattern ().  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  encode_pattern (x);
  force_sign = 0;
  /* Find the first conversions[] row whose code letter and shape
     string both match, then interpret its format template.  */
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Try the constant itself, then its 16-bit and
			   8-bit complements, to find a single set (or
			   clear) bit.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      /* force_sign was requested by an earlier '+' in
			 the template; emit an explicit sign so the
			 constant blends onto a preceding symbol.  */
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      /* Any other character is emitted literally.  */
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  if (!conversions[i].pattern)
    {
      /* No table entry matched; complain but emit a marker instead of
	 aborting.  */
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.  Only '&' and '!'
   are valid bare punctuation codes; see m32c_print_operand above for
   descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  return c == '&' || c == '!';
}
2815 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2817 #undef TARGET_PRINT_OPERAND_ADDRESS
2818 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2820 static void
2821 m32c_print_operand_address (FILE * stream, rtx address)
2823 if (GET_CODE (address) == MEM)
2824 address = XEXP (address, 0);
2825 else
2826 /* cf: gcc.dg/asm-4.c. */
2827 gcc_assert (GET_CODE (address) == REG);
2829 m32c_print_operand (stream, address, 0);
2832 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2833 differently than general registers. */
2834 void
2835 m32c_output_reg_push (FILE * s, int regno)
2837 if (regno == FLG_REGNO)
2838 fprintf (s, "\tpushc\tflg\n");
2839 else
2840 fprintf (s, "\tpush.%c\t%s\n",
2841 " bwll"[reg_push_size (regno)], reg_names[regno]);
2844 /* Likewise for ASM_OUTPUT_REG_POP. */
2845 void
2846 m32c_output_reg_pop (FILE * s, int regno)
2848 if (regno == FLG_REGNO)
2849 fprintf (s, "\tpopc\tflg\n");
2850 else
2851 fprintf (s, "\tpop.%c\t%s\n",
2852 " bwll"[reg_push_size (regno)], reg_names[regno]);
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on the type itself, on the decl, or failing that on
   the decl's type.
   NOTE(review): the expansion is a bare conditional expression with
   no outer parentheses, so use it only where that cannot change
   parsing (the simple initializers below are fine).  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
                : DECL_ATTRIBUTES (decl) \
                  ? (DECL_ATTRIBUTES (decl)) \
                  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2865 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2866 static int
2867 interrupt_p (tree node ATTRIBUTE_UNUSED)
2869 tree list = M32C_ATTRIBUTES (node);
2870 while (list)
2872 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2873 return 1;
2874 list = TREE_CHAIN (list);
2876 return fast_interrupt_p (node);
2879 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2880 static int
2881 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2883 tree list = M32C_ATTRIBUTES (node);
2884 while (list)
2886 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2887 return 1;
2888 list = TREE_CHAIN (list);
2890 return 0;
2893 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2894 static int
2895 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2897 tree list = M32C_ATTRIBUTES (node);
2898 while (list)
2900 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2901 return 1;
2902 list = TREE_CHAIN (list);
2904 return 0;
/* Attribute handler (decl form) for "interrupt", "bank_switch" and
   "fast_interrupt".  No validation is done here; returning NULL_TREE
   and leaving *no_add_attrs untouched accepts the attribute, per the
   attribute-handler convention.  */
static tree
interrupt_decl_handler (tree * node ATTRIBUTE_UNUSED,
			tree name ATTRIBUTE_UNUSED,
			tree args ATTRIBUTE_UNUSED,
			int flags ATTRIBUTE_UNUSED,
			bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
/* Attribute handler (type form) for the same attributes as
   interrupt_decl_handler above; likewise accepts the attribute
   without validation.  */
static tree
interrupt_type_handler (tree * node ATTRIBUTE_UNUSED,
			tree name ATTRIBUTE_UNUSED,
			tree args ATTRIBUTE_UNUSED,
			int flags ATTRIBUTE_UNUSED,
			bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2926 /* Returns TRUE if given tree has the "function_vector" attribute. */
2928 m32c_special_page_vector_p (tree func)
2930 tree list;
2932 if (TREE_CODE (func) != FUNCTION_DECL)
2933 return 0;
2935 list = M32C_ATTRIBUTES (func);
2936 while (list)
2938 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2939 return 1;
2940 list = TREE_CHAIN (list);
2942 return 0;
2945 static tree
2946 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2947 tree name ATTRIBUTE_UNUSED,
2948 tree args ATTRIBUTE_UNUSED,
2949 int flags ATTRIBUTE_UNUSED,
2950 bool * no_add_attrs ATTRIBUTE_UNUSED)
2952 if (TARGET_R8C)
2954 /* The attribute is not supported for R8C target. */
2955 warning (OPT_Wattributes,
2956 "%qE attribute is not supported for R8C target",
2957 name);
2958 *no_add_attrs = true;
2960 else if (TREE_CODE (*node) != FUNCTION_DECL)
2962 /* The attribute must be applied to functions only. */
2963 warning (OPT_Wattributes,
2964 "%qE attribute applies only to functions",
2965 name);
2966 *no_add_attrs = true;
2968 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2970 /* The argument must be a constant integer. */
2971 warning (OPT_Wattributes,
2972 "%qE attribute argument not an integer constant",
2973 name);
2974 *no_add_attrs = true;
2976 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2977 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2979 /* The argument value must be between 18 to 255. */
2980 warning (OPT_Wattributes,
2981 "%qE attribute argument should be between 18 to 255",
2982 name);
2983 *no_add_attrs = true;
2985 return NULL_TREE;
2988 /* If the function is assigned the attribute 'function_vector', it
2989 returns the function vector number, otherwise returns zero. */
2991 current_function_special_page_vector (rtx x)
2993 int num;
2995 if ((GET_CODE(x) == SYMBOL_REF)
2996 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2998 tree list;
2999 tree t = SYMBOL_REF_DECL (x);
3001 if (TREE_CODE (t) != FUNCTION_DECL)
3002 return 0;
3004 list = M32C_ATTRIBUTES (t);
3005 while (list)
3007 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3009 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3010 return num;
3013 list = TREE_CHAIN (list);
3016 return 0;
3018 else
3019 return 0;
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Rows appear to be { name, min args, max args, decl required,
   type required, function-type required, handler(s), affects type
   identity } -- TODO(review): confirm the exact field order against
   this GCC version's struct attribute_spec.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_decl_handler,
   interrupt_type_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_decl_handler,
   interrupt_type_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_decl_handler,
   interrupt_type_handler, false},
  {"function_vector", 1, 1, true, false, false,function_vector_handler, NULL,
   false},
  /* Terminator.  */
  {0, 0, 0, 0, 0, 0, 0, 0, false}
};
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute combinations
   are treated as compatible here.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3046 #undef TARGET_INSERT_ATTRIBUTES
3047 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3048 static void
3049 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3050 tree * attr_ptr ATTRIBUTE_UNUSED)
3052 unsigned addr;
3053 /* See if we need to make #pragma address variables volatile. */
3055 if (TREE_CODE (node) == VAR_DECL)
3057 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3058 if (m32c_get_pragma_address (name, &addr))
3060 TREE_THIS_VOLATILE (node) = true;
/* Hash-map traits for the #pragma address table: keys are C strings
   hashed and compared by content rather than by pointer identity.  */
struct pragma_traits : default_hashmap_traits
{
  static hashval_t hash (const char *str) { return htab_hash_string (str); }
  static bool
  equal_keys (const char *a, const char *b)
  {
    return !strcmp (a, b);
  }
};

/* Hash table of pragma info.  Maps a variable name given to
   #pragma address to its fixed address; filled by
   m32c_note_pragma_address and queried by m32c_get_pragma_address.  */
static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
3079 void
3080 m32c_note_pragma_address (const char *varname, unsigned address)
3082 if (!pragma_htab)
3083 pragma_htab
3084 = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);
3086 const char *name = ggc_strdup (varname);
3087 unsigned int *slot = &pragma_htab->get_or_insert (name);
3088 *slot = address;
3091 static bool
3092 m32c_get_pragma_address (const char *varname, unsigned *address)
3094 if (!pragma_htab)
3095 return false;
3097 unsigned int *slot = pragma_htab->get (varname);
3098 if (slot)
3100 *address = *slot;
3101 return true;
3103 return false;
3106 void
3107 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3108 const char *name,
3109 int size, int align, int global)
3111 unsigned address;
3113 if (m32c_get_pragma_address (name, &address))
3115 /* We never output these as global. */
3116 assemble_name (stream, name);
3117 fprintf (stream, " = 0x%04x\n", address);
3118 return;
3120 if (!global)
3122 fprintf (stream, "\t.local\t");
3123 assemble_name (stream, name);
3124 fprintf (stream, "\n");
3126 fprintf (stream, "\t.comm\t");
3127 assemble_name (stream, name);
3128 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
/* Predicates */

/* This is a list of legal subregs of hard regs.  Each entry describes
   one allowed (outer size, inner size, byte offset) combination for a
   hard register; m32c_illegal_subreg_p below scans this table.  */
static const struct {
  unsigned char outer_mode_size;	/* GET_MODE_SIZE of the subreg's mode.  */
  unsigned char inner_mode_size;	/* GET_MODE_SIZE of the hard reg's mode.  */
  unsigned char byte_mask;		/* Bit N set: byte offset N is allowed.  */
  unsigned char legal_when;		/* 1 = always; 16/24 = only when
					   TARGET_A16 / TARGET_A24.  */
  unsigned int regno;			/* First hard register of the group.  */
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs, pseudos, and memory-backed registers are
     always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a bit for matching against
     legal_subregs[].byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry allowed this combination.  */
  return true;
}
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* TARGET_A24 parts are not subject to the restriction below.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs so a subreg-of-mem still counts as a mem.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.
   Currently always FALSE: the optimization was disabled because it
   depended on flags that no longer exist (see below).  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  return false;
}
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Produce an rtx for the OUTER-mode piece of X
   (whose mode is INNER) starting at byte offset BYTE, mapping hard
   registers through the target's register layout by hand.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  if (GET_CODE (x) != REG)
    {
      /* NOTE: this inner `rtx r' intentionally shadows the outer
	 `int r'.  */
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the frame pointer, and memory-backed registers keep the
     generic subreg handling.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }

  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }

  return gen_rtx_REG (outer, nr);
}
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  Returns nonzero when the move has already
   been emitted here as split pieces; zero means the caller should
   emit the (possibly rewritten) operands itself.  */
int
m32c_prepare_move (rtx * operands, machine_mode mode)
{
  /* Force constant sources into a register when storing to the far
     address space.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* r8c/m16c PSImode moves are always split (see m32c_split_psi_p).  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      /* Expand a PRE_MODIFY destination into an explicit base-register
	 update followed by a plain register-indirect store.  */
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-mem moves by loading the source into a pseudo first.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  operands[0] is the
   destination, operands[1] the source.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop, which must also be split.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split.  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* Far-space accesses on the 16-bit parts must also be split.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}

      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit the part moves in whichever order avoids clobbering a
	 source part before it is read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }

  /* Now emit any moves we may have accumulated.  Pairs start at
     index 2 because, in the define_split case (ops == operands),
     operands[0..1] still hold the original move's operands.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }

  return rv;
}
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
   the like.  For the R8C they expect one of the addresses to be in
   R1L:An so we need to arrange for that.  Otherwise, it's just a
   matter of picking out the operands we want and emitting the right
   pattern for them.  All these expanders, which correspond to
   patterns in blkmov.md, must return nonzero if they expand the insn,
   or zero if they should FAIL.  */

/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Scratch outputs for the address and count the insn clobbers.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a word and store
	 word-at-a-time, halving the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3622 /* This is a memcpy() opcode. All operands are implied, so we need to
3623 arrange for them to be in the right registers. The opcode wants
3624 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3625 is the source (MEM:BLK), and $2 the count (HI). */
3627 m32c_expand_movmemhi(rtx *operands)
3629 rtx desta, srca, count;
3630 rtx desto, srco, counto;
3632 desta = XEXP (operands[0], 0);
3633 srca = XEXP (operands[1], 0);
3634 count = operands[2];
3636 desto = gen_reg_rtx (Pmode);
3637 srco = gen_reg_rtx (Pmode);
3638 counto = gen_reg_rtx (HImode);
3640 if (GET_CODE (desta) != REG
3641 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3642 desta = copy_to_mode_reg (Pmode, desta);
3644 if (GET_CODE (srca) != REG
3645 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3646 srca = copy_to_mode_reg (Pmode, srca);
3648 /* Similar to setmem, but we don't need to check the value. */
3649 if (GET_CODE (count) == CONST_INT
3650 && ! (INTVAL (count) & 1)
3651 && (INTVAL (count) > 1))
3653 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3654 if (TARGET_A16)
3655 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3656 else
3657 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3658 return 1;
3661 /* This is the generalized memset() case. */
3662 if (GET_CODE (count) != REG
3663 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3664 count = copy_to_mode_reg (HImode, count);
3666 if (TARGET_A16)
3667 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3668 else
3669 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3671 return 1;
3674 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3675 the copy, which should point to the NUL at the end of the string,
3676 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3677 Since our opcode leaves the destination pointing *after* the NUL,
3678 we must emit an adjustment. */
3680 m32c_expand_movstr(rtx *operands)
3682 rtx desta, srca;
3683 rtx desto, srco;
3685 desta = XEXP (operands[1], 0);
3686 srca = XEXP (operands[2], 0);
3688 desto = gen_reg_rtx (Pmode);
3689 srco = gen_reg_rtx (Pmode);
3691 if (GET_CODE (desta) != REG
3692 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3693 desta = copy_to_mode_reg (Pmode, desta);
3695 if (GET_CODE (srca) != REG
3696 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3697 srca = copy_to_mode_reg (Pmode, srca);
3699 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3700 /* desto ends up being a1, which allows this type of add through MOVA. */
3701 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3703 return 1;
3706 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3707 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3708 $2 is the other (MEM:BLK). We must do the comparison, and then
3709 convert the flags to a signed integer result. */
3711 m32c_expand_cmpstr(rtx *operands)
3713 rtx src1a, src2a;
3715 src1a = XEXP (operands[1], 0);
3716 src2a = XEXP (operands[2], 0);
3718 if (GET_CODE (src1a) != REG
3719 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3720 src1a = copy_to_mode_reg (Pmode, src1a);
3722 if (GET_CODE (src2a) != REG
3723 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3724 src2a = copy_to_mode_reg (Pmode, src2a);
3726 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3727 emit_insn (gen_cond_to_int (operands[0]));
3729 return 1;
3733 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3735 static shift_gen_func
3736 shift_gen_func_for (int mode, int code)
3738 #define GFF(m,c,f) if (mode == m && code == c) return f
3739 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3740 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3741 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3742 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3743 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3744 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3745 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3746 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3747 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3748 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3749 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3750 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3751 #undef GFF
3752 gcc_unreachable ();
3755 /* The m32c only has one shift, but it takes a signed count. GCC
3756 doesn't want this, so we fake it by negating any shift count when
3757 we're pretending to shift the other way. Also, the shift count is
3758 limited to -8..8. It's slightly better to use two shifts for 9..15
3759 than to load the count into r1h, so we do that too. */
/* OPERANDS is (dest, src, count); SCALE multiplies the count and is
   negative when the md pattern shifts "the other way"; SHIFT_CODE
   picks the generator.  Returns 1 when the whole shift was emitted
   here; returns 0 after rewriting operands[2] so the caller can emit
   the regular pattern (presumably — confirm against m32c.md).  */
3761 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3763 machine_mode mode = GET_MODE (operands[0]);
3764 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3765 rtx temp;
/* Constant count: emit as many maximal-width partial shifts as
   needed, then one final shift for the remainder.  */
3767 if (GET_CODE (operands[2]) == CONST_INT)
3769 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3770 int count = INTVAL (operands[2]) * scale;
3772 while (count > maxc)
3774 temp = gen_reg_rtx (mode);
3775 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3776 operands[1] = temp;
3777 count -= maxc;
/* Same chunking for negative (opposite-direction) counts.  */
3779 while (count < -maxc)
3781 temp = gen_reg_rtx (mode);
3782 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3783 operands[1] = temp;
3784 count += maxc;
3786 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3787 return 1;
/* Variable count: get it into a QI temp we are allowed to modify.  */
3790 temp = gen_reg_rtx (QImode);
3791 if (scale < 0)
3792 /* The pattern has a NEG that corresponds to this. */
3793 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3794 else if (TARGET_A16 && mode == SImode)
3795 /* We do this because the code below may modify this, we don't
3796 want to modify the origin of this value. */
3797 emit_move_insn (temp, operands[2]);
3798 else
3799 /* We'll only use it for the shift, no point emitting a move. */
3800 temp = operands[2];
3802 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3804 /* The m16c has a limit of -16..16 for SI shifts, even when the
3805 shift count is in a register. Since there are so many targets
3806 of these shifts, it's better to expand the RTL here than to
3807 call a helper function.
3809 The resulting code looks something like this:
3811 cmp.b r1h,-16
3812 jge.b 1f
3813 shl.l -16,dest
3814 add.b r1h,16
3815 1f: cmp.b r1h,16
3816 jle.b 1f
3817 shl.l 16,dest
3818 sub.b r1h,16
3819 1f: shl.l r1h,dest
3821 We take advantage of the fact that "negative" shifts are
3822 undefined to skip one of the comparisons. */
3824 rtx count;
3825 rtx label, tempvar;
3826 rtx_insn *insn;
3828 emit_move_insn (operands[0], operands[1]);
3830 count = temp;
3831 label = gen_label_rtx ();
3832 LABEL_NUSES (label) ++;
3834 tempvar = gen_reg_rtx (mode);
3836 if (shift_code == ASHIFT)
/* The 16-bit pre-shift is emitted as two 8-bit shifts through a
   temporary; the count is then reduced by 16 before the final
   register-count shift below.  */
3838 /* This is a left shift. We only need check positive counts. */
3839 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3840 count, GEN_INT (16), label));
3841 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3842 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3843 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3844 emit_label_after (label, insn);
3846 else
3848 /* This is a right shift. We only need check negative counts. */
3849 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3850 count, GEN_INT (-16), label));
3851 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3852 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3853 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3854 emit_label_after (label, insn);
3856 operands[1] = operands[0];
3857 emit_insn (func (operands[0], operands[0], count));
3858 return 1;
/* Hand the (possibly negated/copied) count back to the caller.  */
3861 operands[2] = temp;
3862 return 0;
3865 /* The m32c has a limited range of operations that work on PSImode
3866 values; we have to expand to SI, do the math, and truncate back to
3867 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3868 those cases. */
3869 void
3870 m32c_expand_neg_mulpsi3 (rtx * operands)
3872 /* operands: a = b * i */
3873 rtx temp1; /* b as SI */
3874 rtx scale /* i as SI */;
3875 rtx temp2; /* a*b as SI */
3877 temp1 = gen_reg_rtx (SImode);
3878 temp2 = gen_reg_rtx (SImode);
3879 if (GET_CODE (operands[2]) != CONST_INT)
3881 scale = gen_reg_rtx (SImode);
3882 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3884 else
3885 scale = copy_to_mode_reg (SImode, operands[2]);
3887 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3888 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3889 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3892 /* Pattern Output Functions */
3895 m32c_expand_movcc (rtx *operands)
3897 rtx rel = operands[1];
3899 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3900 return 1;
3901 if (GET_CODE (operands[2]) != CONST_INT
3902 || GET_CODE (operands[3]) != CONST_INT)
3903 return 1;
3904 if (GET_CODE (rel) == NE)
3906 rtx tmp = operands[2];
3907 operands[2] = operands[3];
3908 operands[3] = tmp;
3909 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3912 emit_move_insn (operands[0],
3913 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3914 rel,
3915 operands[2],
3916 operands[3]));
3917 return 0;
3920 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3922 m32c_expand_insv (rtx *operands)
3924 rtx op0, src0, p;
3925 int mask;
/* Only single-bit fields can be inserted with bset/bclr.  */
3927 if (INTVAL (operands[1]) != 1)
3928 return 1;
3930 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3931 if (GET_CODE (operands[3]) != CONST_INT)
3932 return 1;
3933 if (INTVAL (operands[3]) != 0
3934 && INTVAL (operands[3]) != 1
3935 && INTVAL (operands[3]) != -1)
3936 return 1;
/* Single-bit mask at the requested bit position (operands[2]).  */
3938 mask = 1 << INTVAL (operands[2]);
/* Strip a low-part SUBREG so we operate on the narrow inner reg.  */
3940 op0 = operands[0];
3941 if (GET_CODE (op0) == SUBREG
3942 && SUBREG_BYTE (op0) == 0)
3944 rtx sub = SUBREG_REG (op0);
3945 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3946 op0 = sub;
/* Read from op0 directly when we cannot (or must not, for volatile
   MEMs) introduce a copy; otherwise snapshot it into a pseudo.  */
3949 if (!can_create_pseudo_p ()
3950 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3951 src0 = op0;
3952 else
3954 src0 = gen_reg_rtx (GET_MODE (op0));
3955 emit_move_insn (src0, op0);
3958 if (GET_MODE (op0) == HImode
3959 && INTVAL (operands[2]) >= 8
3960 && GET_CODE (op0) == MEM)
3962 /* We are little endian. */
/* NOTE(review): new_mem is built but never used afterwards — the
   apparent intent was to redirect op0/src0 through the high byte.
   As written this branch only shifts the mask; confirm against the
   insv patterns in m32c.md before relying on this path.  */
3963 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3964 XEXP (op0, 0), 1));
3965 MEM_COPY_ATTRIBUTES (new_mem, op0);
3966 mask >>= 8;
3969 /* First, we generate a mask with the correct polarity. If we are
3970 storing a zero, we want an AND mask, so invert it. */
3971 if (INTVAL (operands[3]) == 0)
3973 /* Storing a zero, use an AND mask */
3974 if (GET_MODE (op0) == HImode)
3975 mask ^= 0xffff;
3976 else
3977 mask ^= 0xff;
3979 /* Now we need to properly sign-extend the mask in case we need to
3980 fall back to an AND or OR opcode. */
3981 if (GET_MODE (op0) == HImode)
3983 if (mask & 0x8000)
3984 mask -= 0x10000;
3986 else
3988 if (mask & 0x80)
3989 mask -= 0x100;
/* Dispatch on (set vs clear) x (HI vs QI) x (A24 vs A16) to the
   matching and/ior generator: bit 2 = nonzero insert (IOR), bit 1 =
   HImode, bit 0 = TARGET_A24.  */
3992 switch ( (INTVAL (operands[3]) ? 4 : 0)
3993 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3994 + (TARGET_A24 ? 1 : 0))
3996 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3997 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3998 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3999 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4000 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4001 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4002 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4003 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
4004 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
4007 emit_insn (p);
4008 return 0;
4011 const char *
4012 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4014 static char buf[30];
4015 if (GET_CODE (operands[0]) == REG
4016 && REGNO (operands[0]) == R0_REGNO)
4018 if (code == EQ)
4019 return "stzx\t#1,#0,r0l";
4020 if (code == NE)
4021 return "stzx\t#0,#1,r0l";
4023 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4024 return buf;
4027 /* Encode symbol attributes of a SYMBOL_REF into its
4028 SYMBOL_REF_FLAGS. */
4029 static void
4030 m32c_encode_section_info (tree decl, rtx rtl, int first)
4032 int extra_flags = 0;
4034 default_encode_section_info (decl, rtl, first);
4035 if (TREE_CODE (decl) == FUNCTION_DECL
4036 && m32c_special_page_vector_p (decl))
4038 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4040 if (extra_flags)
4041 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4044 /* Returns TRUE if the current function is a leaf, and thus we can
4045 determine which registers an interrupt function really needs to
4046 save. The logic below is mostly about finding the insn sequence
4047 that's the function, versus any sequence that might be open for the
4048 current insn. */
4049 static int
4050 m32c_leaf_function_p (void)
4052 rtx_insn *saved_first, *saved_last;
4053 struct sequence_stack *seq;
4054 int rv;
4056 saved_first = crtl->emit.x_first_insn;
4057 saved_last = crtl->emit.x_last_insn;
4058 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
4060 if (seq)
4062 crtl->emit.x_first_insn = seq->first;
4063 crtl->emit.x_last_insn = seq->last;
4066 rv = leaf_function_p ();
4068 crtl->emit.x_first_insn = saved_first;
4069 crtl->emit.x_last_insn = saved_last;
4070 return rv;
4073 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4074 opcodes. If the function doesn't need the frame base or stack
4075 pointer, it can use the simpler RTS opcode. */
4076 static bool
4077 m32c_function_needs_enter (void)
4079 rtx_insn *insn;
4080 struct sequence_stack *seq;
4081 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4082 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4084 insn = get_insns ();
4085 for (seq = crtl->emit.sequence_stack;
4086 seq;
4087 insn = seq->first, seq = seq->next);
4089 while (insn)
4091 if (reg_mentioned_p (sp, insn))
4092 return true;
4093 if (reg_mentioned_p (fb, insn))
4094 return true;
4095 insn = NEXT_INSN (insn);
4097 return false;
4100 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4101 frame-related. Return PAR.
4103 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4104 PARALLEL rtx other than the first if they do not have the
4105 FRAME_RELATED flag set on them. So this function is handy for
4106 marking up 'enter' instructions. */
4107 static rtx
4108 m32c_all_frame_related (rtx par)
4110 int len = XVECLEN (par, 0);
4111 int i;
4113 for (i = 0; i < len; i++)
4114 F (XVECEXP (par, 0, i));
4116 return par;
4119 /* Emits the prologue. See the frame layout comment earlier in this
4120 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4121 that we manually update sp. */
4122 void
4123 m32c_emit_prologue (void)
4125 int frame_size, extra_frame_size = 0, reg_save_size;
4126 int complex_prologue = 0;
/* Decide interrupt handling first; bank_switch only matters for
   interrupt functions, so warn when it appears elsewhere.  */
4128 cfun->machine->is_leaf = m32c_leaf_function_p ();
4129 if (interrupt_p (cfun->decl))
4131 cfun->machine->is_interrupt = 1;
4132 complex_prologue = 1;
4134 else if (bank_switch_p (cfun->decl))
4135 warning (OPT_Wattributes,
4136 "%<bank_switch%> has no effect on non-interrupt functions");
/* Bytes of register-save area, counted without emitting anything.  */
4138 reg_save_size = m32c_pushm_popm (PP_justcount);
/* Interrupts either switch register banks (FSET B) or push the
   registers the handler clobbers.  */
4140 if (interrupt_p (cfun->decl))
4142 if (bank_switch_p (cfun->decl))
4143 emit_insn (gen_fset_b ());
4144 else if (cfun->machine->intr_pushm)
4145 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4148 frame_size =
4149 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
/* No locals and no SP/FB references: the cheap RTS exit suffices.  */
4150 if (frame_size == 0
4151 && !m32c_function_needs_enter ())
4152 cfun->machine->use_rts = 1;
/* ENTER's immediate is one byte; anything beyond 254 is carved off
   and subtracted from SP manually below.  */
4154 if (frame_size > 254)
4156 extra_frame_size = frame_size - 254;
4157 frame_size = 254;
4159 if (cfun->machine->use_rts == 0)
4160 F (emit_insn (m32c_all_frame_related
4161 (TARGET_A16
4162 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4163 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4165 if (extra_frame_size)
4167 complex_prologue = 1;
4168 if (TARGET_A16)
4169 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4170 gen_rtx_REG (HImode, SP_REGNO),
4171 GEN_INT (-extra_frame_size))));
4172 else
4173 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4174 gen_rtx_REG (PSImode, SP_REGNO),
4175 GEN_INT (-extra_frame_size))));
/* Now actually emit the register pushes counted above.  */
4178 complex_prologue += m32c_pushm_popm (PP_pushm);
4180 /* This just emits a comment into the .s file for debugging. */
4181 if (complex_prologue)
4182 emit_insn (gen_prologue_end ());
4185 /* Likewise, for the epilogue. The only exception is that, for
4186 interrupts, we must manually unwind the frame as the REIT opcode
4187 doesn't do that. */
4188 void
4189 m32c_emit_epilogue (void)
4191 int popm_count = m32c_pushm_popm (PP_justcount);
4193 /* This just emits a comment into the .s file for debugging. */
4194 if (popm_count > 0 || cfun->machine->is_interrupt)
4195 emit_insn (gen_epilogue_start ());
/* Restore the registers saved by the prologue's PUSHM.  */
4197 if (popm_count > 0)
4198 m32c_pushm_popm (PP_popm);
4200 if (cfun->machine->is_interrupt)
4202 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4204 /* REIT clears B flag and restores $fp for us, but we still
4205 have to fix up the stack. USE_RTS just means we didn't
4206 emit ENTER. */
/* Unwind manually: SP <- FP (via A0 as scratch), then pop the old
   frame pointer.  */
4207 if (!cfun->machine->use_rts)
4209 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4210 gen_rtx_REG (spmode, FP_REGNO));
4211 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4212 gen_rtx_REG (spmode, A0_REGNO));
4213 /* We can't just add this to the POPM because it would be in
4214 the wrong order, and wouldn't fix the stack if we're bank
4215 switching. */
4216 if (TARGET_A16)
4217 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4218 else
4219 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
/* Bank-switched handlers never pushed these, so only pop when the
   prologue pushed.  */
4221 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4222 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4224 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4225 generated only for M32C/M32CM targets (generate the REIT
4226 instruction otherwise). */
4227 if (fast_interrupt_p (cfun->decl))
4229 /* Check if fast_attribute is set for M32C or M32CM. */
4230 if (TARGET_A24)
4232 emit_jump_insn (gen_epilogue_freit ());
4234 /* If fast_interrupt attribute is set for an R8C or M16C
4235 target ignore this attribute and generated REIT
4236 instruction. */
4237 else
4239 warning (OPT_Wattributes,
4240 "%<fast_interrupt%> attribute directive ignored");
4241 emit_jump_insn (gen_epilogue_reit_16 ());
4244 else if (TARGET_A16)
4245 emit_jump_insn (gen_epilogue_reit_16 ());
4246 else
4247 emit_jump_insn (gen_epilogue_reit_24 ());
/* Non-interrupt exits: plain RTS when no frame, otherwise EXITD.  */
4249 else if (cfun->machine->use_rts)
4250 emit_jump_insn (gen_epilogue_rts ());
4251 else if (TARGET_A16)
4252 emit_jump_insn (gen_epilogue_exitd_16 ());
4253 else
4254 emit_jump_insn (gen_epilogue_exitd_24 ());
4257 void
4258 m32c_emit_eh_epilogue (rtx ret_addr)
4260 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4261 return to. We have to fudge the stack, pop everything, pop SP
4262 (fudged), and return (fudged). This is actually easier to do in
4263 assembler, so punt to libgcc. */
4264 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4265 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4268 /* Indicate which flags must be properly set for a given conditional. */
4269 static int
4270 flags_needed_for_conditional (rtx cond)
4272 switch (GET_CODE (cond))
4274 case LE:
4275 case GT:
4276 return FLAGS_OSZ;
4277 case LEU:
4278 case GTU:
4279 return FLAGS_ZC;
4280 case LT:
4281 case GE:
4282 return FLAGS_OS;
4283 case LTU:
4284 case GEU:
4285 return FLAGS_C;
4286 case EQ:
4287 case NE:
4288 return FLAGS_Z;
4289 default:
4290 return FLAGS_N;
4294 #define DEBUG_CMP 0
4296 /* Returns true if a compare insn is redundant because it would only
4297 set flags that are already set correctly. */
4298 static bool
4299 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4301 int flags_needed;
4302 int pflags;
4303 rtx_insn *prev;
4304 rtx pp, next;
4305 rtx op0, op1;
4306 #if DEBUG_CMP
4307 int prev_icode, i;
4308 #endif
4310 op0 = operands[0];
4311 op1 = operands[1];
4313 #if DEBUG_CMP
4314 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4315 debug_rtx(cmp);
4316 for (i=0; i<2; i++)
4318 fprintf(stderr, "operands[%d] = ", i);
4319 debug_rtx(operands[i]);
4321 #endif
4323 next = next_nonnote_insn (cmp);
4324 if (!next || !INSN_P (next))
4326 #if DEBUG_CMP
4327 fprintf(stderr, "compare not followed by insn\n");
4328 debug_rtx(next);
4329 #endif
4330 return false;
4332 if (GET_CODE (PATTERN (next)) == SET
4333 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4335 next = XEXP (XEXP (PATTERN (next), 1), 0);
4337 else if (GET_CODE (PATTERN (next)) == SET)
4339 /* If this is a conditional, flags_needed will be something
4340 other than FLAGS_N, which we test below. */
4341 next = XEXP (PATTERN (next), 1);
4343 else
4345 #if DEBUG_CMP
4346 fprintf(stderr, "compare not followed by conditional\n");
4347 debug_rtx(next);
4348 #endif
4349 return false;
4351 #if DEBUG_CMP
4352 fprintf(stderr, "conditional is: ");
4353 debug_rtx(next);
4354 #endif
4356 flags_needed = flags_needed_for_conditional (next);
4357 if (flags_needed == FLAGS_N)
4359 #if DEBUG_CMP
4360 fprintf(stderr, "compare not followed by conditional\n");
4361 debug_rtx(next);
4362 #endif
4363 return false;
4366 /* Compare doesn't set overflow and carry the same way that
4367 arithmetic instructions do, so we can't replace those. */
4368 if (flags_needed & FLAGS_OC)
4369 return false;
4371 prev = cmp;
4372 do {
4373 prev = prev_nonnote_insn (prev);
4374 if (!prev)
4376 #if DEBUG_CMP
4377 fprintf(stderr, "No previous insn.\n");
4378 #endif
4379 return false;
4381 if (!INSN_P (prev))
4383 #if DEBUG_CMP
4384 fprintf(stderr, "Previous insn is a non-insn.\n");
4385 #endif
4386 return false;
4388 pp = PATTERN (prev);
4389 if (GET_CODE (pp) != SET)
4391 #if DEBUG_CMP
4392 fprintf(stderr, "Previous insn is not a SET.\n");
4393 #endif
4394 return false;
4396 pflags = get_attr_flags (prev);
4398 /* Looking up attributes of previous insns corrupted the recog
4399 tables. */
4400 INSN_UID (cmp) = -1;
4401 recog (PATTERN (cmp), cmp, 0);
4403 if (pflags == FLAGS_N
4404 && reg_mentioned_p (op0, pp))
4406 #if DEBUG_CMP
4407 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4408 debug_rtx(prev);
4409 #endif
4410 return false;
4413 /* Check for comparisons against memory - between volatiles and
4414 aliases, we just can't risk this one. */
4415 if (GET_CODE (operands[0]) == MEM
4416 || GET_CODE (operands[0]) == MEM)
4418 #if DEBUG_CMP
4419 fprintf(stderr, "comparisons with memory:\n");
4420 debug_rtx(prev);
4421 #endif
4422 return false;
4425 /* Check for PREV changing a register that's used to compute a
4426 value in CMP, even if it doesn't otherwise change flags. */
4427 if (GET_CODE (operands[0]) == REG
4428 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4430 #if DEBUG_CMP
4431 fprintf(stderr, "sub-value affected, op0:\n");
4432 debug_rtx(prev);
4433 #endif
4434 return false;
4436 if (GET_CODE (operands[1]) == REG
4437 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4439 #if DEBUG_CMP
4440 fprintf(stderr, "sub-value affected, op1:\n");
4441 debug_rtx(prev);
4442 #endif
4443 return false;
4446 } while (pflags == FLAGS_N);
4447 #if DEBUG_CMP
4448 fprintf(stderr, "previous flag-setting insn:\n");
4449 debug_rtx(prev);
4450 debug_rtx(pp);
4451 #endif
4453 if (GET_CODE (pp) == SET
4454 && GET_CODE (XEXP (pp, 0)) == REG
4455 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4456 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4458 /* Adjacent cbranches must have the same operands to be
4459 redundant. */
4460 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4461 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4462 #if DEBUG_CMP
4463 fprintf(stderr, "adjacent cbranches\n");
4464 debug_rtx(pop0);
4465 debug_rtx(pop1);
4466 #endif
4467 if (rtx_equal_p (op0, pop0)
4468 && rtx_equal_p (op1, pop1))
4469 return true;
4470 #if DEBUG_CMP
4471 fprintf(stderr, "prev cmp not same\n");
4472 #endif
4473 return false;
4476 /* Else the previous insn must be a SET, with either the source or
4477 dest equal to operands[0], and operands[1] must be zero. */
4479 if (!rtx_equal_p (op1, const0_rtx))
4481 #if DEBUG_CMP
4482 fprintf(stderr, "operands[1] not const0_rtx\n");
4483 #endif
4484 return false;
4486 if (GET_CODE (pp) != SET)
4488 #if DEBUG_CMP
4489 fprintf (stderr, "pp not set\n");
4490 #endif
4491 return false;
4493 if (!rtx_equal_p (op0, SET_SRC (pp))
4494 && !rtx_equal_p (op0, SET_DEST (pp)))
4496 #if DEBUG_CMP
4497 fprintf(stderr, "operands[0] not found in set\n");
4498 #endif
4499 return false;
4502 #if DEBUG_CMP
4503 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4504 #endif
4505 if ((pflags & flags_needed) == flags_needed)
4506 return true;
4508 return false;
4511 /* Return the pattern for a compare. This will be commented out if
4512 the compare is redundant, else a normal pattern is returned. Thus,
4513 the assembler output says where the compare would have been. */
4514 char *
4515 m32c_output_compare (rtx_insn *insn, rtx *operands)
/* templ[0] is a ';' (assembler comment lead-in); templ[5] is the
   size-letter slot patched below.  The static buffer is reused on
   every call, so the returned pointer is only valid until the next
   call.  */
4517 static char templ[] = ";cmp.b\t%1,%0";
4518 /* ^ 5 */
/* Pick b/w/l by operand size (index 1, 2 or 4 into " bwll").  */
4520 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4521 if (m32c_compare_redundant (insn, operands))
4523 #if DEBUG_CMP
4524 fprintf(stderr, "cbranch: cmp not needed\n");
4525 #endif
/* Redundant: return with the leading ';' so the insn appears in the
   .s file but is commented out.  */
4526 return templ;
4529 #if DEBUG_CMP
4530 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4531 #endif
/* Needed: skip the ';' to emit a real compare.  */
4532 return templ + 1;
4535 #undef TARGET_ENCODE_SECTION_INFO
4536 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4538 /* If the frame pointer isn't used, we detect it manually. But the
4539 stack pointer doesn't have as flexible addressing as the frame
4540 pointer, so we always assume we have it. */
4542 #undef TARGET_FRAME_POINTER_REQUIRED
4543 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4545 /* The Global `targetm' Variable. */
/* Collects every TARGET_* hook override defined in this file into the
   target vector the middle end calls through.  */
4547 struct gcc_target targetm = TARGET_INITIALIZER;
/* Garbage-collector roots generated by gengtype for this file.  */
4549 #include "gt-m32c.h"