1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2015 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "hash-set.h"
39 #include "machmode.h"
40 #include "vec.h"
41 #include "double-int.h"
42 #include "input.h"
43 #include "alias.h"
44 #include "symtab.h"
45 #include "wide-int.h"
46 #include "inchash.h"
47 #include "tree.h"
48 #include "fold-const.h"
49 #include "stor-layout.h"
50 #include "varasm.h"
51 #include "calls.h"
52 #include "hashtab.h"
53 #include "function.h"
54 #include "statistics.h"
55 #include "real.h"
56 #include "fixed-value.h"
57 #include "expmed.h"
58 #include "dojump.h"
59 #include "explow.h"
60 #include "emit-rtl.h"
61 #include "stmt.h"
62 #include "expr.h"
63 #include "insn-codes.h"
64 #include "optabs.h"
65 #include "except.h"
66 #include "ggc.h"
67 #include "target.h"
68 #include "target-def.h"
69 #include "tm_p.h"
70 #include "langhooks.h"
71 #include "hash-table.h"
72 #include "predict.h"
73 #include "dominance.h"
74 #include "cfg.h"
75 #include "cfgrtl.h"
76 #include "cfganal.h"
77 #include "lcm.h"
78 #include "cfgbuild.h"
79 #include "cfgcleanup.h"
80 #include "basic-block.h"
81 #include "tree-ssa-alias.h"
82 #include "internal-fn.h"
83 #include "gimple-fold.h"
84 #include "tree-eh.h"
85 #include "gimple-expr.h"
86 #include "is-a.h"
87 #include "gimple.h"
88 #include "df.h"
89 #include "tm-constrs.h"
90 #include "builtins.h"
92 /* Prototypes */
94 /* Used by m32c_pushm_popm. */
95 typedef enum
97 PP_pushm,
98 PP_popm,
99 PP_justcount
100 } Push_Pop_Type;
102 static bool m32c_function_needs_enter (void);
103 static tree interrupt_handler (tree *, tree, tree, int, bool *);
104 static tree function_vector_handler (tree *, tree, tree, int, bool *);
105 static int interrupt_p (tree node);
106 static int bank_switch_p (tree node);
107 static int fast_interrupt_p (tree node);
108 static int interrupt_p (tree node);
109 static bool m32c_asm_integer (rtx, unsigned int, int);
110 static int m32c_comp_type_attributes (const_tree, const_tree);
111 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
112 static struct machine_function *m32c_init_machine_status (void);
113 static void m32c_insert_attributes (tree, tree *);
114 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
115 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
116 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
117 const_tree, bool);
118 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
119 const_tree, bool);
120 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
121 const_tree, bool);
122 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
123 static int m32c_pushm_popm (Push_Pop_Type);
124 static bool m32c_strict_argument_naming (cumulative_args_t);
125 static rtx m32c_struct_value_rtx (tree, int);
126 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
127 static int need_to_save (int);
128 static rtx m32c_function_value (const_tree, const_tree, bool);
129 static rtx m32c_libcall_value (machine_mode, const_rtx);
131 /* Returns true if an address is specified, else false. */
132 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
134 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
136 #define streq(a,b) (strcmp ((a), (b)) == 0)
138 /* Internal support routines */
140 /* Debugging statements are tagged with DEBUG0 only so that they can
141 be easily enabled individually, by replacing the '0' with '1' as
142 needed. */
143 #define DEBUG0 0
144 #define DEBUG1 1
146 #if DEBUG0
147 #include "print-tree.h"
148 /* This is needed by some of the commented-out debug statements
149 below. */
150 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
151 #endif
152 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
154 /* These are all to support encode_pattern(). */
155 static char pattern[30], *patternp;
156 static GTY(()) rtx patternr[30];
157 #define RTX_IS(x) (streq (pattern, x))
159 /* Some macros to simplify the logic throughout this file. */
160 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
161 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
163 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
164 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
166 static int
167 far_addr_space_p (rtx x)
169 if (GET_CODE (x) != MEM)
170 return 0;
171 #if DEBUG0
172 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
173 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
174 #endif
175 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
178 /* We do most RTX matching by converting the RTX into a string, and
179 using string compares. This vastly simplifies the logic in many of
180 the functions in this file.
182 On exit, pattern[] has the encoded string (use RTX_IS("...") to
183 compare it) and patternr[] has pointers to the nodes in the RTX
184 corresponding to each character in the encoded string. The latter
185 is mostly used by print_operand().
187 Unrecognized patterns have '?' in them; this shows up when the
188 assembler complains about syntax errors.
191 static void
192 encode_pattern_1 (rtx x)
194 int i;
196 if (patternp == pattern + sizeof (pattern) - 2)
198 patternp[-1] = '?';
199 return;
202 patternr[patternp - pattern] = x;
204 switch (GET_CODE (x))
206 case REG:
207 *patternp++ = 'r';
208 break;
209 case SUBREG:
210 if (GET_MODE_SIZE (GET_MODE (x)) !=
211 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
212 *patternp++ = 'S';
213 if (GET_MODE (x) == PSImode
214 && GET_CODE (XEXP (x, 0)) == REG)
215 *patternp++ = 'S';
216 encode_pattern_1 (XEXP (x, 0));
217 break;
218 case MEM:
219 *patternp++ = 'm';
220 case CONST:
221 encode_pattern_1 (XEXP (x, 0));
222 break;
223 case SIGN_EXTEND:
224 *patternp++ = '^';
225 *patternp++ = 'S';
226 encode_pattern_1 (XEXP (x, 0));
227 break;
228 case ZERO_EXTEND:
229 *patternp++ = '^';
230 *patternp++ = 'Z';
231 encode_pattern_1 (XEXP (x, 0));
232 break;
233 case PLUS:
234 *patternp++ = '+';
235 encode_pattern_1 (XEXP (x, 0));
236 encode_pattern_1 (XEXP (x, 1));
237 break;
238 case PRE_DEC:
239 *patternp++ = '>';
240 encode_pattern_1 (XEXP (x, 0));
241 break;
242 case POST_INC:
243 *patternp++ = '<';
244 encode_pattern_1 (XEXP (x, 0));
245 break;
246 case LO_SUM:
247 *patternp++ = 'L';
248 encode_pattern_1 (XEXP (x, 0));
249 encode_pattern_1 (XEXP (x, 1));
250 break;
251 case HIGH:
252 *patternp++ = 'H';
253 encode_pattern_1 (XEXP (x, 0));
254 break;
255 case SYMBOL_REF:
256 *patternp++ = 's';
257 break;
258 case LABEL_REF:
259 *patternp++ = 'l';
260 break;
261 case CODE_LABEL:
262 *patternp++ = 'c';
263 break;
264 case CONST_INT:
265 case CONST_DOUBLE:
266 *patternp++ = 'i';
267 break;
268 case UNSPEC:
269 *patternp++ = 'u';
270 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
271 for (i = 0; i < XVECLEN (x, 0); i++)
272 encode_pattern_1 (XVECEXP (x, 0, i));
273 break;
274 case USE:
275 *patternp++ = 'U';
276 break;
277 case PARALLEL:
278 *patternp++ = '|';
279 for (i = 0; i < XVECLEN (x, 0); i++)
280 encode_pattern_1 (XVECEXP (x, 0, i));
281 break;
282 case EXPR_LIST:
283 *patternp++ = 'E';
284 encode_pattern_1 (XEXP (x, 0));
285 if (XEXP (x, 1))
286 encode_pattern_1 (XEXP (x, 1));
287 break;
288 default:
289 *patternp++ = '?';
290 #if DEBUG0
291 fprintf (stderr, "can't encode pattern %s\n",
292 GET_RTX_NAME (GET_CODE (x)));
293 debug_rtx (x);
294 #endif
295 break;
299 static void
300 encode_pattern (rtx x)
302 patternp = pattern;
303 encode_pattern_1 (x);
304 *patternp = 0;
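/* For example, a hypothetical operand
   (mem:HI (plus:PSI (reg:PSI A0) (const_int 4)))
   encodes as "m+ri": patternr[0] points at the MEM, patternr[1] at the
   PLUS, patternr[2] at the REG and patternr[3] at the CONST_INT, which
   is why callers below test RTX_IS ("m+ri") and then read the base
   register out of patternr[2].  */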
307 /* Since register names indicate the mode they're used in, we need a
308 way to determine which name to use for a register in a given mode. Called
309 by print_operand(). */
311 static const char *
312 reg_name_with_mode (int regno, machine_mode mode)
314 int mlen = GET_MODE_SIZE (mode);
315 if (regno == R0_REGNO && mlen == 1)
316 return "r0l";
317 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
318 return "r2r0";
319 if (regno == R0_REGNO && mlen == 6)
320 return "r2r1r0";
321 if (regno == R0_REGNO && mlen == 8)
322 return "r3r1r2r0";
323 if (regno == R1_REGNO && mlen == 1)
324 return "r1l";
325 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
326 return "r3r1";
327 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
328 return "a1a0";
329 return reg_names[regno];
332 /* How many bytes a register uses on the stack when it's pushed. We need
333 to know this because the push opcode needs to explicitly indicate
334 the size of the register, even though the register's name
335 already implies that size. Used by m32c_output_reg_{push,pop}, which
336 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
338 static int
339 reg_push_size (int regno)
341 switch (regno)
343 case R0_REGNO:
344 case R1_REGNO:
345 return 2;
346 case R2_REGNO:
347 case R3_REGNO:
348 case FLG_REGNO:
349 return 2;
350 case A0_REGNO:
351 case A1_REGNO:
352 case SB_REGNO:
353 case FB_REGNO:
354 case SP_REGNO:
355 if (TARGET_A16)
356 return 2;
357 else
358 return 3;
359 default:
360 gcc_unreachable ();
364 /* Given two register classes, find the largest intersection between
365 them. If there is no intersection, return RETURNED_IF_EMPTY
366 instead. */
367 static reg_class_t
368 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
369 reg_class_t returned_if_empty)
371 HARD_REG_SET cc;
372 int i;
373 reg_class_t best = NO_REGS;
374 unsigned int best_size = 0;
376 if (original_class == limiting_class)
377 return original_class;
379 cc = reg_class_contents[original_class];
380 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
382 for (i = 0; i < LIM_REG_CLASSES; i++)
384 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
385 if (best_size < reg_class_size[i])
387 best = (reg_class_t) i;
388 best_size = reg_class_size[i];
392 if (best == NO_REGS)
393 return returned_if_empty;
394 return best;
397 /* Used by m32c_register_move_cost to determine if a move is
398 impossibly expensive. */
399 static bool
400 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
402 /* Cache the results: 0=untested 1=no 2=yes */
403 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
405 if (results[(int) rclass][mode] == 0)
407 int r;
408 results[rclass][mode] = 1;
409 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
410 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
411 && HARD_REGNO_MODE_OK (r, mode))
413 results[rclass][mode] = 2;
414 break;
418 #if DEBUG0
419 fprintf (stderr, "class %s can hold %s? %s\n",
420 class_names[(int) rclass], mode_name[mode],
421 (results[rclass][mode] == 2) ? "yes" : "no");
422 #endif
423 return results[(int) rclass][mode] == 2;
426 /* Run-time Target Specification. */
428 /* Memregs are memory locations that gcc treats like general
429 registers, as there are a limited number of true registers and the
430 m32c families can use memory in most places that registers can be
431 used.
433 However, since memory accesses are more expensive than registers,
434 we allow the user to limit the number of memregs available, in
435 order to try to persuade gcc to try harder to use real registers.
437 Memregs are provided by lib1funcs.S.
440 int ok_to_change_target_memregs = TRUE;
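/* For instance, assuming the -memregs= command-line option that backs
   target_memregs, -memregs=6 yields (6+1)/2 = 3 usable 16-bit memreg
   words; m32c_conditional_register_usage below then marks mem3..mem7
   as fixed and removes them from MEM_REGS.  */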
442 /* Implements TARGET_OPTION_OVERRIDE. */
444 #undef TARGET_OPTION_OVERRIDE
445 #define TARGET_OPTION_OVERRIDE m32c_option_override
447 static void
448 m32c_option_override (void)
450 /* We limit memregs to 0..16, and provide a default. */
451 if (global_options_set.x_target_memregs)
453 if (target_memregs < 0 || target_memregs > 16)
454 error ("invalid target memregs value '%d'", target_memregs);
456 else
457 target_memregs = 16;
459 if (TARGET_A24)
460 flag_ivopts = 0;
462 /* This target defaults to strict volatile bitfields. */
463 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
464 flag_strict_volatile_bitfields = 1;
466 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
467 This is always worse than an absolute call. */
468 if (TARGET_A16)
469 flag_no_function_cse = 1;
471 /* This wants to put insns between compares and their jumps. */
472 /* FIXME: The right solution is to properly trace the flags register
473 values, but that is too much work for stage 4. */
474 flag_combine_stack_adjustments = 0;
477 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
478 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
480 static void
481 m32c_override_options_after_change (void)
483 if (TARGET_A16)
484 flag_no_function_cse = 1;
487 /* Defining data structures for per-function information */
489 /* The usual; we set up our machine_function data. */
490 static struct machine_function *
491 m32c_init_machine_status (void)
493 return ggc_cleared_alloc<machine_function> ();
496 /* Implements INIT_EXPANDERS. We just set up to call the above
497 function. */
498 void
499 m32c_init_expanders (void)
501 init_machine_status = m32c_init_machine_status;
504 /* Storage Layout */
506 /* Register Basics */
508 /* Basic Characteristics of Registers */
510 /* Whether a mode fits in a register is complex enough to warrant a
511 table. */
512 static struct
514 char qi_regs;
515 char hi_regs;
516 char pi_regs;
517 char si_regs;
518 char di_regs;
519 } nregs_table[FIRST_PSEUDO_REGISTER] =
521 { 1, 1, 2, 2, 4 }, /* r0 */
522 { 0, 1, 0, 0, 0 }, /* r2 */
523 { 1, 1, 2, 2, 0 }, /* r1 */
524 { 0, 1, 0, 0, 0 }, /* r3 */
525 { 0, 1, 1, 0, 0 }, /* a0 */
526 { 0, 1, 1, 0, 0 }, /* a1 */
527 { 0, 1, 1, 0, 0 }, /* sb */
528 { 0, 1, 1, 0, 0 }, /* fb */
529 { 0, 1, 1, 0, 0 }, /* sp */
530 { 1, 1, 1, 0, 0 }, /* pc */
531 { 0, 0, 0, 0, 0 }, /* fl */
532 { 1, 1, 1, 0, 0 }, /* ap */
533 { 1, 1, 2, 2, 4 }, /* mem0 */
534 { 1, 1, 2, 2, 4 }, /* mem1 */
535 { 1, 1, 2, 2, 4 }, /* mem2 */
536 { 1, 1, 2, 2, 4 }, /* mem3 */
537 { 1, 1, 2, 2, 4 }, /* mem4 */
538 { 1, 1, 2, 2, 0 }, /* mem5 */
539 { 1, 1, 2, 2, 0 }, /* mem6 */
540 { 1, 1, 0, 0, 0 }, /* mem7 */
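/* Reading the table above, the r0 row { 1, 1, 2, 2, 4 } says that a
   QImode or HImode value occupies one hard register, an SImode value
   occupies two (printed as "r2r0" by reg_name_with_mode) and a DImode
   value occupies four (r3r1r2r0); a zero entry means the mode does not
   fit in that register at all.  */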
543 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
544 of available memregs, and select which registers need to be preserved
545 across calls based on the chip family. */
547 #undef TARGET_CONDITIONAL_REGISTER_USAGE
548 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
549 void
550 m32c_conditional_register_usage (void)
552 int i;
554 if (0 <= target_memregs && target_memregs <= 16)
556 /* The command line option is bytes, but our "registers" are
557 16-bit words. */
558 for (i = (target_memregs+1)/2; i < 8; i++)
560 fixed_regs[MEM0_REGNO + i] = 1;
561 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
565 /* M32CM and M32C preserve more registers across function calls. */
566 if (TARGET_A24)
568 call_used_regs[R1_REGNO] = 0;
569 call_used_regs[R2_REGNO] = 0;
570 call_used_regs[R3_REGNO] = 0;
571 call_used_regs[A0_REGNO] = 0;
572 call_used_regs[A1_REGNO] = 0;
576 /* How Values Fit in Registers */
578 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
579 different registers are different sizes from each other, *and* may
580 be different sizes in different chip families. */
581 static int
582 m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
584 if (regno == FLG_REGNO && mode == CCmode)
585 return 1;
586 if (regno >= FIRST_PSEUDO_REGISTER)
587 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
589 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
590 return (GET_MODE_SIZE (mode) + 1) / 2;
592 if (GET_MODE_SIZE (mode) <= 1)
593 return nregs_table[regno].qi_regs;
594 if (GET_MODE_SIZE (mode) <= 2)
595 return nregs_table[regno].hi_regs;
596 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
597 return 2;
598 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
599 return nregs_table[regno].pi_regs;
600 if (GET_MODE_SIZE (mode) <= 4)
601 return nregs_table[regno].si_regs;
602 if (GET_MODE_SIZE (mode) <= 8)
603 return nregs_table[regno].di_regs;
604 return 0;
608 m32c_hard_regno_nregs (int regno, machine_mode mode)
610 int rv = m32c_hard_regno_nregs_1 (regno, mode);
611 return rv ? rv : 1;
614 /* Implements HARD_REGNO_MODE_OK. The above function does the work
615 already; just test its return value. */
617 m32c_hard_regno_ok (int regno, machine_mode mode)
619 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
622 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
623 registers are all different sizes. However, since most modes are
624 bigger than our registers anyway, it's easier to implement this
625 function that way, leaving QImode as the only unique case. */
627 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
629 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
630 return 1;
632 #if 0
633 if (m1 == QImode || m2 == QImode)
634 return 0;
635 #endif
637 return 1;
640 /* Register Classes */
642 /* Implements REGNO_REG_CLASS. */
643 enum reg_class
644 m32c_regno_reg_class (int regno)
646 switch (regno)
648 case R0_REGNO:
649 return R0_REGS;
650 case R1_REGNO:
651 return R1_REGS;
652 case R2_REGNO:
653 return R2_REGS;
654 case R3_REGNO:
655 return R3_REGS;
656 case A0_REGNO:
657 return A0_REGS;
658 case A1_REGNO:
659 return A1_REGS;
660 case SB_REGNO:
661 return SB_REGS;
662 case FB_REGNO:
663 return FB_REGS;
664 case SP_REGNO:
665 return SP_REGS;
666 case FLG_REGNO:
667 return FLG_REGS;
668 default:
669 if (IS_MEM_REGNO (regno))
670 return MEM_REGS;
671 return ALL_REGS;
675 /* Implements REGNO_OK_FOR_BASE_P. */
677 m32c_regno_ok_for_base_p (int regno)
679 if (regno == A0_REGNO
680 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
681 return 1;
682 return 0;
685 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
686 registers of the appropriate size. */
688 #undef TARGET_PREFERRED_RELOAD_CLASS
689 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
691 static reg_class_t
692 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
694 reg_class_t newclass = rclass;
696 #if DEBUG0
697 fprintf (stderr, "\npreferred_reload_class for %s is ",
698 class_names[rclass]);
699 #endif
700 if (rclass == NO_REGS)
701 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
703 if (reg_classes_intersect_p (rclass, CR_REGS))
705 switch (GET_MODE (x))
707 case QImode:
708 newclass = HL_REGS;
709 break;
710 default:
711 /* newclass = HI_REGS; */
712 break;
716 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
717 newclass = SI_REGS;
718 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
719 && ! reg_class_subset_p (R03_REGS, rclass))
720 newclass = DI_REGS;
722 rclass = reduce_class (rclass, newclass, rclass);
724 if (GET_MODE (x) == QImode)
725 rclass = reduce_class (rclass, HL_REGS, rclass);
727 #if DEBUG0
728 fprintf (stderr, "%s\n", class_names[rclass]);
729 debug_rtx (x);
731 if (GET_CODE (x) == MEM
732 && GET_CODE (XEXP (x, 0)) == PLUS
733 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
734 fprintf (stderr, "Glorm!\n");
735 #endif
736 return rclass;
739 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
741 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
742 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
744 static reg_class_t
745 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
747 return m32c_preferred_reload_class (x, rclass);
750 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
751 address registers for reloads since they're needed for address
752 reloads. */
754 m32c_limit_reload_class (machine_mode mode, int rclass)
756 #if DEBUG0
757 fprintf (stderr, "limit_reload_class for %s: %s ->",
758 mode_name[mode], class_names[rclass]);
759 #endif
761 if (mode == QImode)
762 rclass = reduce_class (rclass, HL_REGS, rclass);
763 else if (mode == HImode)
764 rclass = reduce_class (rclass, HI_REGS, rclass);
765 else if (mode == SImode)
766 rclass = reduce_class (rclass, SI_REGS, rclass);
768 if (rclass != A_REGS)
769 rclass = reduce_class (rclass, DI_REGS, rclass);
771 #if DEBUG0
772 fprintf (stderr, " %s\n", class_names[rclass]);
773 #endif
774 return rclass;
777 /* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded in
778 r0 or r1, as those are the only real QImode registers. CR regs get
779 reloaded through appropriately sized general or address
780 registers. */
782 m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
784 int cc = class_contents[rclass][0];
785 #if DEBUG0
786 fprintf (stderr, "\nsecondary reload class %s %s\n",
787 class_names[rclass], mode_name[mode]);
788 debug_rtx (x);
789 #endif
790 if (mode == QImode
791 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
792 return QI_REGS;
793 if (reg_classes_intersect_p (rclass, CR_REGS)
794 && GET_CODE (x) == REG
795 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
796 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
797 return NO_REGS;
800 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
801 reloads. */
803 #undef TARGET_CLASS_LIKELY_SPILLED_P
804 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
806 static bool
807 m32c_class_likely_spilled_p (reg_class_t regclass)
809 if (regclass == A_REGS)
810 return true;
812 return (reg_class_size[(int) regclass] == 1);
815 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
816 documented meaning, to avoid potential inconsistencies with actual
817 class definitions. */
819 #undef TARGET_CLASS_MAX_NREGS
820 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
822 static unsigned char
823 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
825 int rn;
826 unsigned char max = 0;
828 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
829 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
831 unsigned char n = m32c_hard_regno_nregs (rn, mode);
832 if (max < n)
833 max = n;
835 return max;
838 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
839 QI (r0l, r1l) because the chip doesn't support QI ops on other
840 registers (well, it does on a0/a1 but if we let gcc do that, reload
841 suffers). Otherwise, we allow changes to larger modes. */
843 m32c_cannot_change_mode_class (machine_mode from,
844 machine_mode to, int rclass)
846 int rn;
847 #if DEBUG0
848 fprintf (stderr, "cannot change from %s to %s in %s\n",
849 mode_name[from], mode_name[to], class_names[rclass]);
850 #endif
852 /* If the larger mode isn't allowed in any of these registers, we
853 can't allow the change. */
854 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
855 if (class_contents[rclass][0] & (1 << rn))
856 if (! m32c_hard_regno_ok (rn, to))
857 return 1;
859 if (to == QImode)
860 return (class_contents[rclass][0] & 0x1ffa);
862 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
863 && GET_MODE_SIZE (from) > 1)
864 return 0;
865 if (GET_MODE_SIZE (from) > 2) /* all other regs */
866 return 0;
868 return 1;
871 /* Helpers for the rest of the file. */
872 /* TRUE if the rtx is a REG rtx for the given register. */
873 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
874 && REGNO (rtx) == regno)
875 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
876 base register in address calculations (hence the "strict"
877 argument). */
878 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
879 && (REGNO (rtx) == AP_REGNO \
880 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
882 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
884 /* Implements matching for constraints (see next function too). 'S' is
885 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
886 call return values. */
887 bool
888 m32c_matches_constraint_p (rtx value, int constraint)
890 encode_pattern (value);
892 switch (constraint) {
893 case CONSTRAINT_SF:
894 return (far_addr_space_p (value)
895 && ((RTX_IS ("mr")
896 && A0_OR_PSEUDO (patternr[1])
897 && GET_MODE (patternr[1]) == SImode)
898 || (RTX_IS ("m+^Sri")
899 && A0_OR_PSEUDO (patternr[4])
900 && GET_MODE (patternr[4]) == HImode)
901 || (RTX_IS ("m+^Srs")
902 && A0_OR_PSEUDO (patternr[4])
903 && GET_MODE (patternr[4]) == HImode)
904 || (RTX_IS ("m+^S+ris")
905 && A0_OR_PSEUDO (patternr[5])
906 && GET_MODE (patternr[5]) == HImode)
907 || RTX_IS ("ms")));
908 case CONSTRAINT_Sd:
910 /* This is the common "src/dest" address */
911 rtx r;
912 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
913 return true;
914 if (RTX_IS ("ms") || RTX_IS ("m+si"))
915 return true;
916 if (RTX_IS ("m++rii"))
918 if (REGNO (patternr[3]) == FB_REGNO
919 && INTVAL (patternr[4]) == 0)
920 return true;
922 if (RTX_IS ("mr"))
923 r = patternr[1];
924 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
925 r = patternr[2];
926 else
927 return false;
928 if (REGNO (r) == SP_REGNO)
929 return false;
930 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
932 case CONSTRAINT_Sa:
934 rtx r;
935 if (RTX_IS ("mr"))
936 r = patternr[1];
937 else if (RTX_IS ("m+ri"))
938 r = patternr[2];
939 else
940 return false;
941 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
943 case CONSTRAINT_Si:
944 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
945 case CONSTRAINT_Ss:
946 return ((RTX_IS ("mr")
947 && (IS_REG (patternr[1], SP_REGNO)))
948 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
949 case CONSTRAINT_Sf:
950 return ((RTX_IS ("mr")
951 && (IS_REG (patternr[1], FB_REGNO)))
952 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
953 case CONSTRAINT_Sb:
954 return ((RTX_IS ("mr")
955 && (IS_REG (patternr[1], SB_REGNO)))
956 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
957 case CONSTRAINT_Sp:
958 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
959 return (RTX_IS ("mi")
960 && !(INTVAL (patternr[1]) & ~0x1fff));
961 case CONSTRAINT_S1:
962 return r1h_operand (value, QImode);
963 case CONSTRAINT_Rpa:
964 return GET_CODE (value) == PARALLEL;
965 default:
966 return false;
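/* Examples of the encoding in use, with hypothetical operands: a
   (mem:QI (const_int 0x10)) encodes as "mi" and satisfies "Sp" above,
   since 0x10 fits the bit-addressable 0..0x1fff range, while a
   (mem:HI (reg:HI SP)) encodes as "mr" and satisfies "Ss".  */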
970 /* STACK AND CALLING */
972 /* Frame Layout */
974 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
975 (yes, THREE bytes) onto the stack for the return address, but we
976 don't support pointers bigger than 16 bits on those chips. This
977 will likely wreak havoc with exception unwinding. FIXME. */
979 m32c_return_addr_rtx (int count)
981 machine_mode mode;
982 int offset;
983 rtx ra_mem;
985 if (count)
986 return NULL_RTX;
987 /* we want 2[$fb] */
989 if (TARGET_A24)
991 /* It's four bytes */
992 mode = PSImode;
993 offset = 4;
995 else
997 /* FIXME: it's really 3 bytes */
998 mode = HImode;
999 offset = 2;
1002 ra_mem =
1003 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
1004 offset));
1005 return copy_to_mode_reg (mode, ra_mem);
1008 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1010 m32c_incoming_return_addr_rtx (void)
1012 /* we want [sp] */
1013 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1016 /* Exception Handling Support */
1018 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1019 pointers. */
1021 m32c_eh_return_data_regno (int n)
1023 switch (n)
1025 case 0:
1026 return MEM0_REGNO;
1027 case 1:
1028 return MEM0_REGNO+4;
1029 default:
1030 return INVALID_REGNUM;
1034 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1035 m32c_emit_eh_epilogue. */
1037 m32c_eh_return_stackadj_rtx (void)
1039 if (!cfun->machine->eh_stack_adjust)
1041 rtx sa;
1043 sa = gen_rtx_REG (Pmode, R0_REGNO);
1044 cfun->machine->eh_stack_adjust = sa;
1046 return cfun->machine->eh_stack_adjust;
1049 /* Registers That Address the Stack Frame */
1051 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1052 the original spec called for dwarf numbers to vary with register
1053 width as well, for example, r0l, r0, and r2r0 would each have
1054 different dwarf numbers. GCC doesn't support this, and we don't do
1055 it, and gdb seems to like it this way anyway. */
1056 unsigned int
1057 m32c_dwarf_frame_regnum (int n)
1059 switch (n)
1061 case R0_REGNO:
1062 return 5;
1063 case R1_REGNO:
1064 return 6;
1065 case R2_REGNO:
1066 return 7;
1067 case R3_REGNO:
1068 return 8;
1069 case A0_REGNO:
1070 return 9;
1071 case A1_REGNO:
1072 return 10;
1073 case FB_REGNO:
1074 return 11;
1075 case SB_REGNO:
1076 return 19;
1078 case SP_REGNO:
1079 return 12;
1080 case PC_REGNO:
1081 return 13;
1082 default:
1083 return DWARF_FRAME_REGISTERS + 1;
1087 /* The frame looks like this:
1089 ap -> +------------------------------
1090 | Return address (3 or 4 bytes)
1091 | Saved FB (2 or 4 bytes)
1092 fb -> +------------------------------
1093 | local vars
1094 | register saves fb
1095 | through r0 as needed
1096 sp -> +------------------------------
1099 /* We use this to wrap all emitted insns in the prologue. */
1100 static rtx
1101 F (rtx x)
1103 RTX_FRAME_RELATED_P (x) = 1;
1104 return x;
1107 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1108 how much the stack pointer moves for each, for each cpu family. */
1109 static struct
1111 int reg1;
1112 int bit;
1113 int a16_bytes;
1114 int a24_bytes;
1115 } pushm_info[] =
1117 /* These are in reverse push (nearest-to-sp) order. */
1118 { R0_REGNO, 0x80, 2, 2 },
1119 { R1_REGNO, 0x40, 2, 2 },
1120 { R2_REGNO, 0x20, 2, 2 },
1121 { R3_REGNO, 0x10, 2, 2 },
1122 { A0_REGNO, 0x08, 2, 4 },
1123 { A1_REGNO, 0x04, 2, 4 },
1124 { SB_REGNO, 0x02, 2, 4 },
1125 { FB_REGNO, 0x01, 2, 4 }
1128 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1130 /* Returns TRUE if we need to save/restore the given register. We
1131 save everything for exception handlers, so that any register can be
1132 unwound. For interrupt handlers, we save everything if the handler
1133 calls something else (because we don't know what *that* function
1134 might do), but try to be a bit smarter if the handler is a leaf
1135 function. We always save $a0, though, because we use that in the
1136 epilogue to copy $fb to $sp. */
1137 static int
1138 need_to_save (int regno)
1140 if (fixed_regs[regno])
1141 return 0;
1142 if (crtl->calls_eh_return)
1143 return 1;
1144 if (regno == FP_REGNO)
1145 return 0;
1146 if (cfun->machine->is_interrupt
1147 && (!cfun->machine->is_leaf
1148 || (regno == A0_REGNO
1149 && m32c_function_needs_enter ())
1151 return 1;
1152 if (df_regs_ever_live_p (regno)
1153 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1154 return 1;
1155 return 0;
1158 /* This function contains all the intelligence about saving and
1159 restoring registers. It always figures out the register save set.
1160 When called with PP_justcount, it merely returns the size of the
1161 save set (for eliminating the frame pointer, for example). When
1162 called with PP_pushm or PP_popm, it emits the appropriate
1163 instructions for saving (pushm) or restoring (popm) the
1164 registers. */
1165 static int
1166 m32c_pushm_popm (Push_Pop_Type ppt)
1168 int reg_mask = 0;
1169 int byte_count = 0, bytes;
1170 int i;
1171 rtx dwarf_set[PUSHM_N];
1172 int n_dwarfs = 0;
1173 int nosave_mask = 0;
1175 if (crtl->return_rtx
1176 && GET_CODE (crtl->return_rtx) == PARALLEL
1177 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1179 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1180 rtx rv = XEXP (exp, 0);
1181 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1183 if (rv_bytes > 2)
1184 nosave_mask |= 0x20; /* PSI, SI */
1185 else
1186 nosave_mask |= 0xf0; /* DF */
1187 if (rv_bytes > 4)
1188 nosave_mask |= 0x50; /* DI */
1191 for (i = 0; i < (int) PUSHM_N; i++)
1193 /* Skip if the register doesn't need saving. */
1194 if (!need_to_save (pushm_info[i].reg1))
1195 continue;
1197 if (pushm_info[i].bit & nosave_mask)
1198 continue;
1200 reg_mask |= pushm_info[i].bit;
1201 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1203 if (ppt == PP_pushm)
1205 machine_mode mode = (bytes == 2) ? HImode : SImode;
1206 rtx addr;
1208 /* Always use stack_pointer_rtx instead of calling
1209 gen_rtx_REG ourselves. Code elsewhere in GCC assumes
1210 that there is a single rtx representing the stack pointer,
1211 namely stack_pointer_rtx, and uses == to recognize it. */
1212 addr = stack_pointer_rtx;
1214 if (byte_count != 0)
1215 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1217 dwarf_set[n_dwarfs++] =
1218 gen_rtx_SET (gen_rtx_MEM (mode, addr),
1219 gen_rtx_REG (mode, pushm_info[i].reg1));
1220 F (dwarf_set[n_dwarfs - 1]);
1223 byte_count += bytes;
1226 if (cfun->machine->is_interrupt)
1228 cfun->machine->intr_pushm = reg_mask & 0xfe;
1229 reg_mask = 0;
1230 byte_count = 0;
1233 if (cfun->machine->is_interrupt)
1234 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1235 if (need_to_save (i))
1237 byte_count += 2;
1238 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1241 if (ppt == PP_pushm && byte_count)
1243 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1244 rtx pushm;
1246 if (reg_mask)
1248 XVECEXP (note, 0, 0)
1249 = gen_rtx_SET (stack_pointer_rtx,
1250 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1251 stack_pointer_rtx,
1252 GEN_INT (-byte_count)));
1253 F (XVECEXP (note, 0, 0));
1255 for (i = 0; i < n_dwarfs; i++)
1256 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1258 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1260 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1263 if (cfun->machine->is_interrupt)
1264 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1265 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1267 if (TARGET_A16)
1268 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1269 else
1270 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1271 F (pushm);
1274 if (ppt == PP_popm && byte_count)
1276 if (cfun->machine->is_interrupt)
1277 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1278 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1280 if (TARGET_A16)
1281 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1282 else
1283 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1285 if (reg_mask)
1286 emit_insn (gen_popm (GEN_INT (reg_mask)));
1289 return byte_count;
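/* A worked example with an assumed save set: if need_to_save() is true
   for r0, r1 and a0 only, reg_mask becomes 0x80|0x40|0x08 = 0xc8 and
   byte_count is 2+2+2 = 6 on A16 parts or 2+2+4 = 8 on A24 parts, per
   the pushm_info table above.  */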
1292 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1293 diagrams our call frame. */
1295 m32c_initial_elimination_offset (int from, int to)
1297 int ofs = 0;
1299 if (from == AP_REGNO)
1301 if (TARGET_A16)
1302 ofs += 5;
1303 else
1304 ofs += 8;
1307 if (to == SP_REGNO)
1309 ofs += m32c_pushm_popm (PP_justcount);
1310 ofs += get_frame_size ();
1313 /* Account for push rounding. */
1314 if (TARGET_A24)
1315 ofs = (ofs + 1) & ~1;
1316 #if DEBUG0
1317 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1318 to, ofs);
1319 #endif
1320 return ofs;
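/* Working the arithmetic above through with illustrative numbers: on an
   A16 part, eliminating $ap to $fb yields 5 (3-byte return address plus
   2-byte saved $fb); eliminating $ap to $sp additionally adds the bytes
   pushed by m32c_pushm_popm and get_frame_size ().  On A24 the fixed
   part is 8 (4 + 4) and the total is rounded up to an even byte count.  */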
1323 /* Passing Function Arguments on the Stack */
1325 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1326 M32C has word stacks. */
1327 unsigned int
1328 m32c_push_rounding (int n)
1330 if (TARGET_R8C || TARGET_M16C)
1331 return n;
1332 return (n + 1) & ~1;
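/* For instance, pushing a 3-byte argument takes 3 bytes on the
   byte-stack R8C/M16C parts but is rounded up to 4 bytes on the M32C,
   which uses a word stack.  */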
1335 /* Passing Arguments in Registers */
1337 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1338 registers, partly on stack. If our function returns a struct, a
1339 pointer to a buffer for it is at the top of the stack (last thing
1340 pushed). The first few real arguments may be in registers as
1341 follows:
1343 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1344 arg2 in r2 if it's HI (else pushed on stack)
1345 rest on stack
1346 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1347 rest on stack
1349 Structs are not passed in registers, even if they fit. Only
1350 integer and pointer types are passed in registers.
1352 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1353 r2 if it fits. */
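/* A hypothetical call, for illustration: for f (char a, int b) with
   16-bit int, the R8C/M16C convention above passes a in r1 (as a QImode
   value, i.e. r1l) and b in r2, while on M32C a goes in r0 and b is
   pushed on the stack.  Aggregates always go on the stack regardless of
   size.  */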
1354 #undef TARGET_FUNCTION_ARG
1355 #define TARGET_FUNCTION_ARG m32c_function_arg
1356 static rtx
1357 m32c_function_arg (cumulative_args_t ca_v,
1358 machine_mode mode, const_tree type, bool named)
1360 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1362 /* Can return a reg, parallel, or 0 for stack */
1363 rtx rv = NULL_RTX;
1364 #if DEBUG0
1365 fprintf (stderr, "func_arg %d (%s, %d)\n",
1366 ca->parm_num, mode_name[mode], named);
1367 debug_tree ((tree)type);
1368 #endif
1370 if (mode == VOIDmode)
1371 return GEN_INT (0);
1373 if (ca->force_mem || !named)
1375 #if DEBUG0
1376 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1377 named);
1378 #endif
1379 return NULL_RTX;
1382 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1383 return NULL_RTX;
1385 if (type && AGGREGATE_TYPE_P (type))
1386 return NULL_RTX;
1388 switch (ca->parm_num)
1390 case 1:
1391 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1392 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1393 break;
1395 case 2:
1396 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1397 rv = gen_rtx_REG (mode, R2_REGNO);
1398 break;
1401 #if DEBUG0
1402 debug_rtx (rv);
1403 #endif
1404 return rv;
1407 #undef TARGET_PASS_BY_REFERENCE
1408 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1409 static bool
1410 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1411 machine_mode mode ATTRIBUTE_UNUSED,
1412 const_tree type ATTRIBUTE_UNUSED,
1413 bool named ATTRIBUTE_UNUSED)
1415 return 0;
1418 /* Implements INIT_CUMULATIVE_ARGS. */
1419 void
1420 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1421 tree fntype,
1422 rtx libname ATTRIBUTE_UNUSED,
1423 tree fndecl,
1424 int n_named_args ATTRIBUTE_UNUSED)
1426 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1427 ca->force_mem = 1;
1428 else
1429 ca->force_mem = 0;
1430 ca->parm_num = 1;
1433 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1434 functions returning structures, so we always reset that. Otherwise,
1435 we only need to know the sequence number of the argument to know what
1436 to do with it. */
1437 #undef TARGET_FUNCTION_ARG_ADVANCE
1438 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1439 static void
1440 m32c_function_arg_advance (cumulative_args_t ca_v,
1441 machine_mode mode ATTRIBUTE_UNUSED,
1442 const_tree type ATTRIBUTE_UNUSED,
1443 bool named ATTRIBUTE_UNUSED)
1445 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1447 if (ca->force_mem)
1448 ca->force_mem = 0;
1449 else
1450 ca->parm_num++;
1453 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1454 #undef TARGET_FUNCTION_ARG_BOUNDARY
1455 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1456 static unsigned int
1457 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1458 const_tree type ATTRIBUTE_UNUSED)
1460 return (TARGET_A16 ? 8 : 16);
1463 /* Implements FUNCTION_ARG_REGNO_P. */
1465 m32c_function_arg_regno_p (int r)
1467 if (TARGET_A24)
1468 return (r == R0_REGNO);
1469 return (r == R1_REGNO || r == R2_REGNO);
1472 /* HImode and PSImode are the two "native" modes as far as GCC is
1473 concerned, but the chips also support a 32-bit mode which is used
1474 for some opcodes in R8C/M16C and for reset vectors and such. */
1475 #undef TARGET_VALID_POINTER_MODE
1476 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1477 static bool
1478 m32c_valid_pointer_mode (machine_mode mode)
1480 if (mode == HImode
1481 || mode == PSImode
1482 || mode == SImode
1484 return 1;
1485 return 0;
1488 /* How Scalar Function Values Are Returned */
1490 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1491 combination of registers starting there (r2r0 for longs, r3r1r2r0
1492 for long long, r3r2r1r0 for doubles), except that that ABI
1493 currently doesn't work because it ends up using all available
1494 general registers and gcc often can't compile it. So, instead, we
1495 return anything bigger than 16 bits in "mem0" (effectively, a
1496 memory location). */
1498 #undef TARGET_LIBCALL_VALUE
1499 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1501 static rtx
1502 m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1504 /* return reg or parallel */
1505 #if 0
1506 /* FIXME: GCC has difficulty returning large values in registers,
1507 because that ties up most of the general registers and gives the
1508 register allocator little to work with. Until we can resolve
1509 this, large values are returned in memory. */
1510 if (mode == DFmode)
1512 rtx rv;
1514 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1515 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1516 gen_rtx_REG (HImode,
1517 R0_REGNO),
1518 GEN_INT (0));
1519 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1520 gen_rtx_REG (HImode,
1521 R1_REGNO),
1522 GEN_INT (2));
1523 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1524 gen_rtx_REG (HImode,
1525 R2_REGNO),
1526 GEN_INT (4));
1527 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1528 gen_rtx_REG (HImode,
1529 R3_REGNO),
1530 GEN_INT (6));
1531 return rv;
1534 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1536 rtx rv;
1538 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1539 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1540 gen_rtx_REG (mode,
1541 R0_REGNO),
1542 GEN_INT (0));
1543 return rv;
1545 #endif
1547 if (GET_MODE_SIZE (mode) > 2)
1548 return gen_rtx_REG (mode, MEM0_REGNO);
1549 return gen_rtx_REG (mode, R0_REGNO);
1552 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1553 conventions. */
1555 #undef TARGET_FUNCTION_VALUE
1556 #define TARGET_FUNCTION_VALUE m32c_function_value
1558 static rtx
1559 m32c_function_value (const_tree valtype,
1560 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1561 bool outgoing ATTRIBUTE_UNUSED)
1563 /* return reg or parallel */
1564 const machine_mode mode = TYPE_MODE (valtype);
1565 return m32c_libcall_value (mode, NULL_RTX);
1568 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1570 #undef TARGET_FUNCTION_VALUE_REGNO_P
1571 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1573 static bool
1574 m32c_function_value_regno_p (const unsigned int regno)
1576 return (regno == R0_REGNO || regno == MEM0_REGNO);
1579 /* How Large Values Are Returned */
1581 /* We return structures by pushing the address on the stack, even if
1582 we use registers for the first few "real" arguments. */
1583 #undef TARGET_STRUCT_VALUE_RTX
1584 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1585 static rtx
1586 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1587 int incoming ATTRIBUTE_UNUSED)
1589 return 0;
1592 /* Function Entry and Exit */
1594 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1596 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1598 if (cfun->machine->is_interrupt)
1599 return 1;
1600 return 0;
1603 /* Implementing the Varargs Macros */
1605 #undef TARGET_STRICT_ARGUMENT_NAMING
1606 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1607 static bool
1608 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1610 return 1;
1613 /* Trampolines for Nested Functions */
1616 m16c:
1617 1 0000 75C43412 mov.w #0x1234,a0
1618 2 0004 FC000000 jmp.a label
1620 m32c:
1621 1 0000 BC563412 mov.l:s #0x123456,a0
1622 2 0004 CC000000 jmp.a label
1625 /* Implements TRAMPOLINE_SIZE. */
1627 m32c_trampoline_size (void)
1629 /* Allocate extra space so we can avoid the messy shifts when we
1630 initialize the trampoline; we just write past the end of the
1631 opcode. */
1632 return TARGET_A16 ? 8 : 10;
1635 /* Implements TRAMPOLINE_ALIGNMENT. */
1637 m32c_trampoline_alignment (void)
1639 return 2;
1642 /* Implements TARGET_TRAMPOLINE_INIT. */
1644 #undef TARGET_TRAMPOLINE_INIT
1645 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1646 static void
1647 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1649 rtx function = XEXP (DECL_RTL (fndecl), 0);
1651 #define A0(m,i) adjust_address (m_tramp, m, i)
1652 if (TARGET_A16)
1654 /* Note: we subtract a "word" because the moves want signed
1655 constants, not unsigned constants. */
1656 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1657 emit_move_insn (A0 (HImode, 2), chainval);
1658 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1659 /* We use 16-bit addresses here, but store the zero to turn it
1660 into a 24-bit offset. */
1661 emit_move_insn (A0 (HImode, 5), function);
1662 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1664 else
1666 /* Note that the PSI moves actually write 4 bytes. Make sure we
1667 write stuff out in the right order, and leave room for the
1668 extra byte at the end. */
1669 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1670 emit_move_insn (A0 (PSImode, 1), chainval);
1671 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1672 emit_move_insn (A0 (PSImode, 5), function);
1674 #undef A0
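/* Spelling out the A16 byte layout built above (it matches the listing
   in the comment before m32c_trampoline_size): offsets 0-1 hold the
   mov.w opcode bytes 0x75 0xc4, offsets 2-3 the static chain, offset 4
   the jmp.a opcode 0xfc, and offsets 5-7 the 16-bit function address
   padded with a zero byte to form a 24-bit target.  */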
1677 /* Addressing Modes */
1679 /* The r8c/m32c family supports a wide range of non-orthogonal
1680 addressing modes, including the ability to double-indirect on *some*
1681 of them. Not all insns support all modes, either, but we rely on
1682 predicates and constraints to deal with that. */
1683 #undef TARGET_LEGITIMATE_ADDRESS_P
1684 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1685 bool
1686 m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1688 int mode_adjust;
1689 if (CONSTANT_P (x))
1690 return 1;
1692 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1693 return 0;
1694 if (TARGET_A24 && GET_MODE (x) != PSImode)
1695 return 0;
1697 /* Wide references to memory will be split after reload, so we must
1698 ensure that all parts of such splits remain legitimate
1699 addresses. */
1700 mode_adjust = GET_MODE_SIZE (mode) - 1;
1702 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1703 if (GET_CODE (x) == PRE_DEC
1704 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1706 return (GET_CODE (XEXP (x, 0)) == REG
1707 && REGNO (XEXP (x, 0)) == SP_REGNO);
1710 #if 0
1711 /* This is the double indirection detection, but it currently
1712 doesn't work as cleanly as this code implies, so until we've had
1713 a chance to debug it, leave it disabled. */
1714 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1716 #if DEBUG_DOUBLE
1717 fprintf (stderr, "double indirect\n");
1718 #endif
1719 x = XEXP (x, 0);
1721 #endif
1723 encode_pattern (x);
1724 if (RTX_IS ("r"))
1726 /* Most indexable registers can be used without displacements,
1727 although some of them will be emitted with an explicit zero
1728 to please the assembler. */
1729 switch (REGNO (patternr[0]))
1731 case A1_REGNO:
1732 case SB_REGNO:
1733 case FB_REGNO:
1734 case SP_REGNO:
1735 if (TARGET_A16 && GET_MODE (x) == SImode)
1736 return 0;
1737 case A0_REGNO:
1738 return 1;
1740 default:
1741 if (IS_PSEUDO (patternr[0], strict))
1742 return 1;
1743 return 0;
1747 if (TARGET_A16 && GET_MODE (x) == SImode)
1748 return 0;
1750 if (RTX_IS ("+ri"))
1752 /* This is more interesting, because different base registers
1753 allow for different displacements - both range and signedness
1754 - and it differs from chip series to chip series too. */
1755 int rn = REGNO (patternr[1]);
1756 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1757 switch (rn)
1759 case A0_REGNO:
1760 case A1_REGNO:
1761 case SB_REGNO:
1762 /* The syntax only allows positive offsets, but when the
1763 offsets span the entire memory range, we can simulate
1764 negative offsets by wrapping. */
1765 if (TARGET_A16)
1766 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1767 if (rn == SB_REGNO)
1768 return (offs >= 0 && offs <= 65535 - mode_adjust);
1769 /* A0 or A1 */
1770 return (offs >= -16777216 && offs <= 16777215);
1772 case FB_REGNO:
1773 if (TARGET_A16)
1774 return (offs >= -128 && offs <= 127 - mode_adjust);
1775 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1777 case SP_REGNO:
1778 return (offs >= -128 && offs <= 127 - mode_adjust);
1780 default:
1781 if (IS_PSEUDO (patternr[1], strict))
1782 return 1;
1783 return 0;
1786 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1788 rtx reg = patternr[1];
1790 /* We don't know where the symbol is, so only allow base
1791 registers which support displacements spanning the whole
1792 address range. */
1793 switch (REGNO (reg))
1795 case A0_REGNO:
1796 case A1_REGNO:
1797 /* $sb needs a secondary reload, but since it's involved in
1798 memory address reloads too, we don't deal with it very
1799 well. */
1800 /* case SB_REGNO: */
1801 return 1;
1802 default:
1803 if (GET_CODE (reg) == SUBREG)
1804 return 0;
1805 if (IS_PSEUDO (reg, strict))
1806 return 1;
1807 return 0;
1810 return 0;
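/* Example addresses against the rules above, using hypothetical RTL: on
   an A16 part, (plus:HI (reg:HI FB) (const_int -4)) encodes as "+ri"
   and is accepted because -4 lies in [-128, 127 - mode_adjust], whereas
   a $fb displacement of 200 is rejected here and is instead fixed up by
   m32c_legitimize_address below, which copies $fb into an address
   register.  */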
1813 /* Implements REG_OK_FOR_BASE_P. */
1815 m32c_reg_ok_for_base_p (rtx x, int strict)
1817 if (GET_CODE (x) != REG)
1818 return 0;
1819 switch (REGNO (x))
1821 case A0_REGNO:
1822 case A1_REGNO:
1823 case SB_REGNO:
1824 case FB_REGNO:
1825 case SP_REGNO:
1826 return 1;
1827 default:
1828 if (IS_PSEUDO (x, strict))
1829 return 1;
1830 return 0;
1834 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1835 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1836 like this:
1837 EB 4B FF mova -128[$fb],$a0
1838 D8 0C FF FF mov.w:Q #0,-1[$a0]
1840 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1841 displacements:
1842 7B F4 stc $fb,$a0
1843 77 54 00 01 sub #256,$a0
1844 D8 08 01 mov.w:Q #0,1[$a0]
1846 If we don't offset (i.e. offset by zero), we end up with:
1847 7B F4 stc $fb,$a0
1848 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1850 We have to subtract *something* so that we have a PLUS rtx to mark
1851 that we've done this reload. The -128 offset will never result in
1852 an 8-bit aN offset, and the payoff for the second case is five
1853 loads *if* those loads are within 256 bytes of the other end of the
1854 frame, so the third case seems best. Note that we subtract the
1855 zero, but detect that in the addhi3 pattern. */
1857 #define BIG_FB_ADJ 0
1859 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1860 worry about is frame base offsets, as $fb has a limited
1861 displacement range. We deal with this by attempting to reload $fb
1862 itself into an address register; that seems to result in the best
1863 code. */
1864 #undef TARGET_LEGITIMIZE_ADDRESS
1865 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1866 static rtx
1867 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1868 machine_mode mode)
1870 #if DEBUG0
1871 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1872 debug_rtx (x);
1873 fprintf (stderr, "\n");
1874 #endif
1876 if (GET_CODE (x) == PLUS
1877 && GET_CODE (XEXP (x, 0)) == REG
1878 && REGNO (XEXP (x, 0)) == FB_REGNO
1879 && GET_CODE (XEXP (x, 1)) == CONST_INT
1880 && (INTVAL (XEXP (x, 1)) < -128
1881 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1883 /* reload FB to A_REGS */
1884 rtx temp = gen_reg_rtx (Pmode);
1885 x = copy_rtx (x);
1886 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1887 XEXP (x, 0) = temp;
1890 return x;
1893 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1895 m32c_legitimize_reload_address (rtx * x,
1896 machine_mode mode,
1897 int opnum,
1898 int type, int ind_levels ATTRIBUTE_UNUSED)
1900 #if DEBUG0
1901 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1902 mode_name[mode]);
1903 debug_rtx (*x);
1904 #endif
1906 /* At one point, this function tried to get $fb copied to an address
1907 register, which in theory would maximize sharing, but gcc was
1908 *also* still trying to reload the whole address, and we'd run out
1909 of address registers. So we let gcc do the naive (but safe)
1910 reload instead, when the above function doesn't handle it for us.
1913 The code below is a second attempt at the above. */
1915 if (GET_CODE (*x) == PLUS
1916 && GET_CODE (XEXP (*x, 0)) == REG
1917 && REGNO (XEXP (*x, 0)) == FB_REGNO
1918 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1919 && (INTVAL (XEXP (*x, 1)) < -128
1920 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1922 rtx sum;
1923 int offset = INTVAL (XEXP (*x, 1));
1924 int adjustment = -BIG_FB_ADJ;
1926 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1927 GEN_INT (adjustment));
1928 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1929 if (type == RELOAD_OTHER)
1930 type = RELOAD_FOR_OTHER_ADDRESS;
1931 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1932 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1933 (enum reload_type) type);
1934 return 1;
1937 if (GET_CODE (*x) == PLUS
1938 && GET_CODE (XEXP (*x, 0)) == PLUS
1939 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1940 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1941 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1942 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1945 if (type == RELOAD_OTHER)
1946 type = RELOAD_FOR_OTHER_ADDRESS;
1947 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1948 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1949 (enum reload_type) type);
1950 return 1;
1953 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1955 push_reload (*x, NULL_RTX, x, NULL,
1956 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1957 (enum reload_type) type);
1958 return 1;
1961 return 0;
1964 /* Return the appropriate pointer mode for a named address space. */
1965 #undef TARGET_ADDR_SPACE_POINTER_MODE
1966 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1967 static machine_mode
1968 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1970 switch (addrspace)
1972 case ADDR_SPACE_GENERIC:
1973 return TARGET_A24 ? PSImode : HImode;
1974 case ADDR_SPACE_FAR:
1975 return SImode;
1976 default:
1977 gcc_unreachable ();
1981 /* Return the appropriate address mode for a named address space. */
1982 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1983 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1984 static machine_mode
1985 m32c_addr_space_address_mode (addr_space_t addrspace)
1987 switch (addrspace)
1989 case ADDR_SPACE_GENERIC:
1990 return TARGET_A24 ? PSImode : HImode;
1991 case ADDR_SPACE_FAR:
1992 return SImode;
1993 default:
1994 gcc_unreachable ();
1998 /* Like m32c_legitimate_address_p, except with named addresses. */
1999 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
2000 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
2001 m32c_addr_space_legitimate_address_p
2002 static bool
2003 m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
2004 bool strict, addr_space_t as)
2006 if (as == ADDR_SPACE_FAR)
2008 if (TARGET_A24)
2009 return 0;
2010 encode_pattern (x);
2011 if (RTX_IS ("r"))
2013 if (GET_MODE (x) != SImode)
2014 return 0;
2015 switch (REGNO (patternr[0]))
2017 case A0_REGNO:
2018 return 1;
2020 default:
2021 if (IS_PSEUDO (patternr[0], strict))
2022 return 1;
2023 return 0;
2026 if (RTX_IS ("+^Sri"))
2028 int rn = REGNO (patternr[3]);
2029 HOST_WIDE_INT offs = INTVAL (patternr[4]);
2030 if (GET_MODE (patternr[3]) != HImode)
2031 return 0;
2032 switch (rn)
2034 case A0_REGNO:
2035 return (offs >= 0 && offs <= 0xfffff);
2037 default:
2038 if (IS_PSEUDO (patternr[3], strict))
2039 return 1;
2040 return 0;
2043 if (RTX_IS ("+^Srs"))
2045 int rn = REGNO (patternr[3]);
2046 if (GET_MODE (patternr[3]) != HImode)
2047 return 0;
2048 switch (rn)
2050 case A0_REGNO:
2051 return 1;
2053 default:
2054 if (IS_PSEUDO (patternr[3], strict))
2055 return 1;
2056 return 0;
2059 if (RTX_IS ("+^S+ris"))
2061 int rn = REGNO (patternr[4]);
2062 if (GET_MODE (patternr[4]) != HImode)
2063 return 0;
2064 switch (rn)
2066 case A0_REGNO:
2067 return 1;
2069 default:
2070 if (IS_PSEUDO (patternr[4], strict))
2071 return 1;
2072 return 0;
2075 if (RTX_IS ("s"))
2077 return 1;
2079 return 0;
2082 else if (as != ADDR_SPACE_GENERIC)
2083 gcc_unreachable ();
2085 return m32c_legitimate_address_p (mode, x, strict);
2088 /* Like m32c_legitimize_address, except with named address support. */
2089 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2090 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2091 static rtx
2092 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2093 addr_space_t as)
2095 if (as != ADDR_SPACE_GENERIC)
2097 #if DEBUG0
2098 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2099 debug_rtx (x);
2100 fprintf (stderr, "\n");
2101 #endif
2103 if (GET_CODE (x) != REG)
2105 x = force_reg (SImode, x);
2107 return x;
2110 return m32c_legitimize_address (x, oldx, mode);
2113 /* Determine if one named address space is a subset of another. */
2114 #undef TARGET_ADDR_SPACE_SUBSET_P
2115 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2116 static bool
2117 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2119 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2120 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2122 if (subset == superset)
2123 return true;
2125 else
2126 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2129 #undef TARGET_ADDR_SPACE_CONVERT
2130 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2131 /* Convert from one address space to another. */
2132 static rtx
2133 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2135 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2136 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2137 rtx result;
2139 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2140 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2142 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2144 /* This is unpredictable, as we're truncating off usable address
2145 bits. */
2147 result = gen_reg_rtx (HImode);
2148 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2149 return result;
2151 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2153 /* This always works. */
2154 result = gen_reg_rtx (SImode);
2155 emit_insn (gen_zero_extendhisi2 (result, op));
2156 return result;
2158 else
2159 gcc_unreachable ();
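/* Sketch of what the conversions above expand to, assuming a 16-bit
   generic pointer (and again the __far spelling): casting a __far
   pointer to a generic one keeps only the low 16 bits (a lowpart
   subreg to HImode, hence "unpredictable"), while casting a generic
   pointer to __far zero-extends it to SImode with zero_extendhisi2.  */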
2162 /* Condition Code Status */
2164 #undef TARGET_FIXED_CONDITION_CODE_REGS
2165 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2166 static bool
2167 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2169 *p1 = FLG_REGNO;
2170 *p2 = INVALID_REGNUM;
2171 return true;
2174 /* Describing Relative Costs of Operations */
2176 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2177 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2178 no opcodes to do that). We also discourage use of mem* registers
2179 since they're really memory. */
2181 #undef TARGET_REGISTER_MOVE_COST
2182 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2184 static int
2185 m32c_register_move_cost (machine_mode mode, reg_class_t from,
2186 reg_class_t to)
2188 int cost = COSTS_N_INSNS (3);
2189 HARD_REG_SET cc;
2191 /* FIXME: pick real values, but not 2 for now. */
2192 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2193 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2195 if (mode == QImode
2196 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2198 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2199 cost = COSTS_N_INSNS (1000);
2200 else
2201 cost = COSTS_N_INSNS (80);
2204 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2205 cost = COSTS_N_INSNS (1000);
2207 if (reg_classes_intersect_p (from, CR_REGS))
2208 cost += COSTS_N_INSNS (5);
2210 if (reg_classes_intersect_p (to, CR_REGS))
2211 cost += COSTS_N_INSNS (5);
2213 if (from == MEM_REGS || to == MEM_REGS)
2214 cost += COSTS_N_INSNS (50);
2215 else if (reg_classes_intersect_p (from, MEM_REGS)
2216 || reg_classes_intersect_p (to, MEM_REGS))
2217 cost += COSTS_N_INSNS (10);
2219 #if DEBUG0
2220 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2221 mode_name[mode], class_names[(int) from], class_names[(int) to],
2222 cost);
2223 #endif
2224 return cost;
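/* For example, with the numbers above: a QImode move where both
   classes lie entirely within R23_REGS is costed at 1000 insns,
   effectively forbidding it, since r2/r3 have no byte opcodes; a move
   where either class is exactly MEM_REGS adds 50 more.  */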
2227 /* Implements TARGET_MEMORY_MOVE_COST. */
2229 #undef TARGET_MEMORY_MOVE_COST
2230 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2232 static int
2233 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2234 reg_class_t rclass ATTRIBUTE_UNUSED,
2235 bool in ATTRIBUTE_UNUSED)
2237 /* FIXME: pick real values. */
2238 return COSTS_N_INSNS (10);
2241 /* Here we try to describe when we use multiple opcodes for one RTX so
2242 that gcc knows when to use them. */
2243 #undef TARGET_RTX_COSTS
2244 #define TARGET_RTX_COSTS m32c_rtx_costs
2245 static bool
2246 m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2247 int *total, bool speed ATTRIBUTE_UNUSED)
2249 switch (code)
2251 case REG:
2252 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2253 *total += COSTS_N_INSNS (500);
2254 else
2255 *total += COSTS_N_INSNS (1);
2256 return true;
2258 case ASHIFT:
2259 case LSHIFTRT:
2260 case ASHIFTRT:
2261 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2263 /* mov.b r1l, r1h */
2264 *total += COSTS_N_INSNS (1);
2265 return true;
2267 if (INTVAL (XEXP (x, 1)) > 8
2268 || INTVAL (XEXP (x, 1)) < -8)
2270 /* mov.b #N, r1l */
2271 /* mov.b r1l, r1h */
2272 *total += COSTS_N_INSNS (2);
2273 return true;
2275 return true;
2277 case LE:
2278 case LEU:
2279 case LT:
2280 case LTU:
2281 case GT:
2282 case GTU:
2283 case GE:
2284 case GEU:
2285 case NE:
2286 case EQ:
2287 if (outer_code == SET)
2289 *total += COSTS_N_INSNS (2);
2290 return true;
2292 break;
2294 case ZERO_EXTRACT:
2296 rtx dest = XEXP (x, 0);
2297 rtx addr = XEXP (dest, 0);
2298 switch (GET_CODE (addr))
2300 case CONST_INT:
2301 *total += COSTS_N_INSNS (1);
2302 break;
2303 case SYMBOL_REF:
2304 *total += COSTS_N_INSNS (3);
2305 break;
2306 default:
2307 *total += COSTS_N_INSNS (2);
2308 break;
2310 return true;
2312 break;
2314 default:
2315 /* Reasonable default. */
2316 if (TARGET_A16 && GET_MODE(x) == SImode)
2317 *total += COSTS_N_INSNS (2);
2318 break;
2320 return false;
2323 #undef TARGET_ADDRESS_COST
2324 #define TARGET_ADDRESS_COST m32c_address_cost
2325 static int
2326 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2327 addr_space_t as ATTRIBUTE_UNUSED,
2328 bool speed ATTRIBUTE_UNUSED)
2330 int i;
2331 /* fprintf(stderr, "\naddress_cost\n");
2332 debug_rtx(addr);*/
2333 switch (GET_CODE (addr))
2335 case CONST_INT:
2336 i = INTVAL (addr);
2337 if (i == 0)
2338 return COSTS_N_INSNS(1);
2339 if (0 < i && i <= 255)
2340 return COSTS_N_INSNS(2);
2341 if (0 < i && i <= 65535)
2342 return COSTS_N_INSNS(3);
2343 return COSTS_N_INSNS(4);
2344 case SYMBOL_REF:
2345 return COSTS_N_INSNS(4);
2346 case REG:
2347 return COSTS_N_INSNS(1);
2348 case PLUS:
2349 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2351 i = INTVAL (XEXP (addr, 1));
2352 if (i == 0)
2353 return COSTS_N_INSNS(1);
2354 if (0 < i && i <= 255)
2355 return COSTS_N_INSNS(2);
2356 if (0 < i && i <= 65535)
2357 return COSTS_N_INSNS(3);
2359 return COSTS_N_INSNS(4);
2360 default:
2361 return 0;
2365 /* Defining the Output Assembler Language */
2367 /* Output of Data */
2369 /* We may have 24-bit values, since that is the native address size.
2370 Currently unused, but provided for completeness. */
2371 #undef TARGET_ASM_INTEGER
2372 #define TARGET_ASM_INTEGER m32c_asm_integer
2373 static bool
2374 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2376 switch (size)
2378 case 3:
2379 fprintf (asm_out_file, "\t.3byte\t");
2380 output_addr_const (asm_out_file, x);
2381 fputc ('\n', asm_out_file);
2382 return true;
2383 case 4:
2384 if (GET_CODE (x) == SYMBOL_REF)
2386 fprintf (asm_out_file, "\t.long\t");
2387 output_addr_const (asm_out_file, x);
2388 fputc ('\n', asm_out_file);
2389 return true;
2391 break;
2393 return default_assemble_integer (x, size, aligned_p);
2396 /* Output of Assembler Instructions */
2398 /* We use a lookup table because the addressing modes are non-orthogonal. */
2400 static struct
2402 char code;
2403 char const *pattern;
2404 char const *format;
2406 const conversions[] = {
2407 { 0, "r", "0" },
2409 { 0, "mr", "z[1]" },
2410 { 0, "m+ri", "3[2]" },
2411 { 0, "m+rs", "3[2]" },
2412 { 0, "m+^Zrs", "5[4]" },
2413 { 0, "m+^Zri", "5[4]" },
2414 { 0, "m+^Z+ris", "7+6[5]" },
2415 { 0, "m+^Srs", "5[4]" },
2416 { 0, "m+^Sri", "5[4]" },
2417 { 0, "m+^S+ris", "7+6[5]" },
2418 { 0, "m+r+si", "4+5[2]" },
2419 { 0, "ms", "1" },
2420 { 0, "mi", "1" },
2421 { 0, "m+si", "2+3" },
2423 { 0, "mmr", "[z[2]]" },
2424 { 0, "mm+ri", "[4[3]]" },
2425 { 0, "mm+rs", "[4[3]]" },
2426 { 0, "mm+r+si", "[5+6[3]]" },
2427 { 0, "mms", "[[2]]" },
2428 { 0, "mmi", "[[2]]" },
2429 { 0, "mm+si", "[4[3]]" },
2431 { 0, "i", "#0" },
2432 { 0, "s", "#0" },
2433 { 0, "+si", "#1+2" },
2434 { 0, "l", "#0" },
2436 { 'l', "l", "0" },
2437 { 'd', "i", "0" },
2438 { 'd', "s", "0" },
2439 { 'd', "+si", "1+2" },
2440 { 'D', "i", "0" },
2441 { 'D', "s", "0" },
2442 { 'D', "+si", "1+2" },
2443 { 'x', "i", "#0" },
2444 { 'X', "i", "#0" },
2445 { 'm', "i", "#0" },
2446 { 'b', "i", "#0" },
2447 { 'B', "i", "0" },
2448 { 'p', "i", "0" },
2450 { 0, 0, 0 }
2453 /* This is in order according to the bitfield that pushm/popm use. */
2454 static char const *pushm_regs[] = {
2455 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
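/* For example, a pushm/popm mask of 0xc0 has bits 7 and 6 set, so the
   'p' operand code below prints "r0,r1" (the table is indexed from
   bit 7 down to bit 0).  */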
2458 /* Implements TARGET_PRINT_OPERAND. */
2460 #undef TARGET_PRINT_OPERAND
2461 #define TARGET_PRINT_OPERAND m32c_print_operand
2463 static void
2464 m32c_print_operand (FILE * file, rtx x, int code)
2466 int i, j, b;
2467 const char *comma;
2468 HOST_WIDE_INT ival;
2469 int unsigned_const = 0;
2470 int force_sign;
2472 /* Multiplies; constants are converted to sign-extended format but
2473 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2474 need. */
2475 if (code == 'u')
2477 unsigned_const = 2;
2478 code = 0;
2480 if (code == 'U')
2482 unsigned_const = 1;
2483 code = 0;
2485 /* This one is only for debugging; you can put it in a pattern to
2486 force this error. */
2487 if (code == '!')
2489 fprintf (stderr, "dj: unreviewed pattern:");
2490 if (current_output_insn)
2491 debug_rtx (current_output_insn);
2492 gcc_unreachable ();
2494 /* PSImode operations are either .w or .l depending on the target. */
2495 if (code == '&')
2497 if (TARGET_A16)
2498 fprintf (file, "w");
2499 else
2500 fprintf (file, "l");
2501 return;
2503 /* Inverted conditionals. */
2504 if (code == 'C')
2506 switch (GET_CODE (x))
2508 case LE:
2509 fputs ("gt", file);
2510 break;
2511 case LEU:
2512 fputs ("gtu", file);
2513 break;
2514 case LT:
2515 fputs ("ge", file);
2516 break;
2517 case LTU:
2518 fputs ("geu", file);
2519 break;
2520 case GT:
2521 fputs ("le", file);
2522 break;
2523 case GTU:
2524 fputs ("leu", file);
2525 break;
2526 case GE:
2527 fputs ("lt", file);
2528 break;
2529 case GEU:
2530 fputs ("ltu", file);
2531 break;
2532 case NE:
2533 fputs ("eq", file);
2534 break;
2535 case EQ:
2536 fputs ("ne", file);
2537 break;
2538 default:
2539 gcc_unreachable ();
2541 return;
2543 /* Regular conditionals. */
2544 if (code == 'c')
2546 switch (GET_CODE (x))
2548 case LE:
2549 fputs ("le", file);
2550 break;
2551 case LEU:
2552 fputs ("leu", file);
2553 break;
2554 case LT:
2555 fputs ("lt", file);
2556 break;
2557 case LTU:
2558 fputs ("ltu", file);
2559 break;
2560 case GT:
2561 fputs ("gt", file);
2562 break;
2563 case GTU:
2564 fputs ("gtu", file);
2565 break;
2566 case GE:
2567 fputs ("ge", file);
2568 break;
2569 case GEU:
2570 fputs ("geu", file);
2571 break;
2572 case NE:
2573 fputs ("ne", file);
2574 break;
2575 case EQ:
2576 fputs ("eq", file);
2577 break;
2578 default:
2579 gcc_unreachable ();
2581 return;
2583 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2584 operand. */
2585 if (code == 'h' && GET_MODE (x) == SImode)
2587 x = m32c_subreg (HImode, x, SImode, 0);
2588 code = 0;
2590 if (code == 'H' && GET_MODE (x) == SImode)
2592 x = m32c_subreg (HImode, x, SImode, 2);
2593 code = 0;
2595 if (code == 'h' && GET_MODE (x) == HImode)
2597 x = m32c_subreg (QImode, x, HImode, 0);
2598 code = 0;
2600 if (code == 'H' && GET_MODE (x) == HImode)
2602 /* We can't actually represent this as an rtx. Do it here. */
2603 if (GET_CODE (x) == REG)
2605 switch (REGNO (x))
2607 case R0_REGNO:
2608 fputs ("r0h", file);
2609 return;
2610 case R1_REGNO:
2611 fputs ("r1h", file);
2612 return;
2613 default:
2614 gcc_unreachable();
2617 /* This should be a MEM. */
2618 x = m32c_subreg (QImode, x, HImode, 1);
2619 code = 0;
2621 /* This is for BMcond, which always wants word register names. */
2622 if (code == 'h' && GET_MODE (x) == QImode)
2624 if (GET_CODE (x) == REG)
2625 x = gen_rtx_REG (HImode, REGNO (x));
2626 code = 0;
2628 /* 'x' and 'X' need to be ignored for non-immediates. */
2629 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2630 code = 0;
2632 encode_pattern (x);
2633 force_sign = 0;
2634 for (i = 0; conversions[i].pattern; i++)
2635 if (conversions[i].code == code
2636 && streq (conversions[i].pattern, pattern))
2638 for (j = 0; conversions[i].format[j]; j++)
2639 /* backslash quotes the next character in the output pattern. */
2640 if (conversions[i].format[j] == '\\')
2642 fputc (conversions[i].format[j + 1], file);
2643 j++;
2645 /* Digits in the output pattern indicate that the
2646 corresponding RTX is to be output at that point. */
2647 else if (ISDIGIT (conversions[i].format[j]))
2649 rtx r = patternr[conversions[i].format[j] - '0'];
2650 switch (GET_CODE (r))
2652 case REG:
2653 fprintf (file, "%s",
2654 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2655 break;
2656 case CONST_INT:
2657 switch (code)
2659 case 'b':
2660 case 'B':
2662 int v = INTVAL (r);
2663 int i = (int) exact_log2 (v);
2664 if (i == -1)
2665 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2666 if (i == -1)
2667 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2668 /* Bit position. */
2669 fprintf (file, "%d", i);
2671 break;
2672 case 'x':
2673 /* Unsigned byte. */
2674 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2675 INTVAL (r) & 0xff);
2676 break;
2677 case 'X':
2678 /* Unsigned word. */
2679 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2680 INTVAL (r) & 0xffff);
2681 break;
2682 case 'p':
2683 /* pushm and popm encode a register set into a single byte. */
2684 comma = "";
2685 for (b = 7; b >= 0; b--)
2686 if (INTVAL (r) & (1 << b))
2688 fprintf (file, "%s%s", comma, pushm_regs[b]);
2689 comma = ",";
2691 break;
2692 case 'm':
2693 /* "Minus". Output -X */
2694 ival = (-INTVAL (r) & 0xffff);
2695 if (ival & 0x8000)
2696 ival = ival - 0x10000;
2697 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2698 break;
2699 default:
2700 ival = INTVAL (r);
2701 if (conversions[i].format[j + 1] == '[' && ival < 0)
2703 /* We can simulate negative displacements by
2704 taking advantage of address space
2705 wrapping when the offset can span the
2706 entire address range. */
2707 rtx base =
2708 patternr[conversions[i].format[j + 2] - '0'];
2709 if (GET_CODE (base) == REG)
2710 switch (REGNO (base))
2712 case A0_REGNO:
2713 case A1_REGNO:
2714 if (TARGET_A24)
2715 ival = 0x1000000 + ival;
2716 else
2717 ival = 0x10000 + ival;
2718 break;
2719 case SB_REGNO:
2720 if (TARGET_A16)
2721 ival = 0x10000 + ival;
2722 break;
2725 else if (code == 'd' && ival < 0 && j == 0)
2726 /* The "mova" opcode is used to do addition by
2727 computing displacements, but again, we need
2728 displacements to be unsigned *if* they're
2729 the only component of the displacement
2730 (i.e. no "symbol-4" type displacement). */
2731 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2733 if (conversions[i].format[j] == '0')
2735 /* More conversions to unsigned. */
2736 if (unsigned_const == 2)
2737 ival &= 0xffff;
2738 if (unsigned_const == 1)
2739 ival &= 0xff;
2741 if (streq (conversions[i].pattern, "mi")
2742 || streq (conversions[i].pattern, "mmi"))
2744 /* Integers used as addresses are unsigned. */
2745 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2747 if (force_sign && ival >= 0)
2748 fputc ('+', file);
2749 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2750 break;
2752 break;
2753 case CONST_DOUBLE:
2754 /* We don't have const_double constants. If it
2755 happens, make it obvious. */
2756 fprintf (file, "[const_double 0x%lx]",
2757 (unsigned long) CONST_DOUBLE_HIGH (r));
2758 break;
2759 case SYMBOL_REF:
2760 assemble_name (file, XSTR (r, 0));
2761 break;
2762 case LABEL_REF:
2763 output_asm_label (r);
2764 break;
2765 default:
2766 fprintf (stderr, "don't know how to print this operand:");
2767 debug_rtx (r);
2768 gcc_unreachable ();
2771 else
2773 if (conversions[i].format[j] == 'z')
2775 /* Some addressing modes *must* have a displacement,
2776 so insert a zero here if needed. */
2777 int k;
2778 for (k = j + 1; conversions[i].format[k]; k++)
2779 if (ISDIGIT (conversions[i].format[k]))
2781 rtx reg = patternr[conversions[i].format[k] - '0'];
2782 if (GET_CODE (reg) == REG
2783 && (REGNO (reg) == SB_REGNO
2784 || REGNO (reg) == FB_REGNO
2785 || REGNO (reg) == SP_REGNO))
2786 fputc ('0', file);
2788 continue;
2790 /* Signed displacements off symbols need to have signs
2791 blended cleanly. */
2792 if (conversions[i].format[j] == '+'
2793 && (!code || code == 'D' || code == 'd')
2794 && ISDIGIT (conversions[i].format[j + 1])
2795 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2796 == CONST_INT))
2798 force_sign = 1;
2799 continue;
2801 fputc (conversions[i].format[j], file);
2803 break;
2805 if (!conversions[i].pattern)
2807 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2808 pattern);
2809 debug_rtx (x);
2810 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2813 return;
2816 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2818 See m32c_print_operand above for descriptions of what these do. */
2820 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2821 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2823 static bool
2824 m32c_print_operand_punct_valid_p (unsigned char c)
2826 if (c == '&' || c == '!')
2827 return true;
2829 return false;
2832 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2834 #undef TARGET_PRINT_OPERAND_ADDRESS
2835 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2837 static void
2838 m32c_print_operand_address (FILE * stream, rtx address)
2840 if (GET_CODE (address) == MEM)
2841 address = XEXP (address, 0);
2842 else
2843 /* cf: gcc.dg/asm-4.c. */
2844 gcc_assert (GET_CODE (address) == REG);
2846 m32c_print_operand (stream, address, 0);
2849 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2850 differently than general registers. */
2851 void
2852 m32c_output_reg_push (FILE * s, int regno)
2854 if (regno == FLG_REGNO)
2855 fprintf (s, "\tpushc\tflg\n");
2856 else
2857 fprintf (s, "\tpush.%c\t%s\n",
2858 " bwll"[reg_push_size (regno)], reg_names[regno]);
2861 /* Likewise for ASM_OUTPUT_REG_POP. */
2862 void
2863 m32c_output_reg_pop (FILE * s, int regno)
2865 if (regno == FLG_REGNO)
2866 fprintf (s, "\tpopc\tflg\n");
2867 else
2868 fprintf (s, "\tpop.%c\t%s\n",
2869 " bwll"[reg_push_size (regno)], reg_names[regno]);
2872 /* Defining target-specific uses of `__attribute__' */
2874 /* Used to simplify the logic below. Find the attributes wherever
2875 they may be. */
2876 #define M32C_ATTRIBUTES(decl) \
2877 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2878 : DECL_ATTRIBUTES (decl) \
2879 ? (DECL_ATTRIBUTES (decl)) \
2880 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2882 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2883 static int
2884 interrupt_p (tree node ATTRIBUTE_UNUSED)
2886 tree list = M32C_ATTRIBUTES (node);
2887 while (list)
2889 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2890 return 1;
2891 list = TREE_CHAIN (list);
2893 return fast_interrupt_p (node);
2896 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2897 static int
2898 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2900 tree list = M32C_ATTRIBUTES (node);
2901 while (list)
2903 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2904 return 1;
2905 list = TREE_CHAIN (list);
2907 return 0;
2910 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2911 static int
2912 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2914 tree list = M32C_ATTRIBUTES (node);
2915 while (list)
2917 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2918 return 1;
2919 list = TREE_CHAIN (list);
2921 return 0;
2924 static tree
2925 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2926 tree name ATTRIBUTE_UNUSED,
2927 tree args ATTRIBUTE_UNUSED,
2928 int flags ATTRIBUTE_UNUSED,
2929 bool * no_add_attrs ATTRIBUTE_UNUSED)
2931 return NULL_TREE;
2934 /* Returns TRUE if the given tree has the "function_vector" attribute. */
2936 m32c_special_page_vector_p (tree func)
2938 tree list;
2940 if (TREE_CODE (func) != FUNCTION_DECL)
2941 return 0;
2943 list = M32C_ATTRIBUTES (func);
2944 while (list)
2946 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2947 return 1;
2948 list = TREE_CHAIN (list);
2950 return 0;
2953 static tree
2954 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2955 tree name ATTRIBUTE_UNUSED,
2956 tree args ATTRIBUTE_UNUSED,
2957 int flags ATTRIBUTE_UNUSED,
2958 bool * no_add_attrs ATTRIBUTE_UNUSED)
2960 if (TARGET_R8C)
2962 /* The attribute is not supported for the R8C target. */
2963 warning (OPT_Wattributes,
2964 "%qE attribute is not supported for the R8C target",
2965 name);
2966 *no_add_attrs = true;
2968 else if (TREE_CODE (*node) != FUNCTION_DECL)
2970 /* The attribute must be applied to functions only. */
2971 warning (OPT_Wattributes,
2972 "%qE attribute applies only to functions",
2973 name);
2974 *no_add_attrs = true;
2976 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2978 /* The argument must be a constant integer. */
2979 warning (OPT_Wattributes,
2980 "%qE attribute argument not an integer constant",
2981 name);
2982 *no_add_attrs = true;
2984 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2985 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2987 /* The argument value must be between 18 and 255. */
2988 warning (OPT_Wattributes,
2989 "%qE attribute argument should be between 18 and 255",
2990 name);
2991 *no_add_attrs = true;
2993 return NULL_TREE;
2996 /* If the function is assigned the attribute 'function_vector', it
2997 returns the function vector number; otherwise it returns zero. */
2999 current_function_special_page_vector (rtx x)
3001 int num;
3003 if ((GET_CODE(x) == SYMBOL_REF)
3004 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3006 tree list;
3007 tree t = SYMBOL_REF_DECL (x);
3009 if (TREE_CODE (t) != FUNCTION_DECL)
3010 return 0;
3012 list = M32C_ATTRIBUTES (t);
3013 while (list)
3015 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3017 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3018 return num;
3021 list = TREE_CHAIN (list);
3024 return 0;
3026 else
3027 return 0;
3030 #undef TARGET_ATTRIBUTE_TABLE
3031 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3032 static const struct attribute_spec m32c_attribute_table[] = {
3033 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3034 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3035 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3036 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3037 false},
3038 {0, 0, 0, 0, 0, 0, 0, false}
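/* Illustrative use of the attributes registered above (a sketch, not
   part of this file):

     void __attribute__((interrupt)) timer_isr (void);
     void __attribute__((function_vector(20))) helper (void);

   function_vector takes one integer argument, checked above to lie in
   18..255; the others take none.  */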
3041 #undef TARGET_COMP_TYPE_ATTRIBUTES
3042 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3043 static int
3044 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3045 const_tree type2 ATTRIBUTE_UNUSED)
3047 /* 0=incompatible 1=compatible 2=warning */
3048 return 1;
3051 #undef TARGET_INSERT_ATTRIBUTES
3052 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3053 static void
3054 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3055 tree * attr_ptr ATTRIBUTE_UNUSED)
3057 unsigned addr;
3058 /* See if we need to make #pragma address variables volatile. */
3060 if (TREE_CODE (node) == VAR_DECL)
3062 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3063 if (m32c_get_pragma_address (name, &addr))
3065 TREE_THIS_VOLATILE (node) = true;
3071 struct pragma_traits : default_hashmap_traits
3073 static hashval_t hash (const char *str) { return htab_hash_string (str); }
3074 static bool
3075 equal_keys (const char *a, const char *b)
3077 return !strcmp (a, b);
3081 /* Hash table of pragma info. */
3082 static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
3084 void
3085 m32c_note_pragma_address (const char *varname, unsigned address)
3087 if (!pragma_htab)
3088 pragma_htab
3089 = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);
3091 const char *name = ggc_strdup (varname);
3092 unsigned int *slot = &pragma_htab->get_or_insert (name);
3093 *slot = address;
3096 static bool
3097 m32c_get_pragma_address (const char *varname, unsigned *address)
3099 if (!pragma_htab)
3100 return false;
3102 unsigned int *slot = pragma_htab->get (varname);
3103 if (slot)
3105 *address = *slot;
3106 return true;
3108 return false;
3111 void
3112 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3113 const char *name,
3114 int size, int align, int global)
3116 unsigned address;
3118 if (m32c_get_pragma_address (name, &address))
3120 /* We never output these as global. */
3121 assemble_name (stream, name);
3122 fprintf (stream, " = 0x%04x\n", address);
3123 return;
3125 if (!global)
3127 fprintf (stream, "\t.local\t");
3128 assemble_name (stream, name);
3129 fprintf (stream, "\n");
3131 fprintf (stream, "\t.comm\t");
3132 assemble_name (stream, name);
3133 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
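/* Sketch of the effect: given something like

     #pragma address port0 0x3e8

   (see m32c-pragma.c for the exact pragma registration), port0 is made
   volatile by m32c_insert_attributes above and is emitted here as

     port0 = 0x03e8

   instead of a .local/.comm definition.  */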
3136 /* Predicates */
3138 /* This is a list of legal subregs of hard regs. */
3139 static const struct {
3140 unsigned char outer_mode_size;
3141 unsigned char inner_mode_size;
3142 unsigned char byte_mask;
3143 unsigned char legal_when;
3144 unsigned int regno;
3145 } legal_subregs[] = {
3146 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3147 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3148 {1, 2, 0x01, 1, A0_REGNO},
3149 {1, 2, 0x01, 1, A1_REGNO},
3151 {1, 4, 0x01, 1, A0_REGNO},
3152 {1, 4, 0x01, 1, A1_REGNO},
3154 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3155 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3156 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3157 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3158 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3160 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
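/* Reading the table: the first entry says a 1-byte subreg of the
   2-byte r0 is legal at byte 0 or 1 (mask 0x03), i.e. r0l/r0h.
   Entries whose legal_when is 16 or 24 apply only when TARGET_A16 or
   TARGET_A24, respectively; legal_when 1 means always.  */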
3163 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3164 support. We also bail on MEMs with illegal addresses. */
3165 bool
3166 m32c_illegal_subreg_p (rtx op)
3168 int offset;
3169 unsigned int i;
3170 machine_mode src_mode, dest_mode;
3172 if (GET_CODE (op) == MEM
3173 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3175 return true;
3178 if (GET_CODE (op) != SUBREG)
3179 return false;
3181 dest_mode = GET_MODE (op);
3182 offset = SUBREG_BYTE (op);
3183 op = SUBREG_REG (op);
3184 src_mode = GET_MODE (op);
3186 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3187 return false;
3188 if (GET_CODE (op) != REG)
3189 return false;
3190 if (REGNO (op) >= MEM0_REGNO)
3191 return false;
3193 offset = (1 << offset);
3195 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3196 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3197 && legal_subregs[i].regno == REGNO (op)
3198 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3199 && legal_subregs[i].byte_mask & offset)
3201 switch (legal_subregs[i].legal_when)
3203 case 1:
3204 return false;
3205 case 16:
3206 if (TARGET_A16)
3207 return false;
3208 break;
3209 case 24:
3210 if (TARGET_A24)
3211 return false;
3212 break;
3215 return true;
3218 /* Returns TRUE if we support a move between the first two operands.
3219 At the moment, we just want to discourage mem to mem moves until
3220 after reload, because reload has a hard time with our limited
3221 number of address registers, and we can get into a situation where
3222 we need three of them when we only have two. */
3223 bool
3224 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3226 rtx op0 = operands[0];
3227 rtx op1 = operands[1];
3229 if (TARGET_A24)
3230 return true;
3232 #define DEBUG_MOV_OK 0
3233 #if DEBUG_MOV_OK
3234 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3235 debug_rtx (op0);
3236 debug_rtx (op1);
3237 #endif
3239 if (GET_CODE (op0) == SUBREG)
3240 op0 = XEXP (op0, 0);
3241 if (GET_CODE (op1) == SUBREG)
3242 op1 = XEXP (op1, 0);
3244 if (GET_CODE (op0) == MEM
3245 && GET_CODE (op1) == MEM
3246 && ! reload_completed)
3248 #if DEBUG_MOV_OK
3249 fprintf (stderr, " - no, mem to mem\n");
3250 #endif
3251 return false;
3254 #if DEBUG_MOV_OK
3255 fprintf (stderr, " - ok\n");
3256 #endif
3257 return true;
3260 /* Returns TRUE if two consecutive HImode mov instructions, generated
3261 for moving an immediate double-word value into a double-word variable,
3262 can be combined into a single SImode mov instruction. */
3263 bool
3264 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3265 machine_mode mode ATTRIBUTE_UNUSED)
3267 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3268 flags. */
3269 return false;
3272 /* Expanders */
3274 /* Subregs are non-orthogonal for us, because our registers are all
3275 different sizes. */
3276 static rtx
3277 m32c_subreg (machine_mode outer,
3278 rtx x, machine_mode inner, int byte)
3280 int r, nr = -1;
3282 /* Converting MEMs to different types that are the same size, we
3283 just rewrite them. */
3284 if (GET_CODE (x) == SUBREG
3285 && SUBREG_BYTE (x) == 0
3286 && GET_CODE (SUBREG_REG (x)) == MEM
3287 && (GET_MODE_SIZE (GET_MODE (x))
3288 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3290 rtx oldx = x;
3291 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3292 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3295 /* Push/pop get done as smaller push/pops. */
3296 if (GET_CODE (x) == MEM
3297 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3298 || GET_CODE (XEXP (x, 0)) == POST_INC))
3299 return gen_rtx_MEM (outer, XEXP (x, 0));
3300 if (GET_CODE (x) == SUBREG
3301 && GET_CODE (XEXP (x, 0)) == MEM
3302 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3303 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3304 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3306 if (GET_CODE (x) != REG)
3308 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3309 if (GET_CODE (r) == SUBREG
3310 && GET_CODE (x) == MEM
3311 && MEM_VOLATILE_P (x))
3313 /* Volatile MEMs don't get simplified, but we need them to
3314 be. We are little endian, so the subreg byte is the
3315 offset. */
3316 r = adjust_address_nv (x, outer, byte);
3318 return r;
3321 r = REGNO (x);
3322 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3323 return simplify_gen_subreg (outer, x, inner, byte);
3325 if (IS_MEM_REGNO (r))
3326 return simplify_gen_subreg (outer, x, inner, byte);
3328 /* This is where the complexities of our register layout are
3329 described. */
3330 if (byte == 0)
3331 nr = r;
3332 else if (outer == HImode)
3334 if (r == R0_REGNO && byte == 2)
3335 nr = R2_REGNO;
3336 else if (r == R0_REGNO && byte == 4)
3337 nr = R1_REGNO;
3338 else if (r == R0_REGNO && byte == 6)
3339 nr = R3_REGNO;
3340 else if (r == R1_REGNO && byte == 2)
3341 nr = R3_REGNO;
3342 else if (r == A0_REGNO && byte == 2)
3343 nr = A1_REGNO;
3345 else if (outer == SImode)
3347 if (r == R0_REGNO && byte == 0)
3348 nr = R0_REGNO;
3349 else if (r == R0_REGNO && byte == 4)
3350 nr = R1_REGNO;
3352 if (nr == -1)
3354 fprintf (stderr, "m32c_subreg %s %s %d\n",
3355 mode_name[outer], mode_name[inner], byte);
3356 debug_rtx (x);
3357 gcc_unreachable ();
3359 return gen_rtx_REG (outer, nr);
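/* For example, taking the HImode pieces of an SImode value held in
   r0: byte 0 maps to r0 and byte 2 maps to r2, matching the r2:r0
   layout listed in legal_subregs above.  */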
3362 /* Used to emit move instructions. We split some moves,
3363 and avoid mem-mem moves. */
3365 m32c_prepare_move (rtx * operands, machine_mode mode)
3367 if (far_addr_space_p (operands[0])
3368 && CONSTANT_P (operands[1]))
3370 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3372 if (TARGET_A16 && mode == PSImode)
3373 return m32c_split_move (operands, mode, 1);
3374 if ((GET_CODE (operands[0]) == MEM)
3375 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3377 rtx pmv = XEXP (operands[0], 0);
3378 rtx dest_reg = XEXP (pmv, 0);
3379 rtx dest_mod = XEXP (pmv, 1);
3381 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
3382 operands[0] = gen_rtx_MEM (mode, dest_reg);
3384 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3385 operands[1] = copy_to_mode_reg (mode, operands[1]);
3386 return 0;
3389 #define DEBUG_SPLIT 0
3391 /* Returns TRUE if the given PSImode move should be split. We split
3392 for all r8c/m16c moves, since those chips don't support them, and for
3393 POP.L, since we can only *push* SImode. */
3395 m32c_split_psi_p (rtx * operands)
3397 #if DEBUG_SPLIT
3398 fprintf (stderr, "\nm32c_split_psi_p\n");
3399 debug_rtx (operands[0]);
3400 debug_rtx (operands[1]);
3401 #endif
3402 if (TARGET_A16)
3404 #if DEBUG_SPLIT
3405 fprintf (stderr, "yes, A16\n");
3406 #endif
3407 return 1;
3409 if (GET_CODE (operands[1]) == MEM
3410 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3412 #if DEBUG_SPLIT
3413 fprintf (stderr, "yes, pop.l\n");
3414 #endif
3415 return 1;
3417 #if DEBUG_SPLIT
3418 fprintf (stderr, "no, default\n");
3419 #endif
3420 return 0;
3423 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3424 (define_expand), 1 if it is not optional (define_insn_and_split),
3425 and 3 for define_split (alternate api). */
3427 m32c_split_move (rtx * operands, machine_mode mode, int split_all)
3429 rtx s[4], d[4];
3430 int parts, si, di, rev = 0;
3431 int rv = 0, opi = 2;
3432 machine_mode submode = HImode;
3433 rtx *ops, local_ops[10];
3435 /* define_split modifies the existing operands, but the other two
3436 emit new insns. OPS is where we store the operand pairs, which
3437 we emit later. */
3438 if (split_all == 3)
3439 ops = operands;
3440 else
3441 ops = local_ops;
3443 /* Else HImode. */
3444 if (mode == DImode)
3445 submode = SImode;
3447 /* Before splitting mem-mem moves, force one operand into a
3448 register. */
3449 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3451 #if DEBUG0
3452 fprintf (stderr, "force_reg...\n");
3453 debug_rtx (operands[1]);
3454 #endif
3455 operands[1] = force_reg (mode, operands[1]);
3456 #if DEBUG0
3457 debug_rtx (operands[1]);
3458 #endif
3461 parts = 2;
3463 #if DEBUG_SPLIT
3464 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3465 split_all);
3466 debug_rtx (operands[0]);
3467 debug_rtx (operands[1]);
3468 #endif
3470 /* Note that split_all is not used to select the api after this
3471 point, so it's safe to set it to 3 even with define_insn. */
3472 /* None of the chips can move SI operands to sp-relative addresses,
3473 so we always split those. */
3474 if (satisfies_constraint_Ss (operands[0]))
3475 split_all = 3;
3477 if (TARGET_A16
3478 && (far_addr_space_p (operands[0])
3479 || far_addr_space_p (operands[1])))
3480 split_all |= 1;
3482 /* We don't need to split these. */
3483 if (TARGET_A24
3484 && split_all != 3
3485 && (mode == SImode || mode == PSImode)
3486 && !(GET_CODE (operands[1]) == MEM
3487 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3488 return 0;
3490 /* First, enumerate the subregs we'll be dealing with. */
3491 for (si = 0; si < parts; si++)
3493 d[si] =
3494 m32c_subreg (submode, operands[0], mode,
3495 si * GET_MODE_SIZE (submode));
3496 s[si] =
3497 m32c_subreg (submode, operands[1], mode,
3498 si * GET_MODE_SIZE (submode));
3501 /* Split pushes by emitting a sequence of smaller pushes. */
3502 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3504 for (si = parts - 1; si >= 0; si--)
3506 ops[opi++] = gen_rtx_MEM (submode,
3507 gen_rtx_PRE_DEC (Pmode,
3508 gen_rtx_REG (Pmode,
3509 SP_REGNO)));
3510 ops[opi++] = s[si];
3513 rv = 1;
3515 /* Likewise for pops. */
3516 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3518 for (di = 0; di < parts; di++)
3520 ops[opi++] = d[di];
3521 ops[opi++] = gen_rtx_MEM (submode,
3522 gen_rtx_POST_INC (Pmode,
3523 gen_rtx_REG (Pmode,
3524 SP_REGNO)));
3526 rv = 1;
3528 else if (split_all)
3530 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3531 for (di = 0; di < parts - 1; di++)
3532 for (si = di + 1; si < parts; si++)
3533 if (reg_mentioned_p (d[di], s[si]))
3534 rev = 1;
3536 if (rev)
3537 for (si = 0; si < parts; si++)
3539 ops[opi++] = d[si];
3540 ops[opi++] = s[si];
3542 else
3543 for (si = parts - 1; si >= 0; si--)
3545 ops[opi++] = d[si];
3546 ops[opi++] = s[si];
3548 rv = 1;
3550 /* Now emit any moves we may have accumulated. */
3551 if (rv && split_all != 3)
3553 int i;
3554 for (i = 2; i < opi; i += 2)
3555 emit_move_insn (ops[i], ops[i + 1]);
3557 return rv;
3560 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3561 the like. For the R8C they expect one of the addresses to be in
3562 R1L:An so we need to arrange for that. Otherwise, it's just a
3563 matter of picking out the operands we want and emitting the right
3564 pattern for them. All these expanders, which correspond to
3565 patterns in blkmov.md, must return nonzero if they expand the insn,
3566 or zero if they should FAIL. */
3568 /* This is a memset() opcode. All operands are implied, so we need to
3569 arrange for them to be in the right registers. The opcode wants
3570 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3571 the count (HI), and $2 the value (QI). */
3573 m32c_expand_setmemhi(rtx *operands)
3575 rtx desta, count, val;
3576 rtx desto, counto;
3578 desta = XEXP (operands[0], 0);
3579 count = operands[1];
3580 val = operands[2];
3582 desto = gen_reg_rtx (Pmode);
3583 counto = gen_reg_rtx (HImode);
3585 if (GET_CODE (desta) != REG
3586 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3587 desta = copy_to_mode_reg (Pmode, desta);
3589 /* This looks like an arbitrary restriction, but this is by far the
3590 most common case. For counts 8..14 this actually results in
3591 smaller code with no speed penalty because the half-sized
3592 constant can be loaded with a shorter opcode. */
3593 if (GET_CODE (count) == CONST_INT
3594 && GET_CODE (val) == CONST_INT
3595 && ! (INTVAL (count) & 1)
3596 && (INTVAL (count) > 1)
3597 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3599 unsigned v = INTVAL (val) & 0xff;
3600 v = v | (v << 8);
3601 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3602 val = copy_to_mode_reg (HImode, GEN_INT (v));
3603 if (TARGET_A16)
3604 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3605 else
3606 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3607 return 1;
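/* For example, memset (dst, 5, 8) takes the branch above: the value
   is widened to 0x0505, the count halved to 4, and the word-sized
   setmem pattern is emitted.  */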
3610 /* This is the generalized memset() case. */
3611 if (GET_CODE (val) != REG
3612 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3613 val = copy_to_mode_reg (QImode, val);
3615 if (GET_CODE (count) != REG
3616 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3617 count = copy_to_mode_reg (HImode, count);
3619 if (TARGET_A16)
3620 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3621 else
3622 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3624 return 1;
3627 /* This is a memcpy() opcode. All operands are implied, so we need to
3628 arrange for them to be in the right registers. The opcode wants
3629 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3630 is the source (MEM:BLK), and $2 the count (HI). */
3632 m32c_expand_movmemhi(rtx *operands)
3634 rtx desta, srca, count;
3635 rtx desto, srco, counto;
3637 desta = XEXP (operands[0], 0);
3638 srca = XEXP (operands[1], 0);
3639 count = operands[2];
3641 desto = gen_reg_rtx (Pmode);
3642 srco = gen_reg_rtx (Pmode);
3643 counto = gen_reg_rtx (HImode);
3645 if (GET_CODE (desta) != REG
3646 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3647 desta = copy_to_mode_reg (Pmode, desta);
3649 if (GET_CODE (srca) != REG
3650 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3651 srca = copy_to_mode_reg (Pmode, srca);
3653 /* Similar to setmem, but we don't need to check the value. */
3654 if (GET_CODE (count) == CONST_INT
3655 && ! (INTVAL (count) & 1)
3656 && (INTVAL (count) > 1))
3658 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3659 if (TARGET_A16)
3660 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3661 else
3662 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3663 return 1;
3666 /* This is the generalized memcpy() case. */
3667 if (GET_CODE (count) != REG
3668 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3669 count = copy_to_mode_reg (HImode, count);
3671 if (TARGET_A16)
3672 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3673 else
3674 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3676 return 1;
3679 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3680 the copy, which should point to the NUL at the end of the string,
3681 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3682 Since our opcode leaves the destination pointing *after* the NUL,
3683 we must emit an adjustment. */
3685 m32c_expand_movstr(rtx *operands)
3687 rtx desta, srca;
3688 rtx desto, srco;
3690 desta = XEXP (operands[1], 0);
3691 srca = XEXP (operands[2], 0);
3693 desto = gen_reg_rtx (Pmode);
3694 srco = gen_reg_rtx (Pmode);
3696 if (GET_CODE (desta) != REG
3697 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3698 desta = copy_to_mode_reg (Pmode, desta);
3700 if (GET_CODE (srca) != REG
3701 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3702 srca = copy_to_mode_reg (Pmode, srca);
3704 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3705 /* desto ends up being a1, which allows this type of add through MOVA. */
3706 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3708 return 1;
3711 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3712 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3713 $2 is the other (MEM:BLK). We must do the comparison, and then
3714 convert the flags to a signed integer result. */
3716 m32c_expand_cmpstr(rtx *operands)
3718 rtx src1a, src2a;
3720 src1a = XEXP (operands[1], 0);
3721 src2a = XEXP (operands[2], 0);
3723 if (GET_CODE (src1a) != REG
3724 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3725 src1a = copy_to_mode_reg (Pmode, src1a);
3727 if (GET_CODE (src2a) != REG
3728 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3729 src2a = copy_to_mode_reg (Pmode, src2a);
3731 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3732 emit_insn (gen_cond_to_int (operands[0]));
3734 return 1;
3738 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3740 static shift_gen_func
3741 shift_gen_func_for (int mode, int code)
3743 #define GFF(m,c,f) if (mode == m && code == c) return f
3744 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3745 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3746 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3747 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3748 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3749 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3750 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3751 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3752 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3753 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3754 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3755 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3756 #undef GFF
3757 gcc_unreachable ();
3760 /* The m32c only has one shift, but it takes a signed count. GCC
3761 doesn't want this, so we fake it by negating any shift count when
3762 we're pretending to shift the other way. Also, the shift count is
3763 limited to -8..8. It's slightly better to use two shifts for 9..15
3764 than to load the count into r1h, so we do that too. */
3766 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3768 machine_mode mode = GET_MODE (operands[0]);
3769 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3770 rtx temp;
3772 if (GET_CODE (operands[2]) == CONST_INT)
3774 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3775 int count = INTVAL (operands[2]) * scale;
3777 while (count > maxc)
3779 temp = gen_reg_rtx (mode);
3780 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3781 operands[1] = temp;
3782 count -= maxc;
3784 while (count < -maxc)
3786 temp = gen_reg_rtx (mode);
3787 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3788 operands[1] = temp;
3789 count += maxc;
3791 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3792 return 1;
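/* For example, an HImode shift left by 10 exceeds the limit of 8, so
   the loop above emits a shift by 8 into a temporary followed by a
   shift by 2.  */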
3795 temp = gen_reg_rtx (QImode);
3796 if (scale < 0)
3797 /* The pattern has a NEG that corresponds to this. */
3798 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3799 else if (TARGET_A16 && mode == SImode)
3800 /* We do this because the code below may modify this, we don't
3801 want to modify the origin of this value. */
3802 emit_move_insn (temp, operands[2]);
3803 else
3804 /* We'll only use it for the shift, no point emitting a move. */
3805 temp = operands[2];
3807 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3809 /* The m16c has a limit of -16..16 for SI shifts, even when the
3810 shift count is in a register. Since there are so many targets
3811 of these shifts, it's better to expand the RTL here than to
3812 call a helper function.
3814 The resulting code looks something like this:
3816 cmp.b r1h,-16
3817 jge.b 1f
3818 shl.l -16,dest
3819 add.b r1h,16
3820 1f: cmp.b r1h,16
3821 jle.b 1f
3822 shl.l 16,dest
3823 sub.b r1h,16
3824 1f: shl.l r1h,dest
3826 We take advantage of the fact that "negative" shifts are
3827 undefined to skip one of the comparisons. */
3829 rtx count;
3830 rtx label, tempvar;
3831 rtx_insn *insn;
3833 emit_move_insn (operands[0], operands[1]);
3835 count = temp;
3836 label = gen_label_rtx ();
3837 LABEL_NUSES (label) ++;
3839 tempvar = gen_reg_rtx (mode);
3841 if (shift_code == ASHIFT)
3843 /* This is a left shift. We only need check positive counts. */
3844 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3845 count, GEN_INT (16), label));
3846 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3847 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3848 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3849 emit_label_after (label, insn);
3851 else
3853 /* This is a right shift. We only need check negative counts. */
3854 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3855 count, GEN_INT (-16), label));
3856 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3857 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3858 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3859 emit_label_after (label, insn);
3861 operands[1] = operands[0];
3862 emit_insn (func (operands[0], operands[0], count));
3863 return 1;
3866 operands[2] = temp;
3867 return 0;
3870 /* The m32c has a limited range of operations that work on PSImode
3871 values; we have to expand to SI, do the math, and truncate back to
3872 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3873 those cases. */
3874 void
3875 m32c_expand_neg_mulpsi3 (rtx * operands)
3877 /* operands: a = b * i */
3878 rtx temp1; /* b as SI */
3879 rtx scale /* i as SI */;
3880 rtx temp2; /* a*b as SI */
3882 temp1 = gen_reg_rtx (SImode);
3883 temp2 = gen_reg_rtx (SImode);
3884 if (GET_CODE (operands[2]) != CONST_INT)
3886 scale = gen_reg_rtx (SImode);
3887 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3889 else
3890 scale = copy_to_mode_reg (SImode, operands[2]);
3892 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3893 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3894 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3897 /* Pattern Output Functions */
3900 m32c_expand_movcc (rtx *operands)
3902 rtx rel = operands[1];
3904 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3905 return 1;
3906 if (GET_CODE (operands[2]) != CONST_INT
3907 || GET_CODE (operands[3]) != CONST_INT)
3908 return 1;
3909 if (GET_CODE (rel) == NE)
3911 rtx tmp = operands[2];
3912 operands[2] = operands[3];
3913 operands[3] = tmp;
3914 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3917 emit_move_insn (operands[0],
3918 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3919 rel,
3920 operands[2],
3921 operands[3]));
3922 return 0;
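/* For example, x = (a != b) ? 3 : 5 is rewritten above as
   x = (a == b) ? 5 : 3 before the IF_THEN_ELSE is emitted; a condition
   other than EQ/NE, or non-constant arms, makes the expander FAIL
   (return 1).  */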
3925 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3927 m32c_expand_insv (rtx *operands)
3929 rtx op0, src0, p;
3930 int mask;
3932 if (INTVAL (operands[1]) != 1)
3933 return 1;
3935 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3936 if (GET_CODE (operands[3]) != CONST_INT)
3937 return 1;
3938 if (INTVAL (operands[3]) != 0
3939 && INTVAL (operands[3]) != 1
3940 && INTVAL (operands[3]) != -1)
3941 return 1;
3943 mask = 1 << INTVAL (operands[2]);
3945 op0 = operands[0];
3946 if (GET_CODE (op0) == SUBREG
3947 && SUBREG_BYTE (op0) == 0)
3949 rtx sub = SUBREG_REG (op0);
3950 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3951 op0 = sub;
3954 if (!can_create_pseudo_p ()
3955 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3956 src0 = op0;
3957 else
3959 src0 = gen_reg_rtx (GET_MODE (op0));
3960 emit_move_insn (src0, op0);
3963 if (GET_MODE (op0) == HImode
3964 && INTVAL (operands[2]) >= 8
3965 && GET_CODE (op0) == MEM)
3967 /* We are little endian. */
3968 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3969 XEXP (op0, 0), 1));
3970 MEM_COPY_ATTRIBUTES (new_mem, op0);
3971 mask >>= 8;
3974 /* First, we generate a mask with the correct polarity. If we are
3975 storing a zero, we want an AND mask, so invert it. */
3976 if (INTVAL (operands[3]) == 0)
3978 /* Storing a zero, use an AND mask */
3979 if (GET_MODE (op0) == HImode)
3980 mask ^= 0xffff;
3981 else
3982 mask ^= 0xff;
3984 /* Now we need to properly sign-extend the mask in case we need to
3985 fall back to an AND or OR opcode. */
3986 if (GET_MODE (op0) == HImode)
3988 if (mask & 0x8000)
3989 mask -= 0x10000;
3991 else
3993 if (mask & 0x80)
3994 mask -= 0x100;
3997 switch ( (INTVAL (operands[3]) ? 4 : 0)
3998 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3999 + (TARGET_A24 ? 1 : 0))
4001 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
4002 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
4003 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
4004 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
4005 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
4006 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
4007 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4008 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
4009 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
4012 emit_insn (p);
4013 return 0;
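/* For example, inserting the constant 1 into bit 3 of a QImode
   operand becomes an IOR with mask 0x08 (a bset); inserting 0 becomes
   an AND with mask 0xf7, sign-extended to -9 for the RTL (a bclr).  */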
4016 const char *
4017 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4019 static char buf[30];
4020 if (GET_CODE (operands[0]) == REG
4021 && REGNO (operands[0]) == R0_REGNO)
4023 if (code == EQ)
4024 return "stzx\t#1,#0,r0l";
4025 if (code == NE)
4026 return "stzx\t#0,#1,r0l";
4028 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4029 return buf;
4032 /* Encode symbol attributes of a SYMBOL_REF into its
4033 SYMBOL_REF_FLAGS. */
4034 static void
4035 m32c_encode_section_info (tree decl, rtx rtl, int first)
4037 int extra_flags = 0;
4039 default_encode_section_info (decl, rtl, first);
4040 if (TREE_CODE (decl) == FUNCTION_DECL
4041 && m32c_special_page_vector_p (decl))
4043 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4045 if (extra_flags)
4046 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4049 /* Returns TRUE if the current function is a leaf, and thus we can
4050 determine which registers an interrupt function really needs to
4051 save. The logic below is mostly about finding the insn sequence
4052 that's the function, versus any sequence that might be open for the
4053 current insn. */
4054 static int
4055 m32c_leaf_function_p (void)
4057 int rv;
4059 push_topmost_sequence ();
4060 rv = leaf_function_p ();
4061 pop_topmost_sequence ();
4062 return rv;
4065 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4066 opcodes. If the function doesn't need the frame base or stack
4067 pointer, it can use the simpler RTS opcode. */
4068 static bool
4069 m32c_function_needs_enter (void)
4071 rtx_insn *insn;
4072 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4073 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4075 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4076 if (NONDEBUG_INSN_P (insn))
4078 if (reg_mentioned_p (sp, insn))
4079 return true;
4080 if (reg_mentioned_p (fb, insn))
4081 return true;
4083 return false;
4086 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4087 frame-related. Return PAR.
4089 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4090 PARALLEL rtx other than the first if they do not have the
4091 FRAME_RELATED flag set on them. So this function is handy for
4092 marking up 'enter' instructions. */
4093 static rtx
4094 m32c_all_frame_related (rtx par)
4096 int len = XVECLEN (par, 0);
4097 int i;
4099 for (i = 0; i < len; i++)
4100 F (XVECEXP (par, 0, i));
4102 return par;
4105 /* Emits the prologue. See the frame layout comment earlier in this
4106 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4107 that we manually update sp. */
4108 void
4109 m32c_emit_prologue (void)
4111 int frame_size, extra_frame_size = 0, reg_save_size;
4112 int complex_prologue = 0;
4114 cfun->machine->is_leaf = m32c_leaf_function_p ();
4115 if (interrupt_p (cfun->decl))
4117 cfun->machine->is_interrupt = 1;
4118 complex_prologue = 1;
4120 else if (bank_switch_p (cfun->decl))
4121 warning (OPT_Wattributes,
4122 "%<bank_switch%> has no effect on non-interrupt functions");
4124 reg_save_size = m32c_pushm_popm (PP_justcount);
4126 if (interrupt_p (cfun->decl))
4128 if (bank_switch_p (cfun->decl))
4129 emit_insn (gen_fset_b ());
4130 else if (cfun->machine->intr_pushm)
4131 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4134 frame_size =
4135 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4136 if (frame_size == 0
4137 && !m32c_function_needs_enter ())
4138 cfun->machine->use_rts = 1;
4140 if (frame_size > 254)
4142 extra_frame_size = frame_size - 254;
4143 frame_size = 254;
4145 if (cfun->machine->use_rts == 0)
4146 F (emit_insn (m32c_all_frame_related
4147 (TARGET_A16
4148 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4149 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4151 if (extra_frame_size)
4153 complex_prologue = 1;
4154 if (TARGET_A16)
4155 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4156 gen_rtx_REG (HImode, SP_REGNO),
4157 GEN_INT (-extra_frame_size))));
4158 else
4159 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4160 gen_rtx_REG (PSImode, SP_REGNO),
4161 GEN_INT (-extra_frame_size))));
4164 complex_prologue += m32c_pushm_popm (PP_pushm);
4166 /* This just emits a comment into the .s file for debugging. */
4167 if (complex_prologue)
4168 emit_insn (gen_prologue_end ());
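/* A sketch of the sizing logic above: a 300-byte frame on an A16
   target reserves 254 bytes of locals with ENTER and subtracts the
   remaining 46 bytes from sp with an explicit addhi3.  */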
4171 /* Likewise, for the epilogue. The only exception is that, for
4172 interrupts, we must manually unwind the frame as the REIT opcode
4173 doesn't do that. */
4174 void
4175 m32c_emit_epilogue (void)
4177 int popm_count = m32c_pushm_popm (PP_justcount);
4179 /* This just emits a comment into the .s file for debugging. */
4180 if (popm_count > 0 || cfun->machine->is_interrupt)
4181 emit_insn (gen_epilogue_start ());
4183 if (popm_count > 0)
4184 m32c_pushm_popm (PP_popm);
4186 if (cfun->machine->is_interrupt)
4188 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4190 /* REIT clears B flag and restores $fp for us, but we still
4191 have to fix up the stack. USE_RTS just means we didn't
4192 emit ENTER. */
4193 if (!cfun->machine->use_rts)
4195 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4196 gen_rtx_REG (spmode, FP_REGNO));
4197 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4198 gen_rtx_REG (spmode, A0_REGNO));
4199 /* We can't just add this to the POPM because it would be in
4200 the wrong order, and wouldn't fix the stack if we're bank
4201 switching. */
4202 if (TARGET_A16)
4203 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4204 else
4205 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4207 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4208 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4210 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4211 generated only for M32C/M32CM targets (generate the REIT
4212 instruction otherwise). */
4213 if (fast_interrupt_p (cfun->decl))
4215 /* Check if the fast_interrupt attribute is set for M32C or M32CM. */
4216 if (TARGET_A24)
4218 emit_jump_insn (gen_epilogue_freit ());
4220 /* If the fast_interrupt attribute is set for an R8C or M16C
4221 target, ignore the attribute and generate the REIT
4222 instruction instead. */
4223 else
4225 warning (OPT_Wattributes,
4226 "%<fast_interrupt%> attribute directive ignored");
4227 emit_jump_insn (gen_epilogue_reit_16 ());
4230 else if (TARGET_A16)
4231 emit_jump_insn (gen_epilogue_reit_16 ());
4232 else
4233 emit_jump_insn (gen_epilogue_reit_24 ());
4235 else if (cfun->machine->use_rts)
4236 emit_jump_insn (gen_epilogue_rts ());
4237 else if (TARGET_A16)
4238 emit_jump_insn (gen_epilogue_exitd_16 ());
4239 else
4240 emit_jump_insn (gen_epilogue_exitd_24 ());
4243 void
4244 m32c_emit_eh_epilogue (rtx ret_addr)
4246 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4247 return to. We have to fudge the stack, pop everything, pop SP
4248 (fudged), and return (fudged). This is actually easier to do in
4249 assembler, so punt to libgcc. */
4250 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4251 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4254 /* Indicate which flags must be properly set for a given conditional. */
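/* (The FLAGS_* masks are assumed to come from the "flags" insn
   attribute in the machine description.  Roughly: signed orderings
   need O and S (plus Z for LE/GT), unsigned orderings need C (plus Z
   for LEU/GTU), and equality tests need only Z; FLAGS_N, the default,
   means this is not a condition we track.)  */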
4255 static int
4256 flags_needed_for_conditional (rtx cond)
4258 switch (GET_CODE (cond))
4260 case LE:
4261 case GT:
4262 return FLAGS_OSZ;
4263 case LEU:
4264 case GTU:
4265 return FLAGS_ZC;
4266 case LT:
4267 case GE:
4268 return FLAGS_OS;
4269 case LTU:
4270 case GEU:
4271 return FLAGS_C;
4272 case EQ:
4273 case NE:
4274 return FLAGS_Z;
4275 default:
4276 return FLAGS_N;
4280 #define DEBUG_CMP 0
4282 /* Returns true if a compare insn is redundant because it would only
4283 set flags that are already set correctly. */
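/* In outline: find the conditional that consumes the compare, work out
   which flag bits it needs, then walk backwards over insns that do not
   set flags (giving up if any of them touches the compared operands or
   if memory is involved) and check whether the nearest flag-setting
   insn already leaves those bits in the required state.  */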
4284 static bool
4285 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4287 int flags_needed;
4288 int pflags;
4289 rtx_insn *prev;
4290 rtx pp, next;
4291 rtx op0, op1;
4292 #if DEBUG_CMP
4293 int prev_icode, i;
4294 #endif
4296 op0 = operands[0];
4297 op1 = operands[1];
4299 #if DEBUG_CMP
4300 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4301 debug_rtx(cmp);
4302 for (i=0; i<2; i++)
4304 fprintf(stderr, "operands[%d] = ", i);
4305 debug_rtx(operands[i]);
4307 #endif
4309 next = next_nonnote_insn (cmp);
4310 if (!next || !INSN_P (next))
4312 #if DEBUG_CMP
4313 fprintf(stderr, "compare not followed by insn\n");
4314 debug_rtx(next);
4315 #endif
4316 return false;
4318 if (GET_CODE (PATTERN (next)) == SET
4319 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4321 next = XEXP (XEXP (PATTERN (next), 1), 0);
4323 else if (GET_CODE (PATTERN (next)) == SET)
4325 /* If this is a conditional, flags_needed will be something
4326 other than FLAGS_N, which we test below. */
4327 next = XEXP (PATTERN (next), 1);
4329 else
4331 #if DEBUG_CMP
4332 fprintf(stderr, "compare not followed by conditional\n");
4333 debug_rtx(next);
4334 #endif
4335 return false;
4337 #if DEBUG_CMP
4338 fprintf(stderr, "conditional is: ");
4339 debug_rtx(next);
4340 #endif
4342 flags_needed = flags_needed_for_conditional (next);
4343 if (flags_needed == FLAGS_N)
4345 #if DEBUG_CMP
4346 fprintf(stderr, "compare not followed by conditional\n");
4347 debug_rtx(next);
4348 #endif
4349 return false;
4352 /* Compare doesn't set overflow and carry the same way that
4353 arithmetic instructions do, so we can't replace those. */
4354 if (flags_needed & FLAGS_OC)
4355 return false;
4357 prev = cmp;
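/* Walk backwards until we reach the insn that most recently set the
   flags; each iteration below either bails out or steps over an insn
   that leaves the flags alone.  */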
4358 do {
4359 prev = prev_nonnote_insn (prev);
4360 if (!prev)
4362 #if DEBUG_CMP
4363 fprintf(stderr, "No previous insn.\n");
4364 #endif
4365 return false;
4367 if (!INSN_P (prev))
4369 #if DEBUG_CMP
4370 fprintf(stderr, "Previous insn is a non-insn.\n");
4371 #endif
4372 return false;
4374 pp = PATTERN (prev);
4375 if (GET_CODE (pp) != SET)
4377 #if DEBUG_CMP
4378 fprintf(stderr, "Previous insn is not a SET.\n");
4379 #endif
4380 return false;
4382 pflags = get_attr_flags (prev);
4384 /* Looking up attributes of previous insns corrupted the recog
4385 tables. */
4386 INSN_UID (cmp) = -1;
4387 recog (PATTERN (cmp), cmp, 0);
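/* (Presumably the re-recognition above restores whatever cached state
   for CMP the attribute lookup invalidated; this is an assumption, the
   comment before it only notes the corruption.)  */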
4389 if (pflags == FLAGS_N
4390 && reg_mentioned_p (op0, pp))
4392 #if DEBUG_CMP
4393 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4394 debug_rtx(prev);
4395 #endif
4396 return false;
4399 /* Check for comparisons against memory - between volatiles and
4400 aliases, we just can't risk this one. */
4401 if (GET_CODE (operands[0]) == MEM
4402 || GET_CODE (operands[1]) == MEM)
4404 #if DEBUG_CMP
4405 fprintf(stderr, "comparisons with memory:\n");
4406 debug_rtx(prev);
4407 #endif
4408 return false;
4411 /* Check for PREV changing a register that's used to compute a
4412 value in CMP, even if it doesn't otherwise change flags. */
4413 if (GET_CODE (operands[0]) == REG
4414 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4416 #if DEBUG_CMP
4417 fprintf(stderr, "sub-value affected, op0:\n");
4418 debug_rtx(prev);
4419 #endif
4420 return false;
4422 if (GET_CODE (operands[1]) == REG
4423 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4425 #if DEBUG_CMP
4426 fprintf(stderr, "sub-value affected, op1:\n");
4427 debug_rtx(prev);
4428 #endif
4429 return false;
4432 } while (pflags == FLAGS_N);
4433 #if DEBUG_CMP
4434 fprintf(stderr, "previous flag-setting insn:\n");
4435 debug_rtx(prev);
4436 debug_rtx(pp);
4437 #endif
4439 if (GET_CODE (pp) == SET
4440 && GET_CODE (XEXP (pp, 0)) == REG
4441 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4442 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4444 /* Adjacent cbranches must have the same operands to be
4445 redundant. */
4446 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4447 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4448 #if DEBUG_CMP
4449 fprintf(stderr, "adjacent cbranches\n");
4450 debug_rtx(pop0);
4451 debug_rtx(pop1);
4452 #endif
4453 if (rtx_equal_p (op0, pop0)
4454 && rtx_equal_p (op1, pop1))
4455 return true;
4456 #if DEBUG_CMP
4457 fprintf(stderr, "prev cmp not same\n");
4458 #endif
4459 return false;
4462 /* Else the previous insn must be a SET, with either the source or
4463 dest equal to operands[0], and operands[1] must be zero. */
4465 if (!rtx_equal_p (op1, const0_rtx))
4467 #if DEBUG_CMP
4468 fprintf(stderr, "operands[1] not const0_rtx\n");
4469 #endif
4470 return false;
4472 if (GET_CODE (pp) != SET)
4474 #if DEBUG_CMP
4475 fprintf (stderr, "pp not set\n");
4476 #endif
4477 return false;
4479 if (!rtx_equal_p (op0, SET_SRC (pp))
4480 && !rtx_equal_p (op0, SET_DEST (pp)))
4482 #if DEBUG_CMP
4483 fprintf(stderr, "operands[0] not found in set\n");
4484 #endif
4485 return false;
4488 #if DEBUG_CMP
4489 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4490 #endif
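/* The previous flag-setting insn sets every flag bit the conditional
   needs, so the compare adds nothing.  */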
4491 if ((pflags & flags_needed) == flags_needed)
4492 return true;
4494 return false;
4497 /* Return the pattern for a compare. This will be commented out if
4498 the compare is redundant, else a normal pattern is returned. Thus,
4499 the assembler output says where the compare would have been. */
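/* For an HImode compare, for instance, this returns "cmp.w\t%1,%0", or
   ";cmp.w\t%1,%0" when the compare is redundant; the leading ';' makes
   it an assembler comment, so the listing still shows where the compare
   would have been.  */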
4500 char *
4501 m32c_output_compare (rtx_insn *insn, rtx *operands)
4503 static char templ[] = ";cmp.b\t%1,%0";
4504 /* templ[5] is the size suffix ('b'), patched below. */
4506 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4507 if (m32c_compare_redundant (insn, operands))
4509 #if DEBUG_CMP
4510 fprintf(stderr, "cbranch: cmp not needed\n");
4511 #endif
4512 return templ;
4515 #if DEBUG_CMP
4516 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4517 #endif
4518 return templ + 1;
4521 #undef TARGET_ENCODE_SECTION_INFO
4522 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4524 /* If the frame pointer isn't used, we detect it manually. But the
4525 stack pointer doesn't have as flexible addressing as the frame
4526 pointer, so we always assume we have it. */
4528 #undef TARGET_FRAME_POINTER_REQUIRED
4529 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4531 /* The Global `targetm' Variable. */
4533 struct gcc_target targetm = TARGET_INITIALIZER;
4535 #include "gt-m32c.h"