Rebase.
[official-gcc.git] / gcc / config / m32c / m32c.c
blob69b9c55e534c4cfbcfc089a50b5a5cf690d78578
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "tree.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "except.h"
45 #include "function.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "langhooks.h"
51 #include "pointer-set.h"
52 #include "hash-table.h"
53 #include "vec.h"
54 #include "basic-block.h"
55 #include "tree-ssa-alias.h"
56 #include "internal-fn.h"
57 #include "gimple-fold.h"
58 #include "tree-eh.h"
59 #include "gimple-expr.h"
60 #include "is-a.h"
61 #include "gimple.h"
62 #include "df.h"
63 #include "tm-constrs.h"
64 #include "builtins.h"
/* Prototypes */

/* Selector for m32c_pushm_popm: emit a PUSHM, emit a POPM, or merely
   compute the size of the register save area without emitting
   anything.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;
76 static bool m32c_function_needs_enter (void);
77 static tree interrupt_handler (tree *, tree, tree, int, bool *);
78 static tree function_vector_handler (tree *, tree, tree, int, bool *);
79 static int interrupt_p (tree node);
80 static int bank_switch_p (tree node);
81 static int fast_interrupt_p (tree node);
82 static int interrupt_p (tree node);
83 static bool m32c_asm_integer (rtx, unsigned int, int);
84 static int m32c_comp_type_attributes (const_tree, const_tree);
85 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
86 static struct machine_function *m32c_init_machine_status (void);
87 static void m32c_insert_attributes (tree, tree *);
88 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
89 static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
90 static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
91 const_tree, bool);
92 static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
93 const_tree, bool);
94 static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
95 const_tree, bool);
96 static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
97 static int m32c_pushm_popm (Push_Pop_Type);
98 static bool m32c_strict_argument_naming (cumulative_args_t);
99 static rtx m32c_struct_value_rtx (tree, int);
100 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
101 static int need_to_save (int);
102 static rtx m32c_function_value (const_tree, const_tree, bool);
103 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
105 /* Returns true if an address is specified, else false. */
106 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
108 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
110 #define streq(a,b) (strcmp ((a), (b)) == 0)
112 /* Internal support routines */
114 /* Debugging statements are tagged with DEBUG0 only so that they can
115 be easily enabled individually, by replacing the '0' with '1' as
116 needed. */
117 #define DEBUG0 0
118 #define DEBUG1 1
120 #if DEBUG0
121 /* This is needed by some of the commented-out debug statements
122 below. */
123 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
124 #endif
125 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
127 /* These are all to support encode_pattern(). */
128 static char pattern[30], *patternp;
129 static GTY(()) rtx patternr[30];
130 #define RTX_IS(x) (streq (pattern, x))
132 /* Some macros to simplify the logic throughout this file. */
133 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
134 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
136 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
137 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
139 static int
140 far_addr_space_p (rtx x)
142 if (GET_CODE (x) != MEM)
143 return 0;
144 #if DEBUG0
145 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
146 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
147 #endif
148 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
151 /* We do most RTX matching by converting the RTX into a string, and
152 using string compares. This vastly simplifies the logic in many of
153 the functions in this file.
155 On exit, pattern[] has the encoded string (use RTX_IS("...") to
156 compare it) and patternr[] has pointers to the nodes in the RTX
157 corresponding to each character in the encoded string. The latter
158 is mostly used by print_operand().
160 Unrecognized patterns have '?' in them; this shows up when the
161 assembler complains about syntax errors.
164 static void
165 encode_pattern_1 (rtx x)
167 int i;
169 if (patternp == pattern + sizeof (pattern) - 2)
171 patternp[-1] = '?';
172 return;
175 patternr[patternp - pattern] = x;
177 switch (GET_CODE (x))
179 case REG:
180 *patternp++ = 'r';
181 break;
182 case SUBREG:
183 if (GET_MODE_SIZE (GET_MODE (x)) !=
184 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
185 *patternp++ = 'S';
186 encode_pattern_1 (XEXP (x, 0));
187 break;
188 case MEM:
189 *patternp++ = 'm';
190 case CONST:
191 encode_pattern_1 (XEXP (x, 0));
192 break;
193 case SIGN_EXTEND:
194 *patternp++ = '^';
195 *patternp++ = 'S';
196 encode_pattern_1 (XEXP (x, 0));
197 break;
198 case ZERO_EXTEND:
199 *patternp++ = '^';
200 *patternp++ = 'Z';
201 encode_pattern_1 (XEXP (x, 0));
202 break;
203 case PLUS:
204 *patternp++ = '+';
205 encode_pattern_1 (XEXP (x, 0));
206 encode_pattern_1 (XEXP (x, 1));
207 break;
208 case PRE_DEC:
209 *patternp++ = '>';
210 encode_pattern_1 (XEXP (x, 0));
211 break;
212 case POST_INC:
213 *patternp++ = '<';
214 encode_pattern_1 (XEXP (x, 0));
215 break;
216 case LO_SUM:
217 *patternp++ = 'L';
218 encode_pattern_1 (XEXP (x, 0));
219 encode_pattern_1 (XEXP (x, 1));
220 break;
221 case HIGH:
222 *patternp++ = 'H';
223 encode_pattern_1 (XEXP (x, 0));
224 break;
225 case SYMBOL_REF:
226 *patternp++ = 's';
227 break;
228 case LABEL_REF:
229 *patternp++ = 'l';
230 break;
231 case CODE_LABEL:
232 *patternp++ = 'c';
233 break;
234 case CONST_INT:
235 case CONST_DOUBLE:
236 *patternp++ = 'i';
237 break;
238 case UNSPEC:
239 *patternp++ = 'u';
240 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
241 for (i = 0; i < XVECLEN (x, 0); i++)
242 encode_pattern_1 (XVECEXP (x, 0, i));
243 break;
244 case USE:
245 *patternp++ = 'U';
246 break;
247 case PARALLEL:
248 *patternp++ = '|';
249 for (i = 0; i < XVECLEN (x, 0); i++)
250 encode_pattern_1 (XVECEXP (x, 0, i));
251 break;
252 case EXPR_LIST:
253 *patternp++ = 'E';
254 encode_pattern_1 (XEXP (x, 0));
255 if (XEXP (x, 1))
256 encode_pattern_1 (XEXP (x, 1));
257 break;
258 default:
259 *patternp++ = '?';
260 #if DEBUG0
261 fprintf (stderr, "can't encode pattern %s\n",
262 GET_RTX_NAME (GET_CODE (x)));
263 debug_rtx (x);
264 gcc_unreachable ();
265 #endif
266 break;
270 static void
271 encode_pattern (rtx x)
273 patternp = pattern;
274 encode_pattern_1 (x);
275 *patternp = 0;
278 /* Since register names indicate the mode they're used in, we need a
279 way to determine which name to refer to the register with. Called
280 by print_operand(). */
282 static const char *
283 reg_name_with_mode (int regno, enum machine_mode mode)
285 int mlen = GET_MODE_SIZE (mode);
286 if (regno == R0_REGNO && mlen == 1)
287 return "r0l";
288 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
289 return "r2r0";
290 if (regno == R0_REGNO && mlen == 6)
291 return "r2r1r0";
292 if (regno == R0_REGNO && mlen == 8)
293 return "r3r1r2r0";
294 if (regno == R1_REGNO && mlen == 1)
295 return "r1l";
296 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
297 return "r3r1";
298 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
299 return "a1a0";
300 return reg_names[regno];
303 /* How many bytes a register uses on stack when it's pushed. We need
304 to know this because the push opcode needs to explicitly indicate
305 the size of the register, even though the name of the register
306 already tells it that. Used by m32c_output_reg_{push,pop}, which
307 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
309 static int
310 reg_push_size (int regno)
312 switch (regno)
314 case R0_REGNO:
315 case R1_REGNO:
316 return 2;
317 case R2_REGNO:
318 case R3_REGNO:
319 case FLG_REGNO:
320 return 2;
321 case A0_REGNO:
322 case A1_REGNO:
323 case SB_REGNO:
324 case FB_REGNO:
325 case SP_REGNO:
326 if (TARGET_A16)
327 return 2;
328 else
329 return 3;
330 default:
331 gcc_unreachable ();
335 /* Given two register classes, find the largest intersection between
336 them. If there is no intersection, return RETURNED_IF_EMPTY
337 instead. */
338 static reg_class_t
339 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
340 reg_class_t returned_if_empty)
342 HARD_REG_SET cc;
343 int i;
344 reg_class_t best = NO_REGS;
345 unsigned int best_size = 0;
347 if (original_class == limiting_class)
348 return original_class;
350 cc = reg_class_contents[original_class];
351 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
353 for (i = 0; i < LIM_REG_CLASSES; i++)
355 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
356 if (best_size < reg_class_size[i])
358 best = (reg_class_t) i;
359 best_size = reg_class_size[i];
363 if (best == NO_REGS)
364 return returned_if_empty;
365 return best;
368 /* Used by m32c_register_move_cost to determine if a move is
369 impossibly expensive. */
370 static bool
371 class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
373 /* Cache the results: 0=untested 1=no 2=yes */
374 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
376 if (results[(int) rclass][mode] == 0)
378 int r;
379 results[rclass][mode] = 1;
380 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
381 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
382 && HARD_REGNO_MODE_OK (r, mode))
384 results[rclass][mode] = 2;
385 break;
389 #if DEBUG0
390 fprintf (stderr, "class %s can hold %s? %s\n",
391 class_names[(int) rclass], mode_name[mode],
392 (results[rclass][mode] == 2) ? "yes" : "no");
393 #endif
394 return results[(int) rclass][mode] == 2;
397 /* Run-time Target Specification. */
399 /* Memregs are memory locations that gcc treats like general
400 registers, as there are a limited number of true registers and the
401 m32c families can use memory in most places that registers can be
402 used.
404 However, since memory accesses are more expensive than registers,
405 we allow the user to limit the number of memregs available, in
406 order to try to persuade gcc to try harder to use real registers.
408 Memregs are provided by lib1funcs.S.
411 int ok_to_change_target_memregs = TRUE;
413 /* Implements TARGET_OPTION_OVERRIDE. */
415 #undef TARGET_OPTION_OVERRIDE
416 #define TARGET_OPTION_OVERRIDE m32c_option_override
418 static void
419 m32c_option_override (void)
421 /* We limit memregs to 0..16, and provide a default. */
422 if (global_options_set.x_target_memregs)
424 if (target_memregs < 0 || target_memregs > 16)
425 error ("invalid target memregs value '%d'", target_memregs);
427 else
428 target_memregs = 16;
430 if (TARGET_A24)
431 flag_ivopts = 0;
433 /* This target defaults to strict volatile bitfields. */
434 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
435 flag_strict_volatile_bitfields = 1;
437 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
438 This is always worse than an absolute call. */
439 if (TARGET_A16)
440 flag_no_function_cse = 1;
442 /* This wants to put insns between compares and their jumps. */
443 /* FIXME: The right solution is to properly trace the flags register
444 values, but that is too much work for stage 4. */
445 flag_combine_stack_adjustments = 0;
448 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
449 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
451 static void
452 m32c_override_options_after_change (void)
454 if (TARGET_A16)
455 flag_no_function_cse = 1;
458 /* Defining data structures for per-function information */
460 /* The usual; we set up our machine_function data. */
461 static struct machine_function *
462 m32c_init_machine_status (void)
464 return ggc_cleared_alloc<machine_function> ();
467 /* Implements INIT_EXPANDERS. We just set up to call the above
468 function. */
469 void
470 m32c_init_expanders (void)
472 init_machine_status = m32c_init_machine_status;
475 /* Storage Layout */
477 /* Register Basics */
479 /* Basic Characteristics of Registers */
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry gives, per hard register, how many registers a
   value of the given mode class occupies (0 = mode not allowed).  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
514 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
515 of available memregs, and select which registers need to be preserved
516 across calls based on the chip family. */
518 #undef TARGET_CONDITIONAL_REGISTER_USAGE
519 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
520 void
521 m32c_conditional_register_usage (void)
523 int i;
525 if (0 <= target_memregs && target_memregs <= 16)
527 /* The command line option is bytes, but our "registers" are
528 16-bit words. */
529 for (i = (target_memregs+1)/2; i < 8; i++)
531 fixed_regs[MEM0_REGNO + i] = 1;
532 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
536 /* M32CM and M32C preserve more registers across function calls. */
537 if (TARGET_A24)
539 call_used_regs[R1_REGNO] = 0;
540 call_used_regs[R2_REGNO] = 0;
541 call_used_regs[R3_REGNO] = 0;
542 call_used_regs[A0_REGNO] = 0;
543 call_used_regs[A1_REGNO] = 0;
547 /* How Values Fit in Registers */
549 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
550 different registers are different sizes from each other, *and* may
551 be different sizes in different chip families. */
552 static int
553 m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
555 if (regno == FLG_REGNO && mode == CCmode)
556 return 1;
557 if (regno >= FIRST_PSEUDO_REGISTER)
558 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
560 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
561 return (GET_MODE_SIZE (mode) + 1) / 2;
563 if (GET_MODE_SIZE (mode) <= 1)
564 return nregs_table[regno].qi_regs;
565 if (GET_MODE_SIZE (mode) <= 2)
566 return nregs_table[regno].hi_regs;
567 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
568 return 2;
569 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
570 return nregs_table[regno].pi_regs;
571 if (GET_MODE_SIZE (mode) <= 4)
572 return nregs_table[regno].si_regs;
573 if (GET_MODE_SIZE (mode) <= 8)
574 return nregs_table[regno].di_regs;
575 return 0;
579 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
581 int rv = m32c_hard_regno_nregs_1 (regno, mode);
582 return rv ? rv : 1;
585 /* Implements HARD_REGNO_MODE_OK. The above function does the work
586 already; just test its return value. */
588 m32c_hard_regno_ok (int regno, enum machine_mode mode)
590 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
593 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
594 registers are all different sizes. However, since most modes are
595 bigger than our registers anyway, it's easier to implement this
596 function that way, leaving QImode as the only unique case. */
598 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
600 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
601 return 1;
603 #if 0
604 if (m1 == QImode || m2 == QImode)
605 return 0;
606 #endif
608 return 1;
611 /* Register Classes */
613 /* Implements REGNO_REG_CLASS. */
614 enum reg_class
615 m32c_regno_reg_class (int regno)
617 switch (regno)
619 case R0_REGNO:
620 return R0_REGS;
621 case R1_REGNO:
622 return R1_REGS;
623 case R2_REGNO:
624 return R2_REGS;
625 case R3_REGNO:
626 return R3_REGS;
627 case A0_REGNO:
628 return A0_REGS;
629 case A1_REGNO:
630 return A1_REGS;
631 case SB_REGNO:
632 return SB_REGS;
633 case FB_REGNO:
634 return FB_REGS;
635 case SP_REGNO:
636 return SP_REGS;
637 case FLG_REGNO:
638 return FLG_REGS;
639 default:
640 if (IS_MEM_REGNO (regno))
641 return MEM_REGS;
642 return ALL_REGS;
646 /* Implements REGNO_OK_FOR_BASE_P. */
648 m32c_regno_ok_for_base_p (int regno)
650 if (regno == A0_REGNO
651 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
652 return 1;
653 return 0;
656 #define DEBUG_RELOAD 0
658 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
659 registers of the appropriate size. */
661 #undef TARGET_PREFERRED_RELOAD_CLASS
662 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
664 static reg_class_t
665 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
667 reg_class_t newclass = rclass;
669 #if DEBUG_RELOAD
670 fprintf (stderr, "\npreferred_reload_class for %s is ",
671 class_names[rclass]);
672 #endif
673 if (rclass == NO_REGS)
674 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
676 if (reg_classes_intersect_p (rclass, CR_REGS))
678 switch (GET_MODE (x))
680 case QImode:
681 newclass = HL_REGS;
682 break;
683 default:
684 /* newclass = HI_REGS; */
685 break;
689 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
690 newclass = SI_REGS;
691 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
692 && ! reg_class_subset_p (R03_REGS, rclass))
693 newclass = DI_REGS;
695 rclass = reduce_class (rclass, newclass, rclass);
697 if (GET_MODE (x) == QImode)
698 rclass = reduce_class (rclass, HL_REGS, rclass);
700 #if DEBUG_RELOAD
701 fprintf (stderr, "%s\n", class_names[rclass]);
702 debug_rtx (x);
704 if (GET_CODE (x) == MEM
705 && GET_CODE (XEXP (x, 0)) == PLUS
706 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
707 fprintf (stderr, "Glorm!\n");
708 #endif
709 return rclass;
712 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
714 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
715 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
717 static reg_class_t
718 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
720 return m32c_preferred_reload_class (x, rclass);
723 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
724 address registers for reloads since they're needed for address
725 reloads. */
727 m32c_limit_reload_class (enum machine_mode mode, int rclass)
729 #if DEBUG_RELOAD
730 fprintf (stderr, "limit_reload_class for %s: %s ->",
731 mode_name[mode], class_names[rclass]);
732 #endif
734 if (mode == QImode)
735 rclass = reduce_class (rclass, HL_REGS, rclass);
736 else if (mode == HImode)
737 rclass = reduce_class (rclass, HI_REGS, rclass);
738 else if (mode == SImode)
739 rclass = reduce_class (rclass, SI_REGS, rclass);
741 if (rclass != A_REGS)
742 rclass = reduce_class (rclass, DI_REGS, rclass);
744 #if DEBUG_RELOAD
745 fprintf (stderr, " %s\n", class_names[rclass]);
746 #endif
747 return rclass;
750 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
751 r0 or r1, as those are the only real QImode registers. CR regs get
752 reloaded through appropriately sized general or address
753 registers. */
755 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
757 int cc = class_contents[rclass][0];
758 #if DEBUG0
759 fprintf (stderr, "\nsecondary reload class %s %s\n",
760 class_names[rclass], mode_name[mode]);
761 debug_rtx (x);
762 #endif
763 if (mode == QImode
764 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
765 return QI_REGS;
766 if (reg_classes_intersect_p (rclass, CR_REGS)
767 && GET_CODE (x) == REG
768 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
769 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
770 return NO_REGS;
773 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
774 reloads. */
776 #undef TARGET_CLASS_LIKELY_SPILLED_P
777 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
779 static bool
780 m32c_class_likely_spilled_p (reg_class_t regclass)
782 if (regclass == A_REGS)
783 return true;
785 return (reg_class_size[(int) regclass] == 1);
788 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
789 documented meaning, to avoid potential inconsistencies with actual
790 class definitions. */
792 #undef TARGET_CLASS_MAX_NREGS
793 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
795 static unsigned char
796 m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
798 int rn;
799 unsigned char max = 0;
801 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
802 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
804 unsigned char n = m32c_hard_regno_nregs (rn, mode);
805 if (max < n)
806 max = n;
808 return max;
811 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
812 QI (r0l, r1l) because the chip doesn't support QI ops on other
813 registers (well, it does on a0/a1 but if we let gcc do that, reload
814 suffers). Otherwise, we allow changes to larger modes. */
816 m32c_cannot_change_mode_class (enum machine_mode from,
817 enum machine_mode to, int rclass)
819 int rn;
820 #if DEBUG0
821 fprintf (stderr, "cannot change from %s to %s in %s\n",
822 mode_name[from], mode_name[to], class_names[rclass]);
823 #endif
825 /* If the larger mode isn't allowed in any of these registers, we
826 can't allow the change. */
827 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
828 if (class_contents[rclass][0] & (1 << rn))
829 if (! m32c_hard_regno_ok (rn, to))
830 return 1;
832 if (to == QImode)
833 return (class_contents[rclass][0] & 0x1ffa);
835 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
836 && GET_MODE_SIZE (from) > 1)
837 return 0;
838 if (GET_MODE_SIZE (from) > 2) /* all other regs */
839 return 0;
841 return 1;
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
857 /* Implements matching for constraints (see next function too). 'S' is
858 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
859 call return values. */
860 bool
861 m32c_matches_constraint_p (rtx value, int constraint)
863 encode_pattern (value);
865 switch (constraint) {
866 case CONSTRAINT_SF:
867 return (far_addr_space_p (value)
868 && ((RTX_IS ("mr")
869 && A0_OR_PSEUDO (patternr[1])
870 && GET_MODE (patternr[1]) == SImode)
871 || (RTX_IS ("m+^Sri")
872 && A0_OR_PSEUDO (patternr[4])
873 && GET_MODE (patternr[4]) == HImode)
874 || (RTX_IS ("m+^Srs")
875 && A0_OR_PSEUDO (patternr[4])
876 && GET_MODE (patternr[4]) == HImode)
877 || (RTX_IS ("m+^S+ris")
878 && A0_OR_PSEUDO (patternr[5])
879 && GET_MODE (patternr[5]) == HImode)
880 || RTX_IS ("ms")));
881 case CONSTRAINT_Sd:
883 /* This is the common "src/dest" address */
884 rtx r;
885 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
886 return true;
887 if (RTX_IS ("ms") || RTX_IS ("m+si"))
888 return true;
889 if (RTX_IS ("m++rii"))
891 if (REGNO (patternr[3]) == FB_REGNO
892 && INTVAL (patternr[4]) == 0)
893 return true;
895 if (RTX_IS ("mr"))
896 r = patternr[1];
897 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
898 r = patternr[2];
899 else
900 return false;
901 if (REGNO (r) == SP_REGNO)
902 return false;
903 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
905 case CONSTRAINT_Sa:
907 rtx r;
908 if (RTX_IS ("mr"))
909 r = patternr[1];
910 else if (RTX_IS ("m+ri"))
911 r = patternr[2];
912 else
913 return false;
914 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
916 case CONSTRAINT_Si:
917 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
918 case CONSTRAINT_Ss:
919 return ((RTX_IS ("mr")
920 && (IS_REG (patternr[1], SP_REGNO)))
921 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
922 case CONSTRAINT_Sf:
923 return ((RTX_IS ("mr")
924 && (IS_REG (patternr[1], FB_REGNO)))
925 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
926 case CONSTRAINT_Sb:
927 return ((RTX_IS ("mr")
928 && (IS_REG (patternr[1], SB_REGNO)))
929 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
930 case CONSTRAINT_Sp:
931 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
932 return (RTX_IS ("mi")
933 && !(INTVAL (patternr[1]) & ~0x1fff));
934 case CONSTRAINT_S1:
935 return r1h_operand (value, QImode);
936 case CONSTRAINT_Rpa:
937 return GET_CODE (value) == PARALLEL;
938 default:
939 return false;
943 /* STACK AND CALLING */
945 /* Frame Layout */
947 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
948 (yes, THREE bytes) onto the stack for the return address, but we
949 don't support pointers bigger than 16 bits on those chips. This
950 will likely wreak havoc with exception unwinding. FIXME. */
952 m32c_return_addr_rtx (int count)
954 enum machine_mode mode;
955 int offset;
956 rtx ra_mem;
958 if (count)
959 return NULL_RTX;
960 /* we want 2[$fb] */
962 if (TARGET_A24)
964 /* It's four bytes */
965 mode = PSImode;
966 offset = 4;
968 else
970 /* FIXME: it's really 3 bytes */
971 mode = HImode;
972 offset = 2;
975 ra_mem =
976 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
977 offset));
978 return copy_to_mode_reg (mode, ra_mem);
981 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
983 m32c_incoming_return_addr_rtx (void)
985 /* we want [sp] */
986 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
989 /* Exception Handling Support */
991 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
992 pointers. */
994 m32c_eh_return_data_regno (int n)
996 switch (n)
998 case 0:
999 return A0_REGNO;
1000 case 1:
1001 if (TARGET_A16)
1002 return R3_REGNO;
1003 else
1004 return R1_REGNO;
1005 default:
1006 return INVALID_REGNUM;
1010 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1011 m32c_emit_eh_epilogue. */
1013 m32c_eh_return_stackadj_rtx (void)
1015 if (!cfun->machine->eh_stack_adjust)
1017 rtx sa;
1019 sa = gen_rtx_REG (Pmode, R0_REGNO);
1020 cfun->machine->eh_stack_adjust = sa;
1022 return cfun->machine->eh_stack_adjust;
1025 /* Registers That Address the Stack Frame */
1027 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1028 the original spec called for dwarf numbers to vary with register
1029 width as well, for example, r0l, r0, and r2r0 would each have
1030 different dwarf numbers. GCC doesn't support this, and we don't do
1031 it, and gdb seems to like it this way anyway. */
1032 unsigned int
1033 m32c_dwarf_frame_regnum (int n)
1035 switch (n)
1037 case R0_REGNO:
1038 return 5;
1039 case R1_REGNO:
1040 return 6;
1041 case R2_REGNO:
1042 return 7;
1043 case R3_REGNO:
1044 return 8;
1045 case A0_REGNO:
1046 return 9;
1047 case A1_REGNO:
1048 return 10;
1049 case FB_REGNO:
1050 return 11;
1051 case SB_REGNO:
1052 return 19;
1054 case SP_REGNO:
1055 return 12;
1056 case PC_REGNO:
1057 return 13;
1058 default:
1059 return DWARF_FRAME_REGISTERS + 1;
1063 /* The frame looks like this:
1065 ap -> +------------------------------
1066 | Return address (3 or 4 bytes)
1067 | Saved FB (2 or 4 bytes)
1068 fb -> +------------------------------
1069 | local vars
1070 | register saves fb
1071 | through r0 as needed
1072 sp -> +------------------------------
1075 /* We use this to wrap all emitted insns in the prologue. */
1076 static rtx
1077 F (rtx x)
1079 RTX_FRAME_RELATED_P (x) = 1;
1080 return x;
1083 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1084 how much the stack pointer moves for each, for each cpu family. */
1085 static struct
1087 int reg1;
1088 int bit;
1089 int a16_bytes;
1090 int a24_bytes;
1091 } pushm_info[] =
1093 /* These are in reverse push (nearest-to-sp) order. */
1094 { R0_REGNO, 0x80, 2, 2 },
1095 { R1_REGNO, 0x40, 2, 2 },
1096 { R2_REGNO, 0x20, 2, 2 },
1097 { R3_REGNO, 0x10, 2, 2 },
1098 { A0_REGNO, 0x08, 2, 4 },
1099 { A1_REGNO, 0x04, 2, 4 },
1100 { SB_REGNO, 0x02, 2, 4 },
1101 { FB_REGNO, 0x01, 2, 4 }
1104 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1106 /* Returns TRUE if we need to save/restore the given register. We
1107 save everything for exception handlers, so that any register can be
1108 unwound. For interrupt handlers, we save everything if the handler
1109 calls something else (because we don't know what *that* function
1110 might do), but try to be a bit smarter if the handler is a leaf
1111 function. We always save $a0, though, because we use that in the
1112 epilogue to copy $fb to $sp. */
1113 static int
1114 need_to_save (int regno)
1116 if (fixed_regs[regno])
1117 return 0;
1118 if (crtl->calls_eh_return)
1119 return 1;
1120 if (regno == FP_REGNO)
1121 return 0;
1122 if (cfun->machine->is_interrupt
1123 && (!cfun->machine->is_leaf
1124 || (regno == A0_REGNO
1125 && m32c_function_needs_enter ())
1127 return 1;
1128 if (df_regs_ever_live_p (regno)
1129 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1130 return 1;
1131 return 0;
1134 /* This function contains all the intelligence about saving and
1135 restoring registers. It always figures out the register save set.
1136 When called with PP_justcount, it merely returns the size of the
1137 save set (for eliminating the frame pointer, for example). When
1138 called with PP_pushm or PP_popm, it emits the appropriate
1139 instructions for saving (pushm) or restoring (popm) the
1140 registers. */
1141 static int
1142 m32c_pushm_popm (Push_Pop_Type ppt)
1144 int reg_mask = 0;
1145 int byte_count = 0, bytes;
1146 int i;
1147 rtx dwarf_set[PUSHM_N];
1148 int n_dwarfs = 0;
1149 int nosave_mask = 0;
/* When the return value is described by a PARALLEL (registers), the
   epilogue's popm must not clobber those registers, so mask them out
   of the save set.  Exception and interrupt handlers still save
   everything.  */
1151 if (crtl->return_rtx
1152 && GET_CODE (crtl->return_rtx) == PARALLEL
1153 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1155 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1156 rtx rv = XEXP (exp, 0);
1157 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1159 if (rv_bytes > 2)
1160 nosave_mask |= 0x20; /* PSI, SI */
1161 else
1162 nosave_mask |= 0xf0; /* DF */
1163 if (rv_bytes > 4)
1164 nosave_mask |= 0x50; /* DI */
/* Walk the pushm-ordered register table, building the pushm bit mask
   and total pushed size; for PP_pushm also record a DWARF CFI SET for
   each saved register's stack slot.  */
1167 for (i = 0; i < (int) PUSHM_N; i++)
1169 /* Skip if neither register needs saving. */
1170 if (!need_to_save (pushm_info[i].reg1))
1171 continue;
1173 if (pushm_info[i].bit & nosave_mask)
1174 continue;
1176 reg_mask |= pushm_info[i].bit;
1177 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1179 if (ppt == PP_pushm)
1181 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1182 rtx addr;
1184 /* Always use stack_pointer_rtx instead of calling
1185 gen_rtx_REG ourselves. Code elsewhere in GCC assumes
1186 that there is a single rtx representing the stack pointer,
1187 namely stack_pointer_rtx, and uses == to recognize it. */
1188 addr = stack_pointer_rtx;
1190 if (byte_count != 0)
1191 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count))
1270 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1271 diagrams our call frame. */
1273 m32c_initial_elimination_offset (int from, int to)
1275 int ofs = 0;
1277 if (from == AP_REGNO)
1279 if (TARGET_A16)
1280 ofs += 5;
1281 else
1282 ofs += 8;
1285 if (to == SP_REGNO)
1287 ofs += m32c_pushm_popm (PP_justcount);
1288 ofs += get_frame_size ();
1291 /* Account for push rounding. */
1292 if (TARGET_A24)
1293 ofs = (ofs + 1) & ~1;
1294 #if DEBUG0
1295 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1296 to, ofs);
1297 #endif
1298 return ofs;
1301 /* Passing Function Arguments on the Stack */
1303 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1304 M32C has word stacks. */
1305 unsigned int
1306 m32c_push_rounding (int n)
1308 if (TARGET_R8C || TARGET_M16C)
1309 return n;
1310 return (n + 1) & ~1;
1313 /* Passing Arguments in Registers */
1315 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1316 registers, partly on stack. If our function returns a struct, a
1317 pointer to a buffer for it is at the top of the stack (last thing
1318 pushed). The first few real arguments may be in registers as
1319 follows:
1321 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1322 arg2 in r2 if it's HI (else pushed on stack)
1323 rest on stack
1324 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1325 rest on stack
1327 Structs are not passed in registers, even if they fit. Only
1328 integer and pointer types are passed in registers.
1330 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1331 r2 if it fits. */
1332 #undef TARGET_FUNCTION_ARG
1333 #define TARGET_FUNCTION_ARG m32c_function_arg
1334 static rtx
1335 m32c_function_arg (cumulative_args_t ca_v,
1336 enum machine_mode mode, const_tree type, bool named)
1338 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1340 /* Can return a reg, parallel, or 0 for stack */
1341 rtx rv = NULL_RTX;
1342 #if DEBUG0
1343 fprintf (stderr, "func_arg %d (%s, %d)\n",
1344 ca->parm_num, mode_name[mode], named);
1345 debug_tree (type);
1346 #endif
1348 if (mode == VOIDmode)
1349 return GEN_INT (0);
1351 if (ca->force_mem || !named)
1353 #if DEBUG0
1354 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1355 named);
1356 #endif
1357 return NULL_RTX;
1360 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1361 return NULL_RTX;
1363 if (type && AGGREGATE_TYPE_P (type))
1364 return NULL_RTX;
1366 switch (ca->parm_num)
1368 case 1:
1369 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1370 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1371 break;
1373 case 2:
1374 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1375 rv = gen_rtx_REG (mode, R2_REGNO);
1376 break;
1379 #if DEBUG0
1380 debug_rtx (rv);
1381 #endif
1382 return rv;
1385 #undef TARGET_PASS_BY_REFERENCE
1386 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1387 static bool
1388 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1389 enum machine_mode mode ATTRIBUTE_UNUSED,
1390 const_tree type ATTRIBUTE_UNUSED,
1391 bool named ATTRIBUTE_UNUSED)
1393 return 0;
1396 /* Implements INIT_CUMULATIVE_ARGS. */
1397 void
1398 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1399 tree fntype,
1400 rtx libname ATTRIBUTE_UNUSED,
1401 tree fndecl,
1402 int n_named_args ATTRIBUTE_UNUSED)
1404 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1405 ca->force_mem = 1;
1406 else
1407 ca->force_mem = 0;
1408 ca->parm_num = 1;
1411 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1412 functions returning structures, so we always reset that. Otherwise,
1413 we only need to know the sequence number of the argument to know what
1414 to do with it. */
1415 #undef TARGET_FUNCTION_ARG_ADVANCE
1416 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1417 static void
1418 m32c_function_arg_advance (cumulative_args_t ca_v,
1419 enum machine_mode mode ATTRIBUTE_UNUSED,
1420 const_tree type ATTRIBUTE_UNUSED,
1421 bool named ATTRIBUTE_UNUSED)
1423 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1425 if (ca->force_mem)
1426 ca->force_mem = 0;
1427 else
1428 ca->parm_num++;
1431 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1432 #undef TARGET_FUNCTION_ARG_BOUNDARY
1433 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1434 static unsigned int
1435 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1436 const_tree type ATTRIBUTE_UNUSED)
1438 return (TARGET_A16 ? 8 : 16);
1441 /* Implements FUNCTION_ARG_REGNO_P. */
1443 m32c_function_arg_regno_p (int r)
1445 if (TARGET_A24)
1446 return (r == R0_REGNO);
1447 return (r == R1_REGNO || r == R2_REGNO);
1450 /* HImode and PSImode are the two "native" modes as far as GCC is
1451 concerned, but the chips also support a 32-bit mode which is used
1452 for some opcodes in R8C/M16C and for reset vectors and such. */
1453 #undef TARGET_VALID_POINTER_MODE
1454 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1455 static bool
1456 m32c_valid_pointer_mode (enum machine_mode mode)
1458 if (mode == HImode
1459 || mode == PSImode
1460 || mode == SImode
1462 return 1;
1463 return 0;
1466 /* How Scalar Function Values Are Returned */
1468 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1469 combination of registers starting there (r2r0 for longs, r3r1r2r0
1470 for long long, r3r2r1r0 for doubles), except that that ABI
1471 currently doesn't work because it ends up using all available
1472 general registers and gcc often can't compile it. So, instead, we
1473 return anything bigger than 16 bits in "mem0" (effectively, a
1474 memory location). */
1476 #undef TARGET_LIBCALL_VALUE
1477 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1479 static rtx
1480 m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1482 /* return reg or parallel */
1483 #if 0
1484 /* FIXME: GCC has difficulty returning large values in registers,
1485 because that ties up most of the general registers and gives the
1486 register allocator little to work with. Until we can resolve
1487 this, large values are returned in memory. */
1488 if (mode == DFmode)
1490 rtx rv;
1492 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1493 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1494 gen_rtx_REG (HImode,
1495 R0_REGNO),
1496 GEN_INT (0));
1497 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1498 gen_rtx_REG (HImode,
1499 R1_REGNO),
1500 GEN_INT (2));
1501 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1502 gen_rtx_REG (HImode,
1503 R2_REGNO),
1504 GEN_INT (4));
1505 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1506 gen_rtx_REG (HImode,
1507 R3_REGNO),
1508 GEN_INT (6));
1509 return rv;
1512 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1514 rtx rv;
1516 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1517 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1518 gen_rtx_REG (mode,
1519 R0_REGNO),
1520 GEN_INT (0));
1521 return rv;
1523 #endif
1525 if (GET_MODE_SIZE (mode) > 2)
1526 return gen_rtx_REG (mode, MEM0_REGNO);
1527 return gen_rtx_REG (mode, R0_REGNO);
1530 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1531 conventions. */
1533 #undef TARGET_FUNCTION_VALUE
1534 #define TARGET_FUNCTION_VALUE m32c_function_value
1536 static rtx
1537 m32c_function_value (const_tree valtype,
1538 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1539 bool outgoing ATTRIBUTE_UNUSED)
1541 /* return reg or parallel */
1542 const enum machine_mode mode = TYPE_MODE (valtype);
1543 return m32c_libcall_value (mode, NULL_RTX);
1546 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1548 #undef TARGET_FUNCTION_VALUE_REGNO_P
1549 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1551 static bool
1552 m32c_function_value_regno_p (const unsigned int regno)
1554 return (regno == R0_REGNO || regno == MEM0_REGNO);
1557 /* How Large Values Are Returned */
1559 /* We return structures by pushing the address on the stack, even if
1560 we use registers for the first few "real" arguments. */
1561 #undef TARGET_STRUCT_VALUE_RTX
1562 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1563 static rtx
1564 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1565 int incoming ATTRIBUTE_UNUSED)
1567 return 0;
1570 /* Function Entry and Exit */
1572 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1574 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1576 if (cfun->machine->is_interrupt)
1577 return 1;
1578 return 0;
1581 /* Implementing the Varargs Macros */
1583 #undef TARGET_STRICT_ARGUMENT_NAMING
1584 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1585 static bool
1586 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1588 return 1;
1591 /* Trampolines for Nested Functions */
1594 m16c:
1595 1 0000 75C43412 mov.w #0x1234,a0
1596 2 0004 FC000000 jmp.a label
1598 m32c:
1599 1 0000 BC563412 mov.l:s #0x123456,a0
1600 2 0004 CC000000 jmp.a label
1603 /* Implements TRAMPOLINE_SIZE. */
1605 m32c_trampoline_size (void)
1607 /* Allocate extra space so we can avoid the messy shifts when we
1608 initialize the trampoline; we just write past the end of the
1609 opcode. */
1610 return TARGET_A16 ? 8 : 10;
1613 /* Implements TRAMPOLINE_ALIGNMENT. */
1615 m32c_trampoline_alignment (void)
1617 return 2;
1620 /* Implements TARGET_TRAMPOLINE_INIT. */
1622 #undef TARGET_TRAMPOLINE_INIT
1623 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1624 static void
1625 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1627 rtx function = XEXP (DECL_RTL (fndecl), 0);
1629 #define A0(m,i) adjust_address (m_tramp, m, i)
1630 if (TARGET_A16)
1632 /* Note: we subtract a "word" because the moves want signed
1633 constants, not unsigned constants. */
1634 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1635 emit_move_insn (A0 (HImode, 2), chainval);
1636 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1637 /* We use 16-bit addresses here, but store the zero to turn it
1638 into a 24-bit offset. */
1639 emit_move_insn (A0 (HImode, 5), function);
1640 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1642 else
1644 /* Note that the PSI moves actually write 4 bytes. Make sure we
1645 write stuff out in the right order, and leave room for the
1646 extra byte at the end. */
1647 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1648 emit_move_insn (A0 (PSImode, 1), chainval);
1649 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1650 emit_move_insn (A0 (PSImode, 5), function);
1652 #undef A0
1655 /* Addressing Modes */
1657 /* The r8c/m32c family supports a wide range of non-orthogonal
1658 addressing modes, including the ability to double-indirect on *some*
1659 of them. Not all insns support all modes, either, but we rely on
1660 predicates and constraints to deal with that. */
1661 #undef TARGET_LEGITIMATE_ADDRESS_P
1662 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1663 bool
1664 m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1666 int mode_adjust;
1667 if (CONSTANT_P (x))
1668 return 1;
1670 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1671 return 0;
1672 if (TARGET_A24 && GET_MODE (x) != PSImode)
1673 return 0;
1675 /* Wide references to memory will be split after reload, so we must
1676 ensure that all parts of such splits remain legitimate
1677 addresses. */
1678 mode_adjust = GET_MODE_SIZE (mode) - 1;
1680 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1681 if (GET_CODE (x) == PRE_DEC
1682 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1684 return (GET_CODE (XEXP (x, 0)) == REG
1685 && REGNO (XEXP (x, 0)) == SP_REGNO);
1688 #if 0
1689 /* This is the double indirection detection, but it currently
1690 doesn't work as cleanly as this code implies, so until we've had
1691 a chance to debug it, leave it disabled. */
1692 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1694 #if DEBUG_DOUBLE
1695 fprintf (stderr, "double indirect\n");
1696 #endif
1697 x = XEXP (x, 0);
1699 #endif
1701 encode_pattern (x);
1702 if (RTX_IS ("r"))
1704 /* Most indexable registers can be used without displacements,
1705 although some of them will be emitted with an explicit zero
1706 to please the assembler. */
1707 switch (REGNO (patternr[0]))
1709 case A1_REGNO:
1710 case SB_REGNO:
1711 case FB_REGNO:
1712 case SP_REGNO:
1713 if (TARGET_A16 && GET_MODE (x) == SImode)
1714 return 0;
1715 case A0_REGNO:
1716 return 1;
1718 default:
1719 if (IS_PSEUDO (patternr[0], strict))
1720 return 1;
1721 return 0;
1725 if (TARGET_A16 && GET_MODE (x) == SImode)
1726 return 0;
1728 if (RTX_IS ("+ri"))
1730 /* This is more interesting, because different base registers
1731 allow for different displacements - both range and signedness
1732 - and it differs from chip series to chip series too. */
1733 int rn = REGNO (patternr[1]);
1734 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1735 switch (rn)
1737 case A0_REGNO:
1738 case A1_REGNO:
1739 case SB_REGNO:
1740 /* The syntax only allows positive offsets, but when the
1741 offsets span the entire memory range, we can simulate
1742 negative offsets by wrapping. */
1743 if (TARGET_A16)
1744 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1745 if (rn == SB_REGNO)
1746 return (offs >= 0 && offs <= 65535 - mode_adjust);
1747 /* A0 or A1 */
1748 return (offs >= -16777216 && offs <= 16777215);
1750 case FB_REGNO:
1751 if (TARGET_A16)
1752 return (offs >= -128 && offs <= 127 - mode_adjust);
1753 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1755 case SP_REGNO:
1756 return (offs >= -128 && offs <= 127 - mode_adjust);
1758 default:
1759 if (IS_PSEUDO (patternr[1], strict))
1760 return 1;
1761 return 0;
1764 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1766 rtx reg = patternr[1];
1768 /* We don't know where the symbol is, so only allow base
1769 registers which support displacements spanning the whole
1770 address range. */
1771 switch (REGNO (reg))
1773 case A0_REGNO:
1774 case A1_REGNO:
1775 /* $sb needs a secondary reload, but since it's involved in
1776 memory address reloads too, we don't deal with it very
1777 well. */
1778 /* case SB_REGNO: */
1779 return 1;
1780 default:
1781 if (IS_PSEUDO (reg, strict))
1782 return 1;
1783 return 0;
1786 return 0;
1789 /* Implements REG_OK_FOR_BASE_P. */
1791 m32c_reg_ok_for_base_p (rtx x, int strict)
1793 if (GET_CODE (x) != REG)
1794 return 0;
1795 switch (REGNO (x))
1797 case A0_REGNO:
1798 case A1_REGNO:
1799 case SB_REGNO:
1800 case FB_REGNO:
1801 case SP_REGNO:
1802 return 1;
1803 default:
1804 if (IS_PSEUDO (x, strict))
1805 return 1;
1806 return 0;
1810 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1811 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1812 like this:
1813 EB 4B FF mova -128[$fb],$a0
1814 D8 0C FF FF mov.w:Q #0,-1[$a0]
1816 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1817 displacements:
1818 7B F4 stc $fb,$a0
1819 77 54 00 01 sub #256,$a0
1820 D8 08 01 mov.w:Q #0,1[$a0]
1822 If we don't offset (i.e. offset by zero), we end up with:
1823 7B F4 stc $fb,$a0
1824 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1826 We have to subtract *something* so that we have a PLUS rtx to mark
1827 that we've done this reload. The -128 offset will never result in
1828 an 8-bit aN offset, and the payoff for the second case is five
1829 loads *if* those loads are within 256 bytes of the other end of the
1830 frame, so the third case seems best. Note that we subtract the
1831 zero, but detect that in the addhi3 pattern. */
1833 #define BIG_FB_ADJ 0
1835 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1836 worry about is frame base offsets, as $fb has a limited
1837 displacement range. We deal with this by attempting to reload $fb
1838 itself into an address register; that seems to result in the best
1839 code. */
1840 #undef TARGET_LEGITIMIZE_ADDRESS
1841 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1842 static rtx
1843 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1844 enum machine_mode mode)
1846 #if DEBUG0
1847 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1848 debug_rtx (x);
1849 fprintf (stderr, "\n");
1850 #endif
1852 if (GET_CODE (x) == PLUS
1853 && GET_CODE (XEXP (x, 0)) == REG
1854 && REGNO (XEXP (x, 0)) == FB_REGNO
1855 && GET_CODE (XEXP (x, 1)) == CONST_INT
1856 && (INTVAL (XEXP (x, 1)) < -128
1857 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1859 /* reload FB to A_REGS */
1860 rtx temp = gen_reg_rtx (Pmode);
1861 x = copy_rtx (x);
1862 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1863 XEXP (x, 0) = temp;
1866 return x;
1869 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1871 m32c_legitimize_reload_address (rtx * x,
1872 enum machine_mode mode,
1873 int opnum,
1874 int type, int ind_levels ATTRIBUTE_UNUSED)
1876 #if DEBUG0
1877 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1878 mode_name[mode]);
1879 debug_rtx (*x);
1880 #endif
1882 /* At one point, this function tried to get $fb copied to an address
1883 register, which in theory would maximize sharing, but gcc was
1884 *also* still trying to reload the whole address, and we'd run out
1885 of address registers. So we let gcc do the naive (but safe)
1886 reload instead, when the above function doesn't handle it for
1889 The code below is a second attempt at the above. */
1891 if (GET_CODE (*x) == PLUS
1892 && GET_CODE (XEXP (*x, 0)) == REG
1893 && REGNO (XEXP (*x, 0)) == FB_REGNO
1894 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1895 && (INTVAL (XEXP (*x, 1)) < -128
1896 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1898 rtx sum;
1899 int offset = INTVAL (XEXP (*x, 1));
1900 int adjustment = -BIG_FB_ADJ;
1902 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1903 GEN_INT (adjustment));
1904 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1905 if (type == RELOAD_OTHER)
1906 type = RELOAD_FOR_OTHER_ADDRESS;
1907 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1908 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1909 (enum reload_type) type);
1910 return 1;
1913 if (GET_CODE (*x) == PLUS
1914 && GET_CODE (XEXP (*x, 0)) == PLUS
1915 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1916 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1917 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1918 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1921 if (type == RELOAD_OTHER)
1922 type = RELOAD_FOR_OTHER_ADDRESS;
1923 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1924 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1925 (enum reload_type) type);
1926 return 1;
1929 return 0;
1932 /* Return the appropriate mode for a named address pointer. */
1933 #undef TARGET_ADDR_SPACE_POINTER_MODE
1934 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1935 static enum machine_mode
1936 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1938 switch (addrspace)
1940 case ADDR_SPACE_GENERIC:
1941 return TARGET_A24 ? PSImode : HImode;
1942 case ADDR_SPACE_FAR:
1943 return SImode;
1944 default:
1945 gcc_unreachable ();
1949 /* Return the appropriate mode for a named address address. */
1950 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1951 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1952 static enum machine_mode
1953 m32c_addr_space_address_mode (addr_space_t addrspace)
1955 switch (addrspace)
1957 case ADDR_SPACE_GENERIC:
1958 return TARGET_A24 ? PSImode : HImode;
1959 case ADDR_SPACE_FAR:
1960 return SImode;
1961 default:
1962 gcc_unreachable ();
1966 /* Like m32c_legitimate_address_p, except with named addresses. */
1967 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1968 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1969 m32c_addr_space_legitimate_address_p
1970 static bool
1971 m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
1972 bool strict, addr_space_t as)
1974 if (as == ADDR_SPACE_FAR)
1976 if (TARGET_A24)
1977 return 0;
1978 encode_pattern (x);
1979 if (RTX_IS ("r"))
1981 if (GET_MODE (x) != SImode)
1982 return 0;
1983 switch (REGNO (patternr[0]))
1985 case A0_REGNO:
1986 return 1;
1988 default:
1989 if (IS_PSEUDO (patternr[0], strict))
1990 return 1;
1991 return 0;
1994 if (RTX_IS ("+^Sri"))
1996 int rn = REGNO (patternr[3]);
1997 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1998 if (GET_MODE (patternr[3]) != HImode)
1999 return 0;
2000 switch (rn)
2002 case A0_REGNO:
2003 return (offs >= 0 && offs <= 0xfffff);
2005 default:
2006 if (IS_PSEUDO (patternr[3], strict))
2007 return 1;
2008 return 0;
2011 if (RTX_IS ("+^Srs"))
2013 int rn = REGNO (patternr[3]);
2014 if (GET_MODE (patternr[3]) != HImode)
2015 return 0;
2016 switch (rn)
2018 case A0_REGNO:
2019 return 1;
2021 default:
2022 if (IS_PSEUDO (patternr[3], strict))
2023 return 1;
2024 return 0;
2027 if (RTX_IS ("+^S+ris"))
2029 int rn = REGNO (patternr[4]);
2030 if (GET_MODE (patternr[4]) != HImode)
2031 return 0;
2032 switch (rn)
2034 case A0_REGNO:
2035 return 1;
2037 default:
2038 if (IS_PSEUDO (patternr[4], strict))
2039 return 1;
2040 return 0;
2043 if (RTX_IS ("s"))
2045 return 1;
2047 return 0;
2050 else if (as != ADDR_SPACE_GENERIC)
2051 gcc_unreachable ();
2053 return m32c_legitimate_address_p (mode, x, strict);
2056 /* Like m32c_legitimate_address, except with named address support. */
2057 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2058 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2059 static rtx
2060 m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2061 addr_space_t as)
2063 if (as != ADDR_SPACE_GENERIC)
2065 #if DEBUG0
2066 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2067 debug_rtx (x);
2068 fprintf (stderr, "\n");
2069 #endif
2071 if (GET_CODE (x) != REG)
2073 x = force_reg (SImode, x);
2075 return x;
2078 return m32c_legitimize_address (x, oldx, mode);
2081 /* Determine if one named address space is a subset of another. */
2082 #undef TARGET_ADDR_SPACE_SUBSET_P
2083 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2084 static bool
2085 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2087 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2088 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2090 if (subset == superset)
2091 return true;
2093 else
2094 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2097 #undef TARGET_ADDR_SPACE_CONVERT
2098 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2099 /* Convert from one address space to another. */
2100 static rtx
2101 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2103 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2104 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2105 rtx result;
2107 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2108 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2110 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2112 /* This is unpredictable, as we're truncating off usable address
2113 bits. */
2115 result = gen_reg_rtx (HImode);
2116 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2117 return result;
2119 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2121 /* This always works. */
2122 result = gen_reg_rtx (SImode);
2123 emit_insn (gen_zero_extendhisi2 (result, op));
2124 return result;
2126 else
2127 gcc_unreachable ();
2130 /* Condition Code Status */
2132 #undef TARGET_FIXED_CONDITION_CODE_REGS
2133 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2134 static bool
2135 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2137 *p1 = FLG_REGNO;
2138 *p2 = INVALID_REGNUM;
2139 return true;
2142 /* Describing Relative Costs of Operations */
2144 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2145 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2146 no opcodes to do that). We also discourage use of mem* registers
2147 since they're really memory. */
2149 #undef TARGET_REGISTER_MOVE_COST
2150 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2152 static int
2153 m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2154 reg_class_t to)
2156 int cost = COSTS_N_INSNS (3);
2157 HARD_REG_SET cc;
2159 /* FIXME: pick real values, but not 2 for now. */
2160 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2161 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2163 if (mode == QImode
2164 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2166 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2167 cost = COSTS_N_INSNS (1000);
2168 else
2169 cost = COSTS_N_INSNS (80);
2172 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2173 cost = COSTS_N_INSNS (1000);
2175 if (reg_classes_intersect_p (from, CR_REGS))
2176 cost += COSTS_N_INSNS (5);
2178 if (reg_classes_intersect_p (to, CR_REGS))
2179 cost += COSTS_N_INSNS (5);
2181 if (from == MEM_REGS || to == MEM_REGS)
2182 cost += COSTS_N_INSNS (50);
2183 else if (reg_classes_intersect_p (from, MEM_REGS)
2184 || reg_classes_intersect_p (to, MEM_REGS))
2185 cost += COSTS_N_INSNS (10);
2187 #if DEBUG0
2188 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2189 mode_name[mode], class_names[(int) from], class_names[(int) to],
2190 cost);
2191 #endif
2192 return cost;
2195 /* Implements TARGET_MEMORY_MOVE_COST. */
2197 #undef TARGET_MEMORY_MOVE_COST
2198 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2200 static int
2201 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2202 reg_class_t rclass ATTRIBUTE_UNUSED,
2203 bool in ATTRIBUTE_UNUSED)
2205 /* FIXME: pick real values. */
2206 return COSTS_N_INSNS (10);
2209 /* Here we try to describe when we use multiple opcodes for one RTX so
2210 that gcc knows when to use them. */
2211 #undef TARGET_RTX_COSTS
2212 #define TARGET_RTX_COSTS m32c_rtx_costs
2213 static bool
2214 m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2215 int *total, bool speed ATTRIBUTE_UNUSED)
2217 switch (code)
2219 case REG:
2220 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2221 *total += COSTS_N_INSNS (500);
2222 else
2223 *total += COSTS_N_INSNS (1);
2224 return true;
2226 case ASHIFT:
2227 case LSHIFTRT:
2228 case ASHIFTRT:
2229 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2231 /* mov.b r1l, r1h */
2232 *total += COSTS_N_INSNS (1);
2233 return true;
2235 if (INTVAL (XEXP (x, 1)) > 8
2236 || INTVAL (XEXP (x, 1)) < -8)
2238 /* mov.b #N, r1l */
2239 /* mov.b r1l, r1h */
2240 *total += COSTS_N_INSNS (2);
2241 return true;
2243 return true;
2245 case LE:
2246 case LEU:
2247 case LT:
2248 case LTU:
2249 case GT:
2250 case GTU:
2251 case GE:
2252 case GEU:
2253 case NE:
2254 case EQ:
2255 if (outer_code == SET)
2257 *total += COSTS_N_INSNS (2);
2258 return true;
2260 break;
2262 case ZERO_EXTRACT:
2264 rtx dest = XEXP (x, 0);
2265 rtx addr = XEXP (dest, 0);
2266 switch (GET_CODE (addr))
2268 case CONST_INT:
2269 *total += COSTS_N_INSNS (1);
2270 break;
2271 case SYMBOL_REF:
2272 *total += COSTS_N_INSNS (3);
2273 break;
2274 default:
2275 *total += COSTS_N_INSNS (2);
2276 break;
2278 return true;
2280 break;
2282 default:
2283 /* Reasonable default. */
2284 if (TARGET_A16 && GET_MODE(x) == SImode)
2285 *total += COSTS_N_INSNS (2);
2286 break;
2288 return false;
2291 #undef TARGET_ADDRESS_COST
2292 #define TARGET_ADDRESS_COST m32c_address_cost
2293 static int
2294 m32c_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2295 addr_space_t as ATTRIBUTE_UNUSED,
2296 bool speed ATTRIBUTE_UNUSED)
2298 int i;
2299 /* fprintf(stderr, "\naddress_cost\n");
2300 debug_rtx(addr);*/
2301 switch (GET_CODE (addr))
2303 case CONST_INT:
2304 i = INTVAL (addr);
2305 if (i == 0)
2306 return COSTS_N_INSNS(1);
2307 if (0 < i && i <= 255)
2308 return COSTS_N_INSNS(2);
2309 if (0 < i && i <= 65535)
2310 return COSTS_N_INSNS(3);
2311 return COSTS_N_INSNS(4);
2312 case SYMBOL_REF:
2313 return COSTS_N_INSNS(4);
2314 case REG:
2315 return COSTS_N_INSNS(1);
2316 case PLUS:
2317 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2319 i = INTVAL (XEXP (addr, 1));
2320 if (i == 0)
2321 return COSTS_N_INSNS(1);
2322 if (0 < i && i <= 255)
2323 return COSTS_N_INSNS(2);
2324 if (0 < i && i <= 65535)
2325 return COSTS_N_INSNS(3);
2327 return COSTS_N_INSNS(4);
2328 default:
2329 return 0;
2333 /* Defining the Output Assembler Language */
2335 /* Output of Data */
2337 /* We may have 24 bit sizes, which is the native address size.
2338 Currently unused, but provided for completeness. */
2339 #undef TARGET_ASM_INTEGER
2340 #define TARGET_ASM_INTEGER m32c_asm_integer
2341 static bool
2342 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2344 switch (size)
2346 case 3:
2347 fprintf (asm_out_file, "\t.3byte\t");
2348 output_addr_const (asm_out_file, x);
2349 fputc ('\n', asm_out_file);
2350 return true;
2351 case 4:
2352 if (GET_CODE (x) == SYMBOL_REF)
2354 fprintf (asm_out_file, "\t.long\t");
2355 output_addr_const (asm_out_file, x);
2356 fputc ('\n', asm_out_file);
2357 return true;
2359 break;
2361 return default_assemble_integer (x, size, aligned_p);
2364 /* Output of Assembler Instructions */
2366 /* We use a lookup table because the addressing modes are non-orthogonal. */
/* Operand-printing lookup table used by m32c_print_operand.  CODE is
   the operand-letter this entry applies to (0 for "no letter");
   PATTERN is the encoded shape of the operand as produced by
   encode_pattern (e.g. 'm' = MEM, 'r' = REG, 'i' = CONST_INT,
   's' = symbol, '+' = PLUS, '^Z'/'^S' = address-space wrappers —
   presumably; confirm against encode_pattern, which is defined
   elsewhere in this file); FORMAT is the output template, where a
   digit N means "print patternr[N]", 'z' inserts a zero displacement
   when required, '+' blends signs, and '\' quotes the next
   character.  The table is searched linearly, so order matters.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
/* This is in order according to the bitfield that pushm/popm use.
   Bit N of the pushm/popm operand byte selects pushm_regs[N]; see the
   'p' case in m32c_print_operand.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
/* Implements TARGET_PRINT_OPERAND.  Print operand X to FILE, modified
   by the operand-letter CODE.  Letters handled directly here:
   'u'/'U' force unsigned word/byte constants (for multiplies),
   '!' forces an "unreviewed pattern" ICE (debug aid),
   '&' emits the PSImode size suffix (w for A16, l otherwise),
   'C'/'c' emit inverted/normal condition-code names,
   'h'/'H' select the low/high half of a wider operand.
   All remaining letters ('b', 'B', 'x', 'X', 'm', 'p', 'd', 'D', 'l')
   are dispatched through the conversions[] table after the operand's
   shape has been encoded by encode_pattern.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Encode X's shape into the file-global `pattern'/`patternr', then
     find a matching template in conversions[].  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Bit-insn operand: print the bit number of
			   the (possibly inverted) single set bit.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      /* Any other template character is emitted literally.  */
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  if (!conversions[i].pattern)
    {
      /* No template matched; complain loudly and emit a recognizable
	 marker into the assembly so the failure is visible.  */
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* '&' selects the PSImode size suffix; '!' is the debug trap.  */
  return c == '&' || c == '!';
}
/* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.
   Strips one MEM wrapper if present, otherwise requires a bare REG,
   then defers to m32c_print_operand with no operand letter.  */

#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address

static void
m32c_print_operand_address (FILE * stream, rtx address)
{
  if (GET_CODE (address) == MEM)
    address = XEXP (address, 0);
  else
    /* cf: gcc.dg/asm-4.c. */
    gcc_assert (GET_CODE (address) == REG);

  m32c_print_operand (stream, address, 0);
}
2817 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2818 differently than general registers. */
2819 void
2820 m32c_output_reg_push (FILE * s, int regno)
2822 if (regno == FLG_REGNO)
2823 fprintf (s, "\tpushc\tflg\n");
2824 else
2825 fprintf (s, "\tpush.%c\t%s\n",
2826 " bwll"[reg_push_size (regno)], reg_names[regno]);
2829 /* Likewise for ASM_OUTPUT_REG_POP. */
2830 void
2831 m32c_output_reg_pop (FILE * s, int regno)
2833 if (regno == FLG_REGNO)
2834 fprintf (s, "\tpopc\tflg\n");
2835 else
2836 fprintf (s, "\tpop.%c\t%s\n",
2837 " bwll"[reg_push_size (regno)], reg_names[regno]);
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  The whole expansion is parenthesized so that the
   conditional expression cannot be torn apart by a higher-precedence
   operator at a use site (e.g. `M32C_ATTRIBUTES (d) == x' would
   otherwise bind `== x' to the last TYPE_ATTRIBUTES only).  */
#define M32C_ATTRIBUTES(decl) \
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		   : DECL_ATTRIBUTES (decl) \
		     ? (DECL_ATTRIBUTES (decl)) \
		     : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2850 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2851 static int
2852 interrupt_p (tree node ATTRIBUTE_UNUSED)
2854 tree list = M32C_ATTRIBUTES (node);
2855 while (list)
2857 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2858 return 1;
2859 list = TREE_CHAIN (list);
2861 return fast_interrupt_p (node);
2864 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2865 static int
2866 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2868 tree list = M32C_ATTRIBUTES (node);
2869 while (list)
2871 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2872 return 1;
2873 list = TREE_CHAIN (list);
2875 return 0;
2878 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2879 static int
2880 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2882 tree list = M32C_ATTRIBUTES (node);
2883 while (list)
2885 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2886 return 1;
2887 list = TREE_CHAIN (list);
2889 return 0;
/* Attribute handler shared by the "interrupt", "bank_switch" and
   "fast_interrupt" attributes (see m32c_attribute_table below).
   Accepts the attribute unconditionally; no argument validation is
   needed since these attributes take no arguments.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2902 /* Returns TRUE if given tree has the "function_vector" attribute. */
2904 m32c_special_page_vector_p (tree func)
2906 tree list;
2908 if (TREE_CODE (func) != FUNCTION_DECL)
2909 return 0;
2911 list = M32C_ATTRIBUTES (func);
2912 while (list)
2914 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2915 return 1;
2916 list = TREE_CHAIN (list);
2918 return 0;
/* Attribute handler for "function_vector".  Validates that the
   attribute is used on a function declaration, on a non-R8C target,
   with a single integer-constant argument in the range 18..255 (the
   valid special-page vector slots).  On any violation a warning is
   issued and the attribute is dropped via *NO_ADD_ATTRS.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
               "%qE attribute is not supported for R8C target",
               name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
               "%qE attribute applies only to functions",
               name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
               "%qE attribute argument not an integer constant",
               name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
           || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
               "%qE attribute argument should be between 18 to 255",
               name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
/* If the function is assigned the attribute 'function_vector', it
   returns the function vector number, otherwise returns zero.
   X is expected to be a SYMBOL_REF naming the function; the
   SYMBOL_FLAG_FUNCVEC_FUNCTION flag is used as a quick filter before
   walking the decl's attribute list.  */
int
current_function_special_page_vector (rtx x)
{
  int num;

  if ((GET_CODE(x) == SYMBOL_REF)
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      tree list;
      tree t = SYMBOL_REF_DECL (x);

      if (TREE_CODE (t) != FUNCTION_DECL)
	return 0;

      list = M32C_ATTRIBUTES (t);
      while (list)
	{
	  if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
	    {
	      /* The vector number is the attribute's single argument.  */
	      num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
	      return num;
	    }

	  list = TREE_CHAIN (list);
	}

      return 0;
    }
  else
    return 0;
}
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Table of machine attributes.  Fields: name, min args, max args,
   decl_required, type_required, function_type_required, handler,
   affects_type_identity.  "function_vector" takes exactly one
   argument and requires a decl; the interrupt-style attributes take
   none and share the no-op interrupt_handler.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute
   combinations are treated as compatible on this target.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
/* Implements TARGET_INSERT_ATTRIBUTES.  Variables placed at a fixed
   address via `#pragma address' (recorded by m32c_note_pragma_address)
   are marked volatile so accesses to them are never optimized away.  */
static void
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
			tree * attr_ptr ATTRIBUTE_UNUSED)
{
  unsigned addr;
  /* See if we need to make #pragma address variables volatile.  */

  if (TREE_CODE (node) == VAR_DECL)
    {
      const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
      if (m32c_get_pragma_address (name, &addr))
	{
	  TREE_THIS_VOLATILE (node) = true;
	}
    }
}
/* One `#pragma address' record: VARNAME is the GC-strdup'ed variable
   name and ADDRESS the fixed location it was assigned.  */
struct GTY(()) pragma_entry {
  const char *varname;
  unsigned address;
};
typedef struct pragma_entry pragma_entry;

/* Hash table of pragma info.  Keyed by variable name; see
   pragma_entry_hash / pragma_entry_eq below.  */
static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3048 static int
3049 pragma_entry_eq (const void *p1, const void *p2)
3051 const pragma_entry *old = (const pragma_entry *) p1;
3052 const char *new_name = (const char *) p2;
3054 return strcmp (old->varname, new_name) == 0;
3057 static hashval_t
3058 pragma_entry_hash (const void *p)
3060 const pragma_entry *old = (const pragma_entry *) p;
3061 return htab_hash_string (old->varname);
/* Record that `#pragma address' placed variable VARNAME at ADDRESS.
   Creates the hash table lazily; re-noting an existing name simply
   updates its address.  Called from the pragma machinery.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  pragma_entry **slot;

  if (!pragma_htab)
    pragma_htab = htab_create_ggc (31, pragma_entry_hash,
				   pragma_entry_eq, NULL);

  slot = (pragma_entry **)
    htab_find_slot_with_hash (pragma_htab, varname,
			      htab_hash_string (varname), INSERT);

  if (!*slot)
    {
      /* First time we see this name: allocate a GC'd entry and copy
	 the name into GC memory so it outlives the caller's buffer.  */
      *slot = ggc_alloc<pragma_entry> ();
      (*slot)->varname = ggc_strdup (varname);
    }
  (*slot)->address = address;
}
/* Look up VARNAME among the `#pragma address' records.  On success
   stores the recorded address through ADDRESS and returns true;
   returns false if no pragma was noted for this name.  */
static bool
m32c_get_pragma_address (const char *varname, unsigned *address)
{
  pragma_entry **slot;

  if (!pragma_htab)
    return false;

  slot = (pragma_entry **)
    htab_find_slot_with_hash (pragma_htab, varname,
			      htab_hash_string (varname), NO_INSERT);
  if (slot && *slot)
    {
      *address = (*slot)->address;
      return true;
    }
  return false;
}
/* Output a common (or local) symbol NAME of SIZE bytes with alignment
   ALIGN (in bits).  Variables fixed by `#pragma address' are emitted
   as a plain symbol assignment instead of .comm, and are never made
   global.  GLOBAL selects between common and local-common output.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
/* Predicates */

/* This is a list of legal subregs of hard regs.  Each entry gives the
   outer and inner mode sizes in bytes, a BYTE_MASK whose bit N is set
   when a subreg at byte offset N is representable, a LEGAL_WHEN code
   (1 = always, 16 = only TARGET_A24, 24 = only TARGET_A16 — decoded
   in m32c_illegal_subreg_p below), and the hard register.  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  A subreg is
   accepted only when a matching entry in legal_subregs[] says it is
   representable on the current (A16/A24) target.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  enum machine_mode src_mode, dest_mode;

  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are just mode changes; always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Only the "real" hard registers below MEM0 are restricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset into a single bit for the table's
     byte_mask test.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry allows this subreg.  */
  return true;
}
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* The A24-family chips have enough address registers; no
     restriction needed there.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs to the underlying operand.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.
   Currently always FALSE; see the note below.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  return false;
}
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Return the piece of X (which has mode INNER) that
   would have mode OUTER at byte offset BYTE, handling the m32c's
   irregular hard-register layout explicitly.  MEMs and pseudos fall
   back to the generic simplify_gen_subreg.  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the frame pointer, and the memory-mapped registers all
     take the generic path.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  if (nr == -1)
    {
      /* Any combination not listed above is unsupported; fail loudly
	 rather than generate wrong code.  */
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  Returns nonzero if the move was emitted
   here (via m32c_split_move); zero means the caller's pattern should
   proceed with the (possibly rewritten) OPERANDS.  */
int
m32c_prepare_move (rtx * operands, enum machine_mode mode)
{
  /* Constants cannot be stored directly into far memory; go through
     a register.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* A16 parts cannot move PSImode in one insn; split now.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      /* Expand a PRE_MODIFY destination into an explicit address
	 update followed by a plain register-indirect store.  */
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves while we can still create pseudos.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop, which must also be split.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero when the
   move was split into PARTS submode moves (which are either emitted
   directly, or stored back into OPERANDS for the define_split API).  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Every supported mode here splits into exactly two submode
     pieces.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the piecewise moves so no destination clobbers a
	 still-needed source.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
   the like.  For the R8C they expect one of the addresses to be in
   R1L:An so we need to arrange for that.  Otherwise, it's just a
   matter of picking out the operands we want and emitting the right
   pattern for them.  All these expanders, which correspond to
   patterns in blkmov.md, must return nonzero if they expand the insn,
   or zero if they should FAIL.  */

/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Scratch outputs clobbered by the opcode.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into a word and store word-at-a-time,
	 halving the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3620 /* This is a memcpy() opcode. All operands are implied, so we need to
3621 arrange for them to be in the right registers. The opcode wants
3622 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3623 is the source (MEM:BLK), and $2 the count (HI). */
3625 m32c_expand_movmemhi(rtx *operands)
3627 rtx desta, srca, count;
3628 rtx desto, srco, counto;
3630 desta = XEXP (operands[0], 0);
3631 srca = XEXP (operands[1], 0);
3632 count = operands[2];
3634 desto = gen_reg_rtx (Pmode);
3635 srco = gen_reg_rtx (Pmode);
3636 counto = gen_reg_rtx (HImode);
3638 if (GET_CODE (desta) != REG
3639 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3640 desta = copy_to_mode_reg (Pmode, desta);
3642 if (GET_CODE (srca) != REG
3643 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3644 srca = copy_to_mode_reg (Pmode, srca);
3646 /* Similar to setmem, but we don't need to check the value. */
3647 if (GET_CODE (count) == CONST_INT
3648 && ! (INTVAL (count) & 1)
3649 && (INTVAL (count) > 1))
3651 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3652 if (TARGET_A16)
3653 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3654 else
3655 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3656 return 1;
3659 /* This is the generalized memset() case. */
3660 if (GET_CODE (count) != REG
3661 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3662 count = copy_to_mode_reg (HImode, count);
3664 if (TARGET_A16)
3665 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3666 else
3667 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3669 return 1;
3672 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3673 the copy, which should point to the NUL at the end of the string,
3674 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3675 Since our opcode leaves the destination pointing *after* the NUL,
3676 we must emit an adjustment. */
3678 m32c_expand_movstr(rtx *operands)
3680 rtx desta, srca;
3681 rtx desto, srco;
3683 desta = XEXP (operands[1], 0);
3684 srca = XEXP (operands[2], 0);
3686 desto = gen_reg_rtx (Pmode);
3687 srco = gen_reg_rtx (Pmode);
3689 if (GET_CODE (desta) != REG
3690 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3691 desta = copy_to_mode_reg (Pmode, desta);
3693 if (GET_CODE (srca) != REG
3694 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3695 srca = copy_to_mode_reg (Pmode, srca);
3697 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3698 /* desto ends up being a1, which allows this type of add through MOVA. */
3699 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3701 return 1;
3704 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3705 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3706 $2 is the other (MEM:BLK). We must do the comparison, and then
3707 convert the flags to a signed integer result. */
3709 m32c_expand_cmpstr(rtx *operands)
3711 rtx src1a, src2a;
3713 src1a = XEXP (operands[1], 0);
3714 src2a = XEXP (operands[2], 0);
3716 if (GET_CODE (src1a) != REG
3717 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3718 src1a = copy_to_mode_reg (Pmode, src1a);
3720 if (GET_CODE (src2a) != REG
3721 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3722 src2a = copy_to_mode_reg (Pmode, src2a);
3724 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3725 emit_insn (gen_cond_to_int (operands[0]));
3727 return 1;
3731 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3733 static shift_gen_func
3734 shift_gen_func_for (int mode, int code)
3736 #define GFF(m,c,f) if (mode == m && code == c) return f
3737 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3738 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3739 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3740 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3741 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3742 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3743 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3744 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3745 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3746 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3747 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3748 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3749 #undef GFF
3750 gcc_unreachable ();
3753 /* The m32c only has one shift, but it takes a signed count. GCC
3754 doesn't want this, so we fake it by negating any shift count when
3755 we're pretending to shift the other way. Also, the shift count is
3756 limited to -8..8. It's slightly better to use two shifts for 9..15
3757 than to load the count into r1h, so we do that too. */
3759 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3761 enum machine_mode mode = GET_MODE (operands[0]);
3762 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3763 rtx temp;
3765 if (GET_CODE (operands[2]) == CONST_INT)
3767 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3768 int count = INTVAL (operands[2]) * scale;
3770 while (count > maxc)
3772 temp = gen_reg_rtx (mode);
3773 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3774 operands[1] = temp;
3775 count -= maxc;
3777 while (count < -maxc)
3779 temp = gen_reg_rtx (mode);
3780 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3781 operands[1] = temp;
3782 count += maxc;
3784 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3785 return 1;
3788 temp = gen_reg_rtx (QImode);
3789 if (scale < 0)
3790 /* The pattern has a NEG that corresponds to this. */
3791 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3792 else if (TARGET_A16 && mode == SImode)
3793 /* We do this because the code below may modify this, we don't
3794 want to modify the origin of this value. */
3795 emit_move_insn (temp, operands[2]);
3796 else
3797 /* We'll only use it for the shift, no point emitting a move. */
3798 temp = operands[2];
3800 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3802 /* The m16c has a limit of -16..16 for SI shifts, even when the
3803 shift count is in a register. Since there are so many targets
3804 of these shifts, it's better to expand the RTL here than to
3805 call a helper function.
3807 The resulting code looks something like this:
3809 cmp.b r1h,-16
3810 jge.b 1f
3811 shl.l -16,dest
3812 add.b r1h,16
3813 1f: cmp.b r1h,16
3814 jle.b 1f
3815 shl.l 16,dest
3816 sub.b r1h,16
3817 1f: shl.l r1h,dest
3819 We take advantage of the fact that "negative" shifts are
3820 undefined to skip one of the comparisons. */
3822 rtx count;
3823 rtx label, insn, tempvar;
3825 emit_move_insn (operands[0], operands[1]);
3827 count = temp;
3828 label = gen_label_rtx ();
3829 LABEL_NUSES (label) ++;
3831 tempvar = gen_reg_rtx (mode);
3833 if (shift_code == ASHIFT)
3835 /* This is a left shift. We only need check positive counts. */
3836 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3837 count, GEN_INT (16), label));
3838 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3839 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3840 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3841 emit_label_after (label, insn);
3843 else
3845 /* This is a right shift. We only need check negative counts. */
3846 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3847 count, GEN_INT (-16), label));
3848 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3849 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3850 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3851 emit_label_after (label, insn);
3853 operands[1] = operands[0];
3854 emit_insn (func (operands[0], operands[0], count));
3855 return 1;
3858 operands[2] = temp;
3859 return 0;
3862 /* The m32c has a limited range of operations that work on PSImode
3863 values; we have to expand to SI, do the math, and truncate back to
3864 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3865 those cases. */
3866 void
3867 m32c_expand_neg_mulpsi3 (rtx * operands)
3869 /* operands: a = b * i */
3870 rtx temp1; /* b as SI */
3871 rtx scale /* i as SI */;
3872 rtx temp2; /* a*b as SI */
3874 temp1 = gen_reg_rtx (SImode);
3875 temp2 = gen_reg_rtx (SImode);
3876 if (GET_CODE (operands[2]) != CONST_INT)
3878 scale = gen_reg_rtx (SImode);
3879 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3881 else
3882 scale = copy_to_mode_reg (SImode, operands[2]);
3884 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3885 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3886 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3889 /* Pattern Output Functions */
3892 m32c_expand_movcc (rtx *operands)
3894 rtx rel = operands[1];
3896 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3897 return 1;
3898 if (GET_CODE (operands[2]) != CONST_INT
3899 || GET_CODE (operands[3]) != CONST_INT)
3900 return 1;
3901 if (GET_CODE (rel) == NE)
3903 rtx tmp = operands[2];
3904 operands[2] = operands[3];
3905 operands[3] = tmp;
3906 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3909 emit_move_insn (operands[0],
3910 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3911 rel,
3912 operands[2],
3913 operands[3]));
3914 return 0;
3917 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3919 m32c_expand_insv (rtx *operands)
3921 rtx op0, src0, p;
3922 int mask;
3924 if (INTVAL (operands[1]) != 1)
3925 return 1;
3927 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3928 if (GET_CODE (operands[3]) != CONST_INT)
3929 return 1;
3930 if (INTVAL (operands[3]) != 0
3931 && INTVAL (operands[3]) != 1
3932 && INTVAL (operands[3]) != -1)
3933 return 1;
3935 mask = 1 << INTVAL (operands[2]);
3937 op0 = operands[0];
3938 if (GET_CODE (op0) == SUBREG
3939 && SUBREG_BYTE (op0) == 0)
3941 rtx sub = SUBREG_REG (op0);
3942 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3943 op0 = sub;
3946 if (!can_create_pseudo_p ()
3947 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3948 src0 = op0;
3949 else
3951 src0 = gen_reg_rtx (GET_MODE (op0));
3952 emit_move_insn (src0, op0);
3955 if (GET_MODE (op0) == HImode
3956 && INTVAL (operands[2]) >= 8
3957 && GET_CODE (op0) == MEM)
3959 /* We are little endian. */
3960 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3961 XEXP (op0, 0), 1));
3962 MEM_COPY_ATTRIBUTES (new_mem, op0);
3963 mask >>= 8;
3966 /* First, we generate a mask with the correct polarity. If we are
3967 storing a zero, we want an AND mask, so invert it. */
3968 if (INTVAL (operands[3]) == 0)
3970 /* Storing a zero, use an AND mask */
3971 if (GET_MODE (op0) == HImode)
3972 mask ^= 0xffff;
3973 else
3974 mask ^= 0xff;
3976 /* Now we need to properly sign-extend the mask in case we need to
3977 fall back to an AND or OR opcode. */
3978 if (GET_MODE (op0) == HImode)
3980 if (mask & 0x8000)
3981 mask -= 0x10000;
3983 else
3985 if (mask & 0x80)
3986 mask -= 0x100;
3989 switch ( (INTVAL (operands[3]) ? 4 : 0)
3990 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3991 + (TARGET_A24 ? 1 : 0))
3993 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3994 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3995 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3996 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3997 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3998 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3999 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
4000 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
4001 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
4004 emit_insn (p);
4005 return 0;
4008 const char *
4009 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4011 static char buf[30];
4012 if (GET_CODE (operands[0]) == REG
4013 && REGNO (operands[0]) == R0_REGNO)
4015 if (code == EQ)
4016 return "stzx\t#1,#0,r0l";
4017 if (code == NE)
4018 return "stzx\t#0,#1,r0l";
4020 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4021 return buf;
4024 /* Encode symbol attributes of a SYMBOL_REF into its
4025 SYMBOL_REF_FLAGS. */
4026 static void
4027 m32c_encode_section_info (tree decl, rtx rtl, int first)
4029 int extra_flags = 0;
4031 default_encode_section_info (decl, rtl, first);
4032 if (TREE_CODE (decl) == FUNCTION_DECL
4033 && m32c_special_page_vector_p (decl))
4035 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4037 if (extra_flags)
4038 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4041 /* Returns TRUE if the current function is a leaf, and thus we can
4042 determine which registers an interrupt function really needs to
4043 save. The logic below is mostly about finding the insn sequence
4044 that's the function, versus any sequence that might be open for the
4045 current insn. */
4046 static int
4047 m32c_leaf_function_p (void)
4049 rtx saved_first, saved_last;
4050 struct sequence_stack *seq;
4051 int rv;
4053 saved_first = crtl->emit.x_first_insn;
4054 saved_last = crtl->emit.x_last_insn;
4055 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
4057 if (seq)
4059 crtl->emit.x_first_insn = seq->first;
4060 crtl->emit.x_last_insn = seq->last;
4063 rv = leaf_function_p ();
4065 crtl->emit.x_first_insn = saved_first;
4066 crtl->emit.x_last_insn = saved_last;
4067 return rv;
4070 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4071 opcodes. If the function doesn't need the frame base or stack
4072 pointer, it can use the simpler RTS opcode. */
4073 static bool
4074 m32c_function_needs_enter (void)
4076 rtx insn;
4077 struct sequence_stack *seq;
4078 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4079 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4081 insn = get_insns ();
4082 for (seq = crtl->emit.sequence_stack;
4083 seq;
4084 insn = seq->first, seq = seq->next);
4086 while (insn)
4088 if (reg_mentioned_p (sp, insn))
4089 return true;
4090 if (reg_mentioned_p (fb, insn))
4091 return true;
4092 insn = NEXT_INSN (insn);
4094 return false;
4097 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4098 frame-related. Return PAR.
4100 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4101 PARALLEL rtx other than the first if they do not have the
4102 FRAME_RELATED flag set on them. So this function is handy for
4103 marking up 'enter' instructions. */
4104 static rtx
4105 m32c_all_frame_related (rtx par)
4107 int len = XVECLEN (par, 0);
4108 int i;
4110 for (i = 0; i < len; i++)
4111 F (XVECEXP (par, 0, i));
4113 return par;
4116 /* Emits the prologue. See the frame layout comment earlier in this
4117 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4118 that we manually update sp. */
4119 void
4120 m32c_emit_prologue (void)
4122 int frame_size, extra_frame_size = 0, reg_save_size;
4123 int complex_prologue = 0;
4125 cfun->machine->is_leaf = m32c_leaf_function_p ();
4126 if (interrupt_p (cfun->decl))
4128 cfun->machine->is_interrupt = 1;
4129 complex_prologue = 1;
4131 else if (bank_switch_p (cfun->decl))
4132 warning (OPT_Wattributes,
4133 "%<bank_switch%> has no effect on non-interrupt functions");
4135 reg_save_size = m32c_pushm_popm (PP_justcount);
4137 if (interrupt_p (cfun->decl))
4139 if (bank_switch_p (cfun->decl))
4140 emit_insn (gen_fset_b ());
4141 else if (cfun->machine->intr_pushm)
4142 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4145 frame_size =
4146 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4147 if (frame_size == 0
4148 && !m32c_function_needs_enter ())
4149 cfun->machine->use_rts = 1;
4151 if (frame_size > 254)
4153 extra_frame_size = frame_size - 254;
4154 frame_size = 254;
4156 if (cfun->machine->use_rts == 0)
4157 F (emit_insn (m32c_all_frame_related
4158 (TARGET_A16
4159 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4160 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4162 if (extra_frame_size)
4164 complex_prologue = 1;
4165 if (TARGET_A16)
4166 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4167 gen_rtx_REG (HImode, SP_REGNO),
4168 GEN_INT (-extra_frame_size))));
4169 else
4170 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4171 gen_rtx_REG (PSImode, SP_REGNO),
4172 GEN_INT (-extra_frame_size))));
4175 complex_prologue += m32c_pushm_popm (PP_pushm);
4177 /* This just emits a comment into the .s file for debugging. */
4178 if (complex_prologue)
4179 emit_insn (gen_prologue_end ());
4182 /* Likewise, for the epilogue. The only exception is that, for
4183 interrupts, we must manually unwind the frame as the REIT opcode
4184 doesn't do that. */
4185 void
4186 m32c_emit_epilogue (void)
4188 int popm_count = m32c_pushm_popm (PP_justcount);
4190 /* This just emits a comment into the .s file for debugging. */
4191 if (popm_count > 0 || cfun->machine->is_interrupt)
4192 emit_insn (gen_epilogue_start ());
4194 if (popm_count > 0)
4195 m32c_pushm_popm (PP_popm);
4197 if (cfun->machine->is_interrupt)
4199 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4201 /* REIT clears B flag and restores $fp for us, but we still
4202 have to fix up the stack. USE_RTS just means we didn't
4203 emit ENTER. */
4204 if (!cfun->machine->use_rts)
4206 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4207 gen_rtx_REG (spmode, FP_REGNO));
4208 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4209 gen_rtx_REG (spmode, A0_REGNO));
4210 /* We can't just add this to the POPM because it would be in
4211 the wrong order, and wouldn't fix the stack if we're bank
4212 switching. */
4213 if (TARGET_A16)
4214 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4215 else
4216 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4218 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4219 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4221 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4222 generated only for M32C/M32CM targets (generate the REIT
4223 instruction otherwise). */
4224 if (fast_interrupt_p (cfun->decl))
4226 /* Check if fast_attribute is set for M32C or M32CM. */
4227 if (TARGET_A24)
4229 emit_jump_insn (gen_epilogue_freit ());
4231 /* If fast_interrupt attribute is set for an R8C or M16C
4232 target ignore this attribute and generated REIT
4233 instruction. */
4234 else
4236 warning (OPT_Wattributes,
4237 "%<fast_interrupt%> attribute directive ignored");
4238 emit_jump_insn (gen_epilogue_reit_16 ());
4241 else if (TARGET_A16)
4242 emit_jump_insn (gen_epilogue_reit_16 ());
4243 else
4244 emit_jump_insn (gen_epilogue_reit_24 ());
4246 else if (cfun->machine->use_rts)
4247 emit_jump_insn (gen_epilogue_rts ());
4248 else if (TARGET_A16)
4249 emit_jump_insn (gen_epilogue_exitd_16 ());
4250 else
4251 emit_jump_insn (gen_epilogue_exitd_24 ());
4254 void
4255 m32c_emit_eh_epilogue (rtx ret_addr)
4257 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4258 return to. We have to fudge the stack, pop everything, pop SP
4259 (fudged), and return (fudged). This is actually easier to do in
4260 assembler, so punt to libgcc. */
4261 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4262 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4265 /* Indicate which flags must be properly set for a given conditional. */
4266 static int
4267 flags_needed_for_conditional (rtx cond)
4269 switch (GET_CODE (cond))
4271 case LE:
4272 case GT:
4273 return FLAGS_OSZ;
4274 case LEU:
4275 case GTU:
4276 return FLAGS_ZC;
4277 case LT:
4278 case GE:
4279 return FLAGS_OS;
4280 case LTU:
4281 case GEU:
4282 return FLAGS_C;
4283 case EQ:
4284 case NE:
4285 return FLAGS_Z;
4286 default:
4287 return FLAGS_N;
4291 #define DEBUG_CMP 0
4293 /* Returns true if a compare insn is redundant because it would only
4294 set flags that are already set correctly. */
4295 static bool
4296 m32c_compare_redundant (rtx cmp, rtx *operands)
4298 int flags_needed;
4299 int pflags;
4300 rtx prev, pp, next;
4301 rtx op0, op1;
4302 #if DEBUG_CMP
4303 int prev_icode, i;
4304 #endif
4306 op0 = operands[0];
4307 op1 = operands[1];
4309 #if DEBUG_CMP
4310 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4311 debug_rtx(cmp);
4312 for (i=0; i<2; i++)
4314 fprintf(stderr, "operands[%d] = ", i);
4315 debug_rtx(operands[i]);
4317 #endif
4319 next = next_nonnote_insn (cmp);
4320 if (!next || !INSN_P (next))
4322 #if DEBUG_CMP
4323 fprintf(stderr, "compare not followed by insn\n");
4324 debug_rtx(next);
4325 #endif
4326 return false;
4328 if (GET_CODE (PATTERN (next)) == SET
4329 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4331 next = XEXP (XEXP (PATTERN (next), 1), 0);
4333 else if (GET_CODE (PATTERN (next)) == SET)
4335 /* If this is a conditional, flags_needed will be something
4336 other than FLAGS_N, which we test below. */
4337 next = XEXP (PATTERN (next), 1);
4339 else
4341 #if DEBUG_CMP
4342 fprintf(stderr, "compare not followed by conditional\n");
4343 debug_rtx(next);
4344 #endif
4345 return false;
4347 #if DEBUG_CMP
4348 fprintf(stderr, "conditional is: ");
4349 debug_rtx(next);
4350 #endif
4352 flags_needed = flags_needed_for_conditional (next);
4353 if (flags_needed == FLAGS_N)
4355 #if DEBUG_CMP
4356 fprintf(stderr, "compare not followed by conditional\n");
4357 debug_rtx(next);
4358 #endif
4359 return false;
4362 /* Compare doesn't set overflow and carry the same way that
4363 arithmetic instructions do, so we can't replace those. */
4364 if (flags_needed & FLAGS_OC)
4365 return false;
4367 prev = cmp;
4368 do {
4369 prev = prev_nonnote_insn (prev);
4370 if (!prev)
4372 #if DEBUG_CMP
4373 fprintf(stderr, "No previous insn.\n");
4374 #endif
4375 return false;
4377 if (!INSN_P (prev))
4379 #if DEBUG_CMP
4380 fprintf(stderr, "Previous insn is a non-insn.\n");
4381 #endif
4382 return false;
4384 pp = PATTERN (prev);
4385 if (GET_CODE (pp) != SET)
4387 #if DEBUG_CMP
4388 fprintf(stderr, "Previous insn is not a SET.\n");
4389 #endif
4390 return false;
4392 pflags = get_attr_flags (prev);
4394 /* Looking up attributes of previous insns corrupted the recog
4395 tables. */
4396 INSN_UID (cmp) = -1;
4397 recog (PATTERN (cmp), cmp, 0);
4399 if (pflags == FLAGS_N
4400 && reg_mentioned_p (op0, pp))
4402 #if DEBUG_CMP
4403 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4404 debug_rtx(prev);
4405 #endif
4406 return false;
4409 /* Check for comparisons against memory - between volatiles and
4410 aliases, we just can't risk this one. */
4411 if (GET_CODE (operands[0]) == MEM
4412 || GET_CODE (operands[0]) == MEM)
4414 #if DEBUG_CMP
4415 fprintf(stderr, "comparisons with memory:\n");
4416 debug_rtx(prev);
4417 #endif
4418 return false;
4421 /* Check for PREV changing a register that's used to compute a
4422 value in CMP, even if it doesn't otherwise change flags. */
4423 if (GET_CODE (operands[0]) == REG
4424 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4426 #if DEBUG_CMP
4427 fprintf(stderr, "sub-value affected, op0:\n");
4428 debug_rtx(prev);
4429 #endif
4430 return false;
4432 if (GET_CODE (operands[1]) == REG
4433 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4435 #if DEBUG_CMP
4436 fprintf(stderr, "sub-value affected, op1:\n");
4437 debug_rtx(prev);
4438 #endif
4439 return false;
4442 } while (pflags == FLAGS_N);
4443 #if DEBUG_CMP
4444 fprintf(stderr, "previous flag-setting insn:\n");
4445 debug_rtx(prev);
4446 debug_rtx(pp);
4447 #endif
4449 if (GET_CODE (pp) == SET
4450 && GET_CODE (XEXP (pp, 0)) == REG
4451 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4452 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4454 /* Adjacent cbranches must have the same operands to be
4455 redundant. */
4456 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4457 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4458 #if DEBUG_CMP
4459 fprintf(stderr, "adjacent cbranches\n");
4460 debug_rtx(pop0);
4461 debug_rtx(pop1);
4462 #endif
4463 if (rtx_equal_p (op0, pop0)
4464 && rtx_equal_p (op1, pop1))
4465 return true;
4466 #if DEBUG_CMP
4467 fprintf(stderr, "prev cmp not same\n");
4468 #endif
4469 return false;
4472 /* Else the previous insn must be a SET, with either the source or
4473 dest equal to operands[0], and operands[1] must be zero. */
4475 if (!rtx_equal_p (op1, const0_rtx))
4477 #if DEBUG_CMP
4478 fprintf(stderr, "operands[1] not const0_rtx\n");
4479 #endif
4480 return false;
4482 if (GET_CODE (pp) != SET)
4484 #if DEBUG_CMP
4485 fprintf (stderr, "pp not set\n");
4486 #endif
4487 return false;
4489 if (!rtx_equal_p (op0, SET_SRC (pp))
4490 && !rtx_equal_p (op0, SET_DEST (pp)))
4492 #if DEBUG_CMP
4493 fprintf(stderr, "operands[0] not found in set\n");
4494 #endif
4495 return false;
4498 #if DEBUG_CMP
4499 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4500 #endif
4501 if ((pflags & flags_needed) == flags_needed)
4502 return true;
4504 return false;
4507 /* Return the pattern for a compare. This will be commented out if
4508 the compare is redundant, else a normal pattern is returned. Thus,
4509 the assembler output says where the compare would have been. */
4510 char *
4511 m32c_output_compare (rtx insn, rtx *operands)
4513 static char templ[] = ";cmp.b\t%1,%0";
4514 /* ^ 5 */
4516 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4517 if (m32c_compare_redundant (insn, operands))
4519 #if DEBUG_CMP
4520 fprintf(stderr, "cbranch: cmp not needed\n");
4521 #endif
4522 return templ;
4525 #if DEBUG_CMP
4526 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4527 #endif
4528 return templ + 1;
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable.  Collects every TARGET_* hook
   override defined throughout this file via TARGET_INITIALIZER.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"