gcc/ada/
[official-gcc.git] / gcc / config / m32c / m32c.c
blob132873f6550176d2f2b77f729395f38e89daa3b7
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "tree.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "except.h"
45 #include "hashtab.h"
46 #include "hash-set.h"
47 #include "vec.h"
48 #include "machmode.h"
49 #include "input.h"
50 #include "function.h"
51 #include "ggc.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "tm_p.h"
55 #include "langhooks.h"
56 #include "hash-table.h"
57 #include "predict.h"
58 #include "dominance.h"
59 #include "cfg.h"
60 #include "cfgrtl.h"
61 #include "cfganal.h"
62 #include "lcm.h"
63 #include "cfgbuild.h"
64 #include "cfgcleanup.h"
65 #include "basic-block.h"
66 #include "tree-ssa-alias.h"
67 #include "internal-fn.h"
68 #include "gimple-fold.h"
69 #include "tree-eh.h"
70 #include "gimple-expr.h"
71 #include "is-a.h"
72 #include "gimple.h"
73 #include "df.h"
74 #include "tm-constrs.h"
75 #include "builtins.h"
77 /* Prototypes */
79 /* Used by m32c_pushm_popm. */
80 typedef enum
82 PP_pushm,
83 PP_popm,
84 PP_justcount
85 } Push_Pop_Type;
87 static bool m32c_function_needs_enter (void);
88 static tree interrupt_handler (tree *, tree, tree, int, bool *);
89 static tree function_vector_handler (tree *, tree, tree, int, bool *);
90 static int interrupt_p (tree node);
91 static int bank_switch_p (tree node);
92 static int fast_interrupt_p (tree node);
93 static int interrupt_p (tree node);
94 static bool m32c_asm_integer (rtx, unsigned int, int);
95 static int m32c_comp_type_attributes (const_tree, const_tree);
96 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
97 static struct machine_function *m32c_init_machine_status (void);
98 static void m32c_insert_attributes (tree, tree *);
99 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
100 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
101 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
102 const_tree, bool);
103 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
104 const_tree, bool);
105 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
106 const_tree, bool);
107 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
108 static int m32c_pushm_popm (Push_Pop_Type);
109 static bool m32c_strict_argument_naming (cumulative_args_t);
110 static rtx m32c_struct_value_rtx (tree, int);
111 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
112 static int need_to_save (int);
113 static rtx m32c_function_value (const_tree, const_tree, bool);
114 static rtx m32c_libcall_value (machine_mode, const_rtx);
116 /* Returns true if an address is specified, else false. */
117 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
119 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
121 #define streq(a,b) (strcmp ((a), (b)) == 0)
123 /* Internal support routines */
125 /* Debugging statements are tagged with DEBUG0 only so that they can
126 be easily enabled individually, by replacing the '0' with '1' as
127 needed. */
128 #define DEBUG0 0
129 #define DEBUG1 1
131 #if DEBUG0
132 /* This is needed by some of the commented-out debug statements
133 below. */
134 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
135 #endif
136 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
138 /* These are all to support encode_pattern(). */
139 static char pattern[30], *patternp;
140 static GTY(()) rtx patternr[30];
141 #define RTX_IS(x) (streq (pattern, x))
143 /* Some macros to simplify the logic throughout this file. */
144 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
145 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
147 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
148 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
150 static int
151 far_addr_space_p (rtx x)
153 if (GET_CODE (x) != MEM)
154 return 0;
155 #if DEBUG0
156 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
157 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
158 #endif
159 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
162 /* We do most RTX matching by converting the RTX into a string, and
163 using string compares. This vastly simplifies the logic in many of
164 the functions in this file.
166 On exit, pattern[] has the encoded string (use RTX_IS("...") to
167 compare it) and patternr[] has pointers to the nodes in the RTX
168 corresponding to each character in the encoded string. The latter
169 is mostly used by print_operand().
171 Unrecognized patterns have '?' in them; this shows up when the
172 assembler complains about syntax errors.
175 static void
176 encode_pattern_1 (rtx x)
178 int i;
180 if (patternp == pattern + sizeof (pattern) - 2)
182 patternp[-1] = '?';
183 return;
186 patternr[patternp - pattern] = x;
188 switch (GET_CODE (x))
190 case REG:
191 *patternp++ = 'r';
192 break;
193 case SUBREG:
194 if (GET_MODE_SIZE (GET_MODE (x)) !=
195 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
196 *patternp++ = 'S';
197 encode_pattern_1 (XEXP (x, 0));
198 break;
199 case MEM:
200 *patternp++ = 'm';
201 case CONST:
202 encode_pattern_1 (XEXP (x, 0));
203 break;
204 case SIGN_EXTEND:
205 *patternp++ = '^';
206 *patternp++ = 'S';
207 encode_pattern_1 (XEXP (x, 0));
208 break;
209 case ZERO_EXTEND:
210 *patternp++ = '^';
211 *patternp++ = 'Z';
212 encode_pattern_1 (XEXP (x, 0));
213 break;
214 case PLUS:
215 *patternp++ = '+';
216 encode_pattern_1 (XEXP (x, 0));
217 encode_pattern_1 (XEXP (x, 1));
218 break;
219 case PRE_DEC:
220 *patternp++ = '>';
221 encode_pattern_1 (XEXP (x, 0));
222 break;
223 case POST_INC:
224 *patternp++ = '<';
225 encode_pattern_1 (XEXP (x, 0));
226 break;
227 case LO_SUM:
228 *patternp++ = 'L';
229 encode_pattern_1 (XEXP (x, 0));
230 encode_pattern_1 (XEXP (x, 1));
231 break;
232 case HIGH:
233 *patternp++ = 'H';
234 encode_pattern_1 (XEXP (x, 0));
235 break;
236 case SYMBOL_REF:
237 *patternp++ = 's';
238 break;
239 case LABEL_REF:
240 *patternp++ = 'l';
241 break;
242 case CODE_LABEL:
243 *patternp++ = 'c';
244 break;
245 case CONST_INT:
246 case CONST_DOUBLE:
247 *patternp++ = 'i';
248 break;
249 case UNSPEC:
250 *patternp++ = 'u';
251 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
252 for (i = 0; i < XVECLEN (x, 0); i++)
253 encode_pattern_1 (XVECEXP (x, 0, i));
254 break;
255 case USE:
256 *patternp++ = 'U';
257 break;
258 case PARALLEL:
259 *patternp++ = '|';
260 for (i = 0; i < XVECLEN (x, 0); i++)
261 encode_pattern_1 (XVECEXP (x, 0, i));
262 break;
263 case EXPR_LIST:
264 *patternp++ = 'E';
265 encode_pattern_1 (XEXP (x, 0));
266 if (XEXP (x, 1))
267 encode_pattern_1 (XEXP (x, 1));
268 break;
269 default:
270 *patternp++ = '?';
271 #if DEBUG0
272 fprintf (stderr, "can't encode pattern %s\n",
273 GET_RTX_NAME (GET_CODE (x)));
274 debug_rtx (x);
275 gcc_unreachable ();
276 #endif
277 break;
281 static void
282 encode_pattern (rtx x)
284 patternp = pattern;
285 encode_pattern_1 (x);
286 *patternp = 0;
289 /* Since register names indicate the mode they're used in, we need a
290 way to determine which name to refer to the register with. Called
291 by print_operand(). */
293 static const char *
294 reg_name_with_mode (int regno, machine_mode mode)
296 int mlen = GET_MODE_SIZE (mode);
297 if (regno == R0_REGNO && mlen == 1)
298 return "r0l";
299 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
300 return "r2r0";
301 if (regno == R0_REGNO && mlen == 6)
302 return "r2r1r0";
303 if (regno == R0_REGNO && mlen == 8)
304 return "r3r1r2r0";
305 if (regno == R1_REGNO && mlen == 1)
306 return "r1l";
307 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
308 return "r3r1";
309 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
310 return "a1a0";
311 return reg_names[regno];
314 /* How many bytes a register uses on stack when it's pushed. We need
315 to know this because the push opcode needs to explicitly indicate
316 the size of the register, even though the name of the register
317 already tells it that. Used by m32c_output_reg_{push,pop}, which
318 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
320 static int
321 reg_push_size (int regno)
323 switch (regno)
325 case R0_REGNO:
326 case R1_REGNO:
327 return 2;
328 case R2_REGNO:
329 case R3_REGNO:
330 case FLG_REGNO:
331 return 2;
332 case A0_REGNO:
333 case A1_REGNO:
334 case SB_REGNO:
335 case FB_REGNO:
336 case SP_REGNO:
337 if (TARGET_A16)
338 return 2;
339 else
340 return 3;
341 default:
342 gcc_unreachable ();
346 /* Given two register classes, find the largest intersection between
347 them. If there is no intersection, return RETURNED_IF_EMPTY
348 instead. */
349 static reg_class_t
350 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
351 reg_class_t returned_if_empty)
353 HARD_REG_SET cc;
354 int i;
355 reg_class_t best = NO_REGS;
356 unsigned int best_size = 0;
358 if (original_class == limiting_class)
359 return original_class;
361 cc = reg_class_contents[original_class];
362 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
364 for (i = 0; i < LIM_REG_CLASSES; i++)
366 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
367 if (best_size < reg_class_size[i])
369 best = (reg_class_t) i;
370 best_size = reg_class_size[i];
374 if (best == NO_REGS)
375 return returned_if_empty;
376 return best;
379 /* Used by m32c_register_move_cost to determine if a move is
380 impossibly expensive. */
381 static bool
382 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
384 /* Cache the results: 0=untested 1=no 2=yes */
385 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
387 if (results[(int) rclass][mode] == 0)
389 int r;
390 results[rclass][mode] = 1;
391 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
392 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
393 && HARD_REGNO_MODE_OK (r, mode))
395 results[rclass][mode] = 2;
396 break;
400 #if DEBUG0
401 fprintf (stderr, "class %s can hold %s? %s\n",
402 class_names[(int) rclass], mode_name[mode],
403 (results[rclass][mode] == 2) ? "yes" : "no");
404 #endif
405 return results[(int) rclass][mode] == 2;
408 /* Run-time Target Specification. */
410 /* Memregs are memory locations that gcc treats like general
411 registers, as there are a limited number of true registers and the
412 m32c families can use memory in most places that registers can be
413 used.
415 However, since memory accesses are more expensive than registers,
416 we allow the user to limit the number of memregs available, in
417 order to try to persuade gcc to try harder to use real registers.
419 Memregs are provided by lib1funcs.S.
422 int ok_to_change_target_memregs = TRUE;
424 /* Implements TARGET_OPTION_OVERRIDE. */
426 #undef TARGET_OPTION_OVERRIDE
427 #define TARGET_OPTION_OVERRIDE m32c_option_override
429 static void
430 m32c_option_override (void)
432 /* We limit memregs to 0..16, and provide a default. */
433 if (global_options_set.x_target_memregs)
435 if (target_memregs < 0 || target_memregs > 16)
436 error ("invalid target memregs value '%d'", target_memregs);
438 else
439 target_memregs = 16;
441 if (TARGET_A24)
442 flag_ivopts = 0;
444 /* This target defaults to strict volatile bitfields. */
445 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
446 flag_strict_volatile_bitfields = 1;
448 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
449 This is always worse than an absolute call. */
450 if (TARGET_A16)
451 flag_no_function_cse = 1;
453 /* This wants to put insns between compares and their jumps. */
454 /* FIXME: The right solution is to properly trace the flags register
455 values, but that is too much work for stage 4. */
456 flag_combine_stack_adjustments = 0;
459 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
460 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
462 static void
463 m32c_override_options_after_change (void)
465 if (TARGET_A16)
466 flag_no_function_cse = 1;
469 /* Defining data structures for per-function information */
471 /* The usual; we set up our machine_function data. */
472 static struct machine_function *
473 m32c_init_machine_status (void)
475 return ggc_cleared_alloc<machine_function> ();
478 /* Implements INIT_EXPANDERS. We just set up to call the above
479 function. */
480 void
481 m32c_init_expanders (void)
483 init_machine_status = m32c_init_machine_status;
486 /* Storage Layout */
488 /* Register Basics */
490 /* Basic Characteristics of Registers */
492 /* Whether a mode fits in a register is complex enough to warrant a
493 table. */
494 static struct
496 char qi_regs;
497 char hi_regs;
498 char pi_regs;
499 char si_regs;
500 char di_regs;
501 } nregs_table[FIRST_PSEUDO_REGISTER] =
503 { 1, 1, 2, 2, 4 }, /* r0 */
504 { 0, 1, 0, 0, 0 }, /* r2 */
505 { 1, 1, 2, 2, 0 }, /* r1 */
506 { 0, 1, 0, 0, 0 }, /* r3 */
507 { 0, 1, 1, 0, 0 }, /* a0 */
508 { 0, 1, 1, 0, 0 }, /* a1 */
509 { 0, 1, 1, 0, 0 }, /* sb */
510 { 0, 1, 1, 0, 0 }, /* fb */
511 { 0, 1, 1, 0, 0 }, /* sp */
512 { 1, 1, 1, 0, 0 }, /* pc */
513 { 0, 0, 0, 0, 0 }, /* fl */
514 { 1, 1, 1, 0, 0 }, /* ap */
515 { 1, 1, 2, 2, 4 }, /* mem0 */
516 { 1, 1, 2, 2, 4 }, /* mem1 */
517 { 1, 1, 2, 2, 4 }, /* mem2 */
518 { 1, 1, 2, 2, 4 }, /* mem3 */
519 { 1, 1, 2, 2, 4 }, /* mem4 */
520 { 1, 1, 2, 2, 0 }, /* mem5 */
521 { 1, 1, 2, 2, 0 }, /* mem6 */
522 { 1, 1, 0, 0, 0 }, /* mem7 */
525 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
526 of available memregs, and select which registers need to be preserved
527 across calls based on the chip family. */
529 #undef TARGET_CONDITIONAL_REGISTER_USAGE
530 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
531 void
532 m32c_conditional_register_usage (void)
534 int i;
536 if (0 <= target_memregs && target_memregs <= 16)
538 /* The command line option is bytes, but our "registers" are
539 16-bit words. */
540 for (i = (target_memregs+1)/2; i < 8; i++)
542 fixed_regs[MEM0_REGNO + i] = 1;
543 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
547 /* M32CM and M32C preserve more registers across function calls. */
548 if (TARGET_A24)
550 call_used_regs[R1_REGNO] = 0;
551 call_used_regs[R2_REGNO] = 0;
552 call_used_regs[R3_REGNO] = 0;
553 call_used_regs[A0_REGNO] = 0;
554 call_used_regs[A1_REGNO] = 0;
558 /* How Values Fit in Registers */
560 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
561 different registers are different sizes from each other, *and* may
562 be different sizes in different chip families. */
563 static int
564 m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
566 if (regno == FLG_REGNO && mode == CCmode)
567 return 1;
568 if (regno >= FIRST_PSEUDO_REGISTER)
569 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
571 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
572 return (GET_MODE_SIZE (mode) + 1) / 2;
574 if (GET_MODE_SIZE (mode) <= 1)
575 return nregs_table[regno].qi_regs;
576 if (GET_MODE_SIZE (mode) <= 2)
577 return nregs_table[regno].hi_regs;
578 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
579 return 2;
580 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
581 return nregs_table[regno].pi_regs;
582 if (GET_MODE_SIZE (mode) <= 4)
583 return nregs_table[regno].si_regs;
584 if (GET_MODE_SIZE (mode) <= 8)
585 return nregs_table[regno].di_regs;
586 return 0;
590 m32c_hard_regno_nregs (int regno, machine_mode mode)
592 int rv = m32c_hard_regno_nregs_1 (regno, mode);
593 return rv ? rv : 1;
596 /* Implements HARD_REGNO_MODE_OK. The above function does the work
597 already; just test its return value. */
599 m32c_hard_regno_ok (int regno, machine_mode mode)
601 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
604 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
605 registers are all different sizes. However, since most modes are
606 bigger than our registers anyway, it's easier to implement this
607 function that way, leaving QImode as the only unique case. */
609 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
611 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
612 return 1;
614 #if 0
615 if (m1 == QImode || m2 == QImode)
616 return 0;
617 #endif
619 return 1;
622 /* Register Classes */
624 /* Implements REGNO_REG_CLASS. */
625 enum reg_class
626 m32c_regno_reg_class (int regno)
628 switch (regno)
630 case R0_REGNO:
631 return R0_REGS;
632 case R1_REGNO:
633 return R1_REGS;
634 case R2_REGNO:
635 return R2_REGS;
636 case R3_REGNO:
637 return R3_REGS;
638 case A0_REGNO:
639 return A0_REGS;
640 case A1_REGNO:
641 return A1_REGS;
642 case SB_REGNO:
643 return SB_REGS;
644 case FB_REGNO:
645 return FB_REGS;
646 case SP_REGNO:
647 return SP_REGS;
648 case FLG_REGNO:
649 return FLG_REGS;
650 default:
651 if (IS_MEM_REGNO (regno))
652 return MEM_REGS;
653 return ALL_REGS;
657 /* Implements REGNO_OK_FOR_BASE_P. */
659 m32c_regno_ok_for_base_p (int regno)
661 if (regno == A0_REGNO
662 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
663 return 1;
664 return 0;
667 #define DEBUG_RELOAD 0
669 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
670 registers of the appropriate size. */
672 #undef TARGET_PREFERRED_RELOAD_CLASS
673 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
675 static reg_class_t
676 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
678 reg_class_t newclass = rclass;
680 #if DEBUG_RELOAD
681 fprintf (stderr, "\npreferred_reload_class for %s is ",
682 class_names[rclass]);
683 #endif
684 if (rclass == NO_REGS)
685 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
687 if (reg_classes_intersect_p (rclass, CR_REGS))
689 switch (GET_MODE (x))
691 case QImode:
692 newclass = HL_REGS;
693 break;
694 default:
695 /* newclass = HI_REGS; */
696 break;
700 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
701 newclass = SI_REGS;
702 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
703 && ! reg_class_subset_p (R03_REGS, rclass))
704 newclass = DI_REGS;
706 rclass = reduce_class (rclass, newclass, rclass);
708 if (GET_MODE (x) == QImode)
709 rclass = reduce_class (rclass, HL_REGS, rclass);
711 #if DEBUG_RELOAD
712 fprintf (stderr, "%s\n", class_names[rclass]);
713 debug_rtx (x);
715 if (GET_CODE (x) == MEM
716 && GET_CODE (XEXP (x, 0)) == PLUS
717 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
718 fprintf (stderr, "Glorm!\n");
719 #endif
720 return rclass;
723 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
725 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
726 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
728 static reg_class_t
729 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
731 return m32c_preferred_reload_class (x, rclass);
734 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
735 address registers for reloads since they're needed for address
736 reloads. */
738 m32c_limit_reload_class (machine_mode mode, int rclass)
740 #if DEBUG_RELOAD
741 fprintf (stderr, "limit_reload_class for %s: %s ->",
742 mode_name[mode], class_names[rclass]);
743 #endif
745 if (mode == QImode)
746 rclass = reduce_class (rclass, HL_REGS, rclass);
747 else if (mode == HImode)
748 rclass = reduce_class (rclass, HI_REGS, rclass);
749 else if (mode == SImode)
750 rclass = reduce_class (rclass, SI_REGS, rclass);
752 if (rclass != A_REGS)
753 rclass = reduce_class (rclass, DI_REGS, rclass);
755 #if DEBUG_RELOAD
756 fprintf (stderr, " %s\n", class_names[rclass]);
757 #endif
758 return rclass;
761 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
762 r0 or r1, as those are the only real QImode registers. CR regs get
763 reloaded through appropriately sized general or address
764 registers. */
766 m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
768 int cc = class_contents[rclass][0];
769 #if DEBUG0
770 fprintf (stderr, "\nsecondary reload class %s %s\n",
771 class_names[rclass], mode_name[mode]);
772 debug_rtx (x);
773 #endif
774 if (mode == QImode
775 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
776 return QI_REGS;
777 if (reg_classes_intersect_p (rclass, CR_REGS)
778 && GET_CODE (x) == REG
779 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
780 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
781 return NO_REGS;
784 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
785 reloads. */
787 #undef TARGET_CLASS_LIKELY_SPILLED_P
788 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
790 static bool
791 m32c_class_likely_spilled_p (reg_class_t regclass)
793 if (regclass == A_REGS)
794 return true;
796 return (reg_class_size[(int) regclass] == 1);
799 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
800 documented meaning, to avoid potential inconsistencies with actual
801 class definitions. */
803 #undef TARGET_CLASS_MAX_NREGS
804 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
806 static unsigned char
807 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
809 int rn;
810 unsigned char max = 0;
812 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
813 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
815 unsigned char n = m32c_hard_regno_nregs (rn, mode);
816 if (max < n)
817 max = n;
819 return max;
822 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
823 QI (r0l, r1l) because the chip doesn't support QI ops on other
824 registers (well, it does on a0/a1 but if we let gcc do that, reload
825 suffers). Otherwise, we allow changes to larger modes. */
827 m32c_cannot_change_mode_class (machine_mode from,
828 machine_mode to, int rclass)
830 int rn;
831 #if DEBUG0
832 fprintf (stderr, "cannot change from %s to %s in %s\n",
833 mode_name[from], mode_name[to], class_names[rclass]);
834 #endif
836 /* If the larger mode isn't allowed in any of these registers, we
837 can't allow the change. */
838 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
839 if (class_contents[rclass][0] & (1 << rn))
840 if (! m32c_hard_regno_ok (rn, to))
841 return 1;
843 if (to == QImode)
844 return (class_contents[rclass][0] & 0x1ffa);
846 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
847 && GET_MODE_SIZE (from) > 1)
848 return 0;
849 if (GET_MODE_SIZE (from) > 2) /* all other regs */
850 return 0;
852 return 1;
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is $a0 or a pseudo register.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
868 /* Implements matching for constraints (see next function too). 'S' is
869 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
870 call return values. */
871 bool
872 m32c_matches_constraint_p (rtx value, int constraint)
874 encode_pattern (value);
876 switch (constraint) {
877 case CONSTRAINT_SF:
878 return (far_addr_space_p (value)
879 && ((RTX_IS ("mr")
880 && A0_OR_PSEUDO (patternr[1])
881 && GET_MODE (patternr[1]) == SImode)
882 || (RTX_IS ("m+^Sri")
883 && A0_OR_PSEUDO (patternr[4])
884 && GET_MODE (patternr[4]) == HImode)
885 || (RTX_IS ("m+^Srs")
886 && A0_OR_PSEUDO (patternr[4])
887 && GET_MODE (patternr[4]) == HImode)
888 || (RTX_IS ("m+^S+ris")
889 && A0_OR_PSEUDO (patternr[5])
890 && GET_MODE (patternr[5]) == HImode)
891 || RTX_IS ("ms")));
892 case CONSTRAINT_Sd:
894 /* This is the common "src/dest" address */
895 rtx r;
896 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
897 return true;
898 if (RTX_IS ("ms") || RTX_IS ("m+si"))
899 return true;
900 if (RTX_IS ("m++rii"))
902 if (REGNO (patternr[3]) == FB_REGNO
903 && INTVAL (patternr[4]) == 0)
904 return true;
906 if (RTX_IS ("mr"))
907 r = patternr[1];
908 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
909 r = patternr[2];
910 else
911 return false;
912 if (REGNO (r) == SP_REGNO)
913 return false;
914 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
916 case CONSTRAINT_Sa:
918 rtx r;
919 if (RTX_IS ("mr"))
920 r = patternr[1];
921 else if (RTX_IS ("m+ri"))
922 r = patternr[2];
923 else
924 return false;
925 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
927 case CONSTRAINT_Si:
928 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
929 case CONSTRAINT_Ss:
930 return ((RTX_IS ("mr")
931 && (IS_REG (patternr[1], SP_REGNO)))
932 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
933 case CONSTRAINT_Sf:
934 return ((RTX_IS ("mr")
935 && (IS_REG (patternr[1], FB_REGNO)))
936 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
937 case CONSTRAINT_Sb:
938 return ((RTX_IS ("mr")
939 && (IS_REG (patternr[1], SB_REGNO)))
940 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
941 case CONSTRAINT_Sp:
942 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
943 return (RTX_IS ("mi")
944 && !(INTVAL (patternr[1]) & ~0x1fff));
945 case CONSTRAINT_S1:
946 return r1h_operand (value, QImode);
947 case CONSTRAINT_Rpa:
948 return GET_CODE (value) == PARALLEL;
949 default:
950 return false;
954 /* STACK AND CALLING */
956 /* Frame Layout */
958 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
959 (yes, THREE bytes) onto the stack for the return address, but we
960 don't support pointers bigger than 16 bits on those chips. This
961 will likely wreak havoc with exception unwinding. FIXME. */
963 m32c_return_addr_rtx (int count)
965 machine_mode mode;
966 int offset;
967 rtx ra_mem;
969 if (count)
970 return NULL_RTX;
971 /* we want 2[$fb] */
973 if (TARGET_A24)
975 /* It's four bytes */
976 mode = PSImode;
977 offset = 4;
979 else
981 /* FIXME: it's really 3 bytes */
982 mode = HImode;
983 offset = 2;
986 ra_mem =
987 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
988 offset));
989 return copy_to_mode_reg (mode, ra_mem);
992 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
994 m32c_incoming_return_addr_rtx (void)
996 /* we want [sp] */
997 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1000 /* Exception Handling Support */
1002 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1003 pointers. */
1005 m32c_eh_return_data_regno (int n)
1007 switch (n)
1009 case 0:
1010 return A0_REGNO;
1011 case 1:
1012 if (TARGET_A16)
1013 return R3_REGNO;
1014 else
1015 return R1_REGNO;
1016 default:
1017 return INVALID_REGNUM;
1021 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1022 m32c_emit_eh_epilogue. */
1024 m32c_eh_return_stackadj_rtx (void)
1026 if (!cfun->machine->eh_stack_adjust)
1028 rtx sa;
1030 sa = gen_rtx_REG (Pmode, R0_REGNO);
1031 cfun->machine->eh_stack_adjust = sa;
1033 return cfun->machine->eh_stack_adjust;
1036 /* Registers That Address the Stack Frame */
1038 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1039 the original spec called for dwarf numbers to vary with register
1040 width as well, for example, r0l, r0, and r2r0 would each have
1041 different dwarf numbers. GCC doesn't support this, and we don't do
1042 it, and gdb seems to like it this way anyway. */
1043 unsigned int
1044 m32c_dwarf_frame_regnum (int n)
1046 switch (n)
1048 case R0_REGNO:
1049 return 5;
1050 case R1_REGNO:
1051 return 6;
1052 case R2_REGNO:
1053 return 7;
1054 case R3_REGNO:
1055 return 8;
1056 case A0_REGNO:
1057 return 9;
1058 case A1_REGNO:
1059 return 10;
1060 case FB_REGNO:
1061 return 11;
1062 case SB_REGNO:
1063 return 19;
1065 case SP_REGNO:
1066 return 12;
1067 case PC_REGNO:
1068 return 13;
1069 default:
1070 return DWARF_FRAME_REGISTERS + 1;
1074 /* The frame looks like this:
1076 ap -> +------------------------------
1077 | Return address (3 or 4 bytes)
1078 | Saved FB (2 or 4 bytes)
1079 fb -> +------------------------------
1080 | local vars
1081 | register saves fb
1082 | through r0 as needed
1083 sp -> +------------------------------
1086 /* We use this to wrap all emitted insns in the prologue. */
1087 static rtx
1088 F (rtx x)
1090 RTX_FRAME_RELATED_P (x) = 1;
1091 return x;
1094 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1095 how much the stack pointer moves for each, for each cpu family. */
1096 static struct
1098 int reg1;
1099 int bit;
1100 int a16_bytes;
1101 int a24_bytes;
1102 } pushm_info[] =
1104 /* These are in reverse push (nearest-to-sp) order. */
1105 { R0_REGNO, 0x80, 2, 2 },
1106 { R1_REGNO, 0x40, 2, 2 },
1107 { R2_REGNO, 0x20, 2, 2 },
1108 { R3_REGNO, 0x10, 2, 2 },
1109 { A0_REGNO, 0x08, 2, 4 },
1110 { A1_REGNO, 0x04, 2, 4 },
1111 { SB_REGNO, 0x02, 2, 4 },
1112 { FB_REGNO, 0x01, 2, 4 }
1115 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1117 /* Returns TRUE if we need to save/restore the given register. We
1118 save everything for exception handlers, so that any register can be
1119 unwound. For interrupt handlers, we save everything if the handler
1120 calls something else (because we don't know what *that* function
1121 might do), but try to be a bit smarter if the handler is a leaf
1122 function. We always save $a0, though, because we use that in the
1123 epilogue to copy $fb to $sp. */
1124 static int
1125 need_to_save (int regno)
1127 if (fixed_regs[regno])
1128 return 0;
1129 if (crtl->calls_eh_return)
1130 return 1;
1131 if (regno == FP_REGNO)
1132 return 0;
1133 if (cfun->machine->is_interrupt
1134 && (!cfun->machine->is_leaf
1135 || (regno == A0_REGNO
1136 && m32c_function_needs_enter ())
1138 return 1;
1139 if (df_regs_ever_live_p (regno)
1140 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1141 return 1;
1142 return 0;
1145 /* This function contains all the intelligence about saving and
1146    restoring registers.  It always figures out the register save set.
1147    When called with PP_justcount, it merely returns the size of the
1148    save set (for eliminating the frame pointer, for example).  When
1149    called with PP_pushm or PP_popm, it emits the appropriate
1150    instructions for saving (pushm) or restoring (popm) the
1151    registers.  */
1152 static int
1153 m32c_pushm_popm (Push_Pop_Type ppt)
1155   int reg_mask = 0;
1156   int byte_count = 0, bytes;
1157   int i;
1158   rtx dwarf_set[PUSHM_N];
1159   int n_dwarfs = 0;
1160   int nosave_mask = 0;
     /* If the return value is built in registers (PARALLEL return rtx),
	exclude those registers from the pop set so the epilogue's popm
	does not clobber them.  EH and interrupt returns restore
	everything and are exempt.  */
1162   if (crtl->return_rtx
1163       && GET_CODE (crtl->return_rtx) == PARALLEL
1164       && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1166       rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1167       rtx rv = XEXP (exp, 0);
1168       int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1170       if (rv_bytes > 2)
1171 	nosave_mask |= 0x20;	/* PSI, SI */
1172       else
1173 	nosave_mask |= 0xf0;	/* DF */
1174       if (rv_bytes > 4)
1175 	nosave_mask |= 0x50;	/* DI */
     /* Walk the pushm register table (nearest-to-sp first), building
	the pushm/popm bit mask, the total byte count, and — for pushes
	— one DWARF CFI SET per saved register.  */
1178   for (i = 0; i < (int) PUSHM_N; i++)
1180       /* Skip if neither register needs saving. */
1181       if (!need_to_save (pushm_info[i].reg1))
1182 	continue;
1184       if (pushm_info[i].bit & nosave_mask)
1185 	continue;
1187       reg_mask |= pushm_info[i].bit;
1188       bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1190       if (ppt == PP_pushm)
1192 	  machine_mode mode = (bytes == 2) ? HImode : SImode;
1193 	  rtx addr;
1195 	  /* Always use stack_pointer_rtx instead of calling
1196 	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
1197 	     that there is a single rtx representing the stack pointer,
1198 	     namely stack_pointer_rtx, and uses == to recognize it. */
1199 	  addr = stack_pointer_rtx;
1201 	  if (byte_count != 0)
1202 	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1204 	  dwarf_set[n_dwarfs++] =
1205 	    gen_rtx_SET (VOIDmode,
1206 			 gen_rtx_MEM (mode, addr),
1207 			 gen_rtx_REG (mode, pushm_info[i].reg1));
1208 	  F (dwarf_set[n_dwarfs - 1]);
1211       byte_count += bytes;
     /* For interrupt handlers, record the mask in cfun->machine for use
	outside this function and restart the counters: the registers
	below are handled separately from the ordinary pushm.  */
1214   if (cfun->machine->is_interrupt)
1216       cfun->machine->intr_pushm = reg_mask & 0xfe;
1217       reg_mask = 0;
1218       byte_count = 0;
     /* Interrupt handlers also preserve any live memregs (mem0..mem7),
	two bytes each; remember which ones in intr_pushmem[].  */
1221   if (cfun->machine->is_interrupt)
1222     for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1223       if (need_to_save (i))
1225 	  byte_count += 2;
1226 	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1229   if (ppt == PP_pushm && byte_count)
1231       rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1232       rtx pushm;
1234       if (reg_mask)
          /* Slot 0 of the CFI note is the $sp adjustment; slots 1..n
	     are the per-register saves built above.  */
1236 	  XVECEXP (note, 0, 0)
1237 	    = gen_rtx_SET (VOIDmode,
1238 			   stack_pointer_rtx,
1239 			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1240 					 stack_pointer_rtx,
1241 					 GEN_INT (-byte_count)));
1242 	  F (XVECEXP (note, 0, 0));
1244 	  for (i = 0; i < n_dwarfs; i++)
1245 	    XVECEXP (note, 0, i + 1) = dwarf_set[i];
1247 	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1249 	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1252       if (cfun->machine->is_interrupt)
1253 	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1254 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1256 	      if (TARGET_A16)
1257 		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1258 	      else
1259 		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1260 	      F (pushm);
     /* Pops mirror the pushes in exactly the reverse order: memregs
	first (highest numbered first), then the popm.  */
1263   if (ppt == PP_popm && byte_count)
1265       if (cfun->machine->is_interrupt)
1266 	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1267 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1269 	      if (TARGET_A16)
1270 		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1271 	      else
1272 		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1274       if (reg_mask)
1275 	emit_insn (gen_popm (GEN_INT (reg_mask)));
1278   return byte_count;
1281 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1282    diagrams our call frame.  */
1284 m32c_initial_elimination_offset (int from, int to)
1286   int ofs = 0;
     /* From the arg pointer: skip a fixed 5 bytes (A16) or 8 bytes
	(A24) — presumably the saved $fb plus the return address; see
	the frame diagram referenced above.  */
1288   if (from == AP_REGNO)
1290       if (TARGET_A16)
1291 	ofs += 5;
1292       else
1293 	ofs += 8;
     /* Down to the stack pointer: additionally skip the register save
	area and the local frame.  */
1296   if (to == SP_REGNO)
1298       ofs += m32c_pushm_popm (PP_justcount);
1299       ofs += get_frame_size ();
1302       /* Account for push rounding. */
1303       if (TARGET_A24)
1304 	ofs = (ofs + 1) & ~1;
1305 #if DEBUG0
1306   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1307 	   to, ofs);
1308 #endif
1309   return ofs;
1312 /* Passing Function Arguments on the Stack */
1314 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1315 M32C has word stacks. */
1316 unsigned int
1317 m32c_push_rounding (int n)
1319 if (TARGET_R8C || TARGET_M16C)
1320 return n;
1321 return (n + 1) & ~1;
1324 /* Passing Arguments in Registers */
1326 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1327 registers, partly on stack. If our function returns a struct, a
1328 pointer to a buffer for it is at the top of the stack (last thing
1329 pushed). The first few real arguments may be in registers as
1330 follows:
1332 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1333 arg2 in r2 if it's HI (else pushed on stack)
1334 rest on stack
1335 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1336 rest on stack
1338 Structs are not passed in registers, even if they fit. Only
1339 integer and pointer types are passed in registers.
1341 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1342 r2 if it fits. */
1343 #undef TARGET_FUNCTION_ARG
1344 #define TARGET_FUNCTION_ARG m32c_function_arg
1345 static rtx
1346 m32c_function_arg (cumulative_args_t ca_v,
1347 machine_mode mode, const_tree type, bool named)
1349 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1351 /* Can return a reg, parallel, or 0 for stack */
1352 rtx rv = NULL_RTX;
1353 #if DEBUG0
1354 fprintf (stderr, "func_arg %d (%s, %d)\n",
1355 ca->parm_num, mode_name[mode], named);
1356 debug_tree (type);
1357 #endif
1359 if (mode == VOIDmode)
1360 return GEN_INT (0);
1362 if (ca->force_mem || !named)
1364 #if DEBUG0
1365 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1366 named);
1367 #endif
1368 return NULL_RTX;
1371 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1372 return NULL_RTX;
1374 if (type && AGGREGATE_TYPE_P (type))
1375 return NULL_RTX;
1377 switch (ca->parm_num)
1379 case 1:
1380 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1381 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1382 break;
1384 case 2:
1385 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1386 rv = gen_rtx_REG (mode, R2_REGNO);
1387 break;
1390 #if DEBUG0
1391 debug_rtx (rv);
1392 #endif
1393 return rv;
1396 #undef TARGET_PASS_BY_REFERENCE
1397 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1398 static bool
1399 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1400 machine_mode mode ATTRIBUTE_UNUSED,
1401 const_tree type ATTRIBUTE_UNUSED,
1402 bool named ATTRIBUTE_UNUSED)
1404 return 0;
1407 /* Implements INIT_CUMULATIVE_ARGS. */
1408 void
1409 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1410 tree fntype,
1411 rtx libname ATTRIBUTE_UNUSED,
1412 tree fndecl,
1413 int n_named_args ATTRIBUTE_UNUSED)
1415 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1416 ca->force_mem = 1;
1417 else
1418 ca->force_mem = 0;
1419 ca->parm_num = 1;
1422 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1423 functions returning structures, so we always reset that. Otherwise,
1424 we only need to know the sequence number of the argument to know what
1425 to do with it. */
1426 #undef TARGET_FUNCTION_ARG_ADVANCE
1427 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1428 static void
1429 m32c_function_arg_advance (cumulative_args_t ca_v,
1430 machine_mode mode ATTRIBUTE_UNUSED,
1431 const_tree type ATTRIBUTE_UNUSED,
1432 bool named ATTRIBUTE_UNUSED)
1434 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1436 if (ca->force_mem)
1437 ca->force_mem = 0;
1438 else
1439 ca->parm_num++;
1442 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1443 #undef TARGET_FUNCTION_ARG_BOUNDARY
1444 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1445 static unsigned int
1446 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1447 const_tree type ATTRIBUTE_UNUSED)
1449 return (TARGET_A16 ? 8 : 16);
1452 /* Implements FUNCTION_ARG_REGNO_P. */
1454 m32c_function_arg_regno_p (int r)
1456 if (TARGET_A24)
1457 return (r == R0_REGNO);
1458 return (r == R1_REGNO || r == R2_REGNO);
1461 /* HImode and PSImode are the two "native" modes as far as GCC is
1462 concerned, but the chips also support a 32-bit mode which is used
1463 for some opcodes in R8C/M16C and for reset vectors and such. */
1464 #undef TARGET_VALID_POINTER_MODE
1465 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1466 static bool
1467 m32c_valid_pointer_mode (machine_mode mode)
1469 if (mode == HImode
1470 || mode == PSImode
1471 || mode == SImode
1473 return 1;
1474 return 0;
1477 /* How Scalar Function Values Are Returned */
1479 /* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
1480    combination of registers starting there (r2r0 for longs, r3r1r2r0
1481    for long long, r3r2r1r0 for doubles), except that that ABI
1482    currently doesn't work because it ends up using all available
1483    general registers and gcc often can't compile it.  So, instead, we
1484    return anything bigger than 16 bits in "mem0" (effectively, a
1485    memory location).  */
1487 #undef TARGET_LIBCALL_VALUE
1488 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1490 static rtx
1491 m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1493   /* return reg or parallel */
     /* The #if 0 block below is the abandoned in-register return ABI,
	kept for reference; see the FIXME inside it.  */
1494 #if 0
1495   /* FIXME: GCC has difficulty returning large values in registers,
1496      because that ties up most of the general registers and gives the
1497      register allocator little to work with.  Until we can resolve
1498      this, large values are returned in memory. */
1499   if (mode == DFmode)
1501       rtx rv;
1503       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1504       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1505 					      gen_rtx_REG (HImode,
1506 							   R0_REGNO),
1507 					      GEN_INT (0));
1508       XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1509 					      gen_rtx_REG (HImode,
1510 							   R1_REGNO),
1511 					      GEN_INT (2));
1512       XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1513 					      gen_rtx_REG (HImode,
1514 							   R2_REGNO),
1515 					      GEN_INT (4));
1516       XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1517 					      gen_rtx_REG (HImode,
1518 							   R3_REGNO),
1519 					      GEN_INT (6));
1520       return rv;
1523   if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1525       rtx rv;
1527       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1528       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1529 					      gen_rtx_REG (mode,
1530 							   R0_REGNO),
1531 					      GEN_INT (0));
1532       return rv;
1534 #endif
     /* Live code: values wider than 16 bits come back in mem0; 8- and
	16-bit values come back in $r0.  */
1536   if (GET_MODE_SIZE (mode) > 2)
1537     return gen_rtx_REG (mode, MEM0_REGNO);
1538   return gen_rtx_REG (mode, R0_REGNO);
1541 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1542 conventions. */
1544 #undef TARGET_FUNCTION_VALUE
1545 #define TARGET_FUNCTION_VALUE m32c_function_value
1547 static rtx
1548 m32c_function_value (const_tree valtype,
1549 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1550 bool outgoing ATTRIBUTE_UNUSED)
1552 /* return reg or parallel */
1553 const machine_mode mode = TYPE_MODE (valtype);
1554 return m32c_libcall_value (mode, NULL_RTX);
1557 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1559 #undef TARGET_FUNCTION_VALUE_REGNO_P
1560 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1562 static bool
1563 m32c_function_value_regno_p (const unsigned int regno)
1565 return (regno == R0_REGNO || regno == MEM0_REGNO);
1568 /* How Large Values Are Returned */
1570 /* We return structures by pushing the address on the stack, even if
1571 we use registers for the first few "real" arguments. */
1572 #undef TARGET_STRUCT_VALUE_RTX
1573 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1574 static rtx
1575 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1576 int incoming ATTRIBUTE_UNUSED)
1578 return 0;
1581 /* Function Entry and Exit */
1583 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1585 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1587 if (cfun->machine->is_interrupt)
1588 return 1;
1589 return 0;
1592 /* Implementing the Varargs Macros */
1594 #undef TARGET_STRICT_ARGUMENT_NAMING
1595 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1596 static bool
1597 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1599 return 1;
1602 /* Trampolines for Nested Functions */
1605 m16c:
1606 1 0000 75C43412 mov.w #0x1234,a0
1607 2 0004 FC000000 jmp.a label
1609 m32c:
1610 1 0000 BC563412 mov.l:s #0x123456,a0
1611 2 0004 CC000000 jmp.a label
1614 /* Implements TRAMPOLINE_SIZE. */
1616 m32c_trampoline_size (void)
1618 /* Allocate extra space so we can avoid the messy shifts when we
1619 initialize the trampoline; we just write past the end of the
1620 opcode. */
1621 return TARGET_A16 ? 8 : 10;
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines are word-aligned.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1631 /* Implements TARGET_TRAMPOLINE_INIT.  Writes the mov-#chain/jmp.a
     instruction pair diagrammed in the comment above into the
     trampoline buffer M_TRAMP.  */
1633 #undef TARGET_TRAMPOLINE_INIT
1634 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1635 static void
1636 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1638   rtx function = XEXP (DECL_RTL (fndecl), 0);
1640 #define A0(m,i) adjust_address (m_tramp, m, i)
1641   if (TARGET_A16)
1643       /* Note: we subtract a "word" because the moves want signed
1644 	 constants, not unsigned constants. */
      /* 0xc475 is the "mov.w #imm,a0" opcode bytes, 0xfc the "jmp.a"
	 opcode — see the trampoline layout comment above.  */
1645       emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1646       emit_move_insn (A0 (HImode, 2), chainval);
1647       emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1648       /* We use 16-bit addresses here, but store the zero to turn it
1649 	 into a 24-bit offset. */
1650       emit_move_insn (A0 (HImode, 5), function);
1651       emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1653   else
1655       /* Note that the PSI moves actually write 4 bytes.  Make sure we
1656 	 write stuff out in the right order, and leave room for the
1657 	 extra byte at the end. */
      /* 0xbc is "mov.l:s #imm,a0", 0xcc is "jmp.a" — see layout
	 comment above.  */
1658       emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1659       emit_move_insn (A0 (PSImode, 1), chainval);
1660       emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1661       emit_move_insn (A0 (PSImode, 5), function);
1663 #undef A0
1666 /* Addressing Modes */
1668 /* The r8c/m32c family supports a wide range of non-orthogonal
1669    addressing modes, including the ability to double-indirect on *some*
1670    of them.  Not all insns support all modes, either, but we rely on
1671    predicates and constraints to deal with that.  */
1672 #undef TARGET_LEGITIMATE_ADDRESS_P
1673 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1674 bool
1675 m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1677   int mode_adjust;
1678   if (CONSTANT_P (x))
1679     return 1;
     /* Address arithmetic must be in the chip's native pointer width
	(or SImode on A16, used for far-ish accesses).  */
1681   if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1682     return 0;
1683   if (TARGET_A24 && GET_MODE (x) != PSImode)
1684     return 0;
1686   /* Wide references to memory will be split after reload, so we must
1687      ensure that all parts of such splits remain legitimate
1688      addresses.  */
1689   mode_adjust = GET_MODE_SIZE (mode) - 1;
1691   /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1692   if (GET_CODE (x) == PRE_DEC
1693       || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
     /* Auto-modify addressing is only valid on the stack pointer.  */
1695       return (GET_CODE (XEXP (x, 0)) == REG
1696 	      && REGNO (XEXP (x, 0)) == SP_REGNO);
1699 #if 0
1700   /* This is the double indirection detection, but it currently
1701      doesn't work as cleanly as this code implies, so until we've had
1702      a chance to debug it, leave it disabled.  */
1703   if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1705 #if DEBUG_DOUBLE
1706       fprintf (stderr, "double indirect\n");
1707 #endif
1708       x = XEXP (x, 0);
1710 #endif
1712   encode_pattern (x);
1713   if (RTX_IS ("r"))
1715       /* Most indexable registers can be used without displacements,
1716 	 although some of them will be emitted with an explicit zero
1717 	 to please the assembler.  */
1718       switch (REGNO (patternr[0]))
1720 	case A1_REGNO:
1721 	case SB_REGNO:
1722 	case FB_REGNO:
1723 	case SP_REGNO:
1724 	  if (TARGET_A16 && GET_MODE (x) == SImode)
1725 	    return 0;
	  /* FALLTHRU: $a0 is valid even for SImode on A16.  */
1726 	case A0_REGNO:
1727 	  return 1;
1729 	default:
1730 	  if (IS_PSEUDO (patternr[0], strict))
1731 	    return 1;
1732 	  return 0;
1736   if (TARGET_A16 && GET_MODE (x) == SImode)
1737     return 0;
1739   if (RTX_IS ("+ri"))
1741       /* This is more interesting, because different base registers
1742 	 allow for different displacements - both range and signedness
1743 	 - and it differs from chip series to chip series too.  */
1744       int rn = REGNO (patternr[1]);
1745       HOST_WIDE_INT offs = INTVAL (patternr[2]);
1746       switch (rn)
1748 	case A0_REGNO:
1749 	case A1_REGNO:
1750 	case SB_REGNO:
1751 	  /* The syntax only allows positive offsets, but when the
1752 	     offsets span the entire memory range, we can simulate
1753 	     negative offsets by wrapping.  */
1754 	  if (TARGET_A16)
1755 	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
1756 	  if (rn == SB_REGNO)
1757 	    return (offs >= 0 && offs <= 65535 - mode_adjust);
1758 	  /* A0 or A1 */
1759 	  return (offs >= -16777216 && offs <= 16777215);
1761 	case FB_REGNO:
1762 	  if (TARGET_A16)
1763 	    return (offs >= -128 && offs <= 127 - mode_adjust);
1764 	  return (offs >= -65536 && offs <= 65535 - mode_adjust);
1766 	case SP_REGNO:
1767 	  return (offs >= -128 && offs <= 127 - mode_adjust);
1769 	default:
1770 	  if (IS_PSEUDO (patternr[1], strict))
1771 	    return 1;
1772 	  return 0;
     /* Register plus symbol (optionally plus offset).  */
1775   if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1777       rtx reg = patternr[1];
1779       /* We don't know where the symbol is, so only allow base
1780 	 registers which support displacements spanning the whole
1781 	 address range.  */
1782       switch (REGNO (reg))
1784 	case A0_REGNO:
1785 	case A1_REGNO:
1786 	  /* $sb needs a secondary reload, but since it's involved in
1787 	     memory address reloads too, we don't deal with it very
1788 	     well.  */
1789 	  /* case SB_REGNO: */
1790 	  return 1;
1791 	default:
1792 	  if (IS_PSEUDO (reg, strict))
1793 	    return 1;
1794 	  return 0;
1797   return 0;
1800 /* Implements REG_OK_FOR_BASE_P. */
1802 m32c_reg_ok_for_base_p (rtx x, int strict)
1804 if (GET_CODE (x) != REG)
1805 return 0;
1806 switch (REGNO (x))
1808 case A0_REGNO:
1809 case A1_REGNO:
1810 case SB_REGNO:
1811 case FB_REGNO:
1812 case SP_REGNO:
1813 return 1;
1814 default:
1815 if (IS_PSEUDO (x, strict))
1816 return 1;
1817 return 0;
1821 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1822 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1823 like this:
1824 EB 4B FF mova -128[$fb],$a0
1825 D8 0C FF FF mov.w:Q #0,-1[$a0]
1827 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1828 displacements:
1829 7B F4 stc $fb,$a0
1830 77 54 00 01 sub #256,$a0
1831 D8 08 01 mov.w:Q #0,1[$a0]
1833 If we don't offset (i.e. offset by zero), we end up with:
1834 7B F4 stc $fb,$a0
1835 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1837 We have to subtract *something* so that we have a PLUS rtx to mark
1838 that we've done this reload. The -128 offset will never result in
1839 an 8-bit aN offset, and the payoff for the second case is five
1840 loads *if* those loads are within 256 bytes of the other end of the
1841 frame, so the third case seems best. Note that we subtract the
1842 zero, but detect that in the addhi3 pattern. */
1844 #define BIG_FB_ADJ 0
1846 /* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
1847    worry about is frame base offsets, as $fb has a limited
1848    displacement range.  We deal with this by attempting to reload $fb
1849    itself into an address register; that seems to result in the best
1850    code.  */
1851 #undef TARGET_LEGITIMIZE_ADDRESS
1852 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1853 static rtx
1854 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1855 			 machine_mode mode)
1857 #if DEBUG0
1858   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1859   debug_rtx (x);
1860   fprintf (stderr, "\n");
1861 #endif
     /* $fb plus a displacement outside the [-128, 127] reach of the
	access: copy $fb into a fresh pseudo (to be allocated to an
	address register) and rewrite the base.  */
1863   if (GET_CODE (x) == PLUS
1864       && GET_CODE (XEXP (x, 0)) == REG
1865       && REGNO (XEXP (x, 0)) == FB_REGNO
1866       && GET_CODE (XEXP (x, 1)) == CONST_INT
1867       && (INTVAL (XEXP (x, 1)) < -128
1868 	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1870       /* reload FB to A_REGS */
1871       rtx temp = gen_reg_rtx (Pmode);
      /* Copy X before mutating it: the original rtx may be shared.  */
1872       x = copy_rtx (x);
1873       emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1874       XEXP (x, 0) = temp;
1877   return x;
1880 /* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
1882 m32c_legitimize_reload_address (rtx * x,
1883 				machine_mode mode,
1884 				int opnum,
1885 				int type, int ind_levels ATTRIBUTE_UNUSED)
1887 #if DEBUG0
1888   fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1889 	   mode_name[mode]);
1890   debug_rtx (*x);
1891 #endif
1893   /* At one point, this function tried to get $fb copied to an address
1894      register, which in theory would maximize sharing, but gcc was
1895      *also* still trying to reload the whole address, and we'd run out
1896      of address registers.  So we let gcc do the naive (but safe)
1897      reload instead, when the above function doesn't handle it for
1900      The code below is a second attempt at the above.  */
     /* Case 1: $fb + out-of-range constant.  Split into
	($fb + adjustment) + (offset - adjustment) and ask reload to
	put the inner sum into an address register.  */
1902   if (GET_CODE (*x) == PLUS
1903       && GET_CODE (XEXP (*x, 0)) == REG
1904       && REGNO (XEXP (*x, 0)) == FB_REGNO
1905       && GET_CODE (XEXP (*x, 1)) == CONST_INT
1906       && (INTVAL (XEXP (*x, 1)) < -128
1907 	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1909       rtx sum;
1910       int offset = INTVAL (XEXP (*x, 1));
      /* BIG_FB_ADJ is zero; the PLUS merely marks the reload as done
	 (see the BIG_FB_ADJ comment above).  */
1911       int adjustment = -BIG_FB_ADJ;
1913       sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1914 			  GEN_INT (adjustment));
1915       *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1916       if (type == RELOAD_OTHER)
1917 	type = RELOAD_FOR_OTHER_ADDRESS;
1918       push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1919 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1920 		   (enum reload_type) type);
1921       return 1;
     /* Case 2: an already-split (($fb + const) + const) form; reload
	the inner sum into an address register.  */
1924   if (GET_CODE (*x) == PLUS
1925       && GET_CODE (XEXP (*x, 0)) == PLUS
1926       && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1927       && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1928       && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1929       && GET_CODE (XEXP (*x, 1)) == CONST_INT
1932       if (type == RELOAD_OTHER)
1933 	type = RELOAD_FOR_OTHER_ADDRESS;
1934       push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1935 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1936 		   (enum reload_type) type);
1937       return 1;
1940   return 0;
1943 /* Return the appropriate mode for a named address pointer. */
1944 #undef TARGET_ADDR_SPACE_POINTER_MODE
1945 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1946 static machine_mode
1947 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1949 switch (addrspace)
1951 case ADDR_SPACE_GENERIC:
1952 return TARGET_A24 ? PSImode : HImode;
1953 case ADDR_SPACE_FAR:
1954 return SImode;
1955 default:
1956 gcc_unreachable ();
1960 /* Return the appropriate mode for a named address address. */
1961 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1962 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1963 static machine_mode
1964 m32c_addr_space_address_mode (addr_space_t addrspace)
1966 switch (addrspace)
1968 case ADDR_SPACE_GENERIC:
1969 return TARGET_A24 ? PSImode : HImode;
1970 case ADDR_SPACE_FAR:
1971 return SImode;
1972 default:
1973 gcc_unreachable ();
1977 /* Like m32c_legitimate_address_p, except with named addresses.  */
1978 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1979 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1980   m32c_addr_space_legitimate_address_p
1981 static bool
1982 m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
1983 				      bool strict, addr_space_t as)
1985   if (as == ADDR_SPACE_FAR)
      /* NOTE(review): far addresses are rejected outright on A24 —
	 presumably the far space only applies to the 16-bit parts;
	 confirm against the rest of the port.  */
1987       if (TARGET_A24)
1988 	return 0;
1989       encode_pattern (x);
      /* Bare register: must be SImode, and only $a0 (or a pseudo)
	 can hold a far address.  */
1990       if (RTX_IS ("r"))
1992 	  if (GET_MODE (x) != SImode)
1993 	    return 0;
1994 	  switch (REGNO (patternr[0]))
1996 	    case A0_REGNO:
1997 	      return 1;
1999 	    default:
2000 	      if (IS_PSEUDO (patternr[0], strict))
2001 		return 1;
2002 	      return 0;
      /* Register plus constant offset, limited to the 20-bit range.  */
2005       if (RTX_IS ("+^Sri"))
2007 	  int rn = REGNO (patternr[3]);
2008 	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
2009 	  if (GET_MODE (patternr[3]) != HImode)
2010 	    return 0;
2011 	  switch (rn)
2013 	    case A0_REGNO:
2014 	      return (offs >= 0 && offs <= 0xfffff);
2016 	    default:
2017 	      if (IS_PSEUDO (patternr[3], strict))
2018 		return 1;
2019 	      return 0;
      /* Register plus symbol.  */
2022       if (RTX_IS ("+^Srs"))
2024 	  int rn = REGNO (patternr[3]);
2025 	  if (GET_MODE (patternr[3]) != HImode)
2026 	    return 0;
2027 	  switch (rn)
2029 	    case A0_REGNO:
2030 	      return 1;
2032 	    default:
2033 	      if (IS_PSEUDO (patternr[3], strict))
2034 		return 1;
2035 	      return 0;
      /* Register plus symbol plus offset.  */
2038       if (RTX_IS ("+^S+ris"))
2040 	  int rn = REGNO (patternr[4]);
2041 	  if (GET_MODE (patternr[4]) != HImode)
2042 	    return 0;
2043 	  switch (rn)
2045 	    case A0_REGNO:
2046 	      return 1;
2048 	    default:
2049 	      if (IS_PSEUDO (patternr[4], strict))
2050 		return 1;
2051 	      return 0;
      /* Bare symbol references are always valid.  */
2054       if (RTX_IS ("s"))
2056 	  return 1;
2058       return 0;
2061   else if (as != ADDR_SPACE_GENERIC)
2062     gcc_unreachable ();
     /* Generic space: defer to the ordinary legitimacy check.  */
2064   return m32c_legitimate_address_p (mode, x, strict);
2067 /* Like m32c_legitimate_address, except with named address support. */
2068 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2069 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2070 static rtx
2071 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2072 addr_space_t as)
2074 if (as != ADDR_SPACE_GENERIC)
2076 #if DEBUG0
2077 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2078 debug_rtx (x);
2079 fprintf (stderr, "\n");
2080 #endif
2082 if (GET_CODE (x) != REG)
2084 x = force_reg (SImode, x);
2086 return x;
2089 return m32c_legitimize_address (x, oldx, mode);
2092 /* Determine if one named address space is a subset of another. */
2093 #undef TARGET_ADDR_SPACE_SUBSET_P
2094 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2095 static bool
2096 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2098 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2099 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2101 if (subset == superset)
2102 return true;
2104 else
2105 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2108 #undef TARGET_ADDR_SPACE_CONVERT
2109 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2110 /* Convert from one address space to another.  */
2111 static rtx
2112 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2114   addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2115   addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2116   rtx result;
2118   gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2119   gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
     /* Far -> generic: truncate the 32-bit pointer to 16 bits.  */
2121   if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2123       /* This is unpredictable, as we're truncating off usable address
2124 	 bits.  */
2126       result = gen_reg_rtx (HImode);
2127       emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2128       return result;
     /* Generic -> far: zero-extend the 16-bit pointer to 32 bits.  */
2130   else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2132       /* This always works.  */
2133       result = gen_reg_rtx (SImode);
2134       emit_insn (gen_zero_extendhisi2 (result, op));
2135       return result;
2137   else
2138     gcc_unreachable ();
2141 /* Condition Code Status */
     /* Implements TARGET_FIXED_CONDITION_CODE_REGS: the FLG register is
	the only condition-code register on this target, so there is no
	second register to report.  */
2143 #undef TARGET_FIXED_CONDITION_CODE_REGS
2144 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2145 static bool
2146 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2148   *p1 = FLG_REGNO;
2149   *p2 = INVALID_REGNUM;
2150   return true;
2153 /* Describing Relative Costs of Operations */
2155 /* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
2156    prohibitively expensive, like trying to put QIs in r2/r3 (there are
2157    no opcodes to do that).  We also discourage use of mem* registers
2158    since they're really memory.  */
2160 #undef TARGET_REGISTER_MOVE_COST
2161 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2163 static int
2164 m32c_register_move_cost (machine_mode mode, reg_class_t from,
2165 			 reg_class_t to)
2167   int cost = COSTS_N_INSNS (3);
2168   HARD_REG_SET cc;
2170 /* FIXME: pick real values, but not 2 for now.  */
2171   COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2172   IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
     /* QImode moves touching r2/r3 are impossible (no opcodes): make
	them prohibitive if unavoidable, merely costly if the class
	also offers other registers.  */
2174   if (mode == QImode
2175       && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2177       if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2178 	cost = COSTS_N_INSNS (1000);
2179       else
2180 	cost = COSTS_N_INSNS (80);
     /* Classes that cannot hold the mode at all are prohibitive.  */
2183   if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2184     cost = COSTS_N_INSNS (1000);
     /* Control registers are expensive on either end of the move.  */
2186   if (reg_classes_intersect_p (from, CR_REGS))
2187     cost += COSTS_N_INSNS (5);
2189   if (reg_classes_intersect_p (to, CR_REGS))
2190     cost += COSTS_N_INSNS (5);
     /* mem* pseudo-registers are really memory: strongly discourage
	classes made of them, mildly discourage classes touching them.  */
2192   if (from == MEM_REGS || to == MEM_REGS)
2193     cost += COSTS_N_INSNS (50);
2194   else if (reg_classes_intersect_p (from, MEM_REGS)
2195 	   || reg_classes_intersect_p (to, MEM_REGS))
2196     cost += COSTS_N_INSNS (10);
2198 #if DEBUG0
2199   fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2200 	   mode_name[mode], class_names[(int) from], class_names[(int) to],
2201 	   cost);
2202 #endif
2203   return cost;
2206 /* Implements TARGET_MEMORY_MOVE_COST.  A single flat cost for all
     classes and modes, pending real measurements (see FIXME).  */
2208 #undef TARGET_MEMORY_MOVE_COST
2209 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2211 static int
2212 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2213 		       reg_class_t rclass ATTRIBUTE_UNUSED,
2214 		       bool in ATTRIBUTE_UNUSED)
2216   /* FIXME: pick real values.  */
2217   return COSTS_N_INSNS (10);
2220 /* Here we try to describe when we use multiple opcodes for one RTX so
2221    that gcc knows when to use them.  */
2222 #undef TARGET_RTX_COSTS
2223 #define TARGET_RTX_COSTS m32c_rtx_costs
2224 static bool
2225 m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2226 		int *total, bool speed ATTRIBUTE_UNUSED)
2228   switch (code)
2230     case REG:
      /* mem* pseudo-registers are really memory: heavily penalize.  */
2231       if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2232 	*total += COSTS_N_INSNS (500);
2233       else
2234 	*total += COSTS_N_INSNS (1);
2235       return true;
2237     case ASHIFT:
2238     case LSHIFTRT:
2239     case ASHIFTRT:
      /* Shifts run through r1h; charge the setup moves.  */
2240       if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2242 	  /* mov.b r1l, r1h */
2243 	  *total += COSTS_N_INSNS (1);
2244 	  return true;
2246       if (INTVAL (XEXP (x, 1)) > 8
2247 	  || INTVAL (XEXP (x, 1)) < -8)
2249 	  /* mov.b #N, r1l */
2250 	  /* mov.b r1l, r1h */
2251 	  *total += COSTS_N_INSNS (2);
2252 	  return true;
2254       return true;
2256     case LE:
2257     case LEU:
2258     case LT:
2259     case LTU:
2260     case GT:
2261     case GTU:
2262     case GE:
2263     case GEU:
2264     case NE:
2265     case EQ:
      /* Storing a comparison result takes two insns; comparisons used
	 for branching fall through to the default costing.  */
2266       if (outer_code == SET)
2268 	  *total += COSTS_N_INSNS (2);
2269 	  return true;
2271       break;
2273     case ZERO_EXTRACT:
      /* NOTE(review): assumes XEXP (x, 0) is a MEM whose address we
	 inspect — confirm callers guarantee bit-extracts here are
	 always memory-based.  */
2275 	rtx dest = XEXP (x, 0);
2276 	rtx addr = XEXP (dest, 0);
2277 	switch (GET_CODE (addr))
2279 	  case CONST_INT:
2280 	    *total += COSTS_N_INSNS (1);
2281 	    break;
2282 	  case SYMBOL_REF:
2283 	    *total += COSTS_N_INSNS (3);
2284 	    break;
2285 	  default:
2286 	    *total += COSTS_N_INSNS (2);
2287 	    break;
2289 	return true;
2291       break;
2293     default:
2294       /* Reasonable default.  */
      /* 32-bit operations on the 16-bit parts take two insns.  */
2295       if (TARGET_A16 && GET_MODE(x) == SImode)
2296 	*total += COSTS_N_INSNS (2);
2297       break;
2299   return false;
     /* Implements TARGET_ADDRESS_COST: cost grows with the number of
	bytes needed to encode the displacement (0, 8, 16, or more
	bits).  */
2302 #undef TARGET_ADDRESS_COST
2303 #define TARGET_ADDRESS_COST m32c_address_cost
2304 static int
2305 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2306 		   addr_space_t as ATTRIBUTE_UNUSED,
2307 		   bool speed ATTRIBUTE_UNUSED)
2309   int i;
2310   /*  fprintf(stderr, "\naddress_cost\n");
2311       debug_rtx(addr);*/
2312   switch (GET_CODE (addr))
2314     case CONST_INT:
2315       i = INTVAL (addr);
2316       if (i == 0)
2317 	return COSTS_N_INSNS(1);
2318       if (0 < i && i <= 255)
2319 	return COSTS_N_INSNS(2);
2320       if (0 < i && i <= 65535)
2321 	return COSTS_N_INSNS(3);
2322       return COSTS_N_INSNS(4);
2323     case SYMBOL_REF:
2324       return COSTS_N_INSNS(4);
2325     case REG:
2326       return COSTS_N_INSNS(1);
2327     case PLUS:
      /* Register plus constant: cost by displacement width.  */
2328       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2330 	  i = INTVAL (XEXP (addr, 1));
2331 	  if (i == 0)
2332 	    return COSTS_N_INSNS(1);
2333 	  if (0 < i && i <= 255)
2334 	    return COSTS_N_INSNS(2);
2335 	  if (0 < i && i <= 65535)
2336 	    return COSTS_N_INSNS(3);
2338       return COSTS_N_INSNS(4);
2339     default:
2340       return 0;
2344 /* Defining the Output Assembler Language */
2346 /* Output of Data */
2348 /* We may have 24 bit sizes, which is the native address size.
2349 Currently unused, but provided for completeness. */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER m32c_asm_integer

/* Implements TARGET_ASM_INTEGER.  Emits 3-byte (native address size)
   integers with the ".3byte" directive, and 4-byte symbolic values
   with ".long"; everything else falls back to the generic
   default_assemble_integer.  Returns true if the value was output.  */
static bool
m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
{
  switch (size)
    {
    case 3:
      fprintf (asm_out_file, "\t.3byte\t");
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    case 4:
      /* Only symbols get the .long form; numeric 4-byte values are
	 handled by the default below.  */
      if (GET_CODE (x) == SYMBOL_REF)
	{
	  fprintf (asm_out_file, "\t.long\t");
	  output_addr_const (asm_out_file, x);
	  fputc ('\n', asm_out_file);
	  return true;
	}
      break;
    }
  return default_assemble_integer (x, size, aligned_p);
}
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Operand-printing conversion table, consumed by m32c_print_operand.
   CODE is the '%' letter the entry applies to (0 = no letter).
   PATTERN is matched against the string produced by encode_pattern
   for the operand (presumably one character per RTX node; e.g. 'r'
   register, 'm' mem, 'i' int, 's' symbol, '+' plus — TODO confirm
   against encode_pattern, which is outside this view).
   FORMAT drives the output: a digit N prints patternr[N]; '\\' quotes
   the next character; 'z' inserts a "0" displacement when required
   by the addressing mode; '+' before a constant digit requests a
   blended sign; any other character is emitted literally.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
/* This is in order according to the bitfield that pushm/popm use.
   Indexed by bit number 0..7 in m32c_print_operand's 'p' case, which
   walks bits 7 down to 0 of the operand mask.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
/* Implements TARGET_PRINT_OPERAND.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

/* Print operand X to FILE, modified by the '%' letter CODE.
   Letters handled directly here:
     u/U - print a constant as unsigned word/byte (for multiplies)
     !   - debugging trap: dump the insn and abort
     &   - ".w" vs ".l" suffix depending on TARGET_A16
     C/c - inverted / regular condition-code names
     h/H - low / high half of an SImode or HImode operand
   All other letters (and plain operands) are printed by matching the
   encode_pattern string against the conversions[] table above.  */
static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
     we need unsigned, so 'u' and 'U' tell us what size unsigned we
     need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.
	 Only r0/r1 have addressable high bytes (r0h/r1h).  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* encode_pattern fills the global `pattern` string and `patternr`
     rtx array (defined earlier in this file) describing X's shape;
     we then look that shape up in conversions[].  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Print the bit position of a single-bit mask;
			   also accept masks that are the complement of
			   a single bit in 16 or 8 bits.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      /* force_sign was set by a preceding '+' in the
			 format (see below); emit an explicit '+' so the
			 sign blends with a preceding symbol.  */
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: print a diagnostic placeholder rather
     than silently emitting nothing.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

/* Only '&' (operand-size suffix) and '!' (debug trap) are valid
   punctuation codes for this target.  */
static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  switch (c)
    {
    case '&':
    case '!':
      return true;
    default:
      return false;
    }
}
/* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */

#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address

/* Print ADDRESS to STREAM.  A MEM is unwrapped to its address first;
   anything else must already be a REG (see gcc.dg/asm-4.c), then the
   result goes through the normal operand printer with no code.  */
static void
m32c_print_operand_address (FILE * stream, rtx address)
{
  if (GET_CODE (address) == MEM)
    address = XEXP (address, 0);
  else
    /* cf: gcc.dg/asm-4.c.  */
    gcc_assert (GET_CODE (address) == REG);

  m32c_print_operand (stream, address, 0);
}
/* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
   differently than general registers.  */
/* FLG uses "pushc"; everything else uses "push" with a size suffix
   chosen from reg_push_size (b/w/l).  */
void
m32c_output_reg_push (FILE * s, int regno)
{
  if (regno == FLG_REGNO)
    fprintf (s, "\tpushc\tflg\n");
  else
    fprintf (s, "\tpush.%c\t%s\n",
	     " bwll"[reg_push_size (regno)], reg_names[regno]);
}
/* Likewise for ASM_OUTPUT_REG_POP.  Mirror image of
   m32c_output_reg_push: "popc" for FLG, sized "pop" otherwise.  */
void
m32c_output_reg_pop (FILE * s, int regno)
{
  if (regno == FLG_REGNO)
    fprintf (s, "\tpopc\tflg\n");
  else
    fprintf (s, "\tpop.%c\t%s\n",
	     " bwll"[reg_push_size (regno)], reg_names[regno]);
}
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be.

   For a type, use its TYPE_ATTRIBUTES; for a decl, prefer its own
   DECL_ATTRIBUTES and fall back to the attributes of its type.

   The whole expansion is parenthesized so the conditional expression
   cannot be re-associated when the macro is used inside a larger
   expression (?: has very low precedence).  Note that DECL is
   evaluated more than once, so it must be side-effect free.  */
#define M32C_ATTRIBUTES(decl) \
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
   : DECL_ATTRIBUTES (decl) \
     ? (DECL_ATTRIBUTES (decl)) \
     : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2861 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2862 static int
2863 interrupt_p (tree node ATTRIBUTE_UNUSED)
2865 tree list = M32C_ATTRIBUTES (node);
2866 while (list)
2868 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2869 return 1;
2870 list = TREE_CHAIN (list);
2872 return fast_interrupt_p (node);
2875 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2876 static int
2877 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2879 tree list = M32C_ATTRIBUTES (node);
2880 while (list)
2882 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2883 return 1;
2884 list = TREE_CHAIN (list);
2886 return 0;
2889 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2890 static int
2891 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2893 tree list = M32C_ATTRIBUTES (node);
2894 while (list)
2896 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2897 return 1;
2898 list = TREE_CHAIN (list);
2900 return 0;
/* Attribute handler for "interrupt", "bank_switch", and
   "fast_interrupt" (see m32c_attribute_table below).  The attribute
   is accepted as-is; no validation is needed, so this just returns
   NULL_TREE and leaves *no_add_attrs untouched.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2913 /* Returns TRUE if given tree has the "function_vector" attribute. */
2915 m32c_special_page_vector_p (tree func)
2917 tree list;
2919 if (TREE_CODE (func) != FUNCTION_DECL)
2920 return 0;
2922 list = M32C_ATTRIBUTES (func);
2923 while (list)
2925 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2926 return 1;
2927 list = TREE_CHAIN (list);
2929 return 0;
/* Attribute handler for "function_vector".  Validates that the
   attribute is used on a function, on a non-R8C target, with a
   single integer-constant argument in the range 18..255; on any
   violation a -Wattributes warning is issued and the attribute is
   dropped via *no_add_attrs.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
	       "%qE attribute is not supported for R8C target",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
	       "%qE attribute applies only to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
	   || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 18 to 255",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
/* If the function is assigned the attribute 'function_vector', it
   returns the function vector number, otherwise returns zero.  */
/* X must be a SYMBOL_REF with SYMBOL_FLAG_FUNCVEC_FUNCTION set and a
   FUNCTION_DECL behind it; the vector number is the attribute's
   integer argument.  Returns 0 in every non-matching case.  */
int
current_function_special_page_vector (rtx x)
{
  int num;

  if ((GET_CODE(x) == SYMBOL_REF)
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      tree list;
      tree t = SYMBOL_REF_DECL (x);

      if (TREE_CODE (t) != FUNCTION_DECL)
	return 0;

      list = M32C_ATTRIBUTES (t);
      while (list)
	{
	  if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
	    {
	      /* TREE_VALUE (list) is the attribute's argument list;
		 its first TREE_VALUE is the vector number.  */
	      num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
	      return num;
	    }

	  list = TREE_CHAIN (list);
	}

      return 0;
    }
  else
    return 0;
}
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Fields are: { name, min_len, max_len, decl_required, type_required,
   function_type_required, handler, affects_type_identity }.
   "function_vector" takes exactly one argument and requires a decl;
   the other three take none.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true, false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute combinations
   are treated as compatible on this target.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
/* Implements TARGET_INSERT_ATTRIBUTES.  Variables placed at a fixed
   address via "#pragma address" (see m32c_note_pragma_address) are
   marked volatile so accesses are never optimized away.  */
static void
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
			tree * attr_ptr ATTRIBUTE_UNUSED)
{
  unsigned addr;
  /* See if we need to make #pragma address variables volatile.  */

  if (TREE_CODE (node) == VAR_DECL)
    {
      const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
      if (m32c_get_pragma_address (name, &addr))
	{
	  TREE_THIS_VOLATILE (node) = true;
	}
    }
}
/* hash_map traits for the #pragma address table below: keys are
   C strings compared by content, not by pointer.  */
struct pragma_traits : default_hashmap_traits
{
  static hashval_t hash (const char *str) { return htab_hash_string (str); }
  static bool
  equal_keys (const char *a, const char *b)
  {
    return !strcmp (a, b);
  }
};
/* Hash table of pragma info.  Maps variable name -> fixed address,
   GC-rooted so it survives across garbage collections.  */
static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;

/* Record that VARNAME was given a fixed ADDRESS via #pragma address.
   The name is copied into GC memory; a later entry for the same name
   overwrites the earlier address.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  if (!pragma_htab)
    pragma_htab
      = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);

  const char *name = ggc_strdup (varname);
  unsigned int *slot = &pragma_htab->get_or_insert (name);
  *slot = address;
}
/* Look up VARNAME in the #pragma address table.  On a hit, stores the
   recorded address in *ADDRESS and returns true; otherwise returns
   false and leaves *ADDRESS untouched.  */
static bool
m32c_get_pragma_address (const char *varname, unsigned *address)
{
  if (!pragma_htab)
    return false;

  unsigned int *slot = pragma_htab->get (varname);
  if (slot)
    {
      *address = *slot;
      return true;
    }
  return false;
}
/* Emit a common (or local common) symbol NAME of SIZE bytes with the
   given bit ALIGNment.  Variables pinned by "#pragma address" are
   instead emitted as a plain symbol assignment to their fixed
   address (and are never made global).  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
/* Predicates */

/* This is a list of legal subregs of hard regs.  Each entry gives the
   outer (subreg) mode size, the inner (hard reg) mode size, a bitmask
   of allowed SUBREG_BYTE offsets (bit N set = byte offset N legal),
   when the entry applies (1 = always, 16 = TARGET_A16 only,
   24 = TARGET_A24 only), and the hard register it describes.  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs, pseudos, and memory-mapped registers are
     never a problem; only size-changing subregs of the small hard
     registers need checking against the table.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset to a one-hot mask matching byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No table entry allowed it: the subreg is unsupported.  */
  return true;
}
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* The A24 family has enough addressing flexibility; no restriction.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs to the underlying operand.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  /* Deliberately disabled: always declines the combination.  */
  return false;
}
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  */
/* Return the rtx referring to the BYTE offset of X reinterpreted in
   mode OUTER (X currently has mode INNER).  MEMs are rewritten or
   adjusted directly; hard registers are mapped through the target's
   register layout; anything unmappable aborts.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the frame pointer, and memory-mapped registers take the
     generic path.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  */
/* Returns nonzero if the move was fully emitted here (via
   m32c_split_move); zero means the caller's pattern should continue
   with the (possibly rewritten) operands.  */
int
m32c_prepare_move (rtx * operands, machine_mode mode)
{
  /* Constants can't be stored into far address space directly;
     force them through a register first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  /* A PRE_MODIFY destination is legalized by emitting the address
     update as a separate insn first.  */
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves while we can still create pseudos.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop; must be split (no pop.l).  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  */
/* Returns nonzero when the move was split (and, for SPLIT_ALL != 3,
   the partial moves were emitted here).  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Every mode handled here splits into exactly two half-sized parts.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber.  */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit the parts in whichever order avoids clobbering a source
	 half before it is read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
   the like.  For the R8C they expect one of the addresses to be in
   R1L:An so we need to arrange for that.  Otherwise, it's just a
   matter of picking out the operands we want and emitting the right
   pattern for them.  All these expanders, which correspond to
   patterns in blkmov.md, must return nonzero if they expand the insn,
   or zero if they should FAIL.  */

/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Scratch outputs for the opcode's updated address/count.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a word and store
	 word-at-a-time, halving the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Scratch outputs for the opcode's updated addresses/count.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      /* Even constant count: copy word-at-a-time with half the count.  */
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3658 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3659 the copy, which should point to the NUL at the end of the string,
3660 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3661 Since our opcode leaves the destination pointing *after* the NUL,
3662 we must emit an adjustment. */
3664 m32c_expand_movstr(rtx *operands)
3666 rtx desta, srca;
3667 rtx desto, srco;
3669 desta = XEXP (operands[1], 0);
3670 srca = XEXP (operands[2], 0);
3672 desto = gen_reg_rtx (Pmode);
3673 srco = gen_reg_rtx (Pmode);
3675 if (GET_CODE (desta) != REG
3676 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3677 desta = copy_to_mode_reg (Pmode, desta);
3679 if (GET_CODE (srca) != REG
3680 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3681 srca = copy_to_mode_reg (Pmode, srca);
3683 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3684 /* desto ends up being a1, which allows this type of add through MOVA. */
3685 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3687 return 1;
3690 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3691 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3692 $2 is the other (MEM:BLK). We must do the comparison, and then
3693 convert the flags to a signed integer result. */
3695 m32c_expand_cmpstr(rtx *operands)
3697 rtx src1a, src2a;
3699 src1a = XEXP (operands[1], 0);
3700 src2a = XEXP (operands[2], 0);
3702 if (GET_CODE (src1a) != REG
3703 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3704 src1a = copy_to_mode_reg (Pmode, src1a);
3706 if (GET_CODE (src2a) != REG
3707 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3708 src2a = copy_to_mode_reg (Pmode, src2a);
3710 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3711 emit_insn (gen_cond_to_int (operands[0]));
3713 return 1;
3717 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3719 static shift_gen_func
3720 shift_gen_func_for (int mode, int code)
3722 #define GFF(m,c,f) if (mode == m && code == c) return f
3723 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3724 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3725 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3726 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3727 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3728 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3729 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3730 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3731 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3732 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3733 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3734 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3735 #undef GFF
3736 gcc_unreachable ();
3739 /* The m32c only has one shift, but it takes a signed count.  GCC
3740    doesn't want this, so we fake it by negating any shift count when
3741    we're pretending to shift the other way.  Also, the shift count is
3742    limited to -8..8.  It's slightly better to use two shifts for 9..15
3743    than to load the count into r1h, so we do that too.  */
3745 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3747   machine_mode mode = GET_MODE (operands[0]);
3748   shift_gen_func func = shift_gen_func_for (mode, shift_code);
3749   rtx temp;
     /* Constant count: emit as many maximal-width shifts as needed,
	then one final shift for the remainder, and we're done.  */
3751   if (GET_CODE (operands[2]) == CONST_INT)
3753       int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3754       int count = INTVAL (operands[2]) * scale;
3756       while (count > maxc)
3758 	  temp = gen_reg_rtx (mode);
3759 	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3760 	  operands[1] = temp;
3761 	  count -= maxc;
3763       while (count < -maxc)
3765 	  temp = gen_reg_rtx (mode);
3766 	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3767 	  operands[1] = temp;
3768 	  count += maxc;
3770       emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3771       return 1;
     /* Variable count: copy/negate the count into a QI temp as
	needed, then (for A16 SImode) expand the -16..16 clamp
	sequence inline.  */
3774   temp = gen_reg_rtx (QImode);
3775   if (scale < 0)
3776     /* The pattern has a NEG that corresponds to this.  */
3777     emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3778   else if (TARGET_A16 && mode == SImode)
3779     /* We do this because the code below may modify this, we don't
3780        want to modify the origin of this value.  */
3781     emit_move_insn (temp, operands[2]);
3782   else
3783     /* We'll only use it for the shift, no point emitting a move.  */
3784     temp = operands[2];
3786   if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3788       /* The m16c has a limit of -16..16 for SI shifts, even when the
3789 	 shift count is in a register.  Since there are so many targets
3790 	 of these shifts, it's better to expand the RTL here than to
3791 	 call a helper function.
3793 	 The resulting code looks something like this:
3795 		cmp.b	r1h,-16
3796 		jge.b	1f
3797 		shl.l	-16,dest
3798 		add.b	r1h,16
3799 	1f:	cmp.b	r1h,16
3800 		jle.b	1f
3801 		shl.l	16,dest
3802 		sub.b	r1h,16
3803 	1f:	shl.l	r1h,dest
3805 	 We take advantage of the fact that "negative" shifts are
3806 	 undefined to skip one of the comparisons.  */
3808       rtx count;
3809       rtx label, tempvar;
3810       rtx_insn *insn;
3812       emit_move_insn (operands[0], operands[1]);
3814       count = temp;
3815       label = gen_label_rtx ();
3816       LABEL_NUSES (label) ++;
3818       tempvar = gen_reg_rtx (mode);
3820       if (shift_code == ASHIFT)
3822 	  /* This is a left shift.  We only need check positive counts.  */
3823 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3824 					  count, GEN_INT (16), label));
	  /* Overshoot by a 16-bit shift done as two 8-bit shifts,
	     then reduce the count; the label lands after the add.  */
3825 	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3826 	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3827 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3828 	  emit_label_after (label, insn);
3830       else
3832 	  /* This is a right shift.  We only need check negative counts.  */
3833 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3834 					  count, GEN_INT (-16), label));
3835 	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3836 	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3837 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3838 	  emit_label_after (label, insn);
      /* The remaining (clamped) shift happens in place.  */
3840       operands[1] = operands[0];
3841       emit_insn (func (operands[0], operands[0], count));
3842       return 1;
     /* Returning 0 tells the caller to emit the normal pattern with
	the (possibly negated) count in operands[2].  */
3845   operands[2] = temp;
3846   return 0;
3849 /* The m32c has a limited range of operations that work on PSImode
3850 values; we have to expand to SI, do the math, and truncate back to
3851 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3852 those cases. */
3853 void
3854 m32c_expand_neg_mulpsi3 (rtx * operands)
3856 /* operands: a = b * i */
3857 rtx temp1; /* b as SI */
3858 rtx scale /* i as SI */;
3859 rtx temp2; /* a*b as SI */
3861 temp1 = gen_reg_rtx (SImode);
3862 temp2 = gen_reg_rtx (SImode);
3863 if (GET_CODE (operands[2]) != CONST_INT)
3865 scale = gen_reg_rtx (SImode);
3866 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3868 else
3869 scale = copy_to_mode_reg (SImode, operands[2]);
3871 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3872 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3873 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3876 /* Pattern Output Functions */
3879 m32c_expand_movcc (rtx *operands)
3881 rtx rel = operands[1];
3883 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3884 return 1;
3885 if (GET_CODE (operands[2]) != CONST_INT
3886 || GET_CODE (operands[3]) != CONST_INT)
3887 return 1;
3888 if (GET_CODE (rel) == NE)
3890 rtx tmp = operands[2];
3891 operands[2] = operands[3];
3892 operands[3] = tmp;
3893 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3896 emit_move_insn (operands[0],
3897 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3898 rel,
3899 operands[2],
3900 operands[3]));
3901 return 0;
3904 /* Used for the "insv" pattern.  Return nonzero to fail, else done. */
3906 m32c_expand_insv (rtx *operands)
3908   rtx op0, src0, p;
3909   int mask;
     /* Only single-bit insertions are handled.  */
3911   if (INTVAL (operands[1]) != 1)
3912     return 1;
3914   /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3915   if (GET_CODE (operands[3]) != CONST_INT)
3916     return 1;
3917   if (INTVAL (operands[3]) != 0
3918       && INTVAL (operands[3]) != 1
3919       && INTVAL (operands[3]) != -1)
3920     return 1;
     /* Single-bit mask at the requested bit position.  */
3922   mask = 1 << INTVAL (operands[2]);
     /* Strip a low-part SUBREG so we operate on the underlying
	HI/QI object directly.  */
3924   op0 = operands[0];
3925   if (GET_CODE (op0) == SUBREG
3926       && SUBREG_BYTE (op0) == 0)
3928       rtx sub = SUBREG_REG (op0);
3929       if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3930 	op0 = sub;
     /* Read-modify-write source: use op0 in place when we cannot make
	pseudos or when op0 is a volatile MEM (must not double-read).  */
3933   if (!can_create_pseudo_p ()
3934       || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3935     src0 = op0;
3936   else
3938       src0 = gen_reg_rtx (GET_MODE (op0));
3939       emit_move_insn (src0, op0);
     /* NOTE(review): new_mem is built and given op0's attributes but
	op0 is never redirected to it in the visible code — only the
	mask is shifted.  Looks like the QI narrowing of a high-byte
	HImode MEM access is incomplete; confirm against upstream.  */
3942   if (GET_MODE (op0) == HImode
3943       && INTVAL (operands[2]) >= 8
3944       && GET_CODE (op0) == MEM)
3946       /* We are little endian. */
3947       rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3948 							XEXP (op0, 0), 1));
3949       MEM_COPY_ATTRIBUTES (new_mem, op0);
3950       mask >>= 8;
3953   /* First, we generate a mask with the correct polarity.  If we are
3954      storing a zero, we want an AND mask, so invert it. */
3955   if (INTVAL (operands[3]) == 0)
3957       /* Storing a zero, use an AND mask */
3958       if (GET_MODE (op0) == HImode)
3959 	mask ^= 0xffff;
3960       else
3961 	mask ^= 0xff;
3963   /* Now we need to properly sign-extend the mask in case we need to
3964      fall back to an AND or OR opcode. */
3965   if (GET_MODE (op0) == HImode)
3967       if (mask & 0x8000)
3968 	mask -= 0x10000;
3970   else
3972       if (mask & 0x80)
3973 	mask -= 0x100;
     /* Dispatch on value (AND vs IOR), width (QI vs HI), and address
	size (16 vs 24) to pick the concrete pattern.  */
3976   switch (  (INTVAL (operands[3]) ? 4 : 0)
3977 	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
3978 	  + (TARGET_A24 ? 1 : 0))
3980     case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3981     case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3982     case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3983     case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3984     case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3985     case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3986     case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3987     case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3988     default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3991   emit_insn (p);
3992   return 0;
3995 const char *
3996 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3998 static char buf[30];
3999 if (GET_CODE (operands[0]) == REG
4000 && REGNO (operands[0]) == R0_REGNO)
4002 if (code == EQ)
4003 return "stzx\t#1,#0,r0l";
4004 if (code == NE)
4005 return "stzx\t#0,#1,r0l";
4007 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4008 return buf;
4011 /* Encode symbol attributes of a SYMBOL_REF into its
4012 SYMBOL_REF_FLAGS. */
4013 static void
4014 m32c_encode_section_info (tree decl, rtx rtl, int first)
4016 int extra_flags = 0;
4018 default_encode_section_info (decl, rtl, first);
4019 if (TREE_CODE (decl) == FUNCTION_DECL
4020 && m32c_special_page_vector_p (decl))
4022 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4024 if (extra_flags)
4025 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4028 /* Returns TRUE if the current function is a leaf, and thus we can
4029 determine which registers an interrupt function really needs to
4030 save. The logic below is mostly about finding the insn sequence
4031 that's the function, versus any sequence that might be open for the
4032 current insn. */
4033 static int
4034 m32c_leaf_function_p (void)
4036 rtx_insn *saved_first, *saved_last;
4037 struct sequence_stack *seq;
4038 int rv;
4040 saved_first = crtl->emit.x_first_insn;
4041 saved_last = crtl->emit.x_last_insn;
4042 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
4044 if (seq)
4046 crtl->emit.x_first_insn = seq->first;
4047 crtl->emit.x_last_insn = seq->last;
4050 rv = leaf_function_p ();
4052 crtl->emit.x_first_insn = saved_first;
4053 crtl->emit.x_last_insn = saved_last;
4054 return rv;
4057 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4058 opcodes. If the function doesn't need the frame base or stack
4059 pointer, it can use the simpler RTS opcode. */
4060 static bool
4061 m32c_function_needs_enter (void)
4063 rtx_insn *insn;
4064 struct sequence_stack *seq;
4065 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4066 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4068 insn = get_insns ();
4069 for (seq = crtl->emit.sequence_stack;
4070 seq;
4071 insn = seq->first, seq = seq->next);
4073 while (insn)
4075 if (reg_mentioned_p (sp, insn))
4076 return true;
4077 if (reg_mentioned_p (fb, insn))
4078 return true;
4079 insn = NEXT_INSN (insn);
4081 return false;
4084 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4085 frame-related. Return PAR.
4087 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4088 PARALLEL rtx other than the first if they do not have the
4089 FRAME_RELATED flag set on them. So this function is handy for
4090 marking up 'enter' instructions. */
4091 static rtx
4092 m32c_all_frame_related (rtx par)
4094 int len = XVECLEN (par, 0);
4095 int i;
4097 for (i = 0; i < len; i++)
4098 F (XVECEXP (par, 0, i));
4100 return par;
4103 /* Emits the prologue.  See the frame layout comment earlier in this
4104    file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
4105    that we manually update sp. */
4106 void
4107 m32c_emit_prologue (void)
4109   int frame_size, extra_frame_size = 0, reg_save_size;
4110   int complex_prologue = 0;
4112   cfun->machine->is_leaf = m32c_leaf_function_p ();
4113   if (interrupt_p (cfun->decl))
4115       cfun->machine->is_interrupt = 1;
4116       complex_prologue = 1;
4118   else if (bank_switch_p (cfun->decl))
4119     warning (OPT_Wattributes,
4120 	     "%<bank_switch%> has no effect on non-interrupt functions");
     /* Bytes of register save area, used to size the local frame.  */
4122   reg_save_size = m32c_pushm_popm (PP_justcount);
     /* Interrupts save registers either via bank switch (FSET B) or an
	explicit PUSHM before the frame is set up.  */
4124   if (interrupt_p (cfun->decl))
4126       if (bank_switch_p (cfun->decl))
4127 	emit_insn (gen_fset_b ());
4128       else if (cfun->machine->intr_pushm)
4129 	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4132   frame_size =
4133     m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4134   if (frame_size == 0
4135       && !m32c_function_needs_enter ())
4136     cfun->machine->use_rts = 1;
     /* ENTER can only reserve up to 254 bytes here; the rest is done
	with an explicit SP adjustment below.  */
4138   if (frame_size > 254)
4140       extra_frame_size = frame_size - 254;
4141       frame_size = 254;
     /* F() marks the insn frame-related for DWARF CFI.  */
4143   if (cfun->machine->use_rts == 0)
4144     F (emit_insn (m32c_all_frame_related
4145 		  (TARGET_A16
4146 		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4147 		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4149   if (extra_frame_size)
4151       complex_prologue = 1;
4152       if (TARGET_A16)
4153 	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4154 				  gen_rtx_REG (HImode, SP_REGNO),
4155 				  GEN_INT (-extra_frame_size))));
4156       else
4157 	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4158 				   gen_rtx_REG (PSImode, SP_REGNO),
4159 				   GEN_INT (-extra_frame_size))));
     /* Push the call-saved registers for non-interrupt functions.  */
4162   complex_prologue += m32c_pushm_popm (PP_pushm);
4164   /* This just emits a comment into the .s file for debugging. */
4165   if (complex_prologue)
4166     emit_insn (gen_prologue_end ());
4169 /* Likewise, for the epilogue.  The only exception is that, for
4170    interrupts, we must manually unwind the frame as the REIT opcode
4171    doesn't do that. */
4172 void
4173 m32c_emit_epilogue (void)
4175   int popm_count = m32c_pushm_popm (PP_justcount);
4177   /* This just emits a comment into the .s file for debugging. */
4178   if (popm_count > 0 || cfun->machine->is_interrupt)
4179     emit_insn (gen_epilogue_start ());
     /* Restore call-saved registers.  */
4181   if (popm_count > 0)
4182     m32c_pushm_popm (PP_popm);
4184   if (cfun->machine->is_interrupt)
4186       machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4188       /* REIT clears B flag and restores $fp for us, but we still
4189 	 have to fix up the stack.  USE_RTS just means we didn't
4190 	 emit ENTER. */
4191       if (!cfun->machine->use_rts)
4193 	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4194 			  gen_rtx_REG (spmode, FP_REGNO));
4195 	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4196 			  gen_rtx_REG (spmode, A0_REGNO));
4197 	  /* We can't just add this to the POPM because it would be in
4198 	     the wrong order, and wouldn't fix the stack if we're bank
4199 	     switching. */
4200 	  if (TARGET_A16)
4201 	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4202 	  else
4203 	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
     /* Undo the prologue's explicit PUSHM for non-bank-switch
	interrupts.  */
4205       if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4206 	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4208       /* The FREIT (Fast REturn from InTerrupt) instruction should be
4209 	 generated only for M32C/M32CM targets (generate the REIT
4210 	 instruction otherwise). */
4211       if (fast_interrupt_p (cfun->decl))
4213 	  /* Check if fast_attribute is set for M32C or M32CM. */
4214 	  if (TARGET_A24)
4216 	      emit_jump_insn (gen_epilogue_freit ());
4218 	  /* If fast_interrupt attribute is set for an R8C or M16C
4219 	     target ignore this attribute and generated REIT
4220 	     instruction. */
4221 	  else
4223 	      warning (OPT_Wattributes,
4224 		       "%<fast_interrupt%> attribute directive ignored");
4225 	      emit_jump_insn (gen_epilogue_reit_16 ());
4228       else if (TARGET_A16)
4229 	emit_jump_insn (gen_epilogue_reit_16 ());
4230       else
4231 	emit_jump_insn (gen_epilogue_reit_24 ());
     /* Non-interrupt return: bare RTS when no frame was set up,
	otherwise EXITD to tear down the ENTER frame.  */
4233   else if (cfun->machine->use_rts)
4234     emit_jump_insn (gen_epilogue_rts ());
4235   else if (TARGET_A16)
4236     emit_jump_insn (gen_epilogue_exitd_16 ());
4237   else
4238     emit_jump_insn (gen_epilogue_exitd_24 ());
4241 void
4242 m32c_emit_eh_epilogue (rtx ret_addr)
4244 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4245 return to. We have to fudge the stack, pop everything, pop SP
4246 (fudged), and return (fudged). This is actually easier to do in
4247 assembler, so punt to libgcc. */
4248 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4249 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4252 /* Indicate which flags must be properly set for a given conditional. */
4253 static int
4254 flags_needed_for_conditional (rtx cond)
4256 switch (GET_CODE (cond))
4258 case LE:
4259 case GT:
4260 return FLAGS_OSZ;
4261 case LEU:
4262 case GTU:
4263 return FLAGS_ZC;
4264 case LT:
4265 case GE:
4266 return FLAGS_OS;
4267 case LTU:
4268 case GEU:
4269 return FLAGS_C;
4270 case EQ:
4271 case NE:
4272 return FLAGS_Z;
4273 default:
4274 return FLAGS_N;
4278 #define DEBUG_CMP 0
4280 /* Returns true if a compare insn is redundant because it would only
4281 set flags that are already set correctly. */
4282 static bool
4283 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4285 int flags_needed;
4286 int pflags;
4287 rtx_insn *prev;
4288 rtx pp, next;
4289 rtx op0, op1;
4290 #if DEBUG_CMP
4291 int prev_icode, i;
4292 #endif
4294 op0 = operands[0];
4295 op1 = operands[1];
4297 #if DEBUG_CMP
4298 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4299 debug_rtx(cmp);
4300 for (i=0; i<2; i++)
4302 fprintf(stderr, "operands[%d] = ", i);
4303 debug_rtx(operands[i]);
4305 #endif
4307 next = next_nonnote_insn (cmp);
4308 if (!next || !INSN_P (next))
4310 #if DEBUG_CMP
4311 fprintf(stderr, "compare not followed by insn\n");
4312 debug_rtx(next);
4313 #endif
4314 return false;
4316 if (GET_CODE (PATTERN (next)) == SET
4317 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4319 next = XEXP (XEXP (PATTERN (next), 1), 0);
4321 else if (GET_CODE (PATTERN (next)) == SET)
4323 /* If this is a conditional, flags_needed will be something
4324 other than FLAGS_N, which we test below. */
4325 next = XEXP (PATTERN (next), 1);
4327 else
4329 #if DEBUG_CMP
4330 fprintf(stderr, "compare not followed by conditional\n");
4331 debug_rtx(next);
4332 #endif
4333 return false;
4335 #if DEBUG_CMP
4336 fprintf(stderr, "conditional is: ");
4337 debug_rtx(next);
4338 #endif
4340 flags_needed = flags_needed_for_conditional (next);
4341 if (flags_needed == FLAGS_N)
4343 #if DEBUG_CMP
4344 fprintf(stderr, "compare not followed by conditional\n");
4345 debug_rtx(next);
4346 #endif
4347 return false;
4350 /* Compare doesn't set overflow and carry the same way that
4351 arithmetic instructions do, so we can't replace those. */
4352 if (flags_needed & FLAGS_OC)
4353 return false;
4355 prev = cmp;
4356 do {
4357 prev = prev_nonnote_insn (prev);
4358 if (!prev)
4360 #if DEBUG_CMP
4361 fprintf(stderr, "No previous insn.\n");
4362 #endif
4363 return false;
4365 if (!INSN_P (prev))
4367 #if DEBUG_CMP
4368 fprintf(stderr, "Previous insn is a non-insn.\n");
4369 #endif
4370 return false;
4372 pp = PATTERN (prev);
4373 if (GET_CODE (pp) != SET)
4375 #if DEBUG_CMP
4376 fprintf(stderr, "Previous insn is not a SET.\n");
4377 #endif
4378 return false;
4380 pflags = get_attr_flags (prev);
4382 /* Looking up attributes of previous insns corrupted the recog
4383 tables. */
4384 INSN_UID (cmp) = -1;
4385 recog (PATTERN (cmp), cmp, 0);
4387 if (pflags == FLAGS_N
4388 && reg_mentioned_p (op0, pp))
4390 #if DEBUG_CMP
4391 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4392 debug_rtx(prev);
4393 #endif
4394 return false;
4397 /* Check for comparisons against memory - between volatiles and
4398 aliases, we just can't risk this one. */
4399 if (GET_CODE (operands[0]) == MEM
4400 || GET_CODE (operands[0]) == MEM)
4402 #if DEBUG_CMP
4403 fprintf(stderr, "comparisons with memory:\n");
4404 debug_rtx(prev);
4405 #endif
4406 return false;
4409 /* Check for PREV changing a register that's used to compute a
4410 value in CMP, even if it doesn't otherwise change flags. */
4411 if (GET_CODE (operands[0]) == REG
4412 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4414 #if DEBUG_CMP
4415 fprintf(stderr, "sub-value affected, op0:\n");
4416 debug_rtx(prev);
4417 #endif
4418 return false;
4420 if (GET_CODE (operands[1]) == REG
4421 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4423 #if DEBUG_CMP
4424 fprintf(stderr, "sub-value affected, op1:\n");
4425 debug_rtx(prev);
4426 #endif
4427 return false;
4430 } while (pflags == FLAGS_N);
4431 #if DEBUG_CMP
4432 fprintf(stderr, "previous flag-setting insn:\n");
4433 debug_rtx(prev);
4434 debug_rtx(pp);
4435 #endif
4437 if (GET_CODE (pp) == SET
4438 && GET_CODE (XEXP (pp, 0)) == REG
4439 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4440 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4442 /* Adjacent cbranches must have the same operands to be
4443 redundant. */
4444 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4445 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4446 #if DEBUG_CMP
4447 fprintf(stderr, "adjacent cbranches\n");
4448 debug_rtx(pop0);
4449 debug_rtx(pop1);
4450 #endif
4451 if (rtx_equal_p (op0, pop0)
4452 && rtx_equal_p (op1, pop1))
4453 return true;
4454 #if DEBUG_CMP
4455 fprintf(stderr, "prev cmp not same\n");
4456 #endif
4457 return false;
4460 /* Else the previous insn must be a SET, with either the source or
4461 dest equal to operands[0], and operands[1] must be zero. */
4463 if (!rtx_equal_p (op1, const0_rtx))
4465 #if DEBUG_CMP
4466 fprintf(stderr, "operands[1] not const0_rtx\n");
4467 #endif
4468 return false;
4470 if (GET_CODE (pp) != SET)
4472 #if DEBUG_CMP
4473 fprintf (stderr, "pp not set\n");
4474 #endif
4475 return false;
4477 if (!rtx_equal_p (op0, SET_SRC (pp))
4478 && !rtx_equal_p (op0, SET_DEST (pp)))
4480 #if DEBUG_CMP
4481 fprintf(stderr, "operands[0] not found in set\n");
4482 #endif
4483 return false;
4486 #if DEBUG_CMP
4487 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4488 #endif
4489 if ((pflags & flags_needed) == flags_needed)
4490 return true;
4492 return false;
4495 /* Return the pattern for a compare. This will be commented out if
4496 the compare is redundant, else a normal pattern is returned. Thus,
4497 the assembler output says where the compare would have been. */
4498 char *
4499 m32c_output_compare (rtx_insn *insn, rtx *operands)
4501 static char templ[] = ";cmp.b\t%1,%0";
4502 /* ^ 5 */
4504 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4505 if (m32c_compare_redundant (insn, operands))
4507 #if DEBUG_CMP
4508 fprintf(stderr, "cbranch: cmp not needed\n");
4509 #endif
4510 return templ;
4513 #if DEBUG_CMP
4514 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4515 #endif
4516 return templ + 1;
4519 #undef TARGET_ENCODE_SECTION_INFO
4520 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4522 /* If the frame pointer isn't used, we detect it manually. But the
4523 stack pointer doesn't have as flexible addressing as the frame
4524 pointer, so we always assume we have it. */
4526 #undef TARGET_FRAME_POINTER_REQUIRED
4527 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4529 /* The Global `targetm' Variable. */
4531 struct gcc_target targetm = TARGET_INITIALIZER;
4533 #include "gt-m32c.h"