PR/56490
[official-gcc.git] / gcc / config / m32c / m32c.c
blobdeac40c228f82f3e94dbd73ac755b8d0b3df64d8
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "tree.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "except.h"
42 #include "function.h"
43 #include "ggc.h"
44 #include "target.h"
45 #include "target-def.h"
46 #include "tm_p.h"
47 #include "langhooks.h"
48 #include "gimple.h"
49 #include "df.h"
50 #include "tm-constrs.h"
52 /* Prototypes */
54 /* Used by m32c_pushm_popm. */
55 typedef enum
57 PP_pushm,
58 PP_popm,
59 PP_justcount
60 } Push_Pop_Type;
62 static bool m32c_function_needs_enter (void);
63 static tree interrupt_handler (tree *, tree, tree, int, bool *);
64 static tree function_vector_handler (tree *, tree, tree, int, bool *);
65 static int interrupt_p (tree node);
66 static int bank_switch_p (tree node);
67 static int fast_interrupt_p (tree node);
68 static int interrupt_p (tree node);
69 static bool m32c_asm_integer (rtx, unsigned int, int);
70 static int m32c_comp_type_attributes (const_tree, const_tree);
71 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
72 static struct machine_function *m32c_init_machine_status (void);
73 static void m32c_insert_attributes (tree, tree *);
74 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
75 static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
76 static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
77 const_tree, bool);
78 static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
79 const_tree, bool);
80 static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
81 const_tree, bool);
82 static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
83 static int m32c_pushm_popm (Push_Pop_Type);
84 static bool m32c_strict_argument_naming (cumulative_args_t);
85 static rtx m32c_struct_value_rtx (tree, int);
86 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87 static int need_to_save (int);
88 static rtx m32c_function_value (const_tree, const_tree, bool);
89 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
91 /* Returns true if an address is specified, else false. */
92 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
94 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
96 #define streq(a,b) (strcmp ((a), (b)) == 0)
98 /* Internal support routines */
100 /* Debugging statements are tagged with DEBUG0 only so that they can
101 be easily enabled individually, by replacing the '0' with '1' as
102 needed. */
103 #define DEBUG0 0
104 #define DEBUG1 1
106 #if DEBUG0
107 /* This is needed by some of the commented-out debug statements
108 below. */
109 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
110 #endif
111 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
113 /* These are all to support encode_pattern(). */
114 static char pattern[30], *patternp;
115 static GTY(()) rtx patternr[30];
116 #define RTX_IS(x) (streq (pattern, x))
118 /* Some macros to simplify the logic throughout this file. */
119 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
120 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
122 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
123 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
125 static int
126 far_addr_space_p (rtx x)
128 if (GET_CODE (x) != MEM)
129 return 0;
130 #if DEBUG0
131 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
132 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
133 #endif
134 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
137 /* We do most RTX matching by converting the RTX into a string, and
138 using string compares. This vastly simplifies the logic in many of
139 the functions in this file.
141 On exit, pattern[] has the encoded string (use RTX_IS("...") to
142 compare it) and patternr[] has pointers to the nodes in the RTX
143 corresponding to each character in the encoded string. The latter
144 is mostly used by print_operand().
146 Unrecognized patterns have '?' in them; this shows up when the
147 assembler complains about syntax errors.
150 static void
151 encode_pattern_1 (rtx x)
153 int i;
155 if (patternp == pattern + sizeof (pattern) - 2)
157 patternp[-1] = '?';
158 return;
161 patternr[patternp - pattern] = x;
163 switch (GET_CODE (x))
165 case REG:
166 *patternp++ = 'r';
167 break;
168 case SUBREG:
169 if (GET_MODE_SIZE (GET_MODE (x)) !=
170 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
171 *patternp++ = 'S';
172 encode_pattern_1 (XEXP (x, 0));
173 break;
174 case MEM:
175 *patternp++ = 'm';
176 case CONST:
177 encode_pattern_1 (XEXP (x, 0));
178 break;
179 case SIGN_EXTEND:
180 *patternp++ = '^';
181 *patternp++ = 'S';
182 encode_pattern_1 (XEXP (x, 0));
183 break;
184 case ZERO_EXTEND:
185 *patternp++ = '^';
186 *patternp++ = 'Z';
187 encode_pattern_1 (XEXP (x, 0));
188 break;
189 case PLUS:
190 *patternp++ = '+';
191 encode_pattern_1 (XEXP (x, 0));
192 encode_pattern_1 (XEXP (x, 1));
193 break;
194 case PRE_DEC:
195 *patternp++ = '>';
196 encode_pattern_1 (XEXP (x, 0));
197 break;
198 case POST_INC:
199 *patternp++ = '<';
200 encode_pattern_1 (XEXP (x, 0));
201 break;
202 case LO_SUM:
203 *patternp++ = 'L';
204 encode_pattern_1 (XEXP (x, 0));
205 encode_pattern_1 (XEXP (x, 1));
206 break;
207 case HIGH:
208 *patternp++ = 'H';
209 encode_pattern_1 (XEXP (x, 0));
210 break;
211 case SYMBOL_REF:
212 *patternp++ = 's';
213 break;
214 case LABEL_REF:
215 *patternp++ = 'l';
216 break;
217 case CODE_LABEL:
218 *patternp++ = 'c';
219 break;
220 case CONST_INT:
221 case CONST_DOUBLE:
222 *patternp++ = 'i';
223 break;
224 case UNSPEC:
225 *patternp++ = 'u';
226 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
227 for (i = 0; i < XVECLEN (x, 0); i++)
228 encode_pattern_1 (XVECEXP (x, 0, i));
229 break;
230 case USE:
231 *patternp++ = 'U';
232 break;
233 case PARALLEL:
234 *patternp++ = '|';
235 for (i = 0; i < XVECLEN (x, 0); i++)
236 encode_pattern_1 (XVECEXP (x, 0, i));
237 break;
238 case EXPR_LIST:
239 *patternp++ = 'E';
240 encode_pattern_1 (XEXP (x, 0));
241 if (XEXP (x, 1))
242 encode_pattern_1 (XEXP (x, 1));
243 break;
244 default:
245 *patternp++ = '?';
246 #if DEBUG0
247 fprintf (stderr, "can't encode pattern %s\n",
248 GET_RTX_NAME (GET_CODE (x)));
249 debug_rtx (x);
250 gcc_unreachable ();
251 #endif
252 break;
256 static void
257 encode_pattern (rtx x)
259 patternp = pattern;
260 encode_pattern_1 (x);
261 *patternp = 0;
264 /* Since register names indicate the mode they're used in, we need a
265 way to determine which name to refer to the register with. Called
266 by print_operand(). */
268 static const char *
269 reg_name_with_mode (int regno, enum machine_mode mode)
271 int mlen = GET_MODE_SIZE (mode);
272 if (regno == R0_REGNO && mlen == 1)
273 return "r0l";
274 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
275 return "r2r0";
276 if (regno == R0_REGNO && mlen == 6)
277 return "r2r1r0";
278 if (regno == R0_REGNO && mlen == 8)
279 return "r3r1r2r0";
280 if (regno == R1_REGNO && mlen == 1)
281 return "r1l";
282 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
283 return "r3r1";
284 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
285 return "a1a0";
286 return reg_names[regno];
289 /* How many bytes a register uses on stack when it's pushed. We need
290 to know this because the push opcode needs to explicitly indicate
291 the size of the register, even though the name of the register
292 already tells it that. Used by m32c_output_reg_{push,pop}, which
293 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
295 static int
296 reg_push_size (int regno)
298 switch (regno)
300 case R0_REGNO:
301 case R1_REGNO:
302 return 2;
303 case R2_REGNO:
304 case R3_REGNO:
305 case FLG_REGNO:
306 return 2;
307 case A0_REGNO:
308 case A1_REGNO:
309 case SB_REGNO:
310 case FB_REGNO:
311 case SP_REGNO:
312 if (TARGET_A16)
313 return 2;
314 else
315 return 3;
316 default:
317 gcc_unreachable ();
321 /* Given two register classes, find the largest intersection between
322 them. If there is no intersection, return RETURNED_IF_EMPTY
323 instead. */
324 static reg_class_t
325 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
326 reg_class_t returned_if_empty)
328 HARD_REG_SET cc;
329 int i;
330 reg_class_t best = NO_REGS;
331 unsigned int best_size = 0;
333 if (original_class == limiting_class)
334 return original_class;
336 cc = reg_class_contents[original_class];
337 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
339 for (i = 0; i < LIM_REG_CLASSES; i++)
341 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
342 if (best_size < reg_class_size[i])
344 best = (reg_class_t) i;
345 best_size = reg_class_size[i];
349 if (best == NO_REGS)
350 return returned_if_empty;
351 return best;
354 /* Used by m32c_register_move_cost to determine if a move is
355 impossibly expensive. */
356 static bool
357 class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
359 /* Cache the results: 0=untested 1=no 2=yes */
360 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
362 if (results[(int) rclass][mode] == 0)
364 int r;
365 results[rclass][mode] = 1;
366 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
367 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
368 && HARD_REGNO_MODE_OK (r, mode))
370 results[rclass][mode] = 2;
371 break;
375 #if DEBUG0
376 fprintf (stderr, "class %s can hold %s? %s\n",
377 class_names[(int) rclass], mode_name[mode],
378 (results[rclass][mode] == 2) ? "yes" : "no");
379 #endif
380 return results[(int) rclass][mode] == 2;
383 /* Run-time Target Specification. */
385 /* Memregs are memory locations that gcc treats like general
386 registers, as there are a limited number of true registers and the
387 m32c families can use memory in most places that registers can be
388 used.
390 However, since memory accesses are more expensive than registers,
391 we allow the user to limit the number of memregs available, in
392 order to try to persuade gcc to try harder to use real registers.
394 Memregs are provided by lib1funcs.S.
397 int ok_to_change_target_memregs = TRUE;
399 /* Implements TARGET_OPTION_OVERRIDE. */
401 #undef TARGET_OPTION_OVERRIDE
402 #define TARGET_OPTION_OVERRIDE m32c_option_override
404 static void
405 m32c_option_override (void)
407 /* We limit memregs to 0..16, and provide a default. */
408 if (global_options_set.x_target_memregs)
410 if (target_memregs < 0 || target_memregs > 16)
411 error ("invalid target memregs value '%d'", target_memregs);
413 else
414 target_memregs = 16;
416 if (TARGET_A24)
417 flag_ivopts = 0;
419 /* This target defaults to strict volatile bitfields. */
420 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
421 flag_strict_volatile_bitfields = 1;
423 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
424 This is always worse than an absolute call. */
425 if (TARGET_A16)
426 flag_no_function_cse = 1;
428 /* This wants to put insns between compares and their jumps. */
429 /* FIXME: The right solution is to properly trace the flags register
430 values, but that is too much work for stage 4. */
431 flag_combine_stack_adjustments = 0;
434 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
435 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
437 static void
438 m32c_override_options_after_change (void)
440 if (TARGET_A16)
441 flag_no_function_cse = 1;
444 /* Defining data structures for per-function information */
446 /* The usual; we set up our machine_function data. */
447 static struct machine_function *
448 m32c_init_machine_status (void)
450 return ggc_alloc_cleared_machine_function ();
453 /* Implements INIT_EXPANDERS. We just set up to call the above
454 function. */
455 void
456 m32c_init_expanders (void)
458 init_machine_status = m32c_init_machine_status;
461 /* Storage Layout */
463 /* Register Basics */
465 /* Basic Characteristics of Registers */
467 /* Whether a mode fits in a register is complex enough to warrant a
468 table. */
469 static struct
471 char qi_regs;
472 char hi_regs;
473 char pi_regs;
474 char si_regs;
475 char di_regs;
476 } nregs_table[FIRST_PSEUDO_REGISTER] =
478 { 1, 1, 2, 2, 4 }, /* r0 */
479 { 0, 1, 0, 0, 0 }, /* r2 */
480 { 1, 1, 2, 2, 0 }, /* r1 */
481 { 0, 1, 0, 0, 0 }, /* r3 */
482 { 0, 1, 1, 0, 0 }, /* a0 */
483 { 0, 1, 1, 0, 0 }, /* a1 */
484 { 0, 1, 1, 0, 0 }, /* sb */
485 { 0, 1, 1, 0, 0 }, /* fb */
486 { 0, 1, 1, 0, 0 }, /* sp */
487 { 1, 1, 1, 0, 0 }, /* pc */
488 { 0, 0, 0, 0, 0 }, /* fl */
489 { 1, 1, 1, 0, 0 }, /* ap */
490 { 1, 1, 2, 2, 4 }, /* mem0 */
491 { 1, 1, 2, 2, 4 }, /* mem1 */
492 { 1, 1, 2, 2, 4 }, /* mem2 */
493 { 1, 1, 2, 2, 4 }, /* mem3 */
494 { 1, 1, 2, 2, 4 }, /* mem4 */
495 { 1, 1, 2, 2, 0 }, /* mem5 */
496 { 1, 1, 2, 2, 0 }, /* mem6 */
497 { 1, 1, 0, 0, 0 }, /* mem7 */
500 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
501 of available memregs, and select which registers need to be preserved
502 across calls based on the chip family. */
504 #undef TARGET_CONDITIONAL_REGISTER_USAGE
505 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
506 void
507 m32c_conditional_register_usage (void)
509 int i;
511 if (0 <= target_memregs && target_memregs <= 16)
513 /* The command line option is bytes, but our "registers" are
514 16-bit words. */
515 for (i = (target_memregs+1)/2; i < 8; i++)
517 fixed_regs[MEM0_REGNO + i] = 1;
518 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
522 /* M32CM and M32C preserve more registers across function calls. */
523 if (TARGET_A24)
525 call_used_regs[R1_REGNO] = 0;
526 call_used_regs[R2_REGNO] = 0;
527 call_used_regs[R3_REGNO] = 0;
528 call_used_regs[A0_REGNO] = 0;
529 call_used_regs[A1_REGNO] = 0;
533 /* How Values Fit in Registers */
535 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
536 different registers are different sizes from each other, *and* may
537 be different sizes in different chip families. */
538 static int
539 m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
541 if (regno == FLG_REGNO && mode == CCmode)
542 return 1;
543 if (regno >= FIRST_PSEUDO_REGISTER)
544 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
546 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
547 return (GET_MODE_SIZE (mode) + 1) / 2;
549 if (GET_MODE_SIZE (mode) <= 1)
550 return nregs_table[regno].qi_regs;
551 if (GET_MODE_SIZE (mode) <= 2)
552 return nregs_table[regno].hi_regs;
553 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
554 return 2;
555 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
556 return nregs_table[regno].pi_regs;
557 if (GET_MODE_SIZE (mode) <= 4)
558 return nregs_table[regno].si_regs;
559 if (GET_MODE_SIZE (mode) <= 8)
560 return nregs_table[regno].di_regs;
561 return 0;
565 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
567 int rv = m32c_hard_regno_nregs_1 (regno, mode);
568 return rv ? rv : 1;
571 /* Implements HARD_REGNO_MODE_OK. The above function does the work
572 already; just test its return value. */
574 m32c_hard_regno_ok (int regno, enum machine_mode mode)
576 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
579 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
580 registers are all different sizes. However, since most modes are
581 bigger than our registers anyway, it's easier to implement this
582 function that way, leaving QImode as the only unique case. */
584 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
586 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
587 return 1;
589 #if 0
590 if (m1 == QImode || m2 == QImode)
591 return 0;
592 #endif
594 return 1;
597 /* Register Classes */
599 /* Implements REGNO_REG_CLASS. */
600 enum reg_class
601 m32c_regno_reg_class (int regno)
603 switch (regno)
605 case R0_REGNO:
606 return R0_REGS;
607 case R1_REGNO:
608 return R1_REGS;
609 case R2_REGNO:
610 return R2_REGS;
611 case R3_REGNO:
612 return R3_REGS;
613 case A0_REGNO:
614 return A0_REGS;
615 case A1_REGNO:
616 return A1_REGS;
617 case SB_REGNO:
618 return SB_REGS;
619 case FB_REGNO:
620 return FB_REGS;
621 case SP_REGNO:
622 return SP_REGS;
623 case FLG_REGNO:
624 return FLG_REGS;
625 default:
626 if (IS_MEM_REGNO (regno))
627 return MEM_REGS;
628 return ALL_REGS;
632 /* Implements REGNO_OK_FOR_BASE_P. */
634 m32c_regno_ok_for_base_p (int regno)
636 if (regno == A0_REGNO
637 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
638 return 1;
639 return 0;
642 #define DEBUG_RELOAD 0
644 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
645 registers of the appropriate size. */
647 #undef TARGET_PREFERRED_RELOAD_CLASS
648 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
650 static reg_class_t
651 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
653 reg_class_t newclass = rclass;
655 #if DEBUG_RELOAD
656 fprintf (stderr, "\npreferred_reload_class for %s is ",
657 class_names[rclass]);
658 #endif
659 if (rclass == NO_REGS)
660 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
662 if (reg_classes_intersect_p (rclass, CR_REGS))
664 switch (GET_MODE (x))
666 case QImode:
667 newclass = HL_REGS;
668 break;
669 default:
670 /* newclass = HI_REGS; */
671 break;
675 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
676 newclass = SI_REGS;
677 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
678 && ! reg_class_subset_p (R03_REGS, rclass))
679 newclass = DI_REGS;
681 rclass = reduce_class (rclass, newclass, rclass);
683 if (GET_MODE (x) == QImode)
684 rclass = reduce_class (rclass, HL_REGS, rclass);
686 #if DEBUG_RELOAD
687 fprintf (stderr, "%s\n", class_names[rclass]);
688 debug_rtx (x);
690 if (GET_CODE (x) == MEM
691 && GET_CODE (XEXP (x, 0)) == PLUS
692 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
693 fprintf (stderr, "Glorm!\n");
694 #endif
695 return rclass;
698 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
700 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
701 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
703 static reg_class_t
704 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
706 return m32c_preferred_reload_class (x, rclass);
709 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
710 address registers for reloads since they're needed for address
711 reloads. */
713 m32c_limit_reload_class (enum machine_mode mode, int rclass)
715 #if DEBUG_RELOAD
716 fprintf (stderr, "limit_reload_class for %s: %s ->",
717 mode_name[mode], class_names[rclass]);
718 #endif
720 if (mode == QImode)
721 rclass = reduce_class (rclass, HL_REGS, rclass);
722 else if (mode == HImode)
723 rclass = reduce_class (rclass, HI_REGS, rclass);
724 else if (mode == SImode)
725 rclass = reduce_class (rclass, SI_REGS, rclass);
727 if (rclass != A_REGS)
728 rclass = reduce_class (rclass, DI_REGS, rclass);
730 #if DEBUG_RELOAD
731 fprintf (stderr, " %s\n", class_names[rclass]);
732 #endif
733 return rclass;
736 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
737 r0 or r1, as those are the only real QImode registers. CR regs get
738 reloaded through appropriately sized general or address
739 registers. */
741 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
743 int cc = class_contents[rclass][0];
744 #if DEBUG0
745 fprintf (stderr, "\nsecondary reload class %s %s\n",
746 class_names[rclass], mode_name[mode]);
747 debug_rtx (x);
748 #endif
749 if (mode == QImode
750 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
751 return QI_REGS;
752 if (reg_classes_intersect_p (rclass, CR_REGS)
753 && GET_CODE (x) == REG
754 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
755 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
756 return NO_REGS;
759 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
760 reloads. */
762 #undef TARGET_CLASS_LIKELY_SPILLED_P
763 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
765 static bool
766 m32c_class_likely_spilled_p (reg_class_t regclass)
768 if (regclass == A_REGS)
769 return true;
771 return (reg_class_size[(int) regclass] == 1);
774 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
775 documented meaning, to avoid potential inconsistencies with actual
776 class definitions. */
778 #undef TARGET_CLASS_MAX_NREGS
779 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
781 static unsigned char
782 m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
784 int rn;
785 unsigned char max = 0;
787 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
788 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
790 unsigned char n = m32c_hard_regno_nregs (rn, mode);
791 if (max < n)
792 max = n;
794 return max;
797 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
798 QI (r0l, r1l) because the chip doesn't support QI ops on other
799 registers (well, it does on a0/a1 but if we let gcc do that, reload
800 suffers). Otherwise, we allow changes to larger modes. */
802 m32c_cannot_change_mode_class (enum machine_mode from,
803 enum machine_mode to, int rclass)
805 int rn;
806 #if DEBUG0
807 fprintf (stderr, "cannot change from %s to %s in %s\n",
808 mode_name[from], mode_name[to], class_names[rclass]);
809 #endif
811 /* If the larger mode isn't allowed in any of these registers, we
812 can't allow the change. */
813 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
814 if (class_contents[rclass][0] & (1 << rn))
815 if (! m32c_hard_regno_ok (rn, to))
816 return 1;
818 if (to == QImode)
819 return (class_contents[rclass][0] & 0x1ffa);
821 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
822 && GET_MODE_SIZE (from) > 1)
823 return 0;
824 if (GET_MODE_SIZE (from) > 2) /* all other regs */
825 return 0;
827 return 1;
830 /* Helpers for the rest of the file. */
831 /* TRUE if the rtx is a REG rtx for the given register. */
832 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
833 && REGNO (rtx) == regno)
834 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
835 base register in address calculations (hence the "strict"
836 argument). */
837 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
838 && (REGNO (rtx) == AP_REGNO \
839 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
841 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
843 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
844 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
845 call return values. */
846 bool
847 m32c_matches_constraint_p (rtx value, int constraint)
849 encode_pattern (value);
851 switch (constraint) {
852 case CONSTRAINT_SF:
853 return (far_addr_space_p (value)
854 && ((RTX_IS ("mr")
855 && A0_OR_PSEUDO (patternr[1])
856 && GET_MODE (patternr[1]) == SImode)
857 || (RTX_IS ("m+^Sri")
858 && A0_OR_PSEUDO (patternr[4])
859 && GET_MODE (patternr[4]) == HImode)
860 || (RTX_IS ("m+^Srs")
861 && A0_OR_PSEUDO (patternr[4])
862 && GET_MODE (patternr[4]) == HImode)
863 || (RTX_IS ("m+^S+ris")
864 && A0_OR_PSEUDO (patternr[5])
865 && GET_MODE (patternr[5]) == HImode)
866 || RTX_IS ("ms")));
867 case CONSTRAINT_Sd:
869 /* This is the common "src/dest" address */
870 rtx r;
871 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
872 return true;
873 if (RTX_IS ("ms") || RTX_IS ("m+si"))
874 return true;
875 if (RTX_IS ("m++rii"))
877 if (REGNO (patternr[3]) == FB_REGNO
878 && INTVAL (patternr[4]) == 0)
879 return true;
881 if (RTX_IS ("mr"))
882 r = patternr[1];
883 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
884 r = patternr[2];
885 else
886 return false;
887 if (REGNO (r) == SP_REGNO)
888 return false;
889 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
891 case CONSTRAINT_Sa:
893 rtx r;
894 if (RTX_IS ("mr"))
895 r = patternr[1];
896 else if (RTX_IS ("m+ri"))
897 r = patternr[2];
898 else
899 return false;
900 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
902 case CONSTRAINT_Si:
903 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
904 case CONSTRAINT_Ss:
905 return ((RTX_IS ("mr")
906 && (IS_REG (patternr[1], SP_REGNO)))
907 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
908 case CONSTRAINT_Sf:
909 return ((RTX_IS ("mr")
910 && (IS_REG (patternr[1], FB_REGNO)))
911 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
912 case CONSTRAINT_Sb:
913 return ((RTX_IS ("mr")
914 && (IS_REG (patternr[1], SB_REGNO)))
915 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
916 case CONSTRAINT_Sp:
917 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
918 return (RTX_IS ("mi")
919 && !(INTVAL (patternr[1]) & ~0x1fff));
920 case CONSTRAINT_S1:
921 return r1h_operand (value, QImode);
922 case CONSTRAINT_Rpa:
923 return GET_CODE (value) == PARALLEL;
924 default:
925 return false;
929 /* STACK AND CALLING */
931 /* Frame Layout */
933 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
934 (yes, THREE bytes) onto the stack for the return address, but we
935 don't support pointers bigger than 16 bits on those chips. This
936 will likely wreak havoc with exception unwinding. FIXME. */
938 m32c_return_addr_rtx (int count)
940 enum machine_mode mode;
941 int offset;
942 rtx ra_mem;
944 if (count)
945 return NULL_RTX;
946 /* we want 2[$fb] */
948 if (TARGET_A24)
950 /* It's four bytes */
951 mode = PSImode;
952 offset = 4;
954 else
956 /* FIXME: it's really 3 bytes */
957 mode = HImode;
958 offset = 2;
961 ra_mem =
962 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
963 offset));
964 return copy_to_mode_reg (mode, ra_mem);
967 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
969 m32c_incoming_return_addr_rtx (void)
971 /* we want [sp] */
972 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
975 /* Exception Handling Support */
977 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
978 pointers. */
980 m32c_eh_return_data_regno (int n)
982 switch (n)
984 case 0:
985 return A0_REGNO;
986 case 1:
987 if (TARGET_A16)
988 return R3_REGNO;
989 else
990 return R1_REGNO;
991 default:
992 return INVALID_REGNUM;
996 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
997 m32c_emit_eh_epilogue. */
999 m32c_eh_return_stackadj_rtx (void)
1001 if (!cfun->machine->eh_stack_adjust)
1003 rtx sa;
1005 sa = gen_rtx_REG (Pmode, R0_REGNO);
1006 cfun->machine->eh_stack_adjust = sa;
1008 return cfun->machine->eh_stack_adjust;
1011 /* Registers That Address the Stack Frame */
1013 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1014 the original spec called for dwarf numbers to vary with register
1015 width as well, for example, r0l, r0, and r2r0 would each have
1016 different dwarf numbers. GCC doesn't support this, and we don't do
1017 it, and gdb seems to like it this way anyway. */
1018 unsigned int
1019 m32c_dwarf_frame_regnum (int n)
1021 switch (n)
1023 case R0_REGNO:
1024 return 5;
1025 case R1_REGNO:
1026 return 6;
1027 case R2_REGNO:
1028 return 7;
1029 case R3_REGNO:
1030 return 8;
1031 case A0_REGNO:
1032 return 9;
1033 case A1_REGNO:
1034 return 10;
1035 case FB_REGNO:
1036 return 11;
1037 case SB_REGNO:
1038 return 19;
1040 case SP_REGNO:
1041 return 12;
1042 case PC_REGNO:
1043 return 13;
1044 default:
1045 return DWARF_FRAME_REGISTERS + 1;
1049 /* The frame looks like this:
1051 ap -> +------------------------------
1052 | Return address (3 or 4 bytes)
1053 | Saved FB (2 or 4 bytes)
1054 fb -> +------------------------------
1055 | local vars
1056 | register saves fb
1057 | through r0 as needed
1058 sp -> +------------------------------
1061 /* We use this to wrap all emitted insns in the prologue. */
1062 static rtx
1063 F (rtx x)
1065 RTX_FRAME_RELATED_P (x) = 1;
1066 return x;
1069 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1070 how much the stack pointer moves for each, for each cpu family. */
1071 static struct
1073 int reg1;
1074 int bit;
1075 int a16_bytes;
1076 int a24_bytes;
1077 } pushm_info[] =
1079 /* These are in reverse push (nearest-to-sp) order. */
1080 { R0_REGNO, 0x80, 2, 2 },
1081 { R1_REGNO, 0x40, 2, 2 },
1082 { R2_REGNO, 0x20, 2, 2 },
1083 { R3_REGNO, 0x10, 2, 2 },
1084 { A0_REGNO, 0x08, 2, 4 },
1085 { A1_REGNO, 0x04, 2, 4 },
1086 { SB_REGNO, 0x02, 2, 4 },
1087 { FB_REGNO, 0x01, 2, 4 }
1090 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1092 /* Returns TRUE if we need to save/restore the given register. We
1093 save everything for exception handlers, so that any register can be
1094 unwound. For interrupt handlers, we save everything if the handler
1095 calls something else (because we don't know what *that* function
1096 might do), but try to be a bit smarter if the handler is a leaf
1097 function. We always save $a0, though, because we use that in the
1098 epilogue to copy $fb to $sp. */
1099 static int
1100 need_to_save (int regno)
1102 if (fixed_regs[regno])
1103 return 0;
1104 if (crtl->calls_eh_return)
1105 return 1;
1106 if (regno == FP_REGNO)
1107 return 0;
1108 if (cfun->machine->is_interrupt
1109 && (!cfun->machine->is_leaf
1110 || (regno == A0_REGNO
1111 && m32c_function_needs_enter ())
1113 return 1;
1114 if (df_regs_ever_live_p (regno)
1115 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1116 return 1;
1117 return 0;
1120 /* This function contains all the intelligence about saving and
1121 restoring registers. It always figures out the register save set.
1122 When called with PP_justcount, it merely returns the size of the
1123 save set (for eliminating the frame pointer, for example). When
1124 called with PP_pushm or PP_popm, it emits the appropriate
1125 instructions for saving (pushm) or restoring (popm) the
1126 registers. */
1127 static int
1128 m32c_pushm_popm (Push_Pop_Type ppt)
1130 int reg_mask = 0;
1131 int byte_count = 0, bytes;
1132 int i;
1133 rtx dwarf_set[PUSHM_N];
1134 int n_dwarfs = 0;
1135 int nosave_mask = 0;
1137 if (crtl->return_rtx
1138 && GET_CODE (crtl->return_rtx) == PARALLEL
1139 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1141 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1142 rtx rv = XEXP (exp, 0);
1143 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1145 if (rv_bytes > 2)
1146 nosave_mask |= 0x20; /* PSI, SI */
1147 else
1148 nosave_mask |= 0xf0; /* DF */
1149 if (rv_bytes > 4)
1150 nosave_mask |= 0x50; /* DI */
1153 for (i = 0; i < (int) PUSHM_N; i++)
1155 /* Skip if neither register needs saving. */
1156 if (!need_to_save (pushm_info[i].reg1))
1157 continue;
1159 if (pushm_info[i].bit & nosave_mask)
1160 continue;
1162 reg_mask |= pushm_info[i].bit;
1163 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1165 if (ppt == PP_pushm)
1167 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1168 rtx addr;
1170 /* Always use stack_pointer_rtx instead of calling
1171 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1172 that there is a single rtx representing the stack pointer,
1173 namely stack_pointer_rtx, and uses == to recognize it. */
1174 addr = stack_pointer_rtx;
1176 if (byte_count != 0)
1177 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1179 dwarf_set[n_dwarfs++] =
1180 gen_rtx_SET (VOIDmode,
1181 gen_rtx_MEM (mode, addr),
1182 gen_rtx_REG (mode, pushm_info[i].reg1));
1183 F (dwarf_set[n_dwarfs - 1]);
1186 byte_count += bytes;
1189 if (cfun->machine->is_interrupt)
1191 cfun->machine->intr_pushm = reg_mask & 0xfe;
1192 reg_mask = 0;
1193 byte_count = 0;
1196 if (cfun->machine->is_interrupt)
1197 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1198 if (need_to_save (i))
1200 byte_count += 2;
1201 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1204 if (ppt == PP_pushm && byte_count)
1206 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1207 rtx pushm;
1209 if (reg_mask)
1211 XVECEXP (note, 0, 0)
1212 = gen_rtx_SET (VOIDmode,
1213 stack_pointer_rtx,
1214 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1215 stack_pointer_rtx,
1216 GEN_INT (-byte_count)));
1217 F (XVECEXP (note, 0, 0));
1219 for (i = 0; i < n_dwarfs; i++)
1220 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1222 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1224 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1227 if (cfun->machine->is_interrupt)
1228 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1229 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1231 if (TARGET_A16)
1232 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1233 else
1234 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1235 F (pushm);
1238 if (ppt == PP_popm && byte_count)
1240 if (cfun->machine->is_interrupt)
1241 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1242 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1244 if (TARGET_A16)
1245 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1246 else
1247 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1249 if (reg_mask)
1250 emit_insn (gen_popm (GEN_INT (reg_mask)));
1253 return byte_count;
1256 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1257 diagrams our call frame. */
1259 m32c_initial_elimination_offset (int from, int to)
1261 int ofs = 0;
1263 if (from == AP_REGNO)
1265 if (TARGET_A16)
1266 ofs += 5;
1267 else
1268 ofs += 8;
1271 if (to == SP_REGNO)
1273 ofs += m32c_pushm_popm (PP_justcount);
1274 ofs += get_frame_size ();
1277 /* Account for push rounding. */
1278 if (TARGET_A24)
1279 ofs = (ofs + 1) & ~1;
1280 #if DEBUG0
1281 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1282 to, ofs);
1283 #endif
1284 return ofs;
1287 /* Passing Function Arguments on the Stack */
1289 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1290 M32C has word stacks. */
1291 unsigned int
1292 m32c_push_rounding (int n)
1294 if (TARGET_R8C || TARGET_M16C)
1295 return n;
1296 return (n + 1) & ~1;
1299 /* Passing Arguments in Registers */
1301 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1302 registers, partly on stack. If our function returns a struct, a
1303 pointer to a buffer for it is at the top of the stack (last thing
1304 pushed). The first few real arguments may be in registers as
1305 follows:
1307 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1308 arg2 in r2 if it's HI (else pushed on stack)
1309 rest on stack
1310 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1311 rest on stack
1313 Structs are not passed in registers, even if they fit. Only
1314 integer and pointer types are passed in registers.
1316 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1317 r2 if it fits. */
1318 #undef TARGET_FUNCTION_ARG
1319 #define TARGET_FUNCTION_ARG m32c_function_arg
1320 static rtx
1321 m32c_function_arg (cumulative_args_t ca_v,
1322 enum machine_mode mode, const_tree type, bool named)
1324 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1326 /* Can return a reg, parallel, or 0 for stack */
1327 rtx rv = NULL_RTX;
1328 #if DEBUG0
1329 fprintf (stderr, "func_arg %d (%s, %d)\n",
1330 ca->parm_num, mode_name[mode], named);
1331 debug_tree (type);
1332 #endif
1334 if (mode == VOIDmode)
1335 return GEN_INT (0);
1337 if (ca->force_mem || !named)
1339 #if DEBUG0
1340 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1341 named);
1342 #endif
1343 return NULL_RTX;
1346 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1347 return NULL_RTX;
1349 if (type && AGGREGATE_TYPE_P (type))
1350 return NULL_RTX;
1352 switch (ca->parm_num)
1354 case 1:
1355 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1356 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1357 break;
1359 case 2:
1360 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1361 rv = gen_rtx_REG (mode, R2_REGNO);
1362 break;
1365 #if DEBUG0
1366 debug_rtx (rv);
1367 #endif
1368 return rv;
1371 #undef TARGET_PASS_BY_REFERENCE
1372 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1373 static bool
1374 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1375 enum machine_mode mode ATTRIBUTE_UNUSED,
1376 const_tree type ATTRIBUTE_UNUSED,
1377 bool named ATTRIBUTE_UNUSED)
1379 return 0;
1382 /* Implements INIT_CUMULATIVE_ARGS. */
1383 void
1384 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1385 tree fntype,
1386 rtx libname ATTRIBUTE_UNUSED,
1387 tree fndecl,
1388 int n_named_args ATTRIBUTE_UNUSED)
1390 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1391 ca->force_mem = 1;
1392 else
1393 ca->force_mem = 0;
1394 ca->parm_num = 1;
1397 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1398 functions returning structures, so we always reset that. Otherwise,
1399 we only need to know the sequence number of the argument to know what
1400 to do with it. */
1401 #undef TARGET_FUNCTION_ARG_ADVANCE
1402 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1403 static void
1404 m32c_function_arg_advance (cumulative_args_t ca_v,
1405 enum machine_mode mode ATTRIBUTE_UNUSED,
1406 const_tree type ATTRIBUTE_UNUSED,
1407 bool named ATTRIBUTE_UNUSED)
1409 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1411 if (ca->force_mem)
1412 ca->force_mem = 0;
1413 else
1414 ca->parm_num++;
1417 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1418 #undef TARGET_FUNCTION_ARG_BOUNDARY
1419 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1420 static unsigned int
1421 m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1422 const_tree type ATTRIBUTE_UNUSED)
1424 return (TARGET_A16 ? 8 : 16);
1427 /* Implements FUNCTION_ARG_REGNO_P. */
1429 m32c_function_arg_regno_p (int r)
1431 if (TARGET_A24)
1432 return (r == R0_REGNO);
1433 return (r == R1_REGNO || r == R2_REGNO);
1436 /* HImode and PSImode are the two "native" modes as far as GCC is
1437 concerned, but the chips also support a 32-bit mode which is used
1438 for some opcodes in R8C/M16C and for reset vectors and such. */
1439 #undef TARGET_VALID_POINTER_MODE
1440 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1441 static bool
1442 m32c_valid_pointer_mode (enum machine_mode mode)
1444 if (mode == HImode
1445 || mode == PSImode
1446 || mode == SImode
1448 return 1;
1449 return 0;
1452 /* How Scalar Function Values Are Returned */
1454 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1455 combination of registers starting there (r2r0 for longs, r3r1r2r0
1456 for long long, r3r2r1r0 for doubles), except that that ABI
1457 currently doesn't work because it ends up using all available
1458 general registers and gcc often can't compile it. So, instead, we
1459 return anything bigger than 16 bits in "mem0" (effectively, a
1460 memory location). */
1462 #undef TARGET_LIBCALL_VALUE
1463 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1465 static rtx
1466 m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1468 /* return reg or parallel */
1469 #if 0
1470 /* FIXME: GCC has difficulty returning large values in registers,
1471 because that ties up most of the general registers and gives the
1472 register allocator little to work with. Until we can resolve
1473 this, large values are returned in memory. */
1474 if (mode == DFmode)
1476 rtx rv;
1478 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1479 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1480 gen_rtx_REG (HImode,
1481 R0_REGNO),
1482 GEN_INT (0));
1483 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1484 gen_rtx_REG (HImode,
1485 R1_REGNO),
1486 GEN_INT (2));
1487 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1488 gen_rtx_REG (HImode,
1489 R2_REGNO),
1490 GEN_INT (4));
1491 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1492 gen_rtx_REG (HImode,
1493 R3_REGNO),
1494 GEN_INT (6));
1495 return rv;
1498 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1500 rtx rv;
1502 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1503 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1504 gen_rtx_REG (mode,
1505 R0_REGNO),
1506 GEN_INT (0));
1507 return rv;
1509 #endif
1511 if (GET_MODE_SIZE (mode) > 2)
1512 return gen_rtx_REG (mode, MEM0_REGNO);
1513 return gen_rtx_REG (mode, R0_REGNO);
1516 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1517 conventions. */
1519 #undef TARGET_FUNCTION_VALUE
1520 #define TARGET_FUNCTION_VALUE m32c_function_value
1522 static rtx
1523 m32c_function_value (const_tree valtype,
1524 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1525 bool outgoing ATTRIBUTE_UNUSED)
1527 /* return reg or parallel */
1528 const enum machine_mode mode = TYPE_MODE (valtype);
1529 return m32c_libcall_value (mode, NULL_RTX);
1532 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1534 #undef TARGET_FUNCTION_VALUE_REGNO_P
1535 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1537 static bool
1538 m32c_function_value_regno_p (const unsigned int regno)
1540 return (regno == R0_REGNO || regno == MEM0_REGNO);
1543 /* How Large Values Are Returned */
1545 /* We return structures by pushing the address on the stack, even if
1546 we use registers for the first few "real" arguments. */
1547 #undef TARGET_STRUCT_VALUE_RTX
1548 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1549 static rtx
1550 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1551 int incoming ATTRIBUTE_UNUSED)
1553 return 0;
1556 /* Function Entry and Exit */
1558 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1560 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1562 if (cfun->machine->is_interrupt)
1563 return 1;
1564 return 0;
1567 /* Implementing the Varargs Macros */
1569 #undef TARGET_STRICT_ARGUMENT_NAMING
1570 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1571 static bool
1572 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1574 return 1;
1577 /* Trampolines for Nested Functions */
1580 m16c:
1581 1 0000 75C43412 mov.w #0x1234,a0
1582 2 0004 FC000000 jmp.a label
1584 m32c:
1585 1 0000 BC563412 mov.l:s #0x123456,a0
1586 2 0004 CC000000 jmp.a label
1589 /* Implements TRAMPOLINE_SIZE. */
1591 m32c_trampoline_size (void)
1593 /* Allocate extra space so we can avoid the messy shifts when we
1594 initialize the trampoline; we just write past the end of the
1595 opcode. */
1596 return TARGET_A16 ? 8 : 10;
1599 /* Implements TRAMPOLINE_ALIGNMENT. */
1601 m32c_trampoline_alignment (void)
1603 return 2;
1606 /* Implements TARGET_TRAMPOLINE_INIT. */
1608 #undef TARGET_TRAMPOLINE_INIT
1609 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1610 static void
1611 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1613 rtx function = XEXP (DECL_RTL (fndecl), 0);
1615 #define A0(m,i) adjust_address (m_tramp, m, i)
1616 if (TARGET_A16)
1618 /* Note: we subtract a "word" because the moves want signed
1619 constants, not unsigned constants. */
1620 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1621 emit_move_insn (A0 (HImode, 2), chainval);
1622 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1623 /* We use 16-bit addresses here, but store the zero to turn it
1624 into a 24-bit offset. */
1625 emit_move_insn (A0 (HImode, 5), function);
1626 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1628 else
1630 /* Note that the PSI moves actually write 4 bytes. Make sure we
1631 write stuff out in the right order, and leave room for the
1632 extra byte at the end. */
1633 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1634 emit_move_insn (A0 (PSImode, 1), chainval);
1635 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1636 emit_move_insn (A0 (PSImode, 5), function);
1638 #undef A0
1641 /* Addressing Modes */
1643 /* The r8c/m32c family supports a wide range of non-orthogonal
1644 addressing modes, including the ability to double-indirect on *some*
1645 of them. Not all insns support all modes, either, but we rely on
1646 predicates and constraints to deal with that. */
1647 #undef TARGET_LEGITIMATE_ADDRESS_P
1648 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1649 bool
1650 m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1652 int mode_adjust;
1653 if (CONSTANT_P (x))
1654 return 1;
1656 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1657 return 0;
1658 if (TARGET_A24 && GET_MODE (x) != PSImode)
1659 return 0;
1661 /* Wide references to memory will be split after reload, so we must
1662 ensure that all parts of such splits remain legitimate
1663 addresses. */
1664 mode_adjust = GET_MODE_SIZE (mode) - 1;
1666 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1667 if (GET_CODE (x) == PRE_DEC
1668 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1670 return (GET_CODE (XEXP (x, 0)) == REG
1671 && REGNO (XEXP (x, 0)) == SP_REGNO);
1674 #if 0
1675 /* This is the double indirection detection, but it currently
1676 doesn't work as cleanly as this code implies, so until we've had
1677 a chance to debug it, leave it disabled. */
1678 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1680 #if DEBUG_DOUBLE
1681 fprintf (stderr, "double indirect\n");
1682 #endif
1683 x = XEXP (x, 0);
1685 #endif
1687 encode_pattern (x);
1688 if (RTX_IS ("r"))
1690 /* Most indexable registers can be used without displacements,
1691 although some of them will be emitted with an explicit zero
1692 to please the assembler. */
1693 switch (REGNO (patternr[0]))
1695 case A1_REGNO:
1696 case SB_REGNO:
1697 case FB_REGNO:
1698 case SP_REGNO:
1699 if (TARGET_A16 && GET_MODE (x) == SImode)
1700 return 0;
1701 case A0_REGNO:
1702 return 1;
1704 default:
1705 if (IS_PSEUDO (patternr[0], strict))
1706 return 1;
1707 return 0;
1711 if (TARGET_A16 && GET_MODE (x) == SImode)
1712 return 0;
1714 if (RTX_IS ("+ri"))
1716 /* This is more interesting, because different base registers
1717 allow for different displacements - both range and signedness
1718 - and it differs from chip series to chip series too. */
1719 int rn = REGNO (patternr[1]);
1720 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1721 switch (rn)
1723 case A0_REGNO:
1724 case A1_REGNO:
1725 case SB_REGNO:
1726 /* The syntax only allows positive offsets, but when the
1727 offsets span the entire memory range, we can simulate
1728 negative offsets by wrapping. */
1729 if (TARGET_A16)
1730 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1731 if (rn == SB_REGNO)
1732 return (offs >= 0 && offs <= 65535 - mode_adjust);
1733 /* A0 or A1 */
1734 return (offs >= -16777216 && offs <= 16777215);
1736 case FB_REGNO:
1737 if (TARGET_A16)
1738 return (offs >= -128 && offs <= 127 - mode_adjust);
1739 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1741 case SP_REGNO:
1742 return (offs >= -128 && offs <= 127 - mode_adjust);
1744 default:
1745 if (IS_PSEUDO (patternr[1], strict))
1746 return 1;
1747 return 0;
1750 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1752 rtx reg = patternr[1];
1754 /* We don't know where the symbol is, so only allow base
1755 registers which support displacements spanning the whole
1756 address range. */
1757 switch (REGNO (reg))
1759 case A0_REGNO:
1760 case A1_REGNO:
1761 /* $sb needs a secondary reload, but since it's involved in
1762 memory address reloads too, we don't deal with it very
1763 well. */
1764 /* case SB_REGNO: */
1765 return 1;
1766 default:
1767 if (IS_PSEUDO (reg, strict))
1768 return 1;
1769 return 0;
1772 return 0;
1775 /* Implements REG_OK_FOR_BASE_P. */
1777 m32c_reg_ok_for_base_p (rtx x, int strict)
1779 if (GET_CODE (x) != REG)
1780 return 0;
1781 switch (REGNO (x))
1783 case A0_REGNO:
1784 case A1_REGNO:
1785 case SB_REGNO:
1786 case FB_REGNO:
1787 case SP_REGNO:
1788 return 1;
1789 default:
1790 if (IS_PSEUDO (x, strict))
1791 return 1;
1792 return 0;
1796 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1797 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1798 like this:
1799 EB 4B FF mova -128[$fb],$a0
1800 D8 0C FF FF mov.w:Q #0,-1[$a0]
1802 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1803 displacements:
1804 7B F4 stc $fb,$a0
1805 77 54 00 01 sub #256,$a0
1806 D8 08 01 mov.w:Q #0,1[$a0]
1808 If we don't offset (i.e. offset by zero), we end up with:
1809 7B F4 stc $fb,$a0
1810 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1812 We have to subtract *something* so that we have a PLUS rtx to mark
1813 that we've done this reload. The -128 offset will never result in
1814 an 8-bit aN offset, and the payoff for the second case is five
1815 loads *if* those loads are within 256 bytes of the other end of the
1816 frame, so the third case seems best. Note that we subtract the
1817 zero, but detect that in the addhi3 pattern. */
1819 #define BIG_FB_ADJ 0
1821 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1822 worry about is frame base offsets, as $fb has a limited
1823 displacement range. We deal with this by attempting to reload $fb
1824 itself into an address register; that seems to result in the best
1825 code. */
1826 #undef TARGET_LEGITIMIZE_ADDRESS
1827 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1828 static rtx
1829 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1830 enum machine_mode mode)
1832 #if DEBUG0
1833 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1834 debug_rtx (x);
1835 fprintf (stderr, "\n");
1836 #endif
1838 if (GET_CODE (x) == PLUS
1839 && GET_CODE (XEXP (x, 0)) == REG
1840 && REGNO (XEXP (x, 0)) == FB_REGNO
1841 && GET_CODE (XEXP (x, 1)) == CONST_INT
1842 && (INTVAL (XEXP (x, 1)) < -128
1843 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1845 /* reload FB to A_REGS */
1846 rtx temp = gen_reg_rtx (Pmode);
1847 x = copy_rtx (x);
1848 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1849 XEXP (x, 0) = temp;
1852 return x;
1855 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1857 m32c_legitimize_reload_address (rtx * x,
1858 enum machine_mode mode,
1859 int opnum,
1860 int type, int ind_levels ATTRIBUTE_UNUSED)
1862 #if DEBUG0
1863 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1864 mode_name[mode]);
1865 debug_rtx (*x);
1866 #endif
1868 /* At one point, this function tried to get $fb copied to an address
1869 register, which in theory would maximize sharing, but gcc was
1870 *also* still trying to reload the whole address, and we'd run out
1871 of address registers. So we let gcc do the naive (but safe)
1872 reload instead, when the above function doesn't handle it for
1875 The code below is a second attempt at the above. */
1877 if (GET_CODE (*x) == PLUS
1878 && GET_CODE (XEXP (*x, 0)) == REG
1879 && REGNO (XEXP (*x, 0)) == FB_REGNO
1880 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1881 && (INTVAL (XEXP (*x, 1)) < -128
1882 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1884 rtx sum;
1885 int offset = INTVAL (XEXP (*x, 1));
1886 int adjustment = -BIG_FB_ADJ;
1888 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1889 GEN_INT (adjustment));
1890 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1891 if (type == RELOAD_OTHER)
1892 type = RELOAD_FOR_OTHER_ADDRESS;
1893 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1894 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1895 (enum reload_type) type);
1896 return 1;
1899 if (GET_CODE (*x) == PLUS
1900 && GET_CODE (XEXP (*x, 0)) == PLUS
1901 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1902 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1903 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1904 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1907 if (type == RELOAD_OTHER)
1908 type = RELOAD_FOR_OTHER_ADDRESS;
1909 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1910 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1911 (enum reload_type) type);
1912 return 1;
1915 return 0;
1918 /* Return the appropriate mode for a named address pointer. */
1919 #undef TARGET_ADDR_SPACE_POINTER_MODE
1920 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1921 static enum machine_mode
1922 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1924 switch (addrspace)
1926 case ADDR_SPACE_GENERIC:
1927 return TARGET_A24 ? PSImode : HImode;
1928 case ADDR_SPACE_FAR:
1929 return SImode;
1930 default:
1931 gcc_unreachable ();
1935 /* Return the appropriate mode for a named address address. */
1936 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1937 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1938 static enum machine_mode
1939 m32c_addr_space_address_mode (addr_space_t addrspace)
1941 switch (addrspace)
1943 case ADDR_SPACE_GENERIC:
1944 return TARGET_A24 ? PSImode : HImode;
1945 case ADDR_SPACE_FAR:
1946 return SImode;
1947 default:
1948 gcc_unreachable ();
1952 /* Like m32c_legitimate_address_p, except with named addresses. */
1953 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1954 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1955 m32c_addr_space_legitimate_address_p
1956 static bool
1957 m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
1958 bool strict, addr_space_t as)
1960 if (as == ADDR_SPACE_FAR)
1962 if (TARGET_A24)
1963 return 0;
1964 encode_pattern (x);
1965 if (RTX_IS ("r"))
1967 if (GET_MODE (x) != SImode)
1968 return 0;
1969 switch (REGNO (patternr[0]))
1971 case A0_REGNO:
1972 return 1;
1974 default:
1975 if (IS_PSEUDO (patternr[0], strict))
1976 return 1;
1977 return 0;
1980 if (RTX_IS ("+^Sri"))
1982 int rn = REGNO (patternr[3]);
1983 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1984 if (GET_MODE (patternr[3]) != HImode)
1985 return 0;
1986 switch (rn)
1988 case A0_REGNO:
1989 return (offs >= 0 && offs <= 0xfffff);
1991 default:
1992 if (IS_PSEUDO (patternr[3], strict))
1993 return 1;
1994 return 0;
1997 if (RTX_IS ("+^Srs"))
1999 int rn = REGNO (patternr[3]);
2000 if (GET_MODE (patternr[3]) != HImode)
2001 return 0;
2002 switch (rn)
2004 case A0_REGNO:
2005 return 1;
2007 default:
2008 if (IS_PSEUDO (patternr[3], strict))
2009 return 1;
2010 return 0;
2013 if (RTX_IS ("+^S+ris"))
2015 int rn = REGNO (patternr[4]);
2016 if (GET_MODE (patternr[4]) != HImode)
2017 return 0;
2018 switch (rn)
2020 case A0_REGNO:
2021 return 1;
2023 default:
2024 if (IS_PSEUDO (patternr[4], strict))
2025 return 1;
2026 return 0;
2029 if (RTX_IS ("s"))
2031 return 1;
2033 return 0;
2036 else if (as != ADDR_SPACE_GENERIC)
2037 gcc_unreachable ();
2039 return m32c_legitimate_address_p (mode, x, strict);
2042 /* Like m32c_legitimate_address, except with named address support. */
2043 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2044 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2045 static rtx
2046 m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2047 addr_space_t as)
2049 if (as != ADDR_SPACE_GENERIC)
2051 #if DEBUG0
2052 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2053 debug_rtx (x);
2054 fprintf (stderr, "\n");
2055 #endif
2057 if (GET_CODE (x) != REG)
2059 x = force_reg (SImode, x);
2061 return x;
2064 return m32c_legitimize_address (x, oldx, mode);
2067 /* Determine if one named address space is a subset of another. */
2068 #undef TARGET_ADDR_SPACE_SUBSET_P
2069 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2070 static bool
2071 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2073 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2074 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2076 if (subset == superset)
2077 return true;
2079 else
2080 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2083 #undef TARGET_ADDR_SPACE_CONVERT
2084 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2085 /* Convert from one address space to another. */
2086 static rtx
2087 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2089 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2090 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2091 rtx result;
2093 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2094 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2096 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2098 /* This is unpredictable, as we're truncating off usable address
2099 bits. */
2101 result = gen_reg_rtx (HImode);
2102 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2103 return result;
2105 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2107 /* This always works. */
2108 result = gen_reg_rtx (SImode);
2109 emit_insn (gen_zero_extendhisi2 (result, op));
2110 return result;
2112 else
2113 gcc_unreachable ();
2116 /* Condition Code Status */
2118 #undef TARGET_FIXED_CONDITION_CODE_REGS
2119 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2120 static bool
2121 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2123 *p1 = FLG_REGNO;
2124 *p2 = INVALID_REGNUM;
2125 return true;
2128 /* Describing Relative Costs of Operations */
2130 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2131 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2132 no opcodes to do that). We also discourage use of mem* registers
2133 since they're really memory. */
2135 #undef TARGET_REGISTER_MOVE_COST
2136 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2138 static int
2139 m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
2140 reg_class_t to)
2142 int cost = COSTS_N_INSNS (3);
2143 HARD_REG_SET cc;
2145 /* FIXME: pick real values, but not 2 for now. */
2146 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2147 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2149 if (mode == QImode
2150 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2152 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2153 cost = COSTS_N_INSNS (1000);
2154 else
2155 cost = COSTS_N_INSNS (80);
2158 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2159 cost = COSTS_N_INSNS (1000);
2161 if (reg_classes_intersect_p (from, CR_REGS))
2162 cost += COSTS_N_INSNS (5);
2164 if (reg_classes_intersect_p (to, CR_REGS))
2165 cost += COSTS_N_INSNS (5);
2167 if (from == MEM_REGS || to == MEM_REGS)
2168 cost += COSTS_N_INSNS (50);
2169 else if (reg_classes_intersect_p (from, MEM_REGS)
2170 || reg_classes_intersect_p (to, MEM_REGS))
2171 cost += COSTS_N_INSNS (10);
2173 #if DEBUG0
2174 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2175 mode_name[mode], class_names[(int) from], class_names[(int) to],
2176 cost);
2177 #endif
2178 return cost;
2181 /* Implements TARGET_MEMORY_MOVE_COST. */
2183 #undef TARGET_MEMORY_MOVE_COST
2184 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2186 static int
2187 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2188 reg_class_t rclass ATTRIBUTE_UNUSED,
2189 bool in ATTRIBUTE_UNUSED)
2191 /* FIXME: pick real values. */
2192 return COSTS_N_INSNS (10);
2195 /* Here we try to describe when we use multiple opcodes for one RTX so
2196 that gcc knows when to use them. */
2197 #undef TARGET_RTX_COSTS
2198 #define TARGET_RTX_COSTS m32c_rtx_costs
2199 static bool
2200 m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2201 int *total, bool speed ATTRIBUTE_UNUSED)
2203 switch (code)
2205 case REG:
2206 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2207 *total += COSTS_N_INSNS (500);
2208 else
2209 *total += COSTS_N_INSNS (1);
2210 return true;
2212 case ASHIFT:
2213 case LSHIFTRT:
2214 case ASHIFTRT:
2215 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2217 /* mov.b r1l, r1h */
2218 *total += COSTS_N_INSNS (1);
2219 return true;
2221 if (INTVAL (XEXP (x, 1)) > 8
2222 || INTVAL (XEXP (x, 1)) < -8)
2224 /* mov.b #N, r1l */
2225 /* mov.b r1l, r1h */
2226 *total += COSTS_N_INSNS (2);
2227 return true;
2229 return true;
2231 case LE:
2232 case LEU:
2233 case LT:
2234 case LTU:
2235 case GT:
2236 case GTU:
2237 case GE:
2238 case GEU:
2239 case NE:
2240 case EQ:
2241 if (outer_code == SET)
2243 *total += COSTS_N_INSNS (2);
2244 return true;
2246 break;
2248 case ZERO_EXTRACT:
2250 rtx dest = XEXP (x, 0);
2251 rtx addr = XEXP (dest, 0);
2252 switch (GET_CODE (addr))
2254 case CONST_INT:
2255 *total += COSTS_N_INSNS (1);
2256 break;
2257 case SYMBOL_REF:
2258 *total += COSTS_N_INSNS (3);
2259 break;
2260 default:
2261 *total += COSTS_N_INSNS (2);
2262 break;
2264 return true;
2266 break;
2268 default:
2269 /* Reasonable default. */
2270 if (TARGET_A16 && GET_MODE(x) == SImode)
2271 *total += COSTS_N_INSNS (2);
2272 break;
2274 return false;
2277 #undef TARGET_ADDRESS_COST
2278 #define TARGET_ADDRESS_COST m32c_address_cost
2279 static int
2280 m32c_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2281 addr_space_t as ATTRIBUTE_UNUSED,
2282 bool speed ATTRIBUTE_UNUSED)
2284 int i;
2285 /* fprintf(stderr, "\naddress_cost\n");
2286 debug_rtx(addr);*/
2287 switch (GET_CODE (addr))
2289 case CONST_INT:
2290 i = INTVAL (addr);
2291 if (i == 0)
2292 return COSTS_N_INSNS(1);
2293 if (0 < i && i <= 255)
2294 return COSTS_N_INSNS(2);
2295 if (0 < i && i <= 65535)
2296 return COSTS_N_INSNS(3);
2297 return COSTS_N_INSNS(4);
2298 case SYMBOL_REF:
2299 return COSTS_N_INSNS(4);
2300 case REG:
2301 return COSTS_N_INSNS(1);
2302 case PLUS:
2303 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2305 i = INTVAL (XEXP (addr, 1));
2306 if (i == 0)
2307 return COSTS_N_INSNS(1);
2308 if (0 < i && i <= 255)
2309 return COSTS_N_INSNS(2);
2310 if (0 < i && i <= 65535)
2311 return COSTS_N_INSNS(3);
2313 return COSTS_N_INSNS(4);
2314 default:
2315 return 0;
2319 /* Defining the Output Assembler Language */
2321 /* Output of Data */
2323 /* We may have 24 bit sizes, which is the native address size.
2324 Currently unused, but provided for completeness. */
2325 #undef TARGET_ASM_INTEGER
2326 #define TARGET_ASM_INTEGER m32c_asm_integer
2327 static bool
2328 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2330 switch (size)
2332 case 3:
2333 fprintf (asm_out_file, "\t.3byte\t");
2334 output_addr_const (asm_out_file, x);
2335 fputc ('\n', asm_out_file);
2336 return true;
2337 case 4:
2338 if (GET_CODE (x) == SYMBOL_REF)
2340 fprintf (asm_out_file, "\t.long\t");
2341 output_addr_const (asm_out_file, x);
2342 fputc ('\n', asm_out_file);
2343 return true;
2345 break;
2347 return default_assemble_integer (x, size, aligned_p);
2350 /* Output of Assembler Instructions */
2352 /* We use a lookup table because the addressing modes are non-orthogonal. */
2354 static struct
2356 char code;
2357 char const *pattern;
2358 char const *format;
2360 const conversions[] = {
2361 { 0, "r", "0" },
2363 { 0, "mr", "z[1]" },
2364 { 0, "m+ri", "3[2]" },
2365 { 0, "m+rs", "3[2]" },
2366 { 0, "m+^Zrs", "5[4]" },
2367 { 0, "m+^Zri", "5[4]" },
2368 { 0, "m+^Z+ris", "7+6[5]" },
2369 { 0, "m+^Srs", "5[4]" },
2370 { 0, "m+^Sri", "5[4]" },
2371 { 0, "m+^S+ris", "7+6[5]" },
2372 { 0, "m+r+si", "4+5[2]" },
2373 { 0, "ms", "1" },
2374 { 0, "mi", "1" },
2375 { 0, "m+si", "2+3" },
2377 { 0, "mmr", "[z[2]]" },
2378 { 0, "mm+ri", "[4[3]]" },
2379 { 0, "mm+rs", "[4[3]]" },
2380 { 0, "mm+r+si", "[5+6[3]]" },
2381 { 0, "mms", "[[2]]" },
2382 { 0, "mmi", "[[2]]" },
2383 { 0, "mm+si", "[4[3]]" },
2385 { 0, "i", "#0" },
2386 { 0, "s", "#0" },
2387 { 0, "+si", "#1+2" },
2388 { 0, "l", "#0" },
2390 { 'l', "l", "0" },
2391 { 'd', "i", "0" },
2392 { 'd', "s", "0" },
2393 { 'd', "+si", "1+2" },
2394 { 'D', "i", "0" },
2395 { 'D', "s", "0" },
2396 { 'D', "+si", "1+2" },
2397 { 'x', "i", "#0" },
2398 { 'X', "i", "#0" },
2399 { 'm', "i", "#0" },
2400 { 'b', "i", "#0" },
2401 { 'B', "i", "0" },
2402 { 'p', "i", "0" },
2404 { 0, 0, 0 }
2407 /* This is in order according to the bitfield that pushm/popm use. */
2408 static char const *pushm_regs[] = {
2409 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2412 /* Implements TARGET_PRINT_OPERAND. */
2414 #undef TARGET_PRINT_OPERAND
2415 #define TARGET_PRINT_OPERAND m32c_print_operand
2417 static void
2418 m32c_print_operand (FILE * file, rtx x, int code)
2420 int i, j, b;
2421 const char *comma;
2422 HOST_WIDE_INT ival;
2423 int unsigned_const = 0;
2424 int force_sign;
2426 /* Multiplies; constants are converted to sign-extended format but
2427 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2428 need. */
2429 if (code == 'u')
2431 unsigned_const = 2;
2432 code = 0;
2434 if (code == 'U')
2436 unsigned_const = 1;
2437 code = 0;
2439 /* This one is only for debugging; you can put it in a pattern to
2440 force this error. */
2441 if (code == '!')
2443 fprintf (stderr, "dj: unreviewed pattern:");
2444 if (current_output_insn)
2445 debug_rtx (current_output_insn);
2446 gcc_unreachable ();
2448 /* PSImode operations are either .w or .l depending on the target. */
2449 if (code == '&')
2451 if (TARGET_A16)
2452 fprintf (file, "w");
2453 else
2454 fprintf (file, "l");
2455 return;
2457 /* Inverted conditionals. */
2458 if (code == 'C')
2460 switch (GET_CODE (x))
2462 case LE:
2463 fputs ("gt", file);
2464 break;
2465 case LEU:
2466 fputs ("gtu", file);
2467 break;
2468 case LT:
2469 fputs ("ge", file);
2470 break;
2471 case LTU:
2472 fputs ("geu", file);
2473 break;
2474 case GT:
2475 fputs ("le", file);
2476 break;
2477 case GTU:
2478 fputs ("leu", file);
2479 break;
2480 case GE:
2481 fputs ("lt", file);
2482 break;
2483 case GEU:
2484 fputs ("ltu", file);
2485 break;
2486 case NE:
2487 fputs ("eq", file);
2488 break;
2489 case EQ:
2490 fputs ("ne", file);
2491 break;
2492 default:
2493 gcc_unreachable ();
2495 return;
2497 /* Regular conditionals. */
2498 if (code == 'c')
2500 switch (GET_CODE (x))
2502 case LE:
2503 fputs ("le", file);
2504 break;
2505 case LEU:
2506 fputs ("leu", file);
2507 break;
2508 case LT:
2509 fputs ("lt", file);
2510 break;
2511 case LTU:
2512 fputs ("ltu", file);
2513 break;
2514 case GT:
2515 fputs ("gt", file);
2516 break;
2517 case GTU:
2518 fputs ("gtu", file);
2519 break;
2520 case GE:
2521 fputs ("ge", file);
2522 break;
2523 case GEU:
2524 fputs ("geu", file);
2525 break;
2526 case NE:
2527 fputs ("ne", file);
2528 break;
2529 case EQ:
2530 fputs ("eq", file);
2531 break;
2532 default:
2533 gcc_unreachable ();
2535 return;
2537 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2538 operand. */
2539 if (code == 'h' && GET_MODE (x) == SImode)
2541 x = m32c_subreg (HImode, x, SImode, 0);
2542 code = 0;
2544 if (code == 'H' && GET_MODE (x) == SImode)
2546 x = m32c_subreg (HImode, x, SImode, 2);
2547 code = 0;
2549 if (code == 'h' && GET_MODE (x) == HImode)
2551 x = m32c_subreg (QImode, x, HImode, 0);
2552 code = 0;
2554 if (code == 'H' && GET_MODE (x) == HImode)
2556 /* We can't actually represent this as an rtx. Do it here. */
2557 if (GET_CODE (x) == REG)
2559 switch (REGNO (x))
2561 case R0_REGNO:
2562 fputs ("r0h", file);
2563 return;
2564 case R1_REGNO:
2565 fputs ("r1h", file);
2566 return;
2567 default:
2568 gcc_unreachable();
2571 /* This should be a MEM. */
2572 x = m32c_subreg (QImode, x, HImode, 1);
2573 code = 0;
2575 /* This is for BMcond, which always wants word register names. */
2576 if (code == 'h' && GET_MODE (x) == QImode)
2578 if (GET_CODE (x) == REG)
2579 x = gen_rtx_REG (HImode, REGNO (x));
2580 code = 0;
2582 /* 'x' and 'X' need to be ignored for non-immediates. */
2583 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2584 code = 0;
2586 encode_pattern (x);
2587 force_sign = 0;
2588 for (i = 0; conversions[i].pattern; i++)
2589 if (conversions[i].code == code
2590 && streq (conversions[i].pattern, pattern))
2592 for (j = 0; conversions[i].format[j]; j++)
2593 /* backslash quotes the next character in the output pattern. */
2594 if (conversions[i].format[j] == '\\')
2596 fputc (conversions[i].format[j + 1], file);
2597 j++;
2599 /* Digits in the output pattern indicate that the
2600 corresponding RTX is to be output at that point. */
2601 else if (ISDIGIT (conversions[i].format[j]))
2603 rtx r = patternr[conversions[i].format[j] - '0'];
2604 switch (GET_CODE (r))
2606 case REG:
2607 fprintf (file, "%s",
2608 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2609 break;
2610 case CONST_INT:
2611 switch (code)
2613 case 'b':
2614 case 'B':
2616 int v = INTVAL (r);
2617 int i = (int) exact_log2 (v);
2618 if (i == -1)
2619 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2620 if (i == -1)
2621 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2622 /* Bit position. */
2623 fprintf (file, "%d", i);
2625 break;
2626 case 'x':
2627 /* Unsigned byte. */
2628 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2629 INTVAL (r) & 0xff);
2630 break;
2631 case 'X':
2632 /* Unsigned word. */
2633 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2634 INTVAL (r) & 0xffff);
2635 break;
2636 case 'p':
2637 /* pushm and popm encode a register set into a single byte. */
2638 comma = "";
2639 for (b = 7; b >= 0; b--)
2640 if (INTVAL (r) & (1 << b))
2642 fprintf (file, "%s%s", comma, pushm_regs[b]);
2643 comma = ",";
2645 break;
2646 case 'm':
2647 /* "Minus". Output -X */
2648 ival = (-INTVAL (r) & 0xffff);
2649 if (ival & 0x8000)
2650 ival = ival - 0x10000;
2651 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2652 break;
2653 default:
2654 ival = INTVAL (r);
2655 if (conversions[i].format[j + 1] == '[' && ival < 0)
2657 /* We can simulate negative displacements by
2658 taking advantage of address space
2659 wrapping when the offset can span the
2660 entire address range. */
2661 rtx base =
2662 patternr[conversions[i].format[j + 2] - '0'];
2663 if (GET_CODE (base) == REG)
2664 switch (REGNO (base))
2666 case A0_REGNO:
2667 case A1_REGNO:
2668 if (TARGET_A24)
2669 ival = 0x1000000 + ival;
2670 else
2671 ival = 0x10000 + ival;
2672 break;
2673 case SB_REGNO:
2674 if (TARGET_A16)
2675 ival = 0x10000 + ival;
2676 break;
2679 else if (code == 'd' && ival < 0 && j == 0)
2680 /* The "mova" opcode is used to do addition by
2681 computing displacements, but again, we need
2682 displacements to be unsigned *if* they're
2683 the only component of the displacement
2684 (i.e. no "symbol-4" type displacement). */
2685 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2687 if (conversions[i].format[j] == '0')
2689 /* More conversions to unsigned. */
2690 if (unsigned_const == 2)
2691 ival &= 0xffff;
2692 if (unsigned_const == 1)
2693 ival &= 0xff;
2695 if (streq (conversions[i].pattern, "mi")
2696 || streq (conversions[i].pattern, "mmi"))
2698 /* Integers used as addresses are unsigned. */
2699 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2701 if (force_sign && ival >= 0)
2702 fputc ('+', file);
2703 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2704 break;
2706 break;
2707 case CONST_DOUBLE:
2708 /* We don't have const_double constants. If it
2709 happens, make it obvious. */
2710 fprintf (file, "[const_double 0x%lx]",
2711 (unsigned long) CONST_DOUBLE_HIGH (r));
2712 break;
2713 case SYMBOL_REF:
2714 assemble_name (file, XSTR (r, 0));
2715 break;
2716 case LABEL_REF:
2717 output_asm_label (r);
2718 break;
2719 default:
2720 fprintf (stderr, "don't know how to print this operand:");
2721 debug_rtx (r);
2722 gcc_unreachable ();
2725 else
2727 if (conversions[i].format[j] == 'z')
2729 /* Some addressing modes *must* have a displacement,
2730 so insert a zero here if needed. */
2731 int k;
2732 for (k = j + 1; conversions[i].format[k]; k++)
2733 if (ISDIGIT (conversions[i].format[k]))
2735 rtx reg = patternr[conversions[i].format[k] - '0'];
2736 if (GET_CODE (reg) == REG
2737 && (REGNO (reg) == SB_REGNO
2738 || REGNO (reg) == FB_REGNO
2739 || REGNO (reg) == SP_REGNO))
2740 fputc ('0', file);
2742 continue;
2744 /* Signed displacements off symbols need to have signs
2745 blended cleanly. */
2746 if (conversions[i].format[j] == '+'
2747 && (!code || code == 'D' || code == 'd')
2748 && ISDIGIT (conversions[i].format[j + 1])
2749 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2750 == CONST_INT))
2752 force_sign = 1;
2753 continue;
2755 fputc (conversions[i].format[j], file);
2757 break;
2759 if (!conversions[i].pattern)
2761 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2762 pattern);
2763 debug_rtx (x);
2764 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2767 return;
2770 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2772 See m32c_print_operand above for descriptions of what these do. */
2774 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2775 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2777 static bool
2778 m32c_print_operand_punct_valid_p (unsigned char c)
2780 if (c == '&' || c == '!')
2781 return true;
2783 return false;
2786 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2788 #undef TARGET_PRINT_OPERAND_ADDRESS
2789 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2791 static void
2792 m32c_print_operand_address (FILE * stream, rtx address)
2794 if (GET_CODE (address) == MEM)
2795 address = XEXP (address, 0);
2796 else
2797 /* cf: gcc.dg/asm-4.c. */
2798 gcc_assert (GET_CODE (address) == REG);
2800 m32c_print_operand (stream, address, 0);
2803 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2804 differently than general registers. */
2805 void
2806 m32c_output_reg_push (FILE * s, int regno)
2808 if (regno == FLG_REGNO)
2809 fprintf (s, "\tpushc\tflg\n");
2810 else
2811 fprintf (s, "\tpush.%c\t%s\n",
2812 " bwll"[reg_push_size (regno)], reg_names[regno]);
2815 /* Likewise for ASM_OUTPUT_REG_POP. */
2816 void
2817 m32c_output_reg_pop (FILE * s, int regno)
2819 if (regno == FLG_REGNO)
2820 fprintf (s, "\tpopc\tflg\n");
2821 else
2822 fprintf (s, "\tpop.%c\t%s\n",
2823 " bwll"[reg_push_size (regno)], reg_names[regno]);
2826 /* Defining target-specific uses of `__attribute__' */
2828 /* Used to simplify the logic below. Find the attributes wherever
2829 they may be. */
2830 #define M32C_ATTRIBUTES(decl) \
2831 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2832 : DECL_ATTRIBUTES (decl) \
2833 ? (DECL_ATTRIBUTES (decl)) \
2834 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2836 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2837 static int
2838 interrupt_p (tree node ATTRIBUTE_UNUSED)
2840 tree list = M32C_ATTRIBUTES (node);
2841 while (list)
2843 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2844 return 1;
2845 list = TREE_CHAIN (list);
2847 return fast_interrupt_p (node);
2850 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2851 static int
2852 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2854 tree list = M32C_ATTRIBUTES (node);
2855 while (list)
2857 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2858 return 1;
2859 list = TREE_CHAIN (list);
2861 return 0;
2864 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2865 static int
2866 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2868 tree list = M32C_ATTRIBUTES (node);
2869 while (list)
2871 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2872 return 1;
2873 list = TREE_CHAIN (list);
2875 return 0;
2878 static tree
2879 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2880 tree name ATTRIBUTE_UNUSED,
2881 tree args ATTRIBUTE_UNUSED,
2882 int flags ATTRIBUTE_UNUSED,
2883 bool * no_add_attrs ATTRIBUTE_UNUSED)
2885 return NULL_TREE;
2888 /* Returns TRUE if given tree has the "function_vector" attribute. */
2890 m32c_special_page_vector_p (tree func)
2892 tree list;
2894 if (TREE_CODE (func) != FUNCTION_DECL)
2895 return 0;
2897 list = M32C_ATTRIBUTES (func);
2898 while (list)
2900 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2901 return 1;
2902 list = TREE_CHAIN (list);
2904 return 0;
2907 static tree
2908 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2909 tree name ATTRIBUTE_UNUSED,
2910 tree args ATTRIBUTE_UNUSED,
2911 int flags ATTRIBUTE_UNUSED,
2912 bool * no_add_attrs ATTRIBUTE_UNUSED)
2914 if (TARGET_R8C)
2916 /* The attribute is not supported for R8C target. */
2917 warning (OPT_Wattributes,
2918 "%qE attribute is not supported for R8C target",
2919 name);
2920 *no_add_attrs = true;
2922 else if (TREE_CODE (*node) != FUNCTION_DECL)
2924 /* The attribute must be applied to functions only. */
2925 warning (OPT_Wattributes,
2926 "%qE attribute applies only to functions",
2927 name);
2928 *no_add_attrs = true;
2930 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2932 /* The argument must be a constant integer. */
2933 warning (OPT_Wattributes,
2934 "%qE attribute argument not an integer constant",
2935 name);
2936 *no_add_attrs = true;
2938 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2939 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2941 /* The argument value must be between 18 to 255. */
2942 warning (OPT_Wattributes,
2943 "%qE attribute argument should be between 18 to 255",
2944 name);
2945 *no_add_attrs = true;
2947 return NULL_TREE;
2950 /* If the function is assigned the attribute 'function_vector', it
2951 returns the function vector number, otherwise returns zero. */
2953 current_function_special_page_vector (rtx x)
2955 int num;
2957 if ((GET_CODE(x) == SYMBOL_REF)
2958 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2960 tree list;
2961 tree t = SYMBOL_REF_DECL (x);
2963 if (TREE_CODE (t) != FUNCTION_DECL)
2964 return 0;
2966 list = M32C_ATTRIBUTES (t);
2967 while (list)
2969 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2971 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2972 return num;
2975 list = TREE_CHAIN (list);
2978 return 0;
2980 else
2981 return 0;
2984 #undef TARGET_ATTRIBUTE_TABLE
2985 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2986 static const struct attribute_spec m32c_attribute_table[] = {
2987 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
2988 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
2989 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
2990 {"function_vector", 1, 1, true, false, false, function_vector_handler,
2991 false},
2992 {0, 0, 0, 0, 0, 0, 0, false}
2995 #undef TARGET_COMP_TYPE_ATTRIBUTES
2996 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2997 static int
2998 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
2999 const_tree type2 ATTRIBUTE_UNUSED)
3001 /* 0=incompatible 1=compatible 2=warning */
3002 return 1;
3005 #undef TARGET_INSERT_ATTRIBUTES
3006 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3007 static void
3008 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3009 tree * attr_ptr ATTRIBUTE_UNUSED)
3011 unsigned addr;
3012 /* See if we need to make #pragma address variables volatile. */
3014 if (TREE_CODE (node) == VAR_DECL)
3016 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3017 if (m32c_get_pragma_address (name, &addr))
3019 TREE_THIS_VOLATILE (node) = true;
3025 struct GTY(()) pragma_entry {
3026 const char *varname;
3027 unsigned address;
3029 typedef struct pragma_entry pragma_entry;
3031 /* Hash table of pragma info. */
3032 static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3034 static int
3035 pragma_entry_eq (const void *p1, const void *p2)
3037 const pragma_entry *old = (const pragma_entry *) p1;
3038 const char *new_name = (const char *) p2;
3040 return strcmp (old->varname, new_name) == 0;
3043 static hashval_t
3044 pragma_entry_hash (const void *p)
3046 const pragma_entry *old = (const pragma_entry *) p;
3047 return htab_hash_string (old->varname);
3050 void
3051 m32c_note_pragma_address (const char *varname, unsigned address)
3053 pragma_entry **slot;
3055 if (!pragma_htab)
3056 pragma_htab = htab_create_ggc (31, pragma_entry_hash,
3057 pragma_entry_eq, NULL);
3059 slot = (pragma_entry **)
3060 htab_find_slot_with_hash (pragma_htab, varname,
3061 htab_hash_string (varname), INSERT);
3063 if (!*slot)
3065 *slot = ggc_alloc_pragma_entry ();
3066 (*slot)->varname = ggc_strdup (varname);
3068 (*slot)->address = address;
3071 static bool
3072 m32c_get_pragma_address (const char *varname, unsigned *address)
3074 pragma_entry **slot;
3076 if (!pragma_htab)
3077 return false;
3079 slot = (pragma_entry **)
3080 htab_find_slot_with_hash (pragma_htab, varname,
3081 htab_hash_string (varname), NO_INSERT);
3082 if (slot && *slot)
3084 *address = (*slot)->address;
3085 return true;
3087 return false;
3090 void
3091 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3092 const char *name,
3093 int size, int align, int global)
3095 unsigned address;
3097 if (m32c_get_pragma_address (name, &address))
3099 /* We never output these as global. */
3100 assemble_name (stream, name);
3101 fprintf (stream, " = 0x%04x\n", address);
3102 return;
3104 if (!global)
3106 fprintf (stream, "\t.local\t");
3107 assemble_name (stream, name);
3108 fprintf (stream, "\n");
3110 fprintf (stream, "\t.comm\t");
3111 assemble_name (stream, name);
3112 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3115 /* Predicates */
3117 /* This is a list of legal subregs of hard regs. */
3118 static const struct {
3119 unsigned char outer_mode_size;
3120 unsigned char inner_mode_size;
3121 unsigned char byte_mask;
3122 unsigned char legal_when;
3123 unsigned int regno;
3124 } legal_subregs[] = {
3125 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3126 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3127 {1, 2, 0x01, 1, A0_REGNO},
3128 {1, 2, 0x01, 1, A1_REGNO},
3130 {1, 4, 0x01, 1, A0_REGNO},
3131 {1, 4, 0x01, 1, A1_REGNO},
3133 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3134 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3135 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3136 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3137 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3139 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
3142 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3143 support. We also bail on MEMs with illegal addresses. */
3144 bool
3145 m32c_illegal_subreg_p (rtx op)
3147 int offset;
3148 unsigned int i;
3149 int src_mode, dest_mode;
3151 if (GET_CODE (op) == MEM
3152 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3154 return true;
3157 if (GET_CODE (op) != SUBREG)
3158 return false;
3160 dest_mode = GET_MODE (op);
3161 offset = SUBREG_BYTE (op);
3162 op = SUBREG_REG (op);
3163 src_mode = GET_MODE (op);
3165 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3166 return false;
3167 if (GET_CODE (op) != REG)
3168 return false;
3169 if (REGNO (op) >= MEM0_REGNO)
3170 return false;
3172 offset = (1 << offset);
3174 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3175 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3176 && legal_subregs[i].regno == REGNO (op)
3177 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3178 && legal_subregs[i].byte_mask & offset)
3180 switch (legal_subregs[i].legal_when)
3182 case 1:
3183 return false;
3184 case 16:
3185 if (TARGET_A16)
3186 return false;
3187 break;
3188 case 24:
3189 if (TARGET_A24)
3190 return false;
3191 break;
3194 return true;
3197 /* Returns TRUE if we support a move between the first two operands.
3198 At the moment, we just want to discourage mem to mem moves until
3199 after reload, because reload has a hard time with our limited
3200 number of address registers, and we can get into a situation where
3201 we need three of them when we only have two. */
3202 bool
3203 m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3205 rtx op0 = operands[0];
3206 rtx op1 = operands[1];
3208 if (TARGET_A24)
3209 return true;
3211 #define DEBUG_MOV_OK 0
3212 #if DEBUG_MOV_OK
3213 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3214 debug_rtx (op0);
3215 debug_rtx (op1);
3216 #endif
3218 if (GET_CODE (op0) == SUBREG)
3219 op0 = XEXP (op0, 0);
3220 if (GET_CODE (op1) == SUBREG)
3221 op1 = XEXP (op1, 0);
3223 if (GET_CODE (op0) == MEM
3224 && GET_CODE (op1) == MEM
3225 && ! reload_completed)
3227 #if DEBUG_MOV_OK
3228 fprintf (stderr, " - no, mem to mem\n");
3229 #endif
3230 return false;
3233 #if DEBUG_MOV_OK
3234 fprintf (stderr, " - ok\n");
3235 #endif
3236 return true;
3239 /* Returns TRUE if two consecutive HImode mov instructions, generated
3240 for moving an immediate double data to a double data type variable
3241 location, can be combined into single SImode mov instruction. */
3242 bool
3243 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3244 enum machine_mode mode ATTRIBUTE_UNUSED)
3246 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3247 flags. */
3248 return false;
3251 /* Expanders */
3253 /* Subregs are non-orthogonal for us, because our registers are all
3254 different sizes. */
3255 static rtx
3256 m32c_subreg (enum machine_mode outer,
3257 rtx x, enum machine_mode inner, int byte)
3259 int r, nr = -1;
3261 /* Converting MEMs to different types that are the same size, we
3262 just rewrite them. */
3263 if (GET_CODE (x) == SUBREG
3264 && SUBREG_BYTE (x) == 0
3265 && GET_CODE (SUBREG_REG (x)) == MEM
3266 && (GET_MODE_SIZE (GET_MODE (x))
3267 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3269 rtx oldx = x;
3270 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3271 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3274 /* Push/pop get done as smaller push/pops. */
3275 if (GET_CODE (x) == MEM
3276 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3277 || GET_CODE (XEXP (x, 0)) == POST_INC))
3278 return gen_rtx_MEM (outer, XEXP (x, 0));
3279 if (GET_CODE (x) == SUBREG
3280 && GET_CODE (XEXP (x, 0)) == MEM
3281 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3282 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3283 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3285 if (GET_CODE (x) != REG)
3287 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3288 if (GET_CODE (r) == SUBREG
3289 && GET_CODE (x) == MEM
3290 && MEM_VOLATILE_P (x))
3292 /* Volatile MEMs don't get simplified, but we need them to
3293 be. We are little endian, so the subreg byte is the
3294 offset. */
3295 r = adjust_address_nv (x, outer, byte);
3297 return r;
3300 r = REGNO (x);
3301 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3302 return simplify_gen_subreg (outer, x, inner, byte);
3304 if (IS_MEM_REGNO (r))
3305 return simplify_gen_subreg (outer, x, inner, byte);
3307 /* This is where the complexities of our register layout are
3308 described. */
3309 if (byte == 0)
3310 nr = r;
3311 else if (outer == HImode)
3313 if (r == R0_REGNO && byte == 2)
3314 nr = R2_REGNO;
3315 else if (r == R0_REGNO && byte == 4)
3316 nr = R1_REGNO;
3317 else if (r == R0_REGNO && byte == 6)
3318 nr = R3_REGNO;
3319 else if (r == R1_REGNO && byte == 2)
3320 nr = R3_REGNO;
3321 else if (r == A0_REGNO && byte == 2)
3322 nr = A1_REGNO;
3324 else if (outer == SImode)
3326 if (r == R0_REGNO && byte == 0)
3327 nr = R0_REGNO;
3328 else if (r == R0_REGNO && byte == 4)
3329 nr = R1_REGNO;
3331 if (nr == -1)
3333 fprintf (stderr, "m32c_subreg %s %s %d\n",
3334 mode_name[outer], mode_name[inner], byte);
3335 debug_rtx (x);
3336 gcc_unreachable ();
3338 return gen_rtx_REG (outer, nr);
3341 /* Used to emit move instructions. We split some moves,
3342 and avoid mem-mem moves. */
3344 m32c_prepare_move (rtx * operands, enum machine_mode mode)
3346 if (far_addr_space_p (operands[0])
3347 && CONSTANT_P (operands[1]))
3349 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3351 if (TARGET_A16 && mode == PSImode)
3352 return m32c_split_move (operands, mode, 1);
3353 if ((GET_CODE (operands[0]) == MEM)
3354 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3356 rtx pmv = XEXP (operands[0], 0);
3357 rtx dest_reg = XEXP (pmv, 0);
3358 rtx dest_mod = XEXP (pmv, 1);
3360 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3361 operands[0] = gen_rtx_MEM (mode, dest_reg);
3363 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3364 operands[1] = copy_to_mode_reg (mode, operands[1]);
3365 return 0;
3368 #define DEBUG_SPLIT 0
3370 /* Returns TRUE if the given PSImode move should be split. We split
3371 for all r8c/m16c moves, since it doesn't support them, and for
3372 POP.L as we can only *push* SImode. */
3374 m32c_split_psi_p (rtx * operands)
3376 #if DEBUG_SPLIT
3377 fprintf (stderr, "\nm32c_split_psi_p\n");
3378 debug_rtx (operands[0]);
3379 debug_rtx (operands[1]);
3380 #endif
3381 if (TARGET_A16)
3383 #if DEBUG_SPLIT
3384 fprintf (stderr, "yes, A16\n");
3385 #endif
3386 return 1;
3388 if (GET_CODE (operands[1]) == MEM
3389 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3391 #if DEBUG_SPLIT
3392 fprintf (stderr, "yes, pop.l\n");
3393 #endif
3394 return 1;
3396 #if DEBUG_SPLIT
3397 fprintf (stderr, "no, default\n");
3398 #endif
3399 return 0;
3402 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3403 (define_expand), 1 if it is not optional (define_insn_and_split),
3404 and 3 for define_split (alternate api). */
3406 m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3408 rtx s[4], d[4];
3409 int parts, si, di, rev = 0;
3410 int rv = 0, opi = 2;
3411 enum machine_mode submode = HImode;
3412 rtx *ops, local_ops[10];
3414 /* define_split modifies the existing operands, but the other two
3415 emit new insns. OPS is where we store the operand pairs, which
3416 we emit later. */
3417 if (split_all == 3)
3418 ops = operands;
3419 else
3420 ops = local_ops;
3422 /* Else HImode. */
3423 if (mode == DImode)
3424 submode = SImode;
3426 /* Before splitting mem-mem moves, force one operand into a
3427 register. */
3428 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3430 #if DEBUG0
3431 fprintf (stderr, "force_reg...\n");
3432 debug_rtx (operands[1]);
3433 #endif
3434 operands[1] = force_reg (mode, operands[1]);
3435 #if DEBUG0
3436 debug_rtx (operands[1]);
3437 #endif
3440 parts = 2;
3442 #if DEBUG_SPLIT
3443 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3444 split_all);
3445 debug_rtx (operands[0]);
3446 debug_rtx (operands[1]);
3447 #endif
3449 /* Note that split_all is not used to select the api after this
3450 point, so it's safe to set it to 3 even with define_insn. */
3451 /* None of the chips can move SI operands to sp-relative addresses,
3452 so we always split those. */
3453 if (satisfies_constraint_Ss (operands[0]))
3454 split_all = 3;
3456 if (TARGET_A16
3457 && (far_addr_space_p (operands[0])
3458 || far_addr_space_p (operands[1])))
3459 split_all |= 1;
3461 /* We don't need to split these. */
3462 if (TARGET_A24
3463 && split_all != 3
3464 && (mode == SImode || mode == PSImode)
3465 && !(GET_CODE (operands[1]) == MEM
3466 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3467 return 0;
3469 /* First, enumerate the subregs we'll be dealing with. */
3470 for (si = 0; si < parts; si++)
3472 d[si] =
3473 m32c_subreg (submode, operands[0], mode,
3474 si * GET_MODE_SIZE (submode));
3475 s[si] =
3476 m32c_subreg (submode, operands[1], mode,
3477 si * GET_MODE_SIZE (submode));
3480 /* Split pushes by emitting a sequence of smaller pushes. */
3481 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3483 for (si = parts - 1; si >= 0; si--)
3485 ops[opi++] = gen_rtx_MEM (submode,
3486 gen_rtx_PRE_DEC (Pmode,
3487 gen_rtx_REG (Pmode,
3488 SP_REGNO)));
3489 ops[opi++] = s[si];
3492 rv = 1;
3494 /* Likewise for pops. */
3495 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3497 for (di = 0; di < parts; di++)
3499 ops[opi++] = d[di];
3500 ops[opi++] = gen_rtx_MEM (submode,
3501 gen_rtx_POST_INC (Pmode,
3502 gen_rtx_REG (Pmode,
3503 SP_REGNO)));
3505 rv = 1;
3507 else if (split_all)
3509 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3510 for (di = 0; di < parts - 1; di++)
3511 for (si = di + 1; si < parts; si++)
3512 if (reg_mentioned_p (d[di], s[si]))
3513 rev = 1;
3515 if (rev)
3516 for (si = 0; si < parts; si++)
3518 ops[opi++] = d[si];
3519 ops[opi++] = s[si];
3521 else
3522 for (si = parts - 1; si >= 0; si--)
3524 ops[opi++] = d[si];
3525 ops[opi++] = s[si];
3527 rv = 1;
3529 /* Now emit any moves we may have accumulated. */
3530 if (rv && split_all != 3)
3532 int i;
3533 for (i = 2; i < opi; i += 2)
3534 emit_move_insn (ops[i], ops[i + 1]);
3536 return rv;
3539 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3540 the like. For the R8C they expect one of the addresses to be in
3541 R1L:An so we need to arrange for that. Otherwise, it's just a
3542 matter of picking out the operands we want and emitting the right
3543 pattern for them. All these expanders, which correspond to
3544 patterns in blkmov.md, must return nonzero if they expand the insn,
3545 or zero if they should FAIL. */
3547 /* This is a memset() opcode. All operands are implied, so we need to
3548 arrange for them to be in the right registers. The opcode wants
3549 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3550 the count (HI), and $2 the value (QI). */
3552 m32c_expand_setmemhi(rtx *operands)
3554 rtx desta, count, val;
3555 rtx desto, counto;
3557 desta = XEXP (operands[0], 0);
3558 count = operands[1];
3559 val = operands[2];
3561 desto = gen_reg_rtx (Pmode);
3562 counto = gen_reg_rtx (HImode);
3564 if (GET_CODE (desta) != REG
3565 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3566 desta = copy_to_mode_reg (Pmode, desta);
3568 /* This looks like an arbitrary restriction, but this is by far the
3569 most common case. For counts 8..14 this actually results in
3570 smaller code with no speed penalty because the half-sized
3571 constant can be loaded with a shorter opcode. */
3572 if (GET_CODE (count) == CONST_INT
3573 && GET_CODE (val) == CONST_INT
3574 && ! (INTVAL (count) & 1)
3575 && (INTVAL (count) > 1)
3576 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3578 unsigned v = INTVAL (val) & 0xff;
3579 v = v | (v << 8);
3580 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3581 val = copy_to_mode_reg (HImode, GEN_INT (v));
3582 if (TARGET_A16)
3583 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3584 else
3585 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3586 return 1;
3589 /* This is the generalized memset() case. */
3590 if (GET_CODE (val) != REG
3591 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3592 val = copy_to_mode_reg (QImode, val);
3594 if (GET_CODE (count) != REG
3595 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3596 count = copy_to_mode_reg (HImode, count);
3598 if (TARGET_A16)
3599 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3600 else
3601 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3603 return 1;
3606 /* This is a memcpy() opcode. All operands are implied, so we need to
3607 arrange for them to be in the right registers. The opcode wants
3608 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3609 is the source (MEM:BLK), and $2 the count (HI). */
3611 m32c_expand_movmemhi(rtx *operands)
3613 rtx desta, srca, count;
3614 rtx desto, srco, counto;
3616 desta = XEXP (operands[0], 0);
3617 srca = XEXP (operands[1], 0);
3618 count = operands[2];
3620 desto = gen_reg_rtx (Pmode);
3621 srco = gen_reg_rtx (Pmode);
3622 counto = gen_reg_rtx (HImode);
3624 if (GET_CODE (desta) != REG
3625 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3626 desta = copy_to_mode_reg (Pmode, desta);
3628 if (GET_CODE (srca) != REG
3629 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3630 srca = copy_to_mode_reg (Pmode, srca);
3632 /* Similar to setmem, but we don't need to check the value. */
3633 if (GET_CODE (count) == CONST_INT
3634 && ! (INTVAL (count) & 1)
3635 && (INTVAL (count) > 1))
3637 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3638 if (TARGET_A16)
3639 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3640 else
3641 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3642 return 1;
3645 /* This is the generalized memset() case. */
3646 if (GET_CODE (count) != REG
3647 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3648 count = copy_to_mode_reg (HImode, count);
3650 if (TARGET_A16)
3651 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3652 else
3653 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3655 return 1;
3658 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3659 the copy, which should point to the NUL at the end of the string,
3660 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3661 Since our opcode leaves the destination pointing *after* the NUL,
3662 we must emit an adjustment. */
3664 m32c_expand_movstr(rtx *operands)
3666 rtx desta, srca;
3667 rtx desto, srco;
3669 desta = XEXP (operands[1], 0);
3670 srca = XEXP (operands[2], 0);
3672 desto = gen_reg_rtx (Pmode);
3673 srco = gen_reg_rtx (Pmode);
3675 if (GET_CODE (desta) != REG
3676 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3677 desta = copy_to_mode_reg (Pmode, desta);
3679 if (GET_CODE (srca) != REG
3680 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3681 srca = copy_to_mode_reg (Pmode, srca);
3683 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3684 /* desto ends up being a1, which allows this type of add through MOVA. */
3685 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3687 return 1;
3690 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3691 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3692 $2 is the other (MEM:BLK). We must do the comparison, and then
3693 convert the flags to a signed integer result. */
3695 m32c_expand_cmpstr(rtx *operands)
3697 rtx src1a, src2a;
3699 src1a = XEXP (operands[1], 0);
3700 src2a = XEXP (operands[2], 0);
3702 if (GET_CODE (src1a) != REG
3703 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3704 src1a = copy_to_mode_reg (Pmode, src1a);
3706 if (GET_CODE (src2a) != REG
3707 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3708 src2a = copy_to_mode_reg (Pmode, src2a);
3710 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3711 emit_insn (gen_cond_to_int (operands[0]));
3713 return 1;
3717 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3719 static shift_gen_func
3720 shift_gen_func_for (int mode, int code)
3722 #define GFF(m,c,f) if (mode == m && code == c) return f
3723 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3724 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3725 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3726 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3727 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3728 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3729 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3730 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3731 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3732 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3733 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3734 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3735 #undef GFF
3736 gcc_unreachable ();
3739 /* The m32c only has one shift, but it takes a signed count. GCC
3740 doesn't want this, so we fake it by negating any shift count when
3741 we're pretending to shift the other way. Also, the shift count is
3742 limited to -8..8. It's slightly better to use two shifts for 9..15
3743 than to load the count into r1h, so we do that too. */
3745 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3747 enum machine_mode mode = GET_MODE (operands[0]);
3748 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3749 rtx temp;
3751 if (GET_CODE (operands[2]) == CONST_INT)
3753 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3754 int count = INTVAL (operands[2]) * scale;
3756 while (count > maxc)
3758 temp = gen_reg_rtx (mode);
3759 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3760 operands[1] = temp;
3761 count -= maxc;
3763 while (count < -maxc)
3765 temp = gen_reg_rtx (mode);
3766 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3767 operands[1] = temp;
3768 count += maxc;
3770 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3771 return 1;
3774 temp = gen_reg_rtx (QImode);
3775 if (scale < 0)
3776 /* The pattern has a NEG that corresponds to this. */
3777 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3778 else if (TARGET_A16 && mode == SImode)
3779 /* We do this because the code below may modify this, we don't
3780 want to modify the origin of this value. */
3781 emit_move_insn (temp, operands[2]);
3782 else
3783 /* We'll only use it for the shift, no point emitting a move. */
3784 temp = operands[2];
3786 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3788 /* The m16c has a limit of -16..16 for SI shifts, even when the
3789 shift count is in a register. Since there are so many targets
3790 of these shifts, it's better to expand the RTL here than to
3791 call a helper function.
3793 The resulting code looks something like this:
3795 cmp.b r1h,-16
3796 jge.b 1f
3797 shl.l -16,dest
3798 add.b r1h,16
3799 1f: cmp.b r1h,16
3800 jle.b 1f
3801 shl.l 16,dest
3802 sub.b r1h,16
3803 1f: shl.l r1h,dest
3805 We take advantage of the fact that "negative" shifts are
3806 undefined to skip one of the comparisons. */
3808 rtx count;
3809 rtx label, insn, tempvar;
3811 emit_move_insn (operands[0], operands[1]);
3813 count = temp;
3814 label = gen_label_rtx ();
3815 LABEL_NUSES (label) ++;
3817 tempvar = gen_reg_rtx (mode);
3819 if (shift_code == ASHIFT)
3821 /* This is a left shift. We only need check positive counts. */
3822 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3823 count, GEN_INT (16), label));
3824 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3825 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3826 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3827 emit_label_after (label, insn);
3829 else
3831 /* This is a right shift. We only need check negative counts. */
3832 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3833 count, GEN_INT (-16), label));
3834 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3835 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3836 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3837 emit_label_after (label, insn);
3839 operands[1] = operands[0];
3840 emit_insn (func (operands[0], operands[0], count));
3841 return 1;
3844 operands[2] = temp;
3845 return 0;
3848 /* The m32c has a limited range of operations that work on PSImode
3849 values; we have to expand to SI, do the math, and truncate back to
3850 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3851 those cases. */
3852 void
3853 m32c_expand_neg_mulpsi3 (rtx * operands)
3855 /* operands: a = b * i */
3856 rtx temp1; /* b as SI */
3857 rtx scale /* i as SI */;
3858 rtx temp2; /* a*b as SI */
3860 temp1 = gen_reg_rtx (SImode);
3861 temp2 = gen_reg_rtx (SImode);
3862 if (GET_CODE (operands[2]) != CONST_INT)
3864 scale = gen_reg_rtx (SImode);
3865 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3867 else
3868 scale = copy_to_mode_reg (SImode, operands[2]);
3870 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3871 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3872 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3875 /* Pattern Output Functions */
3878 m32c_expand_movcc (rtx *operands)
3880 rtx rel = operands[1];
3882 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3883 return 1;
3884 if (GET_CODE (operands[2]) != CONST_INT
3885 || GET_CODE (operands[3]) != CONST_INT)
3886 return 1;
3887 if (GET_CODE (rel) == NE)
3889 rtx tmp = operands[2];
3890 operands[2] = operands[3];
3891 operands[3] = tmp;
3892 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3895 emit_move_insn (operands[0],
3896 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3897 rel,
3898 operands[2],
3899 operands[3]));
3900 return 0;
3903 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3905 m32c_expand_insv (rtx *operands)
3907 rtx op0, src0, p;
3908 int mask;
3910 if (INTVAL (operands[1]) != 1)
3911 return 1;
3913 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3914 if (GET_CODE (operands[3]) != CONST_INT)
3915 return 1;
3916 if (INTVAL (operands[3]) != 0
3917 && INTVAL (operands[3]) != 1
3918 && INTVAL (operands[3]) != -1)
3919 return 1;
3921 mask = 1 << INTVAL (operands[2]);
3923 op0 = operands[0];
3924 if (GET_CODE (op0) == SUBREG
3925 && SUBREG_BYTE (op0) == 0)
3927 rtx sub = SUBREG_REG (op0);
3928 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3929 op0 = sub;
3932 if (!can_create_pseudo_p ()
3933 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3934 src0 = op0;
3935 else
3937 src0 = gen_reg_rtx (GET_MODE (op0));
3938 emit_move_insn (src0, op0);
3941 if (GET_MODE (op0) == HImode
3942 && INTVAL (operands[2]) >= 8
3943 && GET_CODE (op0) == MEM)
3945 /* We are little endian. */
3946 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3947 XEXP (op0, 0), 1));
3948 MEM_COPY_ATTRIBUTES (new_mem, op0);
3949 mask >>= 8;
3952 /* First, we generate a mask with the correct polarity. If we are
3953 storing a zero, we want an AND mask, so invert it. */
3954 if (INTVAL (operands[3]) == 0)
3956 /* Storing a zero, use an AND mask */
3957 if (GET_MODE (op0) == HImode)
3958 mask ^= 0xffff;
3959 else
3960 mask ^= 0xff;
3962 /* Now we need to properly sign-extend the mask in case we need to
3963 fall back to an AND or OR opcode. */
3964 if (GET_MODE (op0) == HImode)
3966 if (mask & 0x8000)
3967 mask -= 0x10000;
3969 else
3971 if (mask & 0x80)
3972 mask -= 0x100;
3975 switch ( (INTVAL (operands[3]) ? 4 : 0)
3976 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3977 + (TARGET_A24 ? 1 : 0))
3979 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3980 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3981 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3982 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3983 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3984 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3985 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3986 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3987 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3990 emit_insn (p);
3991 return 0;
3994 const char *
3995 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3997 static char buf[30];
3998 if (GET_CODE (operands[0]) == REG
3999 && REGNO (operands[0]) == R0_REGNO)
4001 if (code == EQ)
4002 return "stzx\t#1,#0,r0l";
4003 if (code == NE)
4004 return "stzx\t#0,#1,r0l";
4006 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4007 return buf;
4010 /* Encode symbol attributes of a SYMBOL_REF into its
4011 SYMBOL_REF_FLAGS. */
4012 static void
4013 m32c_encode_section_info (tree decl, rtx rtl, int first)
4015 int extra_flags = 0;
4017 default_encode_section_info (decl, rtl, first);
4018 if (TREE_CODE (decl) == FUNCTION_DECL
4019 && m32c_special_page_vector_p (decl))
4021 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4023 if (extra_flags)
4024 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4027 /* Returns TRUE if the current function is a leaf, and thus we can
4028 determine which registers an interrupt function really needs to
4029 save. The logic below is mostly about finding the insn sequence
4030 that's the function, versus any sequence that might be open for the
4031 current insn. */
4032 static int
4033 m32c_leaf_function_p (void)
4035 rtx saved_first, saved_last;
4036 struct sequence_stack *seq;
4037 int rv;
4039 saved_first = crtl->emit.x_first_insn;
4040 saved_last = crtl->emit.x_last_insn;
4041 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
4043 if (seq)
4045 crtl->emit.x_first_insn = seq->first;
4046 crtl->emit.x_last_insn = seq->last;
4049 rv = leaf_function_p ();
4051 crtl->emit.x_first_insn = saved_first;
4052 crtl->emit.x_last_insn = saved_last;
4053 return rv;
4056 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4057 opcodes. If the function doesn't need the frame base or stack
4058 pointer, it can use the simpler RTS opcode. */
4059 static bool
4060 m32c_function_needs_enter (void)
4062 rtx insn;
4063 struct sequence_stack *seq;
4064 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4065 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4067 insn = get_insns ();
4068 for (seq = crtl->emit.sequence_stack;
4069 seq;
4070 insn = seq->first, seq = seq->next);
4072 while (insn)
4074 if (reg_mentioned_p (sp, insn))
4075 return true;
4076 if (reg_mentioned_p (fb, insn))
4077 return true;
4078 insn = NEXT_INSN (insn);
4080 return false;
4083 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4084 frame-related. Return PAR.
4086 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4087 PARALLEL rtx other than the first if they do not have the
4088 FRAME_RELATED flag set on them. So this function is handy for
4089 marking up 'enter' instructions. */
4090 static rtx
4091 m32c_all_frame_related (rtx par)
4093 int len = XVECLEN (par, 0);
4094 int i;
4096 for (i = 0; i < len; i++)
4097 F (XVECEXP (par, 0, i));
4099 return par;
4102 /* Emits the prologue. See the frame layout comment earlier in this
4103 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4104 that we manually update sp. */
4105 void
4106 m32c_emit_prologue (void)
4108 int frame_size, extra_frame_size = 0, reg_save_size;
4109 int complex_prologue = 0;
4111 cfun->machine->is_leaf = m32c_leaf_function_p ();
4112 if (interrupt_p (cfun->decl))
4114 cfun->machine->is_interrupt = 1;
4115 complex_prologue = 1;
4117 else if (bank_switch_p (cfun->decl))
4118 warning (OPT_Wattributes,
4119 "%<bank_switch%> has no effect on non-interrupt functions");
4121 reg_save_size = m32c_pushm_popm (PP_justcount);
4123 if (interrupt_p (cfun->decl))
4125 if (bank_switch_p (cfun->decl))
4126 emit_insn (gen_fset_b ());
4127 else if (cfun->machine->intr_pushm)
4128 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4131 frame_size =
4132 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4133 if (frame_size == 0
4134 && !m32c_function_needs_enter ())
4135 cfun->machine->use_rts = 1;
4137 if (frame_size > 254)
4139 extra_frame_size = frame_size - 254;
4140 frame_size = 254;
4142 if (cfun->machine->use_rts == 0)
4143 F (emit_insn (m32c_all_frame_related
4144 (TARGET_A16
4145 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4146 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4148 if (extra_frame_size)
4150 complex_prologue = 1;
4151 if (TARGET_A16)
4152 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4153 gen_rtx_REG (HImode, SP_REGNO),
4154 GEN_INT (-extra_frame_size))));
4155 else
4156 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4157 gen_rtx_REG (PSImode, SP_REGNO),
4158 GEN_INT (-extra_frame_size))));
4161 complex_prologue += m32c_pushm_popm (PP_pushm);
4163 /* This just emits a comment into the .s file for debugging. */
4164 if (complex_prologue)
4165 emit_insn (gen_prologue_end ());
4168 /* Likewise, for the epilogue. The only exception is that, for
4169 interrupts, we must manually unwind the frame as the REIT opcode
4170 doesn't do that. */
4171 void
4172 m32c_emit_epilogue (void)
4174 int popm_count = m32c_pushm_popm (PP_justcount);
4176 /* This just emits a comment into the .s file for debugging. */
4177 if (popm_count > 0 || cfun->machine->is_interrupt)
4178 emit_insn (gen_epilogue_start ());
4180 if (popm_count > 0)
4181 m32c_pushm_popm (PP_popm);
4183 if (cfun->machine->is_interrupt)
4185 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4187 /* REIT clears B flag and restores $fp for us, but we still
4188 have to fix up the stack. USE_RTS just means we didn't
4189 emit ENTER. */
4190 if (!cfun->machine->use_rts)
4192 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4193 gen_rtx_REG (spmode, FP_REGNO));
4194 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4195 gen_rtx_REG (spmode, A0_REGNO));
4196 /* We can't just add this to the POPM because it would be in
4197 the wrong order, and wouldn't fix the stack if we're bank
4198 switching. */
4199 if (TARGET_A16)
4200 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4201 else
4202 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4204 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4205 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4207 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4208 generated only for M32C/M32CM targets (generate the REIT
4209 instruction otherwise). */
4210 if (fast_interrupt_p (cfun->decl))
4212 /* Check if fast_attribute is set for M32C or M32CM. */
4213 if (TARGET_A24)
4215 emit_jump_insn (gen_epilogue_freit ());
4217 /* If fast_interrupt attribute is set for an R8C or M16C
4218 target ignore this attribute and generated REIT
4219 instruction. */
4220 else
4222 warning (OPT_Wattributes,
4223 "%<fast_interrupt%> attribute directive ignored");
4224 emit_jump_insn (gen_epilogue_reit_16 ());
4227 else if (TARGET_A16)
4228 emit_jump_insn (gen_epilogue_reit_16 ());
4229 else
4230 emit_jump_insn (gen_epilogue_reit_24 ());
4232 else if (cfun->machine->use_rts)
4233 emit_jump_insn (gen_epilogue_rts ());
4234 else if (TARGET_A16)
4235 emit_jump_insn (gen_epilogue_exitd_16 ());
4236 else
4237 emit_jump_insn (gen_epilogue_exitd_24 ());
4240 void
4241 m32c_emit_eh_epilogue (rtx ret_addr)
4243 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4244 return to. We have to fudge the stack, pop everything, pop SP
4245 (fudged), and return (fudged). This is actually easier to do in
4246 assembler, so punt to libgcc. */
4247 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4248 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4251 /* Indicate which flags must be properly set for a given conditional. */
4252 static int
4253 flags_needed_for_conditional (rtx cond)
4255 switch (GET_CODE (cond))
4257 case LE:
4258 case GT:
4259 return FLAGS_OSZ;
4260 case LEU:
4261 case GTU:
4262 return FLAGS_ZC;
4263 case LT:
4264 case GE:
4265 return FLAGS_OS;
4266 case LTU:
4267 case GEU:
4268 return FLAGS_C;
4269 case EQ:
4270 case NE:
4271 return FLAGS_Z;
4272 default:
4273 return FLAGS_N;
4277 #define DEBUG_CMP 0
4279 /* Returns true if a compare insn is redundant because it would only
4280 set flags that are already set correctly. */
4281 static bool
4282 m32c_compare_redundant (rtx cmp, rtx *operands)
4284 int flags_needed;
4285 int pflags;
4286 rtx prev, pp, next;
4287 rtx op0, op1;
4288 #if DEBUG_CMP
4289 int prev_icode, i;
4290 #endif
4292 op0 = operands[0];
4293 op1 = operands[1];
4295 #if DEBUG_CMP
4296 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4297 debug_rtx(cmp);
4298 for (i=0; i<2; i++)
4300 fprintf(stderr, "operands[%d] = ", i);
4301 debug_rtx(operands[i]);
4303 #endif
4305 next = next_nonnote_insn (cmp);
4306 if (!next || !INSN_P (next))
4308 #if DEBUG_CMP
4309 fprintf(stderr, "compare not followed by insn\n");
4310 debug_rtx(next);
4311 #endif
4312 return false;
4314 if (GET_CODE (PATTERN (next)) == SET
4315 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4317 next = XEXP (XEXP (PATTERN (next), 1), 0);
4319 else if (GET_CODE (PATTERN (next)) == SET)
4321 /* If this is a conditional, flags_needed will be something
4322 other than FLAGS_N, which we test below. */
4323 next = XEXP (PATTERN (next), 1);
4325 else
4327 #if DEBUG_CMP
4328 fprintf(stderr, "compare not followed by conditional\n");
4329 debug_rtx(next);
4330 #endif
4331 return false;
4333 #if DEBUG_CMP
4334 fprintf(stderr, "conditional is: ");
4335 debug_rtx(next);
4336 #endif
4338 flags_needed = flags_needed_for_conditional (next);
4339 if (flags_needed == FLAGS_N)
4341 #if DEBUG_CMP
4342 fprintf(stderr, "compare not followed by conditional\n");
4343 debug_rtx(next);
4344 #endif
4345 return false;
4348 /* Compare doesn't set overflow and carry the same way that
4349 arithmetic instructions do, so we can't replace those. */
4350 if (flags_needed & FLAGS_OC)
4351 return false;
4353 prev = cmp;
4354 do {
4355 prev = prev_nonnote_insn (prev);
4356 if (!prev)
4358 #if DEBUG_CMP
4359 fprintf(stderr, "No previous insn.\n");
4360 #endif
4361 return false;
4363 if (!INSN_P (prev))
4365 #if DEBUG_CMP
4366 fprintf(stderr, "Previous insn is a non-insn.\n");
4367 #endif
4368 return false;
4370 pp = PATTERN (prev);
4371 if (GET_CODE (pp) != SET)
4373 #if DEBUG_CMP
4374 fprintf(stderr, "Previous insn is not a SET.\n");
4375 #endif
4376 return false;
4378 pflags = get_attr_flags (prev);
4380 /* Looking up attributes of previous insns corrupted the recog
4381 tables. */
4382 INSN_UID (cmp) = -1;
4383 recog (PATTERN (cmp), cmp, 0);
4385 if (pflags == FLAGS_N
4386 && reg_mentioned_p (op0, pp))
4388 #if DEBUG_CMP
4389 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4390 debug_rtx(prev);
4391 #endif
4392 return false;
4395 /* Check for comparisons against memory - between volatiles and
4396 aliases, we just can't risk this one. */
4397 if (GET_CODE (operands[0]) == MEM
4398 || GET_CODE (operands[0]) == MEM)
4400 #if DEBUG_CMP
4401 fprintf(stderr, "comparisons with memory:\n");
4402 debug_rtx(prev);
4403 #endif
4404 return false;
4407 /* Check for PREV changing a register that's used to compute a
4408 value in CMP, even if it doesn't otherwise change flags. */
4409 if (GET_CODE (operands[0]) == REG
4410 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4412 #if DEBUG_CMP
4413 fprintf(stderr, "sub-value affected, op0:\n");
4414 debug_rtx(prev);
4415 #endif
4416 return false;
4418 if (GET_CODE (operands[1]) == REG
4419 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4421 #if DEBUG_CMP
4422 fprintf(stderr, "sub-value affected, op1:\n");
4423 debug_rtx(prev);
4424 #endif
4425 return false;
4428 } while (pflags == FLAGS_N);
4429 #if DEBUG_CMP
4430 fprintf(stderr, "previous flag-setting insn:\n");
4431 debug_rtx(prev);
4432 debug_rtx(pp);
4433 #endif
4435 if (GET_CODE (pp) == SET
4436 && GET_CODE (XEXP (pp, 0)) == REG
4437 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4438 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4440 /* Adjacent cbranches must have the same operands to be
4441 redundant. */
4442 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4443 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4444 #if DEBUG_CMP
4445 fprintf(stderr, "adjacent cbranches\n");
4446 debug_rtx(pop0);
4447 debug_rtx(pop1);
4448 #endif
4449 if (rtx_equal_p (op0, pop0)
4450 && rtx_equal_p (op1, pop1))
4451 return true;
4452 #if DEBUG_CMP
4453 fprintf(stderr, "prev cmp not same\n");
4454 #endif
4455 return false;
4458 /* Else the previous insn must be a SET, with either the source or
4459 dest equal to operands[0], and operands[1] must be zero. */
4461 if (!rtx_equal_p (op1, const0_rtx))
4463 #if DEBUG_CMP
4464 fprintf(stderr, "operands[1] not const0_rtx\n");
4465 #endif
4466 return false;
4468 if (GET_CODE (pp) != SET)
4470 #if DEBUG_CMP
4471 fprintf (stderr, "pp not set\n");
4472 #endif
4473 return false;
4475 if (!rtx_equal_p (op0, SET_SRC (pp))
4476 && !rtx_equal_p (op0, SET_DEST (pp)))
4478 #if DEBUG_CMP
4479 fprintf(stderr, "operands[0] not found in set\n");
4480 #endif
4481 return false;
4484 #if DEBUG_CMP
4485 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4486 #endif
4487 if ((pflags & flags_needed) == flags_needed)
4488 return true;
4490 return false;
4493 /* Return the pattern for a compare. This will be commented out if
4494 the compare is redundant, else a normal pattern is returned. Thus,
4495 the assembler output says where the compare would have been. */
4496 char *
4497 m32c_output_compare (rtx insn, rtx *operands)
4499 static char templ[] = ";cmp.b\t%1,%0";
4500 /* ^ 5 */
4502 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4503 if (m32c_compare_redundant (insn, operands))
4505 #if DEBUG_CMP
4506 fprintf(stderr, "cbranch: cmp not needed\n");
4507 #endif
4508 return templ;
4511 #if DEBUG_CMP
4512 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4513 #endif
4514 return templ + 1;
4517 #undef TARGET_ENCODE_SECTION_INFO
4518 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4520 /* If the frame pointer isn't used, we detect it manually. But the
4521 stack pointer doesn't have as flexible addressing as the frame
4522 pointer, so we always assume we have it. */
4524 #undef TARGET_FRAME_POINTER_REQUIRED
4525 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4527 /* The Global `targetm' Variable. */
4529 struct gcc_target targetm = TARGET_INITIALIZER;
4531 #include "gt-m32c.h"