gcc/config/m32c/m32c.c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "real.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "flags.h"
36 #include "recog.h"
37 #include "reload.h"
38 #include "toplev.h"
39 #include "obstack.h"
40 #include "tree.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "except.h"
44 #include "function.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "tm_p.h"
49 #include "langhooks.h"
50 #include "tree-gimple.h"
51 #include "df.h"
53 /* Prototypes */
55 /* Used by m32c_pushm_popm. */
56 typedef enum
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61 } Push_Pop_Type;
63 static tree interrupt_handler (tree *, tree, tree, int, bool *);
64 static tree function_vector_handler (tree *, tree, tree, int, bool *);
65 static int interrupt_p (tree node);
66 static bool m32c_asm_integer (rtx, unsigned int, int);
67 static int m32c_comp_type_attributes (const_tree, const_tree);
68 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
69 static struct machine_function *m32c_init_machine_status (void);
70 static void m32c_insert_attributes (tree, tree *);
71 static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
72 const_tree, bool);
73 static bool m32c_promote_prototypes (const_tree);
74 static int m32c_pushm_popm (Push_Pop_Type);
75 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
76 static rtx m32c_struct_value_rtx (tree, int);
77 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
78 static int need_to_save (int);
79 int current_function_special_page_vector (rtx);
81 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
83 #define streq(a,b) (strcmp ((a), (b)) == 0)
85 /* Internal support routines */
87 /* Debugging statements are tagged with DEBUG0 only so that they can
88 be easily enabled individually, by replacing the '0' with '1' as
89 needed. */
90 #define DEBUG0 0
91 #define DEBUG1 1
93 #if DEBUG0
94 /* This is needed by some of the commented-out debug statements
95 below. */
96 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
97 #endif
98 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
100 /* These are all to support encode_pattern(). */
101 static char pattern[30], *patternp;
102 static GTY(()) rtx patternr[30];
103 #define RTX_IS(x) (streq (pattern, x))
105 /* Some macros to simplify the logic throughout this file. */
106 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
107 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
109 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
110 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
112 /* We do most RTX matching by converting the RTX into a string, and
113 using string compares. This vastly simplifies the logic in many of
114 the functions in this file.
116 On exit, pattern[] has the encoded string (use RTX_IS("...") to
117 compare it) and patternr[] has pointers to the nodes in the RTX
118 corresponding to each character in the encoded string. The latter
119 is mostly used by print_operand().
121 Unrecognized patterns have '?' in them; this shows up when the
122 assembler complains about syntax errors. */
125 static void
126 encode_pattern_1 (rtx x)
128 int i;
130 if (patternp == pattern + sizeof (pattern) - 2)
132 patternp[-1] = '?';
133 return;
136 patternr[patternp - pattern] = x;
138 switch (GET_CODE (x))
140 case REG:
141 *patternp++ = 'r';
142 break;
143 case SUBREG:
144 if (GET_MODE_SIZE (GET_MODE (x)) !=
145 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
146 *patternp++ = 'S';
147 encode_pattern_1 (XEXP (x, 0));
148 break;
149 case MEM:
150 *patternp++ = 'm';
151 case CONST:
152 encode_pattern_1 (XEXP (x, 0));
153 break;
154 case PLUS:
155 *patternp++ = '+';
156 encode_pattern_1 (XEXP (x, 0));
157 encode_pattern_1 (XEXP (x, 1));
158 break;
159 case PRE_DEC:
160 *patternp++ = '>';
161 encode_pattern_1 (XEXP (x, 0));
162 break;
163 case POST_INC:
164 *patternp++ = '<';
165 encode_pattern_1 (XEXP (x, 0));
166 break;
167 case LO_SUM:
168 *patternp++ = 'L';
169 encode_pattern_1 (XEXP (x, 0));
170 encode_pattern_1 (XEXP (x, 1));
171 break;
172 case HIGH:
173 *patternp++ = 'H';
174 encode_pattern_1 (XEXP (x, 0));
175 break;
176 case SYMBOL_REF:
177 *patternp++ = 's';
178 break;
179 case LABEL_REF:
180 *patternp++ = 'l';
181 break;
182 case CODE_LABEL:
183 *patternp++ = 'c';
184 break;
185 case CONST_INT:
186 case CONST_DOUBLE:
187 *patternp++ = 'i';
188 break;
189 case UNSPEC:
190 *patternp++ = 'u';
191 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
192 for (i = 0; i < XVECLEN (x, 0); i++)
193 encode_pattern_1 (XVECEXP (x, 0, i));
194 break;
195 case USE:
196 *patternp++ = 'U';
197 break;
198 case PARALLEL:
199 *patternp++ = '|';
200 for (i = 0; i < XVECLEN (x, 0); i++)
201 encode_pattern_1 (XVECEXP (x, 0, i));
202 break;
203 case EXPR_LIST:
204 *patternp++ = 'E';
205 encode_pattern_1 (XEXP (x, 0));
206 if (XEXP (x, 1))
207 encode_pattern_1 (XEXP (x, 1));
208 break;
209 default:
210 *patternp++ = '?';
211 #if DEBUG0
212 fprintf (stderr, "can't encode pattern %s\n",
213 GET_RTX_NAME (GET_CODE (x)));
214 debug_rtx (x);
215 gcc_unreachable ();
216 #endif
217 break;
221 static void
222 encode_pattern (rtx x)
224 patternp = pattern;
225 encode_pattern_1 (x);
226 *patternp = 0;
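/* For example, an address like (mem (plus (reg a0) (const_int 4)))
   encodes as "m+ri" -- 'm' for MEM, '+' for PLUS, 'r' for REG and 'i'
   for CONST_INT -- and patternr[2] / patternr[3] end up pointing at
   the REG and CONST_INT nodes.  Callers then test the shape with
   RTX_IS ("m+ri"), as m32c_extra_constraint_p2 does below.  */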
229 /* Since register names indicate the mode they're used in, we need a
230 way to pick the right name for a register in a given mode. Called
231 by print_operand(). */
233 static const char *
234 reg_name_with_mode (int regno, enum machine_mode mode)
236 int mlen = GET_MODE_SIZE (mode);
237 if (regno == R0_REGNO && mlen == 1)
238 return "r0l";
239 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
240 return "r2r0";
241 if (regno == R0_REGNO && mlen == 6)
242 return "r2r1r0";
243 if (regno == R0_REGNO && mlen == 8)
244 return "r3r1r2r0";
245 if (regno == R1_REGNO && mlen == 1)
246 return "r1l";
247 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
248 return "r3r1";
249 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
250 return "a1a0";
251 return reg_names[regno];
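/* For example, by the rules above r0 prints as "r0l" in QImode, "r0"
   in HImode, "r2r0" in SImode and "r3r1r2r0" in DImode, and a0 in a
   4-byte mode on a 16-bit part prints as the pair "a1a0".  */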
254 /* How many bytes a register uses on the stack when it's pushed. We
255 need to know this because the push opcode must explicitly indicate
256 the size of the register, even though the register name already
257 implies it. Used by m32c_output_reg_{push,pop}, which is only used
258 through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
260 static int
261 reg_push_size (int regno)
263 switch (regno)
265 case R0_REGNO:
266 case R1_REGNO:
267 return 2;
268 case R2_REGNO:
269 case R3_REGNO:
270 case FLG_REGNO:
271 return 2;
272 case A0_REGNO:
273 case A1_REGNO:
274 case SB_REGNO:
275 case FB_REGNO:
276 case SP_REGNO:
277 if (TARGET_A16)
278 return 2;
279 else
280 return 3;
281 default:
282 gcc_unreachable ();
286 static int *class_sizes = 0;
288 /* Given two register classes, find the largest intersection between
289 them. If there is no intersection, return RETURNED_IF_EMPTY
290 instead. */
291 static int
292 reduce_class (int original_class, int limiting_class, int returned_if_empty)
294 int cc = class_contents[original_class][0];
295 int i, best = NO_REGS;
296 int best_size = 0;
298 if (original_class == limiting_class)
299 return original_class;
301 if (!class_sizes)
303 int r;
304 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
305 for (i = 0; i < LIM_REG_CLASSES; i++)
307 class_sizes[i] = 0;
308 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
309 if (class_contents[i][0] & (1 << r))
310 class_sizes[i]++;
314 cc &= class_contents[limiting_class][0];
315 for (i = 0; i < LIM_REG_CLASSES; i++)
317 int ic = class_contents[i][0];
319 if ((~cc & ic) == 0)
320 if (best_size < class_sizes[i])
322 best = i;
323 best_size = class_sizes[i];
327 if (best == NO_REGS)
328 return returned_if_empty;
329 return best;
332 /* Returns TRUE if there are any registers that exist in both register
333 classes. */
334 static int
335 classes_intersect (int class1, int class2)
337 return class_contents[class1][0] & class_contents[class2][0];
340 /* Used by m32c_register_move_cost to determine if a move is
341 impossibly expensive. */
342 static int
343 class_can_hold_mode (int class, enum machine_mode mode)
345 /* Cache the results: 0=untested 1=no 2=yes */
346 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
347 if (results[class][mode] == 0)
349 int r, n, i;
350 results[class][mode] = 1;
351 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
352 if (class_contents[class][0] & (1 << r)
353 && HARD_REGNO_MODE_OK (r, mode))
355 int ok = 1;
356 n = HARD_REGNO_NREGS (r, mode);
357 for (i = 1; i < n; i++)
358 if (!(class_contents[class][0] & (1 << (r + i))))
359 ok = 0;
360 if (ok)
362 results[class][mode] = 2;
363 break;
367 #if DEBUG0
368 fprintf (stderr, "class %s can hold %s? %s\n",
369 class_names[class], mode_name[mode],
370 (results[class][mode] == 2) ? "yes" : "no");
371 #endif
372 return results[class][mode] == 2;
375 /* Run-time Target Specification. */
377 /* Memregs are memory locations that gcc treats like general
378 registers, as there are a limited number of true registers and the
379 m32c families can use memory in most places that registers can be
380 used.
382 However, since memory accesses are more expensive than registers,
383 we allow the user to limit the number of memregs available, in
384 order to try to persuade gcc to try harder to use real registers.
386 Memregs are provided by m32c-lib1.S. */
389 int target_memregs = 16;
390 static bool target_memregs_set = FALSE;
391 int ok_to_change_target_memregs = TRUE;
393 #undef TARGET_HANDLE_OPTION
394 #define TARGET_HANDLE_OPTION m32c_handle_option
395 static bool
396 m32c_handle_option (size_t code,
397 const char *arg ATTRIBUTE_UNUSED,
398 int value ATTRIBUTE_UNUSED)
400 if (code == OPT_memregs_)
402 target_memregs_set = TRUE;
403 target_memregs = atoi (arg);
405 return TRUE;
408 /* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
409 provide a default. */
410 void
411 m32c_override_options (void)
413 if (target_memregs_set)
415 if (target_memregs < 0 || target_memregs > 16)
416 error ("invalid target memregs value '%d'", target_memregs);
418 else
419 target_memregs = 16;
422 /* Defining data structures for per-function information */
424 /* The usual; we set up our machine_function data. */
425 static struct machine_function *
426 m32c_init_machine_status (void)
428 struct machine_function *machine;
429 machine =
430 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
432 return machine;
435 /* Implements INIT_EXPANDERS. We just set up to call the above
436 function. */
437 void
438 m32c_init_expanders (void)
440 init_machine_status = m32c_init_machine_status;
443 /* Storage Layout */
445 #undef TARGET_PROMOTE_FUNCTION_RETURN
446 #define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
447 bool
448 m32c_promote_function_return (const_tree fntype ATTRIBUTE_UNUSED)
450 return false;
453 /* Register Basics */
455 /* Basic Characteristics of Registers */
457 /* Whether a mode fits in a register is complex enough to warrant a
458 table. */
459 static struct
461 char qi_regs;
462 char hi_regs;
463 char pi_regs;
464 char si_regs;
465 char di_regs;
466 } nregs_table[FIRST_PSEUDO_REGISTER] =
468 { 1, 1, 2, 2, 4 }, /* r0 */
469 { 0, 1, 0, 0, 0 }, /* r2 */
470 { 1, 1, 2, 2, 0 }, /* r1 */
471 { 0, 1, 0, 0, 0 }, /* r3 */
472 { 0, 1, 1, 0, 0 }, /* a0 */
473 { 0, 1, 1, 0, 0 }, /* a1 */
474 { 0, 1, 1, 0, 0 }, /* sb */
475 { 0, 1, 1, 0, 0 }, /* fb */
476 { 0, 1, 1, 0, 0 }, /* sp */
477 { 1, 1, 1, 0, 0 }, /* pc */
478 { 0, 0, 0, 0, 0 }, /* fl */
479 { 1, 1, 1, 0, 0 }, /* ap */
480 { 1, 1, 2, 2, 4 }, /* mem0 */
481 { 1, 1, 2, 2, 4 }, /* mem1 */
482 { 1, 1, 2, 2, 4 }, /* mem2 */
483 { 1, 1, 2, 2, 4 }, /* mem3 */
484 { 1, 1, 2, 2, 4 }, /* mem4 */
485 { 1, 1, 2, 2, 0 }, /* mem5 */
486 { 1, 1, 2, 2, 0 }, /* mem6 */
487 { 1, 1, 0, 0, 0 }, /* mem7 */
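/* To read the table: each row gives how many hard registers are
   needed to hold a QI, HI, PSI, SI or DI value in that register, with
   0 meaning the mode doesn't fit there at all.  For example, the r0
   row { 1, 1, 2, 2, 4 } says QI and HI fit in r0 alone, PSI and SI
   spill into r2r0, and DI uses r3r1r2r0.  */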
490 /* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
491 available memregs, and select which registers need to be preserved
492 across calls based on the chip family. */
494 void
495 m32c_conditional_register_usage (void)
497 int i;
499 if (0 <= target_memregs && target_memregs <= 16)
501 /* The command line option is bytes, but our "registers" are
502 16-bit words. */
503 for (i = target_memregs/2; i < 8; i++)
505 fixed_regs[MEM0_REGNO + i] = 1;
506 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
510 /* M32CM and M32C preserve more registers across function calls. */
511 if (TARGET_A24)
513 call_used_regs[R1_REGNO] = 0;
514 call_used_regs[R2_REGNO] = 0;
515 call_used_regs[R3_REGNO] = 0;
516 call_used_regs[A0_REGNO] = 0;
517 call_used_regs[A1_REGNO] = 0;
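/* For example, since the -memregs= value is in bytes and mem0..mem7
   are 16-bit words, -memregs=8 leaves mem0..mem3 usable and marks
   mem4..mem7 as fixed (removing them from MEM_REGS), while
   -memregs=0 takes away all of the memory pseudo-registers.  */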
521 /* How Values Fit in Registers */
523 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
524 different registers are different sizes from each other, *and* may
525 be different sizes in different chip families. */
526 static int
527 m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
529 if (regno == FLG_REGNO && mode == CCmode)
530 return 1;
531 if (regno >= FIRST_PSEUDO_REGISTER)
532 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
534 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
535 return (GET_MODE_SIZE (mode) + 1) / 2;
537 if (GET_MODE_SIZE (mode) <= 1)
538 return nregs_table[regno].qi_regs;
539 if (GET_MODE_SIZE (mode) <= 2)
540 return nregs_table[regno].hi_regs;
541 if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
542 return 2;
543 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
544 return nregs_table[regno].pi_regs;
545 if (GET_MODE_SIZE (mode) <= 4)
546 return nregs_table[regno].si_regs;
547 if (GET_MODE_SIZE (mode) <= 8)
548 return nregs_table[regno].di_regs;
549 return 0;
553 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
555 int rv = m32c_hard_regno_nregs_1 (regno, mode);
556 return rv ? rv : 1;
559 /* Implements HARD_REGNO_MODE_OK. The above function does the work
560 already; just test its return value. */
562 m32c_hard_regno_ok (int regno, enum machine_mode mode)
564 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
567 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
568 registers are all different sizes. However, since most modes are
569 bigger than our registers anyway, it's easier to implement this
570 function that way, leaving QImode as the only unique case. */
572 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
574 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
575 return 1;
577 #if 0
578 if (m1 == QImode || m2 == QImode)
579 return 0;
580 #endif
582 return 1;
585 /* Register Classes */
587 /* Implements REGNO_REG_CLASS. */
588 enum machine_mode
589 m32c_regno_reg_class (int regno)
591 switch (regno)
593 case R0_REGNO:
594 return R0_REGS;
595 case R1_REGNO:
596 return R1_REGS;
597 case R2_REGNO:
598 return R2_REGS;
599 case R3_REGNO:
600 return R3_REGS;
601 case A0_REGNO:
602 case A1_REGNO:
603 return A_REGS;
604 case SB_REGNO:
605 return SB_REGS;
606 case FB_REGNO:
607 return FB_REGS;
608 case SP_REGNO:
609 return SP_REGS;
610 case FLG_REGNO:
611 return FLG_REGS;
612 default:
613 if (IS_MEM_REGNO (regno))
614 return MEM_REGS;
615 return ALL_REGS;
619 /* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
620 for certain chip families. */
622 m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
624 if (memcmp (s, "Rsp", 3) == 0)
625 return SP_REGS;
626 if (memcmp (s, "Rfb", 3) == 0)
627 return FB_REGS;
628 if (memcmp (s, "Rsb", 3) == 0)
629 return SB_REGS;
630 if (memcmp (s, "Rcr", 3) == 0)
631 return TARGET_A16 ? CR_REGS : NO_REGS;
632 if (memcmp (s, "Rcl", 3) == 0)
633 return TARGET_A24 ? CR_REGS : NO_REGS;
634 if (memcmp (s, "R0w", 3) == 0)
635 return R0_REGS;
636 if (memcmp (s, "R1w", 3) == 0)
637 return R1_REGS;
638 if (memcmp (s, "R2w", 3) == 0)
639 return R2_REGS;
640 if (memcmp (s, "R3w", 3) == 0)
641 return R3_REGS;
642 if (memcmp (s, "R02", 3) == 0)
643 return R02_REGS;
644 if (memcmp (s, "R03", 3) == 0)
645 return R03_REGS;
646 if (memcmp (s, "Rdi", 3) == 0)
647 return DI_REGS;
648 if (memcmp (s, "Rhl", 3) == 0)
649 return HL_REGS;
650 if (memcmp (s, "R23", 3) == 0)
651 return R23_REGS;
652 if (memcmp (s, "Ra0", 3) == 0)
653 return A0_REGS;
654 if (memcmp (s, "Ra1", 3) == 0)
655 return A1_REGS;
656 if (memcmp (s, "Raa", 3) == 0)
657 return A_REGS;
658 if (memcmp (s, "Raw", 3) == 0)
659 return TARGET_A16 ? A_REGS : NO_REGS;
660 if (memcmp (s, "Ral", 3) == 0)
661 return TARGET_A24 ? A_REGS : NO_REGS;
662 if (memcmp (s, "Rqi", 3) == 0)
663 return QI_REGS;
664 if (memcmp (s, "Rad", 3) == 0)
665 return AD_REGS;
666 if (memcmp (s, "Rsi", 3) == 0)
667 return SI_REGS;
668 if (memcmp (s, "Rhi", 3) == 0)
669 return HI_REGS;
670 if (memcmp (s, "Rhc", 3) == 0)
671 return HC_REGS;
672 if (memcmp (s, "Rra", 3) == 0)
673 return RA_REGS;
674 if (memcmp (s, "Rfl", 3) == 0)
675 return FLG_REGS;
676 if (memcmp (s, "Rmm", 3) == 0)
678 if (fixed_regs[MEM0_REGNO])
679 return NO_REGS;
680 return MEM_REGS;
683 /* PSImode registers - i.e. whatever can hold a pointer. */
684 if (memcmp (s, "Rpi", 3) == 0)
686 if (TARGET_A16)
687 return HI_REGS;
688 else
689 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
692 /* We handle this one as an EXTRA_CONSTRAINT. */
693 if (memcmp (s, "Rpa", 3) == 0)
694 return NO_REGS;
696 if (*s == 'R')
698 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
699 gcc_unreachable();
702 return NO_REGS;
705 /* Implements REGNO_OK_FOR_BASE_P. */
707 m32c_regno_ok_for_base_p (int regno)
709 if (regno == A0_REGNO
710 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
711 return 1;
712 return 0;
715 #define DEBUG_RELOAD 0
717 /* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
718 registers of the appropriate size. */
720 m32c_preferred_reload_class (rtx x, int rclass)
722 int newclass = rclass;
724 #if DEBUG_RELOAD
725 fprintf (stderr, "\npreferred_reload_class for %s is ",
726 class_names[rclass]);
727 #endif
728 if (rclass == NO_REGS)
729 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
731 if (classes_intersect (rclass, CR_REGS))
733 switch (GET_MODE (x))
735 case QImode:
736 newclass = HL_REGS;
737 break;
738 default:
739 /* newclass = HI_REGS; */
740 break;
744 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
745 newclass = SI_REGS;
746 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
747 && ~class_contents[rclass][0] & 0x000f)
748 newclass = DI_REGS;
750 rclass = reduce_class (rclass, newclass, rclass);
752 if (GET_MODE (x) == QImode)
753 rclass = reduce_class (rclass, HL_REGS, rclass);
755 #if DEBUG_RELOAD
756 fprintf (stderr, "%s\n", class_names[rclass]);
757 debug_rtx (x);
759 if (GET_CODE (x) == MEM
760 && GET_CODE (XEXP (x, 0)) == PLUS
761 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
762 fprintf (stderr, "Glorm!\n");
763 #endif
764 return rclass;
767 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
769 m32c_preferred_output_reload_class (rtx x, int rclass)
771 return m32c_preferred_reload_class (x, rclass);
774 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid handing
775 out address registers for ordinary reloads, since they're needed
776 for address reloads. */
778 m32c_limit_reload_class (enum machine_mode mode, int rclass)
780 #if DEBUG_RELOAD
781 fprintf (stderr, "limit_reload_class for %s: %s ->",
782 mode_name[mode], class_names[rclass]);
783 #endif
785 if (mode == QImode)
786 rclass = reduce_class (rclass, HL_REGS, rclass);
787 else if (mode == HImode)
788 rclass = reduce_class (rclass, HI_REGS, rclass);
789 else if (mode == SImode)
790 rclass = reduce_class (rclass, SI_REGS, rclass);
792 if (rclass != A_REGS)
793 rclass = reduce_class (rclass, DI_REGS, rclass);
795 #if DEBUG_RELOAD
796 fprintf (stderr, " %s\n", class_names[rclass]);
797 #endif
798 return rclass;
801 /* Implements SECONDARY_RELOAD_CLASS. QImode values have to be reloaded
802 in r0 or r1, as those are the only real QImode registers. CR regs get
803 reloaded through appropriately sized general or address
804 registers. */
806 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
808 int cc = class_contents[rclass][0];
809 #if DEBUG0
810 fprintf (stderr, "\nsecondary reload class %s %s\n",
811 class_names[rclass], mode_name[mode]);
812 debug_rtx (x);
813 #endif
814 if (mode == QImode
815 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
816 return QI_REGS;
817 if (classes_intersect (rclass, CR_REGS)
818 && GET_CODE (x) == REG
819 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
820 return TARGET_A16 ? HI_REGS : A_REGS;
821 return NO_REGS;
824 /* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
825 reloads. */
827 m32c_class_likely_spilled_p (int regclass)
829 if (regclass == A_REGS)
830 return 1;
831 return reg_class_size[regclass] == 1;
834 /* Implements CLASS_MAX_NREGS. We calculate this according to its
835 documented meaning, to avoid potential inconsistencies with actual
836 class definitions. */
838 m32c_class_max_nregs (int regclass, enum machine_mode mode)
840 int rn, max = 0;
842 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
843 if (class_contents[regclass][0] & (1 << rn))
845 int n = m32c_hard_regno_nregs (rn, mode);
846 if (max < n)
847 max = n;
849 return max;
852 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
853 QI (r0l, r1l) because the chip doesn't support QI ops on other
854 registers (well, it does on a0/a1 but if we let gcc do that, reload
855 suffers). Otherwise, we allow changes to larger modes. */
857 m32c_cannot_change_mode_class (enum machine_mode from,
858 enum machine_mode to, int rclass)
860 int rn;
861 #if DEBUG0
862 fprintf (stderr, "cannot change from %s to %s in %s\n",
863 mode_name[from], mode_name[to], class_names[rclass]);
864 #endif
866 /* If the larger mode isn't allowed in any of these registers, we
867 can't allow the change. */
868 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
869 if (class_contents[rclass][0] & (1 << rn))
870 if (! m32c_hard_regno_ok (rn, to))
871 return 1;
873 if (to == QImode)
874 return (class_contents[rclass][0] & 0x1ffa);
876 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
877 && GET_MODE_SIZE (from) > 1)
878 return 0;
879 if (GET_MODE_SIZE (from) > 2) /* all other regs */
880 return 0;
882 return 1;
885 /* Helpers for the rest of the file. */
886 /* TRUE if the rtx is a REG rtx for the given register. */
887 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
888 && REGNO (rtx) == regno)
889 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
890 base register in address calculations (hence the "strict"
891 argument). */
892 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
893 && (REGNO (rtx) == AP_REGNO \
894 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
896 /* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
897 constraints start with 'I', with the next two characters indicating
898 the type and size of the range allowed. */
900 m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
901 char c ATTRIBUTE_UNUSED, const char *str)
903 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
904 [sun] bits [SUN] bytes, p=pointer size
905 I[-0-9][0-9] matches that number */
906 if (memcmp (str, "Is3", 3) == 0)
908 return (-8 <= value && value <= 7);
910 if (memcmp (str, "IS1", 3) == 0)
912 return (-128 <= value && value <= 127);
914 if (memcmp (str, "IS2", 3) == 0)
916 return (-32768 <= value && value <= 32767);
918 if (memcmp (str, "IU2", 3) == 0)
920 return (0 <= value && value <= 65535);
922 if (memcmp (str, "IU3", 3) == 0)
924 return (0 <= value && value <= 0x00ffffff);
926 if (memcmp (str, "In4", 3) == 0)
928 return (-8 <= value && value && value <= 8);
930 if (memcmp (str, "In5", 3) == 0)
932 return (-16 <= value && value && value <= 16);
934 if (memcmp (str, "In6", 3) == 0)
936 return (-32 <= value && value && value <= 32);
938 if (memcmp (str, "IM2", 3) == 0)
940 return (-65536 <= value && value && value <= -1);
942 if (memcmp (str, "Ilb", 3) == 0)
944 int b = exact_log2 (value);
945 return (b >= 0 && b <= 7);
947 if (memcmp (str, "Imb", 3) == 0)
949 int b = exact_log2 ((value ^ 0xff) & 0xff);
950 return (b >= 0 && b <= 7);
952 if (memcmp (str, "Ilw", 3) == 0)
954 int b = exact_log2 (value);
955 return (b >= 0 && b <= 15);
957 if (memcmp (str, "Imw", 3) == 0)
959 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
960 return (b >= 0 && b <= 15);
962 if (memcmp (str, "I00", 3) == 0)
964 return (value == 0);
966 return 0;
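/* For example, with the encodings above "Is3" accepts any value in
   [-8,7] (so 5 matches but 9 doesn't), "IU2" accepts [0,65535],
   "In4" accepts [-8,8] excluding zero, and "Ilb" accepts a power of
   two whose log2 is a bit number in 0..7.  */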
969 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
970 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
971 call return values. */
973 m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
975 encode_pattern (value);
976 if (memcmp (str, "Sd", 2) == 0)
978 /* This is the common "src/dest" address */
979 rtx r;
980 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
981 return 1;
982 if (RTX_IS ("ms") || RTX_IS ("m+si"))
983 return 1;
984 if (RTX_IS ("m++rii"))
986 if (REGNO (patternr[3]) == FB_REGNO
987 && INTVAL (patternr[4]) == 0)
988 return 1;
990 if (RTX_IS ("mr"))
991 r = patternr[1];
992 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
993 r = patternr[2];
994 else
995 return 0;
996 if (REGNO (r) == SP_REGNO)
997 return 0;
998 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1000 else if (memcmp (str, "Sa", 2) == 0)
1002 rtx r;
1003 if (RTX_IS ("mr"))
1004 r = patternr[1];
1005 else if (RTX_IS ("m+ri"))
1006 r = patternr[2];
1007 else
1008 return 0;
1009 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1011 else if (memcmp (str, "Si", 2) == 0)
1013 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1015 else if (memcmp (str, "Ss", 2) == 0)
1017 return ((RTX_IS ("mr")
1018 && (IS_REG (patternr[1], SP_REGNO)))
1019 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1021 else if (memcmp (str, "Sf", 2) == 0)
1023 return ((RTX_IS ("mr")
1024 && (IS_REG (patternr[1], FB_REGNO)))
1025 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1027 else if (memcmp (str, "Sb", 2) == 0)
1029 return ((RTX_IS ("mr")
1030 && (IS_REG (patternr[1], SB_REGNO)))
1031 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1033 else if (memcmp (str, "Sp", 2) == 0)
1035 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1036 return (RTX_IS ("mi")
1037 && !(INTVAL (patternr[1]) & ~0x1fff));
1039 else if (memcmp (str, "S1", 2) == 0)
1041 return r1h_operand (value, QImode);
1044 gcc_assert (str[0] != 'S');
1046 if (memcmp (str, "Rpa", 2) == 0)
1047 return GET_CODE (value) == PARALLEL;
1049 return 0;
1052 /* This is for when we're debugging the above. */
1054 m32c_extra_constraint_p (rtx value, char c, const char *str)
1056 int rv = m32c_extra_constraint_p2 (value, c, str);
1057 #if DEBUG0
1058 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1059 rv);
1060 debug_rtx (value);
1061 #endif
1062 return rv;
1065 /* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1066 starting with 'S'. */
1068 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1070 return c == 'S';
1073 /* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1074 but don't currently define any. */
1076 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1078 return c == 'A';
1081 /* STACK AND CALLING */
1083 /* Frame Layout */
1085 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1086 (yes, THREE bytes) onto the stack for the return address, but we
1087 don't support pointers bigger than 16 bits on those chips. This
1088 will likely wreak havoc with exception unwinding. FIXME. */
1090 m32c_return_addr_rtx (int count)
1092 enum machine_mode mode;
1093 int offset;
1094 rtx ra_mem;
1096 if (count)
1097 return NULL_RTX;
1098 /* we want 2[$fb] */
1100 if (TARGET_A24)
1102 /* It's four bytes */
1103 mode = PSImode;
1104 offset = 4;
1106 else
1108 /* FIXME: it's really 3 bytes */
1109 mode = HImode;
1110 offset = 2;
1113 ra_mem =
1114 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1115 return copy_to_mode_reg (mode, ra_mem);
1118 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1120 m32c_incoming_return_addr_rtx (void)
1122 /* we want [sp] */
1123 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1126 /* Exception Handling Support */
1128 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1129 pointers. */
1131 m32c_eh_return_data_regno (int n)
1133 switch (n)
1135 case 0:
1136 return A0_REGNO;
1137 case 1:
1138 if (TARGET_A16)
1139 return R3_REGNO;
1140 else
1141 return R1_REGNO;
1142 default:
1143 return INVALID_REGNUM;
1147 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1148 m32c_emit_eh_epilogue. */
1150 m32c_eh_return_stackadj_rtx (void)
1152 if (!cfun->machine->eh_stack_adjust)
1154 rtx sa;
1156 sa = gen_rtx_REG (Pmode, R0_REGNO);
1157 cfun->machine->eh_stack_adjust = sa;
1159 return cfun->machine->eh_stack_adjust;
1162 /* Registers That Address the Stack Frame */
1164 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1165 the original spec called for dwarf numbers to vary with register
1166 width as well, for example, r0l, r0, and r2r0 would each have
1167 different dwarf numbers. GCC doesn't support this, and we don't do
1168 it, and gdb seems to like it this way anyway. */
1169 unsigned int
1170 m32c_dwarf_frame_regnum (int n)
1172 switch (n)
1174 case R0_REGNO:
1175 return 5;
1176 case R1_REGNO:
1177 return 6;
1178 case R2_REGNO:
1179 return 7;
1180 case R3_REGNO:
1181 return 8;
1182 case A0_REGNO:
1183 return 9;
1184 case A1_REGNO:
1185 return 10;
1186 case FB_REGNO:
1187 return 11;
1188 case SB_REGNO:
1189 return 19;
1191 case SP_REGNO:
1192 return 12;
1193 case PC_REGNO:
1194 return 13;
1195 default:
1196 return DWARF_FRAME_REGISTERS + 1;
1200 /* The frame looks like this:
1202 ap -> +------------------------------
1203 | Return address (3 or 4 bytes)
1204 | Saved FB (2 or 4 bytes)
1205 fb -> +------------------------------
1206 | local vars
1207 | register saves fb
1208 | through r0 as needed
1209 sp -> +------------------------------  */
1212 /* We use this to wrap all emitted insns in the prologue. */
1213 static rtx
1214 F (rtx x)
1216 RTX_FRAME_RELATED_P (x) = 1;
1217 return x;
1220 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1221 how much the stack pointer moves for each register, for each cpu family. */
1222 static struct
1224 int reg1;
1225 int bit;
1226 int a16_bytes;
1227 int a24_bytes;
1228 } pushm_info[] =
1230 /* These are in reverse push (nearest-to-sp) order. */
1231 { R0_REGNO, 0x80, 2, 2 },
1232 { R1_REGNO, 0x40, 2, 2 },
1233 { R2_REGNO, 0x20, 2, 2 },
1234 { R3_REGNO, 0x10, 2, 2 },
1235 { A0_REGNO, 0x08, 2, 4 },
1236 { A1_REGNO, 0x04, 2, 4 },
1237 { SB_REGNO, 0x02, 2, 4 },
1238 { FB_REGNO, 0x01, 2, 4 }
1241 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1243 /* Returns TRUE if we need to save/restore the given register. We
1244 save everything for exception handlers, so that any register can be
1245 unwound. For interrupt handlers, we save everything if the handler
1246 calls something else (because we don't know what *that* function
1247 might do), but try to be a bit smarter if the handler is a leaf
1248 function. We always save $a0, though, because we use that in the
1249 epilogue to copy $fb to $sp. */
1250 static int
1251 need_to_save (int regno)
1253 if (fixed_regs[regno])
1254 return 0;
1255 if (crtl->calls_eh_return)
1256 return 1;
1257 if (regno == FP_REGNO)
1258 return 0;
1259 if (cfun->machine->is_interrupt
1260 && (!cfun->machine->is_leaf || regno == A0_REGNO))
1261 return 1;
1262 if (df_regs_ever_live_p (regno)
1263 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1264 return 1;
1265 return 0;
1268 /* This function contains all the intelligence about saving and
1269 restoring registers. It always figures out the register save set.
1270 When called with PP_justcount, it merely returns the size of the
1271 save set (for eliminating the frame pointer, for example). When
1272 called with PP_pushm or PP_popm, it emits the appropriate
1273 instructions for saving (pushm) or restoring (popm) the
1274 registers. */
1275 static int
1276 m32c_pushm_popm (Push_Pop_Type ppt)
1278 int reg_mask = 0;
1279 int byte_count = 0, bytes;
1280 int i;
1281 rtx dwarf_set[PUSHM_N];
1282 int n_dwarfs = 0;
1283 int nosave_mask = 0;
1285 if (crtl->return_rtx
1286 && GET_CODE (crtl->return_rtx) == PARALLEL
1287 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1289 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1290 rtx rv = XEXP (exp, 0);
1291 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1293 if (rv_bytes > 2)
1294 nosave_mask |= 0x20; /* PSI, SI */
1295 else
1296 nosave_mask |= 0xf0; /* DF */
1297 if (rv_bytes > 4)
1298 nosave_mask |= 0x50; /* DI */
1301 for (i = 0; i < (int) PUSHM_N; i++)
1303 /* Skip if the register doesn't need saving. */
1304 if (!need_to_save (pushm_info[i].reg1))
1305 continue;
1307 if (pushm_info[i].bit & nosave_mask)
1308 continue;
1310 reg_mask |= pushm_info[i].bit;
1311 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1313 if (ppt == PP_pushm)
1315 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1316 rtx addr;
1318 /* Always use stack_pointer_rtx instead of calling
1319 gen_rtx_REG ourselves. Code elsewhere in GCC assumes
1320 that there is a single rtx representing the stack pointer,
1321 namely stack_pointer_rtx, and uses == to recognize it. */
1322 addr = stack_pointer_rtx;
1324 if (byte_count != 0)
1325 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1327 dwarf_set[n_dwarfs++] =
1328 gen_rtx_SET (VOIDmode,
1329 gen_rtx_MEM (mode, addr),
1330 gen_rtx_REG (mode, pushm_info[i].reg1));
1331 F (dwarf_set[n_dwarfs - 1]);
1334 byte_count += bytes;
1337 if (cfun->machine->is_interrupt)
1339 cfun->machine->intr_pushm = reg_mask & 0xfe;
1340 reg_mask = 0;
1341 byte_count = 0;
1344 if (cfun->machine->is_interrupt)
1345 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1346 if (need_to_save (i))
1348 byte_count += 2;
1349 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1352 if (ppt == PP_pushm && byte_count)
1354 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1355 rtx pushm;
1357 if (reg_mask)
1359 XVECEXP (note, 0, 0)
1360 = gen_rtx_SET (VOIDmode,
1361 stack_pointer_rtx,
1362 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1363 stack_pointer_rtx,
1364 GEN_INT (-byte_count)));
1365 F (XVECEXP (note, 0, 0));
1367 for (i = 0; i < n_dwarfs; i++)
1368 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1370 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1372 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1373 REG_NOTES (pushm));
1376 if (cfun->machine->is_interrupt)
1377 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1378 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1380 if (TARGET_A16)
1381 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1382 else
1383 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1384 F (pushm);
1387 if (ppt == PP_popm && byte_count)
1389 if (cfun->machine->is_interrupt)
1390 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1391 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1393 if (TARGET_A16)
1394 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1395 else
1396 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1398 if (reg_mask)
1399 emit_insn (gen_popm (GEN_INT (reg_mask)));
1402 return byte_count;
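/* For example, using the table above, a function that must save r1,
   r3 and fb emits pushm with mask 0x40 | 0x10 | 0x01 = 0x51; that
   moves $sp by 2+2+2 = 6 bytes on an A16 part and by 2+2+4 = 8 bytes
   on A24, which is also the byte count PP_justcount reports for
   frame-layout purposes.  */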
1405 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1406 diagrams our call frame. */
1408 m32c_initial_elimination_offset (int from, int to)
1410 int ofs = 0;
1412 if (from == AP_REGNO)
1414 if (TARGET_A16)
1415 ofs += 5;
1416 else
1417 ofs += 8;
1420 if (to == SP_REGNO)
1422 ofs += m32c_pushm_popm (PP_justcount);
1423 ofs += get_frame_size ();
1426 /* Account for push rounding. */
1427 if (TARGET_A24)
1428 ofs = (ofs + 1) & ~1;
1429 #if DEBUG0
1430 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1431 to, ofs);
1432 #endif
1433 return ofs;
1436 /* Passing Function Arguments on the Stack */
1438 #undef TARGET_PROMOTE_PROTOTYPES
1439 #define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1440 static bool
1441 m32c_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
1443 return 0;
1446 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1447 M32C has word stacks. */
1449 m32c_push_rounding (int n)
1451 if (TARGET_R8C || TARGET_M16C)
1452 return n;
1453 return (n + 1) & ~1;
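/* For example, a 3-byte push occupies 3 bytes on the byte-stack
   R8C/M16C but is rounded up to 4 bytes on the word-stack M32C,
   since (3 + 1) & ~1 == 4.  */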
1456 /* Passing Arguments in Registers */
1458 /* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1459 partly on stack. If our function returns a struct, a pointer to a
1460 buffer for it is at the top of the stack (last thing pushed). The
1461 first few real arguments may be in registers as follows:
1463 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1464 arg2 in r2 if it's HI (else pushed on stack)
1465 rest on stack
1466 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1467 rest on stack
1469 Structs are not passed in registers, even if they fit. Only
1470 integer and pointer types are passed in registers.
1472 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1473 r2 if it fits. */
1475 m32c_function_arg (CUMULATIVE_ARGS * ca,
1476 enum machine_mode mode, tree type, int named)
1478 /* Can return a reg, parallel, or 0 for stack */
1479 rtx rv = NULL_RTX;
1480 #if DEBUG0
1481 fprintf (stderr, "func_arg %d (%s, %d)\n",
1482 ca->parm_num, mode_name[mode], named);
1483 debug_tree (type);
1484 #endif
1486 if (mode == VOIDmode)
1487 return GEN_INT (0);
1489 if (ca->force_mem || !named)
1491 #if DEBUG0
1492 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1493 named);
1494 #endif
1495 return NULL_RTX;
1498 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1499 return NULL_RTX;
1501 if (type && AGGREGATE_TYPE_P (type))
1502 return NULL_RTX;
1504 switch (ca->parm_num)
1506 case 1:
1507 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1508 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1509 break;
1511 case 2:
1512 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1513 rv = gen_rtx_REG (mode, R2_REGNO);
1514 break;
1517 #if DEBUG0
1518 debug_rtx (rv);
1519 #endif
1520 return rv;
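/* Worked example (assuming the usual 16-bit int and 32-bit long on
   these parts): for a hypothetical "void f (long a, int b)" on
   R8C/M16C, "a" is SImode and too wide for r1, so it goes on the
   stack, but "b" is still passed in r2 because it is argument 2 and
   fits in HImode; on M32C only argument 1 can use a register (r0),
   so here both go on the stack.  */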
1523 #undef TARGET_PASS_BY_REFERENCE
1524 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1525 static bool
1526 m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1527 enum machine_mode mode ATTRIBUTE_UNUSED,
1528 const_tree type ATTRIBUTE_UNUSED,
1529 bool named ATTRIBUTE_UNUSED)
1531 return 0;
1534 /* Implements INIT_CUMULATIVE_ARGS. */
1535 void
1536 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1537 tree fntype,
1538 rtx libname ATTRIBUTE_UNUSED,
1539 tree fndecl,
1540 int n_named_args ATTRIBUTE_UNUSED)
1542 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1543 ca->force_mem = 1;
1544 else
1545 ca->force_mem = 0;
1546 ca->parm_num = 1;
1549 /* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1550 returning structures, so we always reset that. Otherwise, we only
1551 need to know the sequence number of the argument to know what to do
1552 with it. */
1553 void
1554 m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1555 enum machine_mode mode ATTRIBUTE_UNUSED,
1556 tree type ATTRIBUTE_UNUSED,
1557 int named ATTRIBUTE_UNUSED)
1559 if (ca->force_mem)
1560 ca->force_mem = 0;
1561 else
1562 ca->parm_num++;
1565 /* Implements FUNCTION_ARG_REGNO_P. */
1567 m32c_function_arg_regno_p (int r)
1569 if (TARGET_A24)
1570 return (r == R0_REGNO);
1571 return (r == R1_REGNO || r == R2_REGNO);
1574 /* HImode and PSImode are the two "native" modes as far as GCC is
1575 concerned, but the chips also support a 32-bit mode which is used
1576 for some opcodes in R8C/M16C and for reset vectors and such. */
1577 #undef TARGET_VALID_POINTER_MODE
1578 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1579 static bool
1580 m32c_valid_pointer_mode (enum machine_mode mode)
1582 if (mode == HImode
1583 || mode == PSImode
1584 || mode == SImode
1586 return 1;
1587 return 0;
1590 /* How Scalar Function Values Are Returned */
1592 /* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1593 combination of registers starting there (r2r0 for longs, r3r1r2r0
1594 for long long, r3r2r1r0 for doubles), except that that ABI
1595 currently doesn't work because it ends up using all available
1596 general registers and gcc often can't compile it. So, instead, we
1597 return anything bigger than 16 bits in "mem0" (effectively, a
1598 memory location). */
1600 m32c_libcall_value (enum machine_mode mode)
1602 /* return reg or parallel */
1603 #if 0
1604 /* FIXME: GCC has difficulty returning large values in registers,
1605 because that ties up most of the general registers and gives the
1606 register allocator little to work with. Until we can resolve
1607 this, large values are returned in memory. */
1608 if (mode == DFmode)
1610 rtx rv;
1612 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1613 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1614 gen_rtx_REG (HImode,
1615 R0_REGNO),
1616 GEN_INT (0));
1617 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1618 gen_rtx_REG (HImode,
1619 R1_REGNO),
1620 GEN_INT (2));
1621 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1622 gen_rtx_REG (HImode,
1623 R2_REGNO),
1624 GEN_INT (4));
1625 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1626 gen_rtx_REG (HImode,
1627 R3_REGNO),
1628 GEN_INT (6));
1629 return rv;
1632 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1634 rtx rv;
1636 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1637 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1638 gen_rtx_REG (mode,
1639 R0_REGNO),
1640 GEN_INT (0));
1641 return rv;
1643 #endif
1645 if (GET_MODE_SIZE (mode) > 2)
1646 return gen_rtx_REG (mode, MEM0_REGNO);
1647 return gen_rtx_REG (mode, R0_REGNO);
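/* For example, QImode and HImode results come back in r0, while
   PSImode, SImode, DImode and anything else wider than 16 bits comes
   back in mem0, the first memory pseudo-register.  */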
1650 /* Implements FUNCTION_VALUE. Functions and libcalls have the same
1651 conventions. */
1653 m32c_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
1655 /* return reg or parallel */
1656 const enum machine_mode mode = TYPE_MODE (valtype);
1657 return m32c_libcall_value (mode);
1660 /* How Large Values Are Returned */
1662 /* We return structures by pushing the address on the stack, even if
1663 we use registers for the first few "real" arguments. */
1664 #undef TARGET_STRUCT_VALUE_RTX
1665 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1666 static rtx
1667 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1668 int incoming ATTRIBUTE_UNUSED)
1670 return 0;
1673 /* Function Entry and Exit */
1675 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1677 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1679 if (cfun->machine->is_interrupt)
1680 return 1;
1681 return 0;
1684 /* Implementing the Varargs Macros */
1686 #undef TARGET_STRICT_ARGUMENT_NAMING
1687 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1688 static bool
1689 m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1691 return 1;
1694 /* Trampolines for Nested Functions */
1697 /* m16c:
1698 1 0000 75C43412 mov.w #0x1234,a0
1699 2 0004 FC000000 jmp.a label
1701 m32c:
1702 1 0000 BC563412 mov.l:s #0x123456,a0
1703 2 0004 CC000000 jmp.a label */
1706 /* Implements TRAMPOLINE_SIZE. */
1708 m32c_trampoline_size (void)
1710 /* Allocate extra space so we can avoid the messy shifts when we
1711 initialize the trampoline; we just write past the end of the
1712 opcode. */
1713 return TARGET_A16 ? 8 : 10;
1716 /* Implements TRAMPOLINE_ALIGNMENT. */
1718 m32c_trampoline_alignment (void)
1720 return 2;
1723 /* Implements INITIALIZE_TRAMPOLINE. */
1724 void
1725 m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1727 #define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1728 if (TARGET_A16)
1730 /* Note: we subtract a "word" because the moves want signed
1731 constants, not unsigned constants. */
1732 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1733 emit_move_insn (A0 (HImode, 2), chainval);
1734 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1735 /* We use 16-bit addresses here, but store the zero to turn it
1736 into a 24-bit offset. */
1737 emit_move_insn (A0 (HImode, 5), function);
1738 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1740 else
1742 /* Note that the PSI moves actually write 4 bytes. Make sure we
1743 write stuff out in the right order, and leave room for the
1744 extra byte at the end. */
1745 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1746 emit_move_insn (A0 (PSImode, 1), chainval);
1747 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1748 emit_move_insn (A0 (PSImode, 5), function);
1750 #undef A0
1753 /* Implicit Calls to Library Routines */
1755 #undef TARGET_INIT_LIBFUNCS
1756 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1757 static void
1758 m32c_init_libfuncs (void)
1760 if (TARGET_A24)
1762 /* We do this because the M32C has an HImode operand, but the
1763 M16C has an 8-bit operand. Since gcc looks at the match data
1764 and not the expanded rtl, we have to reset the array so that
1765 the right modes are found. */
1766 setcc_gen_code[EQ] = CODE_FOR_seq_24;
1767 setcc_gen_code[NE] = CODE_FOR_sne_24;
1768 setcc_gen_code[GT] = CODE_FOR_sgt_24;
1769 setcc_gen_code[GE] = CODE_FOR_sge_24;
1770 setcc_gen_code[LT] = CODE_FOR_slt_24;
1771 setcc_gen_code[LE] = CODE_FOR_sle_24;
1772 setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1773 setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1774 setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1775 setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1779 /* Addressing Modes */
1781 /* Used by GO_IF_LEGITIMATE_ADDRESS. The r8c/m32c family supports a
1782 wide range of non-orthogonal addressing modes, including the
1783 ability to double-indirect on *some* of them. Not all insns
1784 support all modes, either, but we rely on predicates and
1785 constraints to deal with that. */
1787 m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1789 int mode_adjust;
1790 if (CONSTANT_P (x))
1791 return 1;
1793 /* Wide references to memory will be split after reload, so we must
1794 ensure that all parts of such splits remain legitimate
1795 addresses. */
1796 mode_adjust = GET_MODE_SIZE (mode) - 1;
1798 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1799 if (GET_CODE (x) == PRE_DEC
1800 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1802 return (GET_CODE (XEXP (x, 0)) == REG
1803 && REGNO (XEXP (x, 0)) == SP_REGNO);
1806 #if 0
1807 /* This is the double indirection detection, but it currently
1808 doesn't work as cleanly as this code implies, so until we've had
1809 a chance to debug it, leave it disabled. */
1810 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1812 #if DEBUG_DOUBLE
1813 fprintf (stderr, "double indirect\n");
1814 #endif
1815 x = XEXP (x, 0);
1817 #endif
1819 encode_pattern (x);
1820 if (RTX_IS ("r"))
1822 /* Most indexable registers can be used without displacements,
1823 although some of them will be emitted with an explicit zero
1824 to please the assembler. */
1825 switch (REGNO (patternr[0]))
1827 case A0_REGNO:
1828 case A1_REGNO:
1829 case SB_REGNO:
1830 case FB_REGNO:
1831 case SP_REGNO:
1832 return 1;
1834 default:
1835 if (IS_PSEUDO (patternr[0], strict))
1836 return 1;
1837 return 0;
1840 if (RTX_IS ("+ri"))
1842 /* This is more interesting, because different base registers
1843 allow for different displacements - both range and signedness
1844 - and it differs from chip series to chip series too. */
1845 int rn = REGNO (patternr[1]);
1846 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1847 switch (rn)
1849 case A0_REGNO:
1850 case A1_REGNO:
1851 case SB_REGNO:
1852 /* The syntax only allows positive offsets, but when the
1853 offsets span the entire memory range, we can simulate
1854 negative offsets by wrapping. */
1855 if (TARGET_A16)
1856 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1857 if (rn == SB_REGNO)
1858 return (offs >= 0 && offs <= 65535 - mode_adjust);
1859 /* A0 or A1 */
1860 return (offs >= -16777216 && offs <= 16777215);
1862 case FB_REGNO:
1863 if (TARGET_A16)
1864 return (offs >= -128 && offs <= 127 - mode_adjust);
1865 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1867 case SP_REGNO:
1868 return (offs >= -128 && offs <= 127 - mode_adjust);
1870 default:
1871 if (IS_PSEUDO (patternr[1], strict))
1872 return 1;
1873 return 0;
1876 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1878 rtx reg = patternr[1];
1880 /* We don't know where the symbol is, so only allow base
1881 registers which support displacements spanning the whole
1882 address range. */
1883 switch (REGNO (reg))
1885 case A0_REGNO:
1886 case A1_REGNO:
1887 /* $sb needs a secondary reload, but since it's involved in
1888 memory address reloads too, we don't deal with it very
1889 well. */
1890 /* case SB_REGNO: */
1891 return 1;
1892 default:
1893 if (IS_PSEUDO (reg, strict))
1894 return 1;
1895 return 0;
1898 return 0;
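/* For example, on an A16 part the HImode address -4[$fb], i.e.
   (plus (reg fb) (const_int -4)), is legitimate because
   -128 <= -4 <= 127 - 1, while -200[$fb] is not and has to be
   legitimized or reloaded (see m32c_legitimize_address below).  */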
1901 /* Implements REG_OK_FOR_BASE_P. */
1903 m32c_reg_ok_for_base_p (rtx x, int strict)
1905 if (GET_CODE (x) != REG)
1906 return 0;
1907 switch (REGNO (x))
1909 case A0_REGNO:
1910 case A1_REGNO:
1911 case SB_REGNO:
1912 case FB_REGNO:
1913 case SP_REGNO:
1914 return 1;
1915 default:
1916 if (IS_PSEUDO (x, strict))
1917 return 1;
1918 return 0;
1922 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1923 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1924 like this:
1925 EB 4B FF mova -128[$fb],$a0
1926 D8 0C FF FF mov.w:Q #0,-1[$a0]
1928 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1929 displacements:
1930 7B F4 stc $fb,$a0
1931 77 54 00 01 sub #256,$a0
1932 D8 08 01 mov.w:Q #0,1[$a0]
1934 If we don't offset (i.e. offset by zero), we end up with:
1935 7B F4 stc $fb,$a0
1936 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1938 We have to subtract *something* so that we have a PLUS rtx to mark
1939 that we've done this reload. The -128 offset will never result in
1940 an 8-bit aN offset, and the payoff for the second case is five
1941 loads *if* those loads are within 256 bytes of the other end of the
1942 frame, so the third case seems best. Note that we subtract the
1943 zero, but detect that in the addhi3 pattern. */
1945 #define BIG_FB_ADJ 0
1947 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1948 worry about is frame base offsets, as $fb has a limited
1949 displacement range. We deal with this by attempting to reload $fb
1950 itself into an address register; that seems to result in the best
1951 code. */
1953 m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1954 rtx oldx ATTRIBUTE_UNUSED,
1955 enum machine_mode mode ATTRIBUTE_UNUSED)
1957 #if DEBUG0
1958 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1959 debug_rtx (*x);
1960 fprintf (stderr, "\n");
1961 #endif
1963 if (GET_CODE (*x) == PLUS
1964 && GET_CODE (XEXP (*x, 0)) == REG
1965 && REGNO (XEXP (*x, 0)) == FB_REGNO
1966 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1967 && (INTVAL (XEXP (*x, 1)) < -128
1968 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1970 /* reload FB to A_REGS */
1971 rtx temp = gen_reg_rtx (Pmode);
1972 *x = copy_rtx (*x);
1973 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
1974 XEXP (*x, 0) = temp;
1975 return 1;
1978 return 0;
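/* For example, the code above rewrites (plus (reg fb) (const_int 300))
   by copying $fb into a fresh Pmode pseudo (intended to end up in an
   address register) and substituting that pseudo for $fb, leaving
   (plus (reg temp) (const_int 300)) whose base register allows the
   larger displacement.  */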
1981 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1983 m32c_legitimize_reload_address (rtx * x,
1984 enum machine_mode mode,
1985 int opnum,
1986 int type, int ind_levels ATTRIBUTE_UNUSED)
1988 #if DEBUG0
1989 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1990 mode_name[mode]);
1991 debug_rtx (*x);
1992 #endif
1994 /* At one point, this function tried to get $fb copied to an address
1995 register, which in theory would maximize sharing, but gcc was
1996 *also* still trying to reload the whole address, and we'd run out
1997 of address registers. So we let gcc do the naive (but safe)
1998 reload instead, when the above function doesn't handle it for us.
2001 The code below is a second attempt at the above. */
2003 if (GET_CODE (*x) == PLUS
2004 && GET_CODE (XEXP (*x, 0)) == REG
2005 && REGNO (XEXP (*x, 0)) == FB_REGNO
2006 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2007 && (INTVAL (XEXP (*x, 1)) < -128
2008 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2010 rtx sum;
2011 int offset = INTVAL (XEXP (*x, 1));
2012 int adjustment = -BIG_FB_ADJ;
2014 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2015 GEN_INT (adjustment));
2016 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2017 if (type == RELOAD_OTHER)
2018 type = RELOAD_FOR_OTHER_ADDRESS;
2019 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2020 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2021 type);
2022 return 1;
2025 if (GET_CODE (*x) == PLUS
2026 && GET_CODE (XEXP (*x, 0)) == PLUS
2027 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2028 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2029 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2030 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2033 if (type == RELOAD_OTHER)
2034 type = RELOAD_FOR_OTHER_ADDRESS;
2035 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2036 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2037 type);
2038 return 1;
2041 return 0;
2044 /* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2045 so we can allow anything. */
2047 m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2049 return 1;
2053 /* Condition Code Status */
2055 #undef TARGET_FIXED_CONDITION_CODE_REGS
2056 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2057 static bool
2058 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2060 *p1 = FLG_REGNO;
2061 *p2 = INVALID_REGNUM;
2062 return true;
2065 /* Describing Relative Costs of Operations */
2067 /* Implements REGISTER_MOVE_COST. We make impossible moves
2068 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2069 no opcodes to do that). We also discourage use of mem* registers
2070 since they're really memory. */
2072 m32c_register_move_cost (enum machine_mode mode, int from, int to)
2074 int cost = COSTS_N_INSNS (3);
2075 int cc = class_contents[from][0] | class_contents[to][0];
2076 /* FIXME: pick real values, but not 2 for now. */
2077 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2079 if (!(cc & ~class_contents[R23_REGS][0]))
2080 cost = COSTS_N_INSNS (1000);
2081 else
2082 cost = COSTS_N_INSNS (80);
2085 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2086 cost = COSTS_N_INSNS (1000);
2088 if (classes_intersect (from, CR_REGS))
2089 cost += COSTS_N_INSNS (5);
2091 if (classes_intersect (to, CR_REGS))
2092 cost += COSTS_N_INSNS (5);
2094 if (from == MEM_REGS || to == MEM_REGS)
2095 cost += COSTS_N_INSNS (50);
2096 else if (classes_intersect (from, MEM_REGS)
2097 || classes_intersect (to, MEM_REGS))
2098 cost += COSTS_N_INSNS (10);
2100 #if DEBUG0
2101 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2102 mode_name[mode], class_names[from], class_names[to], cost);
2103 #endif
2104 return cost;
2107 /* Implements MEMORY_MOVE_COST. */
2109 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2110 int reg_class ATTRIBUTE_UNUSED,
2111 int in ATTRIBUTE_UNUSED)
2113 /* FIXME: pick real values. */
2114 return COSTS_N_INSNS (10);
2117 /* Here we try to describe when we use multiple opcodes for one RTX so
2118 that gcc knows when to use them. */
2119 #undef TARGET_RTX_COSTS
2120 #define TARGET_RTX_COSTS m32c_rtx_costs
2121 static bool
2122 m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
2124 switch (code)
2126 case REG:
2127 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2128 *total += COSTS_N_INSNS (500);
2129 else
2130 *total += COSTS_N_INSNS (1);
2131 return true;
2133 case ASHIFT:
2134 case LSHIFTRT:
2135 case ASHIFTRT:
2136 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2138 /* mov.b r1l, r1h */
2139 *total += COSTS_N_INSNS (1);
2140 return true;
2142 if (INTVAL (XEXP (x, 1)) > 8
2143 || INTVAL (XEXP (x, 1)) < -8)
2145 /* mov.b #N, r1l */
2146 /* mov.b r1l, r1h */
2147 *total += COSTS_N_INSNS (2);
2148 return true;
2150 return true;
2152 case LE:
2153 case LEU:
2154 case LT:
2155 case LTU:
2156 case GT:
2157 case GTU:
2158 case GE:
2159 case GEU:
2160 case NE:
2161 case EQ:
2162 if (outer_code == SET)
2164 *total += COSTS_N_INSNS (2);
2165 return true;
2167 break;
2169 case ZERO_EXTRACT:
2171 rtx dest = XEXP (x, 0);
2172 rtx addr = XEXP (dest, 0);
2173 switch (GET_CODE (addr))
2175 case CONST_INT:
2176 *total += COSTS_N_INSNS (1);
2177 break;
2178 case SYMBOL_REF:
2179 *total += COSTS_N_INSNS (3);
2180 break;
2181 default:
2182 *total += COSTS_N_INSNS (2);
2183 break;
2185 return true;
2187 break;
2189 default:
2190 /* Reasonable default. */
2191 if (TARGET_A16 && GET_MODE(x) == SImode)
2192 *total += COSTS_N_INSNS (2);
2193 break;
2195 return false;
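/* Implements TARGET_ADDRESS_COST.  Plain registers and zero addresses
   are cheapest; 8-bit and 16-bit constant displacements cost
   progressively more, and symbolic addresses cost the most.  */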
2198 #undef TARGET_ADDRESS_COST
2199 #define TARGET_ADDRESS_COST m32c_address_cost
2200 static int
2201 m32c_address_cost (rtx addr)
2203 int i;
2204 /* fprintf(stderr, "\naddress_cost\n");
2205 debug_rtx(addr);*/
2206 switch (GET_CODE (addr))
2208 case CONST_INT:
2209 i = INTVAL (addr);
2210 if (i == 0)
2211 return COSTS_N_INSNS(1);
2212 if (0 < i && i <= 255)
2213 return COSTS_N_INSNS(2);
2214 if (0 < i && i <= 65535)
2215 return COSTS_N_INSNS(3);
2216 return COSTS_N_INSNS(4);
2217 case SYMBOL_REF:
2218 return COSTS_N_INSNS(4);
2219 case REG:
2220 return COSTS_N_INSNS(1);
2221 case PLUS:
2222 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2224 i = INTVAL (XEXP (addr, 1));
2225 if (i == 0)
2226 return COSTS_N_INSNS(1);
2227 if (0 < i && i <= 255)
2228 return COSTS_N_INSNS(2);
2229 if (0 < i && i <= 65535)
2230 return COSTS_N_INSNS(3);
2232 return COSTS_N_INSNS(4);
2233 default:
2234 return 0;
2238 /* Defining the Output Assembler Language */
2240 /* The Overall Framework of an Assembler File */
2242 #undef TARGET_HAVE_NAMED_SECTIONS
2243 #define TARGET_HAVE_NAMED_SECTIONS true
2245 /* Output of Data */
2247 /* We may have 24-bit sizes, which is the native address size.
2248    Currently unused, but provided for completeness. */
2249 #undef TARGET_ASM_INTEGER
2250 #define TARGET_ASM_INTEGER m32c_asm_integer
2251 static bool
2252 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2254 switch (size)
2256 case 3:
2257 fprintf (asm_out_file, "\t.3byte\t");
2258 output_addr_const (asm_out_file, x);
2259 fputc ('\n', asm_out_file);
2260 return true;
2261 case 4:
2262 if (GET_CODE (x) == SYMBOL_REF)
2264 fprintf (asm_out_file, "\t.long\t");
2265 output_addr_const (asm_out_file, x);
2266 fputc ('\n', asm_out_file);
2267 return true;
2269 break;
2271 return default_assemble_integer (x, size, aligned_p);
2274 /* Output of Assembler Instructions */
2276 /* We use a lookup table because the addressing modes are non-orthogonal. */
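/* Each entry maps a print_operand code letter (0 matches any code) and
   an encode_pattern() string to an output template.  Digits in the
   template are replaced by the corresponding captured sub-rtx in
   patternr[], so "m+ri" with template "3[2]" prints e.g. "-4[fb]".  */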
2278 static struct
2280 char code;
2281 char const *pattern;
2282 char const *format;
2284 const conversions[] = {
2285 { 0, "r", "0" },
2287 { 0, "mr", "z[1]" },
2288 { 0, "m+ri", "3[2]" },
2289 { 0, "m+rs", "3[2]" },
2290 { 0, "m+r+si", "4+5[2]" },
2291 { 0, "ms", "1" },
2292 { 0, "mi", "1" },
2293 { 0, "m+si", "2+3" },
2295 { 0, "mmr", "[z[2]]" },
2296 { 0, "mm+ri", "[4[3]]" },
2297 { 0, "mm+rs", "[4[3]]" },
2298 { 0, "mm+r+si", "[5+6[3]]" },
2299 { 0, "mms", "[[2]]" },
2300 { 0, "mmi", "[[2]]" },
2301 { 0, "mm+si", "[4[3]]" },
2303 { 0, "i", "#0" },
2304 { 0, "s", "#0" },
2305 { 0, "+si", "#1+2" },
2306 { 0, "l", "#0" },
2308 { 'l', "l", "0" },
2309 { 'd', "i", "0" },
2310 { 'd', "s", "0" },
2311 { 'd', "+si", "1+2" },
2312 { 'D', "i", "0" },
2313 { 'D', "s", "0" },
2314 { 'D', "+si", "1+2" },
2315 { 'x', "i", "#0" },
2316 { 'X', "i", "#0" },
2317 { 'm', "i", "#0" },
2318 { 'b', "i", "#0" },
2319 { 'B', "i", "0" },
2320 { 'p', "i", "0" },
2322 { 0, 0, 0 }
2325 /* This is in order according to the bitfield that pushm/popm use. */
2326 static char const *pushm_regs[] = {
2327 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2330 /* Implements PRINT_OPERAND. */
2331 void
2332 m32c_print_operand (FILE * file, rtx x, int code)
2334 int i, j, b;
2335 const char *comma;
2336 HOST_WIDE_INT ival;
2337 int unsigned_const = 0;
2338 int force_sign;
2340 /* Multiplies; constants are converted to sign-extended format but
2341 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2342 need. */
2343 if (code == 'u')
2345 unsigned_const = 2;
2346 code = 0;
2348 if (code == 'U')
2350 unsigned_const = 1;
2351 code = 0;
2353 /* This one is only for debugging; you can put it in a pattern to
2354 force this error. */
2355 if (code == '!')
2357 fprintf (stderr, "dj: unreviewed pattern:");
2358 if (current_output_insn)
2359 debug_rtx (current_output_insn);
2360 gcc_unreachable ();
2362 /* PSImode operations are either .w or .l depending on the target. */
2363 if (code == '&')
2365 if (TARGET_A16)
2366 fprintf (file, "w");
2367 else
2368 fprintf (file, "l");
2369 return;
2371 /* Inverted conditionals. */
2372 if (code == 'C')
2374 switch (GET_CODE (x))
2376 case LE:
2377 fputs ("gt", file);
2378 break;
2379 case LEU:
2380 fputs ("gtu", file);
2381 break;
2382 case LT:
2383 fputs ("ge", file);
2384 break;
2385 case LTU:
2386 fputs ("geu", file);
2387 break;
2388 case GT:
2389 fputs ("le", file);
2390 break;
2391 case GTU:
2392 fputs ("leu", file);
2393 break;
2394 case GE:
2395 fputs ("lt", file);
2396 break;
2397 case GEU:
2398 fputs ("ltu", file);
2399 break;
2400 case NE:
2401 fputs ("eq", file);
2402 break;
2403 case EQ:
2404 fputs ("ne", file);
2405 break;
2406 default:
2407 gcc_unreachable ();
2409 return;
2411 /* Regular conditionals. */
2412 if (code == 'c')
2414 switch (GET_CODE (x))
2416 case LE:
2417 fputs ("le", file);
2418 break;
2419 case LEU:
2420 fputs ("leu", file);
2421 break;
2422 case LT:
2423 fputs ("lt", file);
2424 break;
2425 case LTU:
2426 fputs ("ltu", file);
2427 break;
2428 case GT:
2429 fputs ("gt", file);
2430 break;
2431 case GTU:
2432 fputs ("gtu", file);
2433 break;
2434 case GE:
2435 fputs ("ge", file);
2436 break;
2437 case GEU:
2438 fputs ("geu", file);
2439 break;
2440 case NE:
2441 fputs ("ne", file);
2442 break;
2443 case EQ:
2444 fputs ("eq", file);
2445 break;
2446 default:
2447 gcc_unreachable ();
2449 return;
2451 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2452 operand. */
2453 if (code == 'h' && GET_MODE (x) == SImode)
2455 x = m32c_subreg (HImode, x, SImode, 0);
2456 code = 0;
2458 if (code == 'H' && GET_MODE (x) == SImode)
2460 x = m32c_subreg (HImode, x, SImode, 2);
2461 code = 0;
2463 if (code == 'h' && GET_MODE (x) == HImode)
2465 x = m32c_subreg (QImode, x, HImode, 0);
2466 code = 0;
2468 if (code == 'H' && GET_MODE (x) == HImode)
2470 /* We can't actually represent this as an rtx. Do it here. */
2471 if (GET_CODE (x) == REG)
2473 switch (REGNO (x))
2475 case R0_REGNO:
2476 fputs ("r0h", file);
2477 return;
2478 case R1_REGNO:
2479 fputs ("r1h", file);
2480 return;
2481 default:
2482 gcc_unreachable();
2485 /* This should be a MEM. */
2486 x = m32c_subreg (QImode, x, HImode, 1);
2487 code = 0;
2489 /* This is for BMcond, which always wants word register names. */
2490 if (code == 'h' && GET_MODE (x) == QImode)
2492 if (GET_CODE (x) == REG)
2493 x = gen_rtx_REG (HImode, REGNO (x));
2494 code = 0;
2496 /* 'x' and 'X' need to be ignored for non-immediates. */
2497 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2498 code = 0;
2500 encode_pattern (x);
2501 force_sign = 0;
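  /* Find the conversion entry whose code letter and encoded pattern
     match, then emit its format string, expanding each digit into the
     corresponding captured sub-rtx.  */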
2502 for (i = 0; conversions[i].pattern; i++)
2503 if (conversions[i].code == code
2504 && streq (conversions[i].pattern, pattern))
2506 for (j = 0; conversions[i].format[j]; j++)
2507 /* backslash quotes the next character in the output pattern. */
2508 if (conversions[i].format[j] == '\\')
2510 fputc (conversions[i].format[j + 1], file);
2511 j++;
2513 /* Digits in the output pattern indicate that the
2514 corresponding RTX is to be output at that point. */
2515 else if (ISDIGIT (conversions[i].format[j]))
2517 rtx r = patternr[conversions[i].format[j] - '0'];
2518 switch (GET_CODE (r))
2520 case REG:
2521 fprintf (file, "%s",
2522 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2523 break;
2524 case CONST_INT:
2525 switch (code)
2527 case 'b':
2528 case 'B':
2530 int v = INTVAL (r);
2531 int i = (int) exact_log2 (v);
2532 if (i == -1)
2533 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2534 if (i == -1)
2535 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2536 /* Bit position. */
2537 fprintf (file, "%d", i);
2539 break;
2540 case 'x':
2541 /* Unsigned byte. */
2542 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2543 INTVAL (r) & 0xff);
2544 break;
2545 case 'X':
2546 /* Unsigned word. */
2547 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2548 INTVAL (r) & 0xffff);
2549 break;
2550 case 'p':
2551 /* pushm and popm encode a register set into a single byte. */
2552 comma = "";
2553 for (b = 7; b >= 0; b--)
2554 if (INTVAL (r) & (1 << b))
2556 fprintf (file, "%s%s", comma, pushm_regs[b]);
2557 comma = ",";
2559 break;
2560 case 'm':
2561 /* "Minus". Output -X */
2562 ival = (-INTVAL (r) & 0xffff);
2563 if (ival & 0x8000)
2564 ival = ival - 0x10000;
2565 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2566 break;
2567 default:
2568 ival = INTVAL (r);
2569 if (conversions[i].format[j + 1] == '[' && ival < 0)
2571 /* We can simulate negative displacements by
2572 taking advantage of address space
2573 wrapping when the offset can span the
2574 entire address range. */
2575 rtx base =
2576 patternr[conversions[i].format[j + 2] - '0'];
2577 if (GET_CODE (base) == REG)
2578 switch (REGNO (base))
2580 case A0_REGNO:
2581 case A1_REGNO:
2582 if (TARGET_A24)
2583 ival = 0x1000000 + ival;
2584 else
2585 ival = 0x10000 + ival;
2586 break;
2587 case SB_REGNO:
2588 if (TARGET_A16)
2589 ival = 0x10000 + ival;
2590 break;
2593 else if (code == 'd' && ival < 0 && j == 0)
2594 /* The "mova" opcode is used to do addition by
2595 computing displacements, but again, we need
2596 displacements to be unsigned *if* they're
2597 the only component of the displacement
2598 (i.e. no "symbol-4" type displacement). */
2599 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2601 if (conversions[i].format[j] == '0')
2603 /* More conversions to unsigned. */
2604 if (unsigned_const == 2)
2605 ival &= 0xffff;
2606 if (unsigned_const == 1)
2607 ival &= 0xff;
2609 if (streq (conversions[i].pattern, "mi")
2610 || streq (conversions[i].pattern, "mmi"))
2612 /* Integers used as addresses are unsigned. */
2613 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2615 if (force_sign && ival >= 0)
2616 fputc ('+', file);
2617 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2618 break;
2620 break;
2621 case CONST_DOUBLE:
2622 /* We don't have const_double constants. If it
2623 happens, make it obvious. */
2624 fprintf (file, "[const_double 0x%lx]",
2625 (unsigned long) CONST_DOUBLE_HIGH (r));
2626 break;
2627 case SYMBOL_REF:
2628 assemble_name (file, XSTR (r, 0));
2629 break;
2630 case LABEL_REF:
2631 output_asm_label (r);
2632 break;
2633 default:
2634 fprintf (stderr, "don't know how to print this operand:");
2635 debug_rtx (r);
2636 gcc_unreachable ();
2639 else
2641 if (conversions[i].format[j] == 'z')
2643 /* Some addressing modes *must* have a displacement,
2644 so insert a zero here if needed. */
2645 int k;
2646 for (k = j + 1; conversions[i].format[k]; k++)
2647 if (ISDIGIT (conversions[i].format[k]))
2649 rtx reg = patternr[conversions[i].format[k] - '0'];
2650 if (GET_CODE (reg) == REG
2651 && (REGNO (reg) == SB_REGNO
2652 || REGNO (reg) == FB_REGNO
2653 || REGNO (reg) == SP_REGNO))
2654 fputc ('0', file);
2656 continue;
2658 /* Signed displacements off symbols need to have signs
2659 blended cleanly. */
2660 if (conversions[i].format[j] == '+'
2661 && (!code || code == 'D' || code == 'd')
2662 && ISDIGIT (conversions[i].format[j + 1])
2663 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2664 == CONST_INT))
2666 force_sign = 1;
2667 continue;
2669 fputc (conversions[i].format[j], file);
2671 break;
2673 if (!conversions[i].pattern)
2675 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2676 pattern);
2677 debug_rtx (x);
2678 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2681 return;
2684 /* Implements PRINT_OPERAND_PUNCT_VALID_P. See m32c_print_operand
2685 above for descriptions of what these do. */
2687 m32c_print_operand_punct_valid_p (int c)
2689 if (c == '&' || c == '!')
2690 return 1;
2691 return 0;
2694 /* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2695 void
2696 m32c_print_operand_address (FILE * stream, rtx address)
2698 gcc_assert (GET_CODE (address) == MEM);
2699 m32c_print_operand (stream, XEXP (address, 0), 0);
2702 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2703 differently than general registers. */
2704 void
2705 m32c_output_reg_push (FILE * s, int regno)
2707 if (regno == FLG_REGNO)
2708 fprintf (s, "\tpushc\tflg\n");
2709 else
2710 fprintf (s, "\tpush.%c\t%s\n",
2711 " bwll"[reg_push_size (regno)], reg_names[regno]);
2714 /* Likewise for ASM_OUTPUT_REG_POP. */
2715 void
2716 m32c_output_reg_pop (FILE * s, int regno)
2718 if (regno == FLG_REGNO)
2719 fprintf (s, "\tpopc\tflg\n");
2720 else
2721 fprintf (s, "\tpop.%c\t%s\n",
2722 " bwll"[reg_push_size (regno)], reg_names[regno]);
2725 /* Defining target-specific uses of `__attribute__' */
2727 /* Used to simplify the logic below. Find the attributes wherever
2728 they may be. */
2729 #define M32C_ATTRIBUTES(decl) \
2730 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2731 : DECL_ATTRIBUTES (decl) \
2732 ? (DECL_ATTRIBUTES (decl)) \
2733 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2735 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2736 static int
2737 interrupt_p (tree node ATTRIBUTE_UNUSED)
2739 tree list = M32C_ATTRIBUTES (node);
2740 while (list)
2742 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2743 return 1;
2744 list = TREE_CHAIN (list);
2746 return 0;
2749 static tree
2750 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2751 tree name ATTRIBUTE_UNUSED,
2752 tree args ATTRIBUTE_UNUSED,
2753 int flags ATTRIBUTE_UNUSED,
2754 bool * no_add_attrs ATTRIBUTE_UNUSED)
2756 return NULL_TREE;
2759 /* Returns TRUE if the given tree has the "function_vector" attribute. */
2761 m32c_special_page_vector_p (tree func)
2763 if (TREE_CODE (func) != FUNCTION_DECL)
2764 return 0;
2766 tree list = M32C_ATTRIBUTES (func);
2767 while (list)
2769 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2770 return 1;
2771 list = TREE_CHAIN (list);
2773 return 0;
2776 static tree
2777 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2778 tree name ATTRIBUTE_UNUSED,
2779 tree args ATTRIBUTE_UNUSED,
2780 int flags ATTRIBUTE_UNUSED,
2781 bool * no_add_attrs ATTRIBUTE_UNUSED)
2783 if (TARGET_R8C)
2785 /* The attribute is not supported for the R8C target. */
2786 warning (OPT_Wattributes,
2787 "`%s' attribute is not supported for the R8C target",
2788 IDENTIFIER_POINTER (name));
2789 *no_add_attrs = true;
2791 else if (TREE_CODE (*node) != FUNCTION_DECL)
2793 /* The attribute must be applied to functions only. */
2794 warning (OPT_Wattributes,
2795 "`%s' attribute applies only to functions",
2796 IDENTIFIER_POINTER (name));
2797 *no_add_attrs = true;
2799 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2801 /* The argument must be a constant integer. */
2802 warning (OPT_Wattributes,
2803 "`%s' attribute argument not an integer constant",
2804 IDENTIFIER_POINTER (name));
2805 *no_add_attrs = true;
2807 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2808 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2810 /* The argument value must be between 18 and 255. */
2811 warning (OPT_Wattributes,
2812 "`%s' attribute argument should be between 18 and 255",
2813 IDENTIFIER_POINTER (name));
2814 *no_add_attrs = true;
2816 return NULL_TREE;
2819 /* If the function is assigned the attribute 'function_vector', return
2820    its function vector number; otherwise return zero. */
2822 current_function_special_page_vector (rtx x)
2824 int num;
2826 if ((GET_CODE(x) == SYMBOL_REF)
2827 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2829 tree t = SYMBOL_REF_DECL (x);
2831 if (TREE_CODE (t) != FUNCTION_DECL)
2832 return 0;
2834 tree list = M32C_ATTRIBUTES (t);
2835 while (list)
2837 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2839 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2840 return num;
2843 list = TREE_CHAIN (list);
2846 return 0;
2848 else
2849 return 0;
2852 #undef TARGET_ATTRIBUTE_TABLE
2853 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2854 static const struct attribute_spec m32c_attribute_table[] = {
2855 {"interrupt", 0, 0, false, false, false, interrupt_handler},
2856 {"function_vector", 1, 1, true, false, false, function_vector_handler},
2857 {0, 0, 0, 0, 0, 0, 0}
2860 #undef TARGET_COMP_TYPE_ATTRIBUTES
2861 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2862 static int
2863 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
2864 const_tree type2 ATTRIBUTE_UNUSED)
2866 /* 0=incompatible 1=compatible 2=warning */
2867 return 1;
2870 #undef TARGET_INSERT_ATTRIBUTES
2871 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2872 static void
2873 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2874 tree * attr_ptr ATTRIBUTE_UNUSED)
2876 /* Nothing to do here. */
2879 /* Predicates */
2881 /* This is a list of legal subregs of hard regs. */
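/* byte_mask has one bit set for each SUBREG_BYTE offset that is allowed;
   legal_when is 1 if the subreg is always valid, 16 if it is valid only
   with 16-bit addressing (-A16), and 24 if only with 24-bit addressing
   (-A24).  */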
2882 static const struct {
2883 unsigned char outer_mode_size;
2884 unsigned char inner_mode_size;
2885 unsigned char byte_mask;
2886 unsigned char legal_when;
2887 unsigned int regno;
2888 } legal_subregs[] = {
2889 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
2890 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
2891 {1, 2, 0x01, 1, A0_REGNO},
2892 {1, 2, 0x01, 1, A1_REGNO},
2894 {1, 4, 0x01, 1, A0_REGNO},
2895 {1, 4, 0x01, 1, A1_REGNO},
2897 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
2898 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
2899 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
2900 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
2901 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
2903 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
2906 /* Returns TRUE if OP is a subreg of a hard reg which we don't
2907 support. */
2908 bool
2909 m32c_illegal_subreg_p (rtx op)
2911 int offset;
2912 unsigned int i;
2913 int src_mode, dest_mode;
2915 if (GET_CODE (op) != SUBREG)
2916 return false;
2918 dest_mode = GET_MODE (op);
2919 offset = SUBREG_BYTE (op);
2920 op = SUBREG_REG (op);
2921 src_mode = GET_MODE (op);
2923 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
2924 return false;
2925 if (GET_CODE (op) != REG)
2926 return false;
2927 if (REGNO (op) >= MEM0_REGNO)
2928 return false;
2930 offset = (1 << offset);
2932 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
2933 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
2934 && legal_subregs[i].regno == REGNO (op)
2935 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
2936 && legal_subregs[i].byte_mask & offset)
2938 switch (legal_subregs[i].legal_when)
2940 case 1:
2941 return false;
2942 case 16:
2943 if (TARGET_A16)
2944 return false;
2945 break;
2946 case 24:
2947 if (TARGET_A24)
2948 return false;
2949 break;
2952 return true;
2955 /* Returns TRUE if we support a move between the first two operands.
2956 At the moment, we just want to discourage mem to mem moves until
2957 after reload, because reload has a hard time with our limited
2958 number of address registers, and we can get into a situation where
2959 we need three of them when we only have two. */
2960 bool
2961 m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2963 rtx op0 = operands[0];
2964 rtx op1 = operands[1];
2966 if (TARGET_A24)
2967 return true;
2969 #define DEBUG_MOV_OK 0
2970 #if DEBUG_MOV_OK
2971 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2972 debug_rtx (op0);
2973 debug_rtx (op1);
2974 #endif
2976 if (GET_CODE (op0) == SUBREG)
2977 op0 = XEXP (op0, 0);
2978 if (GET_CODE (op1) == SUBREG)
2979 op1 = XEXP (op1, 0);
2981 if (GET_CODE (op0) == MEM
2982 && GET_CODE (op1) == MEM
2983 && ! reload_completed)
2985 #if DEBUG_MOV_OK
2986 fprintf (stderr, " - no, mem to mem\n");
2987 #endif
2988 return false;
2991 #if DEBUG_MOV_OK
2992 fprintf (stderr, " - ok\n");
2993 #endif
2994 return true;
2997 /* Returns TRUE if two consecutive HImode mov instructions, generated
2998    to move an immediate double-word value into a double-word variable,
2999    can be combined into a single SImode mov instruction. */
3000 bool
3001 m32c_immd_dbl_mov (rtx * operands,
3002 enum machine_mode mode ATTRIBUTE_UNUSED)
3004 int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3005 const char *str1;
3006 const char *str2;
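  /* Three address shapes are recognized: (1) a bare symbol paired with
     symbol+offset, (2) symbol+offset in both operands, and (3) FB-relative
     addresses with constant offsets in both.  The checks below then
     compare the symbols (or offsets) of the two halves.  */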
3008 if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3009 && MEM_SCALAR_P (operands[0])
3010 && !MEM_IN_STRUCT_P (operands[0])
3011 && GET_CODE (XEXP (operands[2], 0)) == CONST
3012 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3013 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3014 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3015 && MEM_SCALAR_P (operands[2])
3016 && !MEM_IN_STRUCT_P (operands[2]))
3017 flag = 1;
3019 else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3020 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3021 && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3022 && MEM_SCALAR_P (operands[0])
3023 && !MEM_IN_STRUCT_P (operands[0])
3024 && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
3025 && GET_CODE (XEXP (operands[2], 0)) == CONST
3026 && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3027 && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3028 && MEM_SCALAR_P (operands[2])
3029 && !MEM_IN_STRUCT_P (operands[2]))
3030 flag = 2;
3032 else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3033 && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3034 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3035 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3036 && MEM_SCALAR_P (operands[0])
3037 && !MEM_IN_STRUCT_P (operands[0])
3038 && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
3039 && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3040 && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3041 && MEM_SCALAR_P (operands[2])
3042 && !MEM_IN_STRUCT_P (operands[2]))
3043 flag = 3;
3045 else
3046 return false;
3048 switch (flag)
3050 case 1:
3051 str1 = XSTR (XEXP (operands[0], 0), 0);
3052 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3053 if (strcmp (str1, str2) == 0)
3054 okflag = 1;
3055 else
3056 okflag = 0;
3057 break;
3058 case 2:
3059 str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3060 str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3061 if (strcmp(str1,str2) == 0)
3062 okflag = 1;
3063 else
3064 okflag = 0;
3065 break;
3066 case 3:
3067 offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3068 offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
3069 offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3070 if (((offset2-offset1) == 2) && offsetsign != 0)
3071 okflag = 1;
3072 else
3073 okflag = 0;
3074 break;
3075 default:
3076 okflag = 0;
3079 if (okflag == 1)
3081 HOST_WIDE_INT val;
3082 operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
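  /* operands[5] is the combined SImode constant: operands[1] in the low
     word and operands[3] in the high word.  */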
3084 val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
3085 operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3087 return true;
3090 return false;
3093 /* Expanders */
3095 /* Subregs are non-orthogonal for us, because our registers are all
3096 different sizes. */
3097 static rtx
3098 m32c_subreg (enum machine_mode outer,
3099 rtx x, enum machine_mode inner, int byte)
3101 int r, nr = -1;
3103 /* When converting MEMs to different modes of the same size, we just
3104    rewrite them. */
3105 if (GET_CODE (x) == SUBREG
3106 && SUBREG_BYTE (x) == 0
3107 && GET_CODE (SUBREG_REG (x)) == MEM
3108 && (GET_MODE_SIZE (GET_MODE (x))
3109 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3111 rtx oldx = x;
3112 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3113 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3116 /* Push/pop get done as smaller push/pops. */
3117 if (GET_CODE (x) == MEM
3118 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3119 || GET_CODE (XEXP (x, 0)) == POST_INC))
3120 return gen_rtx_MEM (outer, XEXP (x, 0));
3121 if (GET_CODE (x) == SUBREG
3122 && GET_CODE (XEXP (x, 0)) == MEM
3123 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3124 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3125 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3127 if (GET_CODE (x) != REG)
3128 return simplify_gen_subreg (outer, x, inner, byte);
3130 r = REGNO (x);
3131 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3132 return simplify_gen_subreg (outer, x, inner, byte);
3134 if (IS_MEM_REGNO (r))
3135 return simplify_gen_subreg (outer, x, inner, byte);
3137 /* This is where the complexities of our register layout are
3138 described. */
3139 if (byte == 0)
3140 nr = r;
3141 else if (outer == HImode)
3143 if (r == R0_REGNO && byte == 2)
3144 nr = R2_REGNO;
3145 else if (r == R0_REGNO && byte == 4)
3146 nr = R1_REGNO;
3147 else if (r == R0_REGNO && byte == 6)
3148 nr = R3_REGNO;
3149 else if (r == R1_REGNO && byte == 2)
3150 nr = R3_REGNO;
3151 else if (r == A0_REGNO && byte == 2)
3152 nr = A1_REGNO;
3154 else if (outer == SImode)
3156 if (r == R0_REGNO && byte == 0)
3157 nr = R0_REGNO;
3158 else if (r == R0_REGNO && byte == 4)
3159 nr = R1_REGNO;
3161 if (nr == -1)
3163 fprintf (stderr, "m32c_subreg %s %s %d\n",
3164 mode_name[outer], mode_name[inner], byte);
3165 debug_rtx (x);
3166 gcc_unreachable ();
3168 return gen_rtx_REG (outer, nr);
3171 /* Used to emit move instructions. We split some moves,
3172 and avoid mem-mem moves. */
3174 m32c_prepare_move (rtx * operands, enum machine_mode mode)
3176 if (TARGET_A16 && mode == PSImode)
3177 return m32c_split_move (operands, mode, 1);
3178 if ((GET_CODE (operands[0]) == MEM)
3179 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3181 rtx pmv = XEXP (operands[0], 0);
3182 rtx dest_reg = XEXP (pmv, 0);
3183 rtx dest_mod = XEXP (pmv, 1);
3185 emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3186 operands[0] = gen_rtx_MEM (mode, dest_reg);
3188 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3189 operands[1] = copy_to_mode_reg (mode, operands[1]);
3190 return 0;
3193 #define DEBUG_SPLIT 0
3195 /* Returns TRUE if the given PSImode move should be split. We split
3196    all r8c/m16c moves, since those chips don't support them, and
3197    POP.L, since we can only *push* SImode. */
3199 m32c_split_psi_p (rtx * operands)
3201 #if DEBUG_SPLIT
3202 fprintf (stderr, "\nm32c_split_psi_p\n");
3203 debug_rtx (operands[0]);
3204 debug_rtx (operands[1]);
3205 #endif
3206 if (TARGET_A16)
3208 #if DEBUG_SPLIT
3209 fprintf (stderr, "yes, A16\n");
3210 #endif
3211 return 1;
3213 if (GET_CODE (operands[1]) == MEM
3214 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3216 #if DEBUG_SPLIT
3217 fprintf (stderr, "yes, pop.l\n");
3218 #endif
3219 return 1;
3221 #if DEBUG_SPLIT
3222 fprintf (stderr, "no, default\n");
3223 #endif
3224 return 0;
3227 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3228 (define_expand), 1 if it is not optional (define_insn_and_split),
3229    and 3 for define_split (alternate API). */
3231 m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3233 rtx s[4], d[4];
3234 int parts, si, di, rev = 0;
3235 int rv = 0, opi = 2;
3236 enum machine_mode submode = HImode;
3237 rtx *ops, local_ops[10];
3239 /* define_split modifies the existing operands, but the other two
3240 emit new insns. OPS is where we store the operand pairs, which
3241 we emit later. */
3242 if (split_all == 3)
3243 ops = operands;
3244 else
3245 ops = local_ops;
3247 /* DImode is split into SImode parts; everything else is split into
     the HImode default. */
3248 if (mode == DImode)
3249 submode = SImode;
3251 /* Before splitting mem-mem moves, force one operand into a
3252 register. */
3253 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3255 #if DEBUG0
3256 fprintf (stderr, "force_reg...\n");
3257 debug_rtx (operands[1]);
3258 #endif
3259 operands[1] = force_reg (mode, operands[1]);
3260 #if DEBUG0
3261 debug_rtx (operands[1]);
3262 #endif
3265 parts = 2;
3267 #if DEBUG_SPLIT
3268 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3269 split_all);
3270 debug_rtx (operands[0]);
3271 debug_rtx (operands[1]);
3272 #endif
3274 /* Note that split_all is not used to select the API after this
3275    point, so it's safe to set it to 3 even with define_insn. */
3276 /* None of the chips can move SI operands to sp-relative addresses,
3277 so we always split those. */
3278 if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3279 split_all = 3;
3281 /* We don't need to split these. */
3282 if (TARGET_A24
3283 && split_all != 3
3284 && (mode == SImode || mode == PSImode)
3285 && !(GET_CODE (operands[1]) == MEM
3286 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3287 return 0;
3289 /* First, enumerate the subregs we'll be dealing with. */
3290 for (si = 0; si < parts; si++)
3292 d[si] =
3293 m32c_subreg (submode, operands[0], mode,
3294 si * GET_MODE_SIZE (submode));
3295 s[si] =
3296 m32c_subreg (submode, operands[1], mode,
3297 si * GET_MODE_SIZE (submode));
3300 /* Split pushes by emitting a sequence of smaller pushes. */
3301 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3303 for (si = parts - 1; si >= 0; si--)
3305 ops[opi++] = gen_rtx_MEM (submode,
3306 gen_rtx_PRE_DEC (Pmode,
3307 gen_rtx_REG (Pmode,
3308 SP_REGNO)));
3309 ops[opi++] = s[si];
3312 rv = 1;
3314 /* Likewise for pops. */
3315 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3317 for (di = 0; di < parts; di++)
3319 ops[opi++] = d[di];
3320 ops[opi++] = gen_rtx_MEM (submode,
3321 gen_rtx_POST_INC (Pmode,
3322 gen_rtx_REG (Pmode,
3323 SP_REGNO)));
3325 rv = 1;
3327 else if (split_all)
3329 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3330 for (di = 0; di < parts - 1; di++)
3331 for (si = di + 1; si < parts; si++)
3332 if (reg_mentioned_p (d[di], s[si]))
3333 rev = 1;
3335 if (rev)
3336 for (si = 0; si < parts; si++)
3338 ops[opi++] = d[si];
3339 ops[opi++] = s[si];
3341 else
3342 for (si = parts - 1; si >= 0; si--)
3344 ops[opi++] = d[si];
3345 ops[opi++] = s[si];
3347 rv = 1;
3349 /* Now emit any moves we may have accumulated. */
3350 if (rv && split_all != 3)
3352 int i;
3353 for (i = 2; i < opi; i += 2)
3354 emit_move_insn (ops[i], ops[i + 1]);
3356 return rv;
3359 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3360 the like. For the R8C they expect one of the addresses to be in
3361 R1L:An so we need to arrange for that. Otherwise, it's just a
3362 matter of picking out the operands we want and emitting the right
3363 pattern for them. All these expanders, which correspond to
3364 patterns in blkmov.md, must return nonzero if they expand the insn,
3365 or zero if they should FAIL. */
3367 /* This is a memset() opcode. All operands are implied, so we need to
3368 arrange for them to be in the right registers. The opcode wants
3369 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3370 the count (HI), and $2 the value (QI). */
3372 m32c_expand_setmemhi(rtx *operands)
3374 rtx desta, count, val;
3375 rtx desto, counto;
3377 desta = XEXP (operands[0], 0);
3378 count = operands[1];
3379 val = operands[2];
3381 desto = gen_reg_rtx (Pmode);
3382 counto = gen_reg_rtx (HImode);
3384 if (GET_CODE (desta) != REG
3385 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3386 desta = copy_to_mode_reg (Pmode, desta);
3388 /* This looks like an arbitrary restriction, but this is by far the
3389 most common case. For counts 8..14 this actually results in
3390 smaller code with no speed penalty because the half-sized
3391 constant can be loaded with a shorter opcode. */
3392 if (GET_CODE (count) == CONST_INT
3393 && GET_CODE (val) == CONST_INT
3394 && ! (INTVAL (count) & 1)
3395 && (INTVAL (count) > 1)
3396 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
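      /* Replicate the byte into both halves of a word and halve the
	 count, so the word-at-a-time opcode can be used.  */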
3398 unsigned v = INTVAL (val) & 0xff;
3399 v = v | (v << 8);
3400 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3401 val = copy_to_mode_reg (HImode, GEN_INT (v));
3402 if (TARGET_A16)
3403 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3404 else
3405 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3406 return 1;
3409 /* This is the generalized memset() case. */
3410 if (GET_CODE (val) != REG
3411 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3412 val = copy_to_mode_reg (QImode, val);
3414 if (GET_CODE (count) != REG
3415 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3416 count = copy_to_mode_reg (HImode, count);
3418 if (TARGET_A16)
3419 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3420 else
3421 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3423 return 1;
3426 /* This is a memcpy() opcode. All operands are implied, so we need to
3427 arrange for them to be in the right registers. The opcode wants
3428 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3429 is the source (MEM:BLK), and $2 the count (HI). */
3431 m32c_expand_movmemhi(rtx *operands)
3433 rtx desta, srca, count;
3434 rtx desto, srco, counto;
3436 desta = XEXP (operands[0], 0);
3437 srca = XEXP (operands[1], 0);
3438 count = operands[2];
3440 desto = gen_reg_rtx (Pmode);
3441 srco = gen_reg_rtx (Pmode);
3442 counto = gen_reg_rtx (HImode);
3444 if (GET_CODE (desta) != REG
3445 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3446 desta = copy_to_mode_reg (Pmode, desta);
3448 if (GET_CODE (srca) != REG
3449 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3450 srca = copy_to_mode_reg (Pmode, srca);
3452 /* Similar to setmem, but we don't need to check the value. */
3453 if (GET_CODE (count) == CONST_INT
3454 && ! (INTVAL (count) & 1)
3455 && (INTVAL (count) > 1))
3457 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3458 if (TARGET_A16)
3459 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3460 else
3461 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3462 return 1;
3465 /* This is the generalized memcpy() case. */
3466 if (GET_CODE (count) != REG
3467 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3468 count = copy_to_mode_reg (HImode, count);
3470 if (TARGET_A16)
3471 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3472 else
3473 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3475 return 1;
3478 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3479 the copy, which should point to the NUL at the end of the string,
3480 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3481 Since our opcode leaves the destination pointing *after* the NUL,
3482 we must emit an adjustment. */
3484 m32c_expand_movstr(rtx *operands)
3486 rtx desta, srca;
3487 rtx desto, srco;
3489 desta = XEXP (operands[1], 0);
3490 srca = XEXP (operands[2], 0);
3492 desto = gen_reg_rtx (Pmode);
3493 srco = gen_reg_rtx (Pmode);
3495 if (GET_CODE (desta) != REG
3496 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3497 desta = copy_to_mode_reg (Pmode, desta);
3499 if (GET_CODE (srca) != REG
3500 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3501 srca = copy_to_mode_reg (Pmode, srca);
3503 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3504 /* desto ends up being a1, which allows this type of add through MOVA. */
3505 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3507 return 1;
3510 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3511 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3512 $2 is the other (MEM:BLK). We must do the comparison, and then
3513 convert the flags to a signed integer result. */
3515 m32c_expand_cmpstr(rtx *operands)
3517 rtx src1a, src2a;
3519 src1a = XEXP (operands[1], 0);
3520 src2a = XEXP (operands[2], 0);
3522 if (GET_CODE (src1a) != REG
3523 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3524 src1a = copy_to_mode_reg (Pmode, src1a);
3526 if (GET_CODE (src2a) != REG
3527 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3528 src2a = copy_to_mode_reg (Pmode, src2a);
3530 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3531 emit_insn (gen_cond_to_int (operands[0]));
3533 return 1;
3537 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3539 static shift_gen_func
3540 shift_gen_func_for (int mode, int code)
3542 #define GFF(m,c,f) if (mode == m && code == c) return f
3543 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3544 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3545 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3546 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3547 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3548 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3549 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3550 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3551 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3552 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3553 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3554 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3555 #undef GFF
3556 gcc_unreachable ();
3559 /* The m32c only has one shift, but it takes a signed count. GCC
3560 doesn't want this, so we fake it by negating any shift count when
3561 we're pretending to shift the other way. Also, the shift count is
3562 limited to -8..8. It's slightly better to use two shifts for 9..15
3563 than to load the count into r1h, so we do that too. */
3565 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3567 enum machine_mode mode = GET_MODE (operands[0]);
3568 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3569 rtx temp;
3571 if (GET_CODE (operands[2]) == CONST_INT)
3573 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3574 int count = INTVAL (operands[2]) * scale;
3576 while (count > maxc)
3578 temp = gen_reg_rtx (mode);
3579 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3580 operands[1] = temp;
3581 count -= maxc;
3583 while (count < -maxc)
3585 temp = gen_reg_rtx (mode);
3586 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3587 operands[1] = temp;
3588 count += maxc;
3590 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3591 return 1;
3594 temp = gen_reg_rtx (QImode);
3595 if (scale < 0)
3596 /* The pattern has a NEG that corresponds to this. */
3597 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3598 else if (TARGET_A16 && mode == SImode)
3599 /* We copy the count because the code below may modify it, and we
3600    don't want to modify the original value. */
3601 emit_move_insn (temp, operands[2]);
3602 else
3603 /* We'll only use it for the shift, no point emitting a move. */
3604 temp = operands[2];
3606 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3608 /* The m16c has a limit of -16..16 for SI shifts, even when the
3609 shift count is in a register. Since there are so many targets
3610 of these shifts, it's better to expand the RTL here than to
3611 call a helper function.
3613 The resulting code looks something like this:
3615 cmp.b r1h,-16
3616 jge.b 1f
3617 shl.l -16,dest
3618 add.b r1h,16
3619 1f: cmp.b r1h,16
3620 jle.b 1f
3621 shl.l 16,dest
3622 sub.b r1h,16
3623 1f: shl.l r1h,dest
3625 We take advantage of the fact that "negative" shifts are
3626 undefined to skip one of the comparisons. */
3628 rtx count;
3629 rtx label, lref, insn, tempvar;
3631 emit_move_insn (operands[0], operands[1]);
3633 count = temp;
3634 label = gen_label_rtx ();
3635 lref = gen_rtx_LABEL_REF (VOIDmode, label);
3636 LABEL_NUSES (label) ++;
3638 tempvar = gen_reg_rtx (mode);
3640 if (shift_code == ASHIFT)
3642 /* This is a left shift. We only need to check positive counts. */
3643 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3644 count, GEN_INT (16), label));
3645 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3646 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3647 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3648 emit_label_after (label, insn);
3650 else
3652 /* This is a right shift. We only need to check negative counts. */
3653 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3654 count, GEN_INT (-16), label));
3655 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3656 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3657 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3658 emit_label_after (label, insn);
3660 operands[1] = operands[0];
3661 emit_insn (func (operands[0], operands[0], count));
3662 return 1;
3665 operands[2] = temp;
3666 return 0;
3669 /* The m32c has a limited range of operations that work on PSImode
3670 values; we have to expand to SI, do the math, and truncate back to
3671 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3672 those cases. */
3673 void
3674 m32c_expand_neg_mulpsi3 (rtx * operands)
3676 /* operands: a = b * i */
3677 rtx temp1; /* b as SI */
3678 rtx scale /* i as SI */;
3679 rtx temp2; /* a*b as SI */
3681 temp1 = gen_reg_rtx (SImode);
3682 temp2 = gen_reg_rtx (SImode);
3683 if (GET_CODE (operands[2]) != CONST_INT)
3685 scale = gen_reg_rtx (SImode);
3686 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3688 else
3689 scale = copy_to_mode_reg (SImode, operands[2]);
3691 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3692 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3693 emit_insn (gen_truncsipsi2 (operands[0], temp2));
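/* Operands of a pending compare: m32c_pend_compare records them and
   m32c_unpend_compare emits the actual compare insn; m32c_expand_scc
   and m32c_expand_movcc also build conditions from them.  */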
3696 static rtx compare_op0, compare_op1;
3698 void
3699 m32c_pend_compare (rtx *operands)
3701 compare_op0 = operands[0];
3702 compare_op1 = operands[1];
3705 void
3706 m32c_unpend_compare (void)
3708 switch (GET_MODE (compare_op0))
3710     case QImode:
3711       emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
           break;
3712     case HImode:
3713       emit_insn (gen_cmphi_op (compare_op0, compare_op1));
           break;
3714     case PSImode:
3715       emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
           break;
3716     default:
3717       /* Just to silence the "missing case" warnings. */ ;
3721 void
3722 m32c_expand_scc (int code, rtx *operands)
3724 enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3726 emit_insn (gen_rtx_SET (mode,
3727 operands[0],
3728 gen_rtx_fmt_ee (code,
3729 mode,
3730 compare_op0,
3731 compare_op1)));
3734 /* Pattern Output Functions */
3736 /* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3737 match_operand rtx's OP. */
3739 m32c_cmp_flg_0 (rtx cmp)
3741 return gen_rtx_fmt_ee (GET_CODE (cmp),
3742 GET_MODE (cmp),
3743 gen_rtx_REG (CCmode, FLG_REGNO),
3744 GEN_INT (0));
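/* Expand a conditional move.  Returns nonzero if the expander should
   FAIL; only EQ/NE comparisons with constant arms are handled here.  */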
3748 m32c_expand_movcc (rtx *operands)
3750 rtx rel = operands[1];
3751 rtx cmp;
3753 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3754 return 1;
3755 if (GET_CODE (operands[2]) != CONST_INT
3756 || GET_CODE (operands[3]) != CONST_INT)
3757 return 1;
3758 emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3759 if (GET_CODE (rel) == NE)
3761 rtx tmp = operands[2];
3762 operands[2] = operands[3];
3763 operands[3] = tmp;
3766 cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3767 GET_MODE (rel),
3768 compare_op0,
3769 compare_op1);
3771 emit_move_insn (operands[0],
3772 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3773 cmp,
3774 operands[2],
3775 operands[3]));
3776 return 0;
3779 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3781 m32c_expand_insv (rtx *operands)
3783 rtx op0, src0, p;
3784 int mask;
3786 if (INTVAL (operands[1]) != 1)
3787 return 1;
3789 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3790 if (GET_CODE (operands[3]) != CONST_INT)
3791 return 1;
3792 if (INTVAL (operands[3]) != 0
3793 && INTVAL (operands[3]) != 1
3794 && INTVAL (operands[3]) != -1)
3795 return 1;
3797 mask = 1 << INTVAL (operands[2]);
3799 op0 = operands[0];
3800 if (GET_CODE (op0) == SUBREG
3801 && SUBREG_BYTE (op0) == 0)
3803 rtx sub = SUBREG_REG (op0);
3804 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3805 op0 = sub;
3808 if (!can_create_pseudo_p ()
3809 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3810 src0 = op0;
3811 else
3813 src0 = gen_reg_rtx (GET_MODE (op0));
3814 emit_move_insn (src0, op0);
3817   if (GET_MODE (op0) == HImode
3818       && INTVAL (operands[2]) >= 8
3819       && GET_CODE (op0) == MEM)
3821       /* We are little endian; operate on the high byte of the MEM. */
3822       rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3823       MEM_COPY_ATTRIBUTES (new_mem, op0);
           op0 = src0 = new_mem;
3824       mask >>= 8;
3827 /* First, we generate a mask with the correct polarity. If we are
3828 storing a zero, we want an AND mask, so invert it. */
3829 if (INTVAL (operands[3]) == 0)
3831 /* Storing a zero, use an AND mask */
3832 if (GET_MODE (op0) == HImode)
3833 mask ^= 0xffff;
3834 else
3835 mask ^= 0xff;
3837 /* Now we need to properly sign-extend the mask in case we need to
3838 fall back to an AND or OR opcode. */
3839 if (GET_MODE (op0) == HImode)
3841 if (mask & 0x8000)
3842 mask -= 0x10000;
3844 else
3846 if (mask & 0x80)
3847 mask -= 0x100;
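  /* Pick the AND (clearing a bit) or IOR (setting a bit) pattern that
     matches the operand mode and the address size of the target.  */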
3850 switch ( (INTVAL (operands[3]) ? 4 : 0)
3851 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3852 + (TARGET_A24 ? 1 : 0))
3854 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3855 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3856 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3857 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3858 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3859 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3860 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3861 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3864 emit_insn (p);
3865 return 0;
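/* Return the assembler template for a store-condition (scc) result.
   When the destination is r0l we can use STZX to store 1 or 0 directly;
   otherwise we copy the flag bit with BMcond and mask it with AND.  */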
3868 const char *
3869 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3871 static char buf[30];
3872 if (GET_CODE (operands[0]) == REG
3873 && REGNO (operands[0]) == R0_REGNO)
3875 if (code == EQ)
3876 return "stzx\t#1,#0,r0l";
3877 if (code == NE)
3878 return "stzx\t#0,#1,r0l";
3880 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3881 return buf;
3884 /* Encode symbol attributes of a SYMBOL_REF into its
3885 SYMBOL_REF_FLAGS. */
3886 static void
3887 m32c_encode_section_info (tree decl, rtx rtl, int first)
3889 int extra_flags = 0;
3891 default_encode_section_info (decl, rtl, first);
3892 if (TREE_CODE (decl) == FUNCTION_DECL
3893 && m32c_special_page_vector_p (decl))
3895 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3897 if (extra_flags)
3898 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
3901 /* Returns TRUE if the current function is a leaf, and thus we can
3902 determine which registers an interrupt function really needs to
3903 save. The logic below is mostly about finding the insn sequence
3904 that's the function, versus any sequence that might be open for the
3905 current insn. */
3906 static int
3907 m32c_leaf_function_p (void)
3909 rtx saved_first, saved_last;
3910 struct sequence_stack *seq;
3911 int rv;
3913 saved_first = crtl->emit.x_first_insn;
3914 saved_last = crtl->emit.x_last_insn;
3915 for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
3917 if (seq)
3919 crtl->emit.x_first_insn = seq->first;
3920 crtl->emit.x_last_insn = seq->last;
3923 rv = leaf_function_p ();
3925 crtl->emit.x_first_insn = saved_first;
3926 crtl->emit.x_last_insn = saved_last;
3927 return rv;
3930 /* Returns TRUE if the current function needs to use the ENTER/EXIT
3931 opcodes. If the function doesn't need the frame base or stack
3932 pointer, it can use the simpler RTS opcode. */
3933 static bool
3934 m32c_function_needs_enter (void)
3936 rtx insn;
3937 struct sequence_stack *seq;
3938 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3939 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
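  /* Find the function's own insn stream (the outermost saved sequence)
     and scan it for any reference to $sp or $fb.  */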
3941 insn = get_insns ();
3942 for (seq = crtl->emit.sequence_stack;
3943 seq;
3944 insn = seq->first, seq = seq->next);
3946 while (insn)
3948 if (reg_mentioned_p (sp, insn))
3949 return true;
3950 if (reg_mentioned_p (fb, insn))
3951 return true;
3952 insn = NEXT_INSN (insn);
3954 return false;
3957 /* Mark all the subexpressions of the PARALLEL rtx PAR as
3958 frame-related. Return PAR.
3960 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3961 PARALLEL rtx other than the first if they do not have the
3962 FRAME_RELATED flag set on them. So this function is handy for
3963 marking up 'enter' instructions. */
3964 static rtx
3965 m32c_all_frame_related (rtx par)
3967 int len = XVECLEN (par, 0);
3968 int i;
3970 for (i = 0; i < len; i++)
3971 F (XVECEXP (par, 0, i));
3973 return par;
3976 /* Emits the prologue. See the frame layout comment earlier in this
3977    file. We can reserve up to 256 bytes with the ENTER opcode; beyond
3978    that we update sp manually. */
3979 void
3980 m32c_emit_prologue (void)
3982 int frame_size, extra_frame_size = 0, reg_save_size;
3983 int complex_prologue = 0;
3985 cfun->machine->is_leaf = m32c_leaf_function_p ();
3986 if (interrupt_p (cfun->decl))
3988 cfun->machine->is_interrupt = 1;
3989 complex_prologue = 1;
3992 reg_save_size = m32c_pushm_popm (PP_justcount);
3994 if (interrupt_p (cfun->decl))
3995 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
3997 frame_size =
3998 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
3999 if (frame_size == 0
4000 && !cfun->machine->is_interrupt
4001 && !m32c_function_needs_enter ())
4002 cfun->machine->use_rts = 1;
4004 if (frame_size > 254)
4006 extra_frame_size = frame_size - 254;
4007 frame_size = 254;
4009 if (cfun->machine->use_rts == 0)
4010 F (emit_insn (m32c_all_frame_related
4011 (TARGET_A16
4012 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4013 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4015 if (extra_frame_size)
4017 complex_prologue = 1;
4018 if (TARGET_A16)
4019 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4020 gen_rtx_REG (HImode, SP_REGNO),
4021 GEN_INT (-extra_frame_size))));
4022 else
4023 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4024 gen_rtx_REG (PSImode, SP_REGNO),
4025 GEN_INT (-extra_frame_size))));
4028 complex_prologue += m32c_pushm_popm (PP_pushm);
4030 /* This just emits a comment into the .s file for debugging. */
4031 if (complex_prologue)
4032 emit_insn (gen_prologue_end ());
4035 /* Likewise, for the epilogue. The only exception is that, for
4036 interrupts, we must manually unwind the frame as the REIT opcode
4037 doesn't do that. */
4038 void
4039 m32c_emit_epilogue (void)
4041 /* This just emits a comment into the .s file for debugging. */
4042 if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4043 emit_insn (gen_epilogue_start ());
4045 m32c_pushm_popm (PP_popm);
4047 if (cfun->machine->is_interrupt)
4049 enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
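      /* Unwind the frame by hand: copy the frame base to $sp (via $a0),
	 pop the saved frame base, restore the interrupt-saved registers,
	 then return with REIT.  */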
4051 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4052 gen_rtx_REG (spmode, FP_REGNO));
4053 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4054 gen_rtx_REG (spmode, A0_REGNO));
4055 if (TARGET_A16)
4056 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4057 else
4058 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4059 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4060 if (TARGET_A16)
4061 emit_jump_insn (gen_epilogue_reit_16 ());
4062 else
4063 emit_jump_insn (gen_epilogue_reit_24 ());
4065 else if (cfun->machine->use_rts)
4066 emit_jump_insn (gen_epilogue_rts ());
4067 else if (TARGET_A16)
4068 emit_jump_insn (gen_epilogue_exitd_16 ());
4069 else
4070 emit_jump_insn (gen_epilogue_exitd_24 ());
4071 emit_barrier ();
4074 void
4075 m32c_emit_eh_epilogue (rtx ret_addr)
4077 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4078 return to. We have to fudge the stack, pop everything, pop SP
4079 (fudged), and return (fudged). This is actually easier to do in
4080 assembler, so punt to libgcc. */
4081 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4082 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4083 emit_barrier ();
4086 /* Indicate which flags must be properly set for a given conditional. */
4087 static int
4088 flags_needed_for_conditional (rtx cond)
4090 switch (GET_CODE (cond))
4092 case LE:
4093 case GT:
4094 return FLAGS_OSZ;
4095 case LEU:
4096 case GTU:
4097 return FLAGS_ZC;
4098 case LT:
4099 case GE:
4100 return FLAGS_OS;
4101 case LTU:
4102 case GEU:
4103 return FLAGS_C;
4104 case EQ:
4105 case NE:
4106 return FLAGS_Z;
4107 default:
4108 return FLAGS_N;
4112 #define DEBUG_CMP 0
4114 /* Returns true if a compare insn is redundant because it would only
4115 set flags that are already set correctly. */
4116 static bool
4117 m32c_compare_redundant (rtx cmp, rtx *operands)
4119 int flags_needed;
4120 int pflags;
4121 rtx prev, pp, next;
4122 rtx op0, op1, op2;
4123 #if DEBUG_CMP
4124 int prev_icode, i;
4125 #endif
4127 op0 = operands[0];
4128 op1 = operands[1];
4129 op2 = operands[2];
4131 #if DEBUG_CMP
4132 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4133 debug_rtx(cmp);
4134 for (i=0; i<2; i++)
4136 fprintf(stderr, "operands[%d] = ", i);
4137 debug_rtx(operands[i]);
4139 #endif
4141 next = next_nonnote_insn (cmp);
4142 if (!next || !INSN_P (next))
4144 #if DEBUG_CMP
4145 fprintf(stderr, "compare not followed by insn\n");
4146 debug_rtx(next);
4147 #endif
4148 return false;
4150 if (GET_CODE (PATTERN (next)) == SET
4151 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4153 next = XEXP (XEXP (PATTERN (next), 1), 0);
4155 else if (GET_CODE (PATTERN (next)) == SET)
4157 /* If this is a conditional, flags_needed will be something
4158 other than FLAGS_N, which we test below. */
4159 next = XEXP (PATTERN (next), 1);
4161 else
4163 #if DEBUG_CMP
4164 fprintf(stderr, "compare not followed by conditional\n");
4165 debug_rtx(next);
4166 #endif
4167 return false;
4169 #if DEBUG_CMP
4170 fprintf(stderr, "conditional is: ");
4171 debug_rtx(next);
4172 #endif
4174 flags_needed = flags_needed_for_conditional (next);
4175 if (flags_needed == FLAGS_N)
4177 #if DEBUG_CMP
4178 fprintf(stderr, "compare not followed by conditional\n");
4179 debug_rtx(next);
4180 #endif
4181 return false;
4184 /* Compare doesn't set overflow and carry the same way that
4185 arithmetic instructions do, so we can't replace those. */
4186 if (flags_needed & FLAGS_OC)
4187 return false;
4189 prev = cmp;
4190 do {
4191 prev = prev_nonnote_insn (prev);
4192 if (!prev)
4194 #if DEBUG_CMP
4195 fprintf(stderr, "No previous insn.\n");
4196 #endif
4197 return false;
4199 if (!INSN_P (prev))
4201 #if DEBUG_CMP
4202 fprintf(stderr, "Previous insn is a non-insn.\n");
4203 #endif
4204 return false;
4206 pp = PATTERN (prev);
4207 if (GET_CODE (pp) != SET)
4209 #if DEBUG_CMP
4210 fprintf(stderr, "Previous insn is not a SET.\n");
4211 #endif
4212 return false;
4214 pflags = get_attr_flags (prev);
4216 /* Looking up attributes of previous insns corrupted the recog
4217    tables, so re-recognize CMP here before continuing. */
4218 INSN_UID (cmp) = -1;
4219 recog (PATTERN (cmp), cmp, 0);
4221 if (pflags == FLAGS_N
4222 && reg_mentioned_p (op0, pp))
4224 #if DEBUG_CMP
4225 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4226 debug_rtx(prev);
4227 #endif
4228 return false;
4230 } while (pflags == FLAGS_N);
4231 #if DEBUG_CMP
4232 fprintf(stderr, "previous flag-setting insn:\n");
4233 debug_rtx(prev);
4234 debug_rtx(pp);
4235 #endif
4237 if (GET_CODE (pp) == SET
4238 && GET_CODE (XEXP (pp, 0)) == REG
4239 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4240 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4242 /* Adjacent cbranches must have the same operands to be
4243 redundant. */
4244 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4245 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4246 #if DEBUG_CMP
4247 fprintf(stderr, "adjacent cbranches\n");
4248 debug_rtx(pop0);
4249 debug_rtx(pop1);
4250 #endif
4251 if (rtx_equal_p (op0, pop0)
4252 && rtx_equal_p (op1, pop1))
4253 return true;
4254 #if DEBUG_CMP
4255 fprintf(stderr, "prev cmp not same\n");
4256 #endif
4257 return false;
4260 /* Else the previous insn must be a SET, with either the source or
4261 dest equal to operands[0], and operands[1] must be zero. */
4263 if (!rtx_equal_p (op1, const0_rtx))
4265 #if DEBUG_CMP
4266 fprintf(stderr, "operands[1] not const0_rtx\n");
4267 #endif
4268 return false;
4270 if (GET_CODE (pp) != SET)
4272 #if DEBUG_CMP
4273 fprintf (stderr, "pp not set\n");
4274 #endif
4275 return false;
4277 if (!rtx_equal_p (op0, SET_SRC (pp))
4278 && !rtx_equal_p (op0, SET_DEST (pp)))
4280 #if DEBUG_CMP
4281 fprintf(stderr, "operands[0] not found in set\n");
4282 #endif
4283 return false;
4286 #if DEBUG_CMP
4287 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4288 #endif
4289 if ((pflags & flags_needed) == flags_needed)
4290 return true;
4292 return false;
4295 /* Return the pattern for a compare. This will be commented out if
4296 the compare is redundant, else a normal pattern is returned. Thus,
4297 the assembler output says where the compare would have been. */
4298 char *
4299 m32c_output_compare (rtx insn, rtx *operands)
4301 static char template[] = ";cmp.b\t%1,%0";
4302 /* ^ 5 */
4304 template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4305 if (m32c_compare_redundant (insn, operands))
4307 #if DEBUG_CMP
4308 fprintf(stderr, "cbranch: cmp not needed\n");
4309 #endif
4310 return template;
4313 #if DEBUG_CMP
4314 fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
4315 #endif
4316 return template + 1;
4319 #undef TARGET_ENCODE_SECTION_INFO
4320 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4322 /* The Global `targetm' Variable. */
4324 struct gcc_target targetm = TARGET_INITIALIZER;
4326 #include "gt-m32c.h"