2009-05-14 Paolo Bonzini <bonzini@gnu.org>
[official-gcc.git] / gcc / config / m32c / m32c.c
blob989f823df92192a2f67d58316358167e1e457b5f
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008, 2009
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "real.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "flags.h"
36 #include "recog.h"
37 #include "reload.h"
38 #include "toplev.h"
39 #include "obstack.h"
40 #include "tree.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "except.h"
44 #include "function.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "tm_p.h"
49 #include "langhooks.h"
50 #include "gimple.h"
51 #include "df.h"
53 /* Prototypes */
55 /* Used by m32c_pushm_popm. */
/* Operating mode for m32c_pushm_popm: emit the prologue PUSHM, emit
   the epilogue POPM, or merely compute the size of the register save
   area without emitting anything.  */
typedef enum
{
  PP_pushm,			/* emit pushm instruction(s) */
  PP_popm,			/* emit popm instruction(s) */
  PP_justcount			/* only return the save-area byte count */
} Push_Pop_Type;
63 static tree interrupt_handler (tree *, tree, tree, int, bool *);
64 static tree function_vector_handler (tree *, tree, tree, int, bool *);
65 static int interrupt_p (tree node);
66 static bool m32c_asm_integer (rtx, unsigned int, int);
67 static int m32c_comp_type_attributes (const_tree, const_tree);
68 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
69 static struct machine_function *m32c_init_machine_status (void);
70 static void m32c_insert_attributes (tree, tree *);
71 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
72 static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
73 const_tree, bool);
74 static bool m32c_promote_prototypes (const_tree);
75 static int m32c_pushm_popm (Push_Pop_Type);
76 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
77 static rtx m32c_struct_value_rtx (tree, int);
78 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
79 static int need_to_save (int);
80 int current_function_special_page_vector (rtx);
82 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
84 #define streq(a,b) (strcmp ((a), (b)) == 0)
86 /* Internal support routines */
88 /* Debugging statements are tagged with DEBUG0 only so that they can
89 be easily enabled individually, by replacing the '0' with '1' as
90 needed. */
91 #define DEBUG0 0
92 #define DEBUG1 1
94 #if DEBUG0
95 /* This is needed by some of the commented-out debug statements
96 below. */
97 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
98 #endif
99 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
101 /* These are all to support encode_pattern(). */
102 static char pattern[30], *patternp;
103 static GTY(()) rtx patternr[30];
104 #define RTX_IS(x) (streq (pattern, x))
106 /* Some macros to simplify the logic throughout this file. */
107 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
108 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
110 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
111 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
113 /* We do most RTX matching by converting the RTX into a string, and
114 using string compares. This vastly simplifies the logic in many of
115 the functions in this file.
117 On exit, pattern[] has the encoded string (use RTX_IS("...") to
118 compare it) and patternr[] has pointers to the nodes in the RTX
119 corresponding to each character in the encoded string. The latter
120 is mostly used by print_operand().
122 Unrecognized patterns have '?' in them; this shows up when the
123 assembler complains about syntax errors.
126 static void
127 encode_pattern_1 (rtx x)
129 int i;
131 if (patternp == pattern + sizeof (pattern) - 2)
133 patternp[-1] = '?';
134 return;
137 patternr[patternp - pattern] = x;
139 switch (GET_CODE (x))
141 case REG:
142 *patternp++ = 'r';
143 break;
144 case SUBREG:
145 if (GET_MODE_SIZE (GET_MODE (x)) !=
146 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
147 *patternp++ = 'S';
148 encode_pattern_1 (XEXP (x, 0));
149 break;
150 case MEM:
151 *patternp++ = 'm';
152 case CONST:
153 encode_pattern_1 (XEXP (x, 0));
154 break;
155 case PLUS:
156 *patternp++ = '+';
157 encode_pattern_1 (XEXP (x, 0));
158 encode_pattern_1 (XEXP (x, 1));
159 break;
160 case PRE_DEC:
161 *patternp++ = '>';
162 encode_pattern_1 (XEXP (x, 0));
163 break;
164 case POST_INC:
165 *patternp++ = '<';
166 encode_pattern_1 (XEXP (x, 0));
167 break;
168 case LO_SUM:
169 *patternp++ = 'L';
170 encode_pattern_1 (XEXP (x, 0));
171 encode_pattern_1 (XEXP (x, 1));
172 break;
173 case HIGH:
174 *patternp++ = 'H';
175 encode_pattern_1 (XEXP (x, 0));
176 break;
177 case SYMBOL_REF:
178 *patternp++ = 's';
179 break;
180 case LABEL_REF:
181 *patternp++ = 'l';
182 break;
183 case CODE_LABEL:
184 *patternp++ = 'c';
185 break;
186 case CONST_INT:
187 case CONST_DOUBLE:
188 *patternp++ = 'i';
189 break;
190 case UNSPEC:
191 *patternp++ = 'u';
192 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
193 for (i = 0; i < XVECLEN (x, 0); i++)
194 encode_pattern_1 (XVECEXP (x, 0, i));
195 break;
196 case USE:
197 *patternp++ = 'U';
198 break;
199 case PARALLEL:
200 *patternp++ = '|';
201 for (i = 0; i < XVECLEN (x, 0); i++)
202 encode_pattern_1 (XVECEXP (x, 0, i));
203 break;
204 case EXPR_LIST:
205 *patternp++ = 'E';
206 encode_pattern_1 (XEXP (x, 0));
207 if (XEXP (x, 1))
208 encode_pattern_1 (XEXP (x, 1));
209 break;
210 default:
211 *patternp++ = '?';
212 #if DEBUG0
213 fprintf (stderr, "can't encode pattern %s\n",
214 GET_RTX_NAME (GET_CODE (x)));
215 debug_rtx (x);
216 gcc_unreachable ();
217 #endif
218 break;
222 static void
223 encode_pattern (rtx x)
225 patternp = pattern;
226 encode_pattern_1 (x);
227 *patternp = 0;
230 /* Since register names indicate the mode they're used in, we need a
231 way to determine which name to refer to the register with. Called
232 by print_operand(). */
234 static const char *
235 reg_name_with_mode (int regno, enum machine_mode mode)
237 int mlen = GET_MODE_SIZE (mode);
238 if (regno == R0_REGNO && mlen == 1)
239 return "r0l";
240 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
241 return "r2r0";
242 if (regno == R0_REGNO && mlen == 6)
243 return "r2r1r0";
244 if (regno == R0_REGNO && mlen == 8)
245 return "r3r1r2r0";
246 if (regno == R1_REGNO && mlen == 1)
247 return "r1l";
248 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
249 return "r3r1";
250 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
251 return "a1a0";
252 return reg_names[regno];
255 /* How many bytes a register uses on stack when it's pushed. We need
256 to know this because the push opcode needs to explicitly indicate
257 the size of the register, even though the name of the register
258 already tells it that. Used by m32c_output_reg_{push,pop}, which
259 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
261 static int
262 reg_push_size (int regno)
264 switch (regno)
266 case R0_REGNO:
267 case R1_REGNO:
268 return 2;
269 case R2_REGNO:
270 case R3_REGNO:
271 case FLG_REGNO:
272 return 2;
273 case A0_REGNO:
274 case A1_REGNO:
275 case SB_REGNO:
276 case FB_REGNO:
277 case SP_REGNO:
278 if (TARGET_A16)
279 return 2;
280 else
281 return 3;
282 default:
283 gcc_unreachable ();
287 static int *class_sizes = 0;
289 /* Given two register classes, find the largest intersection between
290 them. If there is no intersection, return RETURNED_IF_EMPTY
291 instead. */
292 static int
293 reduce_class (int original_class, int limiting_class, int returned_if_empty)
295 int cc = class_contents[original_class][0];
296 int i, best = NO_REGS;
297 int best_size = 0;
299 if (original_class == limiting_class)
300 return original_class;
302 if (!class_sizes)
304 int r;
305 class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
306 for (i = 0; i < LIM_REG_CLASSES; i++)
308 class_sizes[i] = 0;
309 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
310 if (class_contents[i][0] & (1 << r))
311 class_sizes[i]++;
315 cc &= class_contents[limiting_class][0];
316 for (i = 0; i < LIM_REG_CLASSES; i++)
318 int ic = class_contents[i][0];
320 if ((~cc & ic) == 0)
321 if (best_size < class_sizes[i])
323 best = i;
324 best_size = class_sizes[i];
328 if (best == NO_REGS)
329 return returned_if_empty;
330 return best;
333 /* Returns TRUE If there are any registers that exist in both register
334 classes. */
335 static int
336 classes_intersect (int class1, int class2)
338 return class_contents[class1][0] & class_contents[class2][0];
341 /* Used by m32c_register_move_cost to determine if a move is
342 impossibly expensive. */
343 static int
344 class_can_hold_mode (int rclass, enum machine_mode mode)
346 /* Cache the results: 0=untested 1=no 2=yes */
347 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
348 if (results[rclass][mode] == 0)
350 int r, n, i;
351 results[rclass][mode] = 1;
352 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
353 if (class_contents[rclass][0] & (1 << r)
354 && HARD_REGNO_MODE_OK (r, mode))
356 int ok = 1;
357 n = HARD_REGNO_NREGS (r, mode);
358 for (i = 1; i < n; i++)
359 if (!(class_contents[rclass][0] & (1 << (r + i))))
360 ok = 0;
361 if (ok)
363 results[rclass][mode] = 2;
364 break;
368 #if DEBUG0
369 fprintf (stderr, "class %s can hold %s? %s\n",
370 class_names[rclass], mode_name[mode],
371 (results[rclass][mode] == 2) ? "yes" : "no");
372 #endif
373 return results[rclass][mode] == 2;
376 /* Run-time Target Specification. */
378 /* Memregs are memory locations that gcc treats like general
379 registers, as there are a limited number of true registers and the
380 m32c families can use memory in most places that registers can be
381 used.
383 However, since memory accesses are more expensive than registers,
384 we allow the user to limit the number of memregs available, in
385 order to try to persuade gcc to try harder to use real registers.
387 Memregs are provided by m32c-lib1.S.
390 int target_memregs = 16;
391 static bool target_memregs_set = FALSE;
392 int ok_to_change_target_memregs = TRUE;
394 #undef TARGET_HANDLE_OPTION
395 #define TARGET_HANDLE_OPTION m32c_handle_option
396 static bool
397 m32c_handle_option (size_t code,
398 const char *arg ATTRIBUTE_UNUSED,
399 int value ATTRIBUTE_UNUSED)
401 if (code == OPT_memregs_)
403 target_memregs_set = TRUE;
404 target_memregs = atoi (arg);
406 return TRUE;
409 /* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
410 provide a default. */
411 void
412 m32c_override_options (void)
414 if (target_memregs_set)
416 if (target_memregs < 0 || target_memregs > 16)
417 error ("invalid target memregs value '%d'", target_memregs);
419 else
420 target_memregs = 16;
423 /* Defining data structures for per-function information */
425 /* The usual; we set up our machine_function data. */
426 static struct machine_function *
427 m32c_init_machine_status (void)
429 struct machine_function *machine;
430 machine =
431 (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
433 return machine;
436 /* Implements INIT_EXPANDERS. We just set up to call the above
437 function. */
438 void
439 m32c_init_expanders (void)
441 init_machine_status = m32c_init_machine_status;
/* Storage Layout */

/* Register Basics */

/* Basic Characteristics of Registers */

/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry gives, per hard register, how many registers a
   value of the given mode-class occupies (0 = mode not allowed).  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
483 /* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
484 available memregs, and select which registers need to be preserved
485 across calls based on the chip family. */
487 void
488 m32c_conditional_register_usage (void)
490 int i;
492 if (0 <= target_memregs && target_memregs <= 16)
494 /* The command line option is bytes, but our "registers" are
495 16-bit words. */
496 for (i = target_memregs/2; i < 8; i++)
498 fixed_regs[MEM0_REGNO + i] = 1;
499 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
503 /* M32CM and M32C preserve more registers across function calls. */
504 if (TARGET_A24)
506 call_used_regs[R1_REGNO] = 0;
507 call_used_regs[R2_REGNO] = 0;
508 call_used_regs[R3_REGNO] = 0;
509 call_used_regs[A0_REGNO] = 0;
510 call_used_regs[A1_REGNO] = 0;
514 /* How Values Fit in Registers */
516 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
517 different registers are different sizes from each other, *and* may
518 be different sizes in different chip families. */
519 static int
520 m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
522 if (regno == FLG_REGNO && mode == CCmode)
523 return 1;
524 if (regno >= FIRST_PSEUDO_REGISTER)
525 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
527 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
528 return (GET_MODE_SIZE (mode) + 1) / 2;
530 if (GET_MODE_SIZE (mode) <= 1)
531 return nregs_table[regno].qi_regs;
532 if (GET_MODE_SIZE (mode) <= 2)
533 return nregs_table[regno].hi_regs;
534 if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
535 return 2;
536 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
537 return nregs_table[regno].pi_regs;
538 if (GET_MODE_SIZE (mode) <= 4)
539 return nregs_table[regno].si_regs;
540 if (GET_MODE_SIZE (mode) <= 8)
541 return nregs_table[regno].di_regs;
542 return 0;
546 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
548 int rv = m32c_hard_regno_nregs_1 (regno, mode);
549 return rv ? rv : 1;
552 /* Implements HARD_REGNO_MODE_OK. The above function does the work
553 already; just test its return value. */
555 m32c_hard_regno_ok (int regno, enum machine_mode mode)
557 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
560 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
561 registers are all different sizes. However, since most modes are
562 bigger than our registers anyway, it's easier to implement this
563 function that way, leaving QImode as the only unique case. */
565 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
567 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
568 return 1;
570 #if 0
571 if (m1 == QImode || m2 == QImode)
572 return 0;
573 #endif
575 return 1;
578 /* Register Classes */
580 /* Implements REGNO_REG_CLASS. */
581 enum machine_mode
582 m32c_regno_reg_class (int regno)
584 switch (regno)
586 case R0_REGNO:
587 return R0_REGS;
588 case R1_REGNO:
589 return R1_REGS;
590 case R2_REGNO:
591 return R2_REGS;
592 case R3_REGNO:
593 return R3_REGS;
594 case A0_REGNO:
595 case A1_REGNO:
596 return A_REGS;
597 case SB_REGNO:
598 return SB_REGS;
599 case FB_REGNO:
600 return FB_REGS;
601 case SP_REGNO:
602 return SP_REGS;
603 case FLG_REGNO:
604 return FLG_REGS;
605 default:
606 if (IS_MEM_REGNO (regno))
607 return MEM_REGS;
608 return ALL_REGS;
612 /* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
613 for certain chip families. */
615 m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
617 if (memcmp (s, "Rsp", 3) == 0)
618 return SP_REGS;
619 if (memcmp (s, "Rfb", 3) == 0)
620 return FB_REGS;
621 if (memcmp (s, "Rsb", 3) == 0)
622 return SB_REGS;
623 if (memcmp (s, "Rcr", 3) == 0)
624 return TARGET_A16 ? CR_REGS : NO_REGS;
625 if (memcmp (s, "Rcl", 3) == 0)
626 return TARGET_A24 ? CR_REGS : NO_REGS;
627 if (memcmp (s, "R0w", 3) == 0)
628 return R0_REGS;
629 if (memcmp (s, "R1w", 3) == 0)
630 return R1_REGS;
631 if (memcmp (s, "R2w", 3) == 0)
632 return R2_REGS;
633 if (memcmp (s, "R3w", 3) == 0)
634 return R3_REGS;
635 if (memcmp (s, "R02", 3) == 0)
636 return R02_REGS;
637 if (memcmp (s, "R03", 3) == 0)
638 return R03_REGS;
639 if (memcmp (s, "Rdi", 3) == 0)
640 return DI_REGS;
641 if (memcmp (s, "Rhl", 3) == 0)
642 return HL_REGS;
643 if (memcmp (s, "R23", 3) == 0)
644 return R23_REGS;
645 if (memcmp (s, "Ra0", 3) == 0)
646 return A0_REGS;
647 if (memcmp (s, "Ra1", 3) == 0)
648 return A1_REGS;
649 if (memcmp (s, "Raa", 3) == 0)
650 return A_REGS;
651 if (memcmp (s, "Raw", 3) == 0)
652 return TARGET_A16 ? A_REGS : NO_REGS;
653 if (memcmp (s, "Ral", 3) == 0)
654 return TARGET_A24 ? A_REGS : NO_REGS;
655 if (memcmp (s, "Rqi", 3) == 0)
656 return QI_REGS;
657 if (memcmp (s, "Rad", 3) == 0)
658 return AD_REGS;
659 if (memcmp (s, "Rsi", 3) == 0)
660 return SI_REGS;
661 if (memcmp (s, "Rhi", 3) == 0)
662 return HI_REGS;
663 if (memcmp (s, "Rhc", 3) == 0)
664 return HC_REGS;
665 if (memcmp (s, "Rra", 3) == 0)
666 return RA_REGS;
667 if (memcmp (s, "Rfl", 3) == 0)
668 return FLG_REGS;
669 if (memcmp (s, "Rmm", 3) == 0)
671 if (fixed_regs[MEM0_REGNO])
672 return NO_REGS;
673 return MEM_REGS;
676 /* PSImode registers - i.e. whatever can hold a pointer. */
677 if (memcmp (s, "Rpi", 3) == 0)
679 if (TARGET_A16)
680 return HI_REGS;
681 else
682 return RA_REGS; /* r2r0 and r3r1 can hold pointers. */
685 /* We handle this one as an EXTRA_CONSTRAINT. */
686 if (memcmp (s, "Rpa", 3) == 0)
687 return NO_REGS;
689 if (*s == 'R')
691 fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
692 gcc_unreachable();
695 return NO_REGS;
698 /* Implements REGNO_OK_FOR_BASE_P. */
700 m32c_regno_ok_for_base_p (int regno)
702 if (regno == A0_REGNO
703 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
704 return 1;
705 return 0;
708 #define DEBUG_RELOAD 0
710 /* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
711 registers of the appropriate size. */
713 m32c_preferred_reload_class (rtx x, int rclass)
715 int newclass = rclass;
717 #if DEBUG_RELOAD
718 fprintf (stderr, "\npreferred_reload_class for %s is ",
719 class_names[rclass]);
720 #endif
721 if (rclass == NO_REGS)
722 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
724 if (classes_intersect (rclass, CR_REGS))
726 switch (GET_MODE (x))
728 case QImode:
729 newclass = HL_REGS;
730 break;
731 default:
732 /* newclass = HI_REGS; */
733 break;
737 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
738 newclass = SI_REGS;
739 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
740 && ~class_contents[rclass][0] & 0x000f)
741 newclass = DI_REGS;
743 rclass = reduce_class (rclass, newclass, rclass);
745 if (GET_MODE (x) == QImode)
746 rclass = reduce_class (rclass, HL_REGS, rclass);
748 #if DEBUG_RELOAD
749 fprintf (stderr, "%s\n", class_names[rclass]);
750 debug_rtx (x);
752 if (GET_CODE (x) == MEM
753 && GET_CODE (XEXP (x, 0)) == PLUS
754 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
755 fprintf (stderr, "Glorm!\n");
756 #endif
757 return rclass;
760 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
762 m32c_preferred_output_reload_class (rtx x, int rclass)
764 return m32c_preferred_reload_class (x, rclass);
767 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
768 address registers for reloads since they're needed for address
769 reloads. */
771 m32c_limit_reload_class (enum machine_mode mode, int rclass)
773 #if DEBUG_RELOAD
774 fprintf (stderr, "limit_reload_class for %s: %s ->",
775 mode_name[mode], class_names[rclass]);
776 #endif
778 if (mode == QImode)
779 rclass = reduce_class (rclass, HL_REGS, rclass);
780 else if (mode == HImode)
781 rclass = reduce_class (rclass, HI_REGS, rclass);
782 else if (mode == SImode)
783 rclass = reduce_class (rclass, SI_REGS, rclass);
785 if (rclass != A_REGS)
786 rclass = reduce_class (rclass, DI_REGS, rclass);
788 #if DEBUG_RELOAD
789 fprintf (stderr, " %s\n", class_names[rclass]);
790 #endif
791 return rclass;
794 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
795 r0 or r1, as those are the only real QImode registers. CR regs get
796 reloaded through appropriately sized general or address
797 registers. */
799 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
801 int cc = class_contents[rclass][0];
802 #if DEBUG0
803 fprintf (stderr, "\nsecondary reload class %s %s\n",
804 class_names[rclass], mode_name[mode]);
805 debug_rtx (x);
806 #endif
807 if (mode == QImode
808 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
809 return QI_REGS;
810 if (classes_intersect (rclass, CR_REGS)
811 && GET_CODE (x) == REG
812 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
813 return TARGET_A16 ? HI_REGS : A_REGS;
814 return NO_REGS;
817 /* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
818 reloads. */
820 m32c_class_likely_spilled_p (int regclass)
822 if (regclass == A_REGS)
823 return 1;
824 return reg_class_size[regclass] == 1;
827 /* Implements CLASS_MAX_NREGS. We calculate this according to its
828 documented meaning, to avoid potential inconsistencies with actual
829 class definitions. */
831 m32c_class_max_nregs (int regclass, enum machine_mode mode)
833 int rn, max = 0;
835 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
836 if (class_contents[regclass][0] & (1 << rn))
838 int n = m32c_hard_regno_nregs (rn, mode);
839 if (max < n)
840 max = n;
842 return max;
845 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
846 QI (r0l, r1l) because the chip doesn't support QI ops on other
847 registers (well, it does on a0/a1 but if we let gcc do that, reload
848 suffers). Otherwise, we allow changes to larger modes. */
850 m32c_cannot_change_mode_class (enum machine_mode from,
851 enum machine_mode to, int rclass)
853 int rn;
854 #if DEBUG0
855 fprintf (stderr, "cannot change from %s to %s in %s\n",
856 mode_name[from], mode_name[to], class_names[rclass]);
857 #endif
859 /* If the larger mode isn't allowed in any of these registers, we
860 can't allow the change. */
861 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
862 if (class_contents[rclass][0] & (1 << rn))
863 if (! m32c_hard_regno_ok (rn, to))
864 return 1;
866 if (to == QImode)
867 return (class_contents[rclass][0] & 0x1ffa);
869 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
870 && GET_MODE_SIZE (from) > 1)
871 return 0;
872 if (GET_MODE_SIZE (from) > 2) /* all other regs */
873 return 0;
875 return 1;
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
889 /* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
890 constraints start with 'I', with the next two characters indicating
891 the type and size of the range allowed. */
893 m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
894 char c ATTRIBUTE_UNUSED, const char *str)
896 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
897 [sun] bits [SUN] bytes, p=pointer size
898 I[-0-9][0-9] matches that number */
899 if (memcmp (str, "Is3", 3) == 0)
901 return (-8 <= value && value <= 7);
903 if (memcmp (str, "IS1", 3) == 0)
905 return (-128 <= value && value <= 127);
907 if (memcmp (str, "IS2", 3) == 0)
909 return (-32768 <= value && value <= 32767);
911 if (memcmp (str, "IU2", 3) == 0)
913 return (0 <= value && value <= 65535);
915 if (memcmp (str, "IU3", 3) == 0)
917 return (0 <= value && value <= 0x00ffffff);
919 if (memcmp (str, "In4", 3) == 0)
921 return (-8 <= value && value && value <= 8);
923 if (memcmp (str, "In5", 3) == 0)
925 return (-16 <= value && value && value <= 16);
927 if (memcmp (str, "In6", 3) == 0)
929 return (-32 <= value && value && value <= 32);
931 if (memcmp (str, "IM2", 3) == 0)
933 return (-65536 <= value && value && value <= -1);
935 if (memcmp (str, "Ilb", 3) == 0)
937 int b = exact_log2 (value);
938 return (b >= 0 && b <= 7);
940 if (memcmp (str, "Imb", 3) == 0)
942 int b = exact_log2 ((value ^ 0xff) & 0xff);
943 return (b >= 0 && b <= 7);
945 if (memcmp (str, "ImB", 3) == 0)
947 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
948 return (b >= 0 && b <= 7);
950 if (memcmp (str, "Ilw", 3) == 0)
952 int b = exact_log2 (value);
953 return (b >= 0 && b <= 15);
955 if (memcmp (str, "Imw", 3) == 0)
957 int b = exact_log2 ((value ^ 0xffff) & 0xffff);
958 return (b >= 0 && b <= 15);
960 if (memcmp (str, "I00", 3) == 0)
962 return (value == 0);
964 return 0;
967 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
968 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
969 call return values. */
971 m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
973 encode_pattern (value);
974 if (memcmp (str, "Sd", 2) == 0)
976 /* This is the common "src/dest" address */
977 rtx r;
978 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
979 return 1;
980 if (RTX_IS ("ms") || RTX_IS ("m+si"))
981 return 1;
982 if (RTX_IS ("m++rii"))
984 if (REGNO (patternr[3]) == FB_REGNO
985 && INTVAL (patternr[4]) == 0)
986 return 1;
988 if (RTX_IS ("mr"))
989 r = patternr[1];
990 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
991 r = patternr[2];
992 else
993 return 0;
994 if (REGNO (r) == SP_REGNO)
995 return 0;
996 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
998 else if (memcmp (str, "Sa", 2) == 0)
1000 rtx r;
1001 if (RTX_IS ("mr"))
1002 r = patternr[1];
1003 else if (RTX_IS ("m+ri"))
1004 r = patternr[2];
1005 else
1006 return 0;
1007 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1009 else if (memcmp (str, "Si", 2) == 0)
1011 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1013 else if (memcmp (str, "Ss", 2) == 0)
1015 return ((RTX_IS ("mr")
1016 && (IS_REG (patternr[1], SP_REGNO)))
1017 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1019 else if (memcmp (str, "Sf", 2) == 0)
1021 return ((RTX_IS ("mr")
1022 && (IS_REG (patternr[1], FB_REGNO)))
1023 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1025 else if (memcmp (str, "Sb", 2) == 0)
1027 return ((RTX_IS ("mr")
1028 && (IS_REG (patternr[1], SB_REGNO)))
1029 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1031 else if (memcmp (str, "Sp", 2) == 0)
1033 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1034 return (RTX_IS ("mi")
1035 && !(INTVAL (patternr[1]) & ~0x1fff));
1037 else if (memcmp (str, "S1", 2) == 0)
1039 return r1h_operand (value, QImode);
1042 gcc_assert (str[0] != 'S');
1044 if (memcmp (str, "Rpa", 2) == 0)
1045 return GET_CODE (value) == PARALLEL;
1047 return 0;
1050 /* This is for when we're debugging the above. */
1052 m32c_extra_constraint_p (rtx value, char c, const char *str)
1054 int rv = m32c_extra_constraint_p2 (value, c, str);
1055 #if DEBUG0
1056 fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1057 rv);
1058 debug_rtx (value);
1059 #endif
1060 return rv;
1063 /* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1064 starting with 'S'. */
1066 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1068 return c == 'S';
1071 /* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1072 but don't currently define any. */
1074 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1076 return c == 'A';
1079 /* STACK AND CALLING */
1081 /* Frame Layout */
1083 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1084 (yes, THREE bytes) onto the stack for the return address, but we
1085 don't support pointers bigger than 16 bits on those chips. This
1086 will likely wreak havoc with exception unwinding. FIXME. */
1088 m32c_return_addr_rtx (int count)
1090 enum machine_mode mode;
1091 int offset;
1092 rtx ra_mem;
1094 if (count)
1095 return NULL_RTX;
1096 /* we want 2[$fb] */
1098 if (TARGET_A24)
1100 /* It's four bytes */
1101 mode = PSImode;
1102 offset = 4;
1104 else
1106 /* FIXME: it's really 3 bytes */
1107 mode = HImode;
1108 offset = 2;
1111 ra_mem =
1112 gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1113 return copy_to_mode_reg (mode, ra_mem);
1116 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1118 m32c_incoming_return_addr_rtx (void)
1120 /* we want [sp] */
1121 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1124 /* Exception Handling Support */
1126 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1127 pointers. */
1129 m32c_eh_return_data_regno (int n)
1131 switch (n)
1133 case 0:
1134 return A0_REGNO;
1135 case 1:
1136 if (TARGET_A16)
1137 return R3_REGNO;
1138 else
1139 return R1_REGNO;
1140 default:
1141 return INVALID_REGNUM;
1145 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1146 m32c_emit_eh_epilogue. */
1148 m32c_eh_return_stackadj_rtx (void)
1150 if (!cfun->machine->eh_stack_adjust)
1152 rtx sa;
1154 sa = gen_rtx_REG (Pmode, R0_REGNO);
1155 cfun->machine->eh_stack_adjust = sa;
1157 return cfun->machine->eh_stack_adjust;
1160 /* Registers That Address the Stack Frame */
1162 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1163 the original spec called for dwarf numbers to vary with register
1164 width as well, for example, r0l, r0, and r2r0 would each have
1165 different dwarf numbers. GCC doesn't support this, and we don't do
1166 it, and gdb seems to like it this way anyway. */
1167 unsigned int
1168 m32c_dwarf_frame_regnum (int n)
1170 switch (n)
1172 case R0_REGNO:
1173 return 5;
1174 case R1_REGNO:
1175 return 6;
1176 case R2_REGNO:
1177 return 7;
1178 case R3_REGNO:
1179 return 8;
1180 case A0_REGNO:
1181 return 9;
1182 case A1_REGNO:
1183 return 10;
1184 case FB_REGNO:
1185 return 11;
1186 case SB_REGNO:
1187 return 19;
1189 case SP_REGNO:
1190 return 12;
1191 case PC_REGNO:
1192 return 13;
1193 default:
1194 return DWARF_FRAME_REGISTERS + 1;
1198 /* The frame looks like this:
1200 ap -> +------------------------------
1201 | Return address (3 or 4 bytes)
1202 | Saved FB (2 or 4 bytes)
1203 fb -> +------------------------------
1204 | local vars
1205 | register saves fb
1206 | through r0 as needed
1207 sp -> +------------------------------
1210 /* We use this to wrap all emitted insns in the prologue. */
1211 static rtx
1212 F (rtx x)
1214 RTX_FRAME_RELATED_P (x) = 1;
1215 return x;
1218 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1219 how much the stack pointer moves for each, for each cpu family. */
1220 static struct
/* GCC hard register number for this table slot.  */
1222 int reg1;
/* Bit for this register in the pushm/popm instruction's mask operand
   (see m32c_pushm_popm, which ORs these into reg_mask).  */
1223 int bit;
/* Bytes the stack pointer moves for this register on A16 parts.  */
1224 int a16_bytes;
/* Bytes the stack pointer moves for this register on A24 parts.  */
1225 int a24_bytes;
1226 } pushm_info[] =
1228 /* These are in reverse push (nearest-to-sp) order. */
1229 { R0_REGNO, 0x80, 2, 2 },
1230 { R1_REGNO, 0x40, 2, 2 },
1231 { R2_REGNO, 0x20, 2, 2 },
1232 { R3_REGNO, 0x10, 2, 2 },
/* The remaining registers take 4 bytes on A24 parts.  */
1233 { A0_REGNO, 0x08, 2, 4 },
1234 { A1_REGNO, 0x04, 2, 4 },
1235 { SB_REGNO, 0x02, 2, 4 },
1236 { FB_REGNO, 0x01, 2, 4 }
/* Number of entries in pushm_info.  */
1239 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1241 /* Returns TRUE if we need to save/restore the given register. We
1242 save everything for exception handlers, so that any register can be
1243 unwound. For interrupt handlers, we save everything if the handler
1244 calls something else (because we don't know what *that* function
1245 might do), but try to be a bit smarter if the handler is a leaf
1246 function. We always save $a0, though, because we use that in the
1247 epilogue to copy $fb to $sp. */
1248 static int
1249 need_to_save (int regno)
1251 if (fixed_regs[regno])
1252 return 0;
1253 if (crtl->calls_eh_return)
1254 return 1;
1255 if (regno == FP_REGNO)
1256 return 0;
1257 if (cfun->machine->is_interrupt
1258 && (!cfun->machine->is_leaf || regno == A0_REGNO))
1259 return 1;
1260 if (df_regs_ever_live_p (regno)
1261 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1262 return 1;
1263 return 0;
1266 /* This function contains all the intelligence about saving and
1267 restoring registers. It always figures out the register save set.
1268 When called with PP_justcount, it merely returns the size of the
1269 save set (for eliminating the frame pointer, for example). When
1270 called with PP_pushm or PP_popm, it emits the appropriate
1271 instructions for saving (pushm) or restoring (popm) the
1272 registers. */
1273 static int
1274 m32c_pushm_popm (Push_Pop_Type ppt)
1276 int reg_mask = 0;
1277 int byte_count = 0, bytes;
1278 int i;
/* DWARF save expressions, one per pushed register (PP_pushm only).  */
1279 rtx dwarf_set[PUSHM_N];
1280 int n_dwarfs = 0;
/* Mask of pushm bits we must NOT save because the return value is
   delivered in those registers.  */
1281 int nosave_mask = 0;
/* When the function returns its value in registers (a PARALLEL
   return rtx) and we are not an EH/interrupt function, exclude the
   return-value registers from the save set.  */
1283 if (crtl->return_rtx
1284 && GET_CODE (crtl->return_rtx) == PARALLEL
1285 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1287 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1288 rtx rv = XEXP (exp, 0);
1289 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1291 if (rv_bytes > 2)
1292 nosave_mask |= 0x20; /* PSI, SI */
1293 else
/* NOTE(review): the "DF" label on the <=2-byte branch looks mismatched
   with the size test — confirm which modes each mask is meant for.  */
1294 nosave_mask |= 0xf0; /* DF */
1295 if (rv_bytes > 4)
1296 nosave_mask |= 0x50; /* DI */
/* Walk the pushm table, accumulating the register mask and byte
   count, and (for PP_pushm) building DWARF save expressions.  */
1299 for (i = 0; i < (int) PUSHM_N; i++)
1301 /* Skip if neither register needs saving. */
1302 if (!need_to_save (pushm_info[i].reg1))
1303 continue;
1305 if (pushm_info[i].bit & nosave_mask)
1306 continue;
1308 reg_mask |= pushm_info[i].bit;
1309 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1311 if (ppt == PP_pushm)
1313 enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1314 rtx addr;
1316 /* Always use stack_pointer_rtx instead of calling
1317 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1318 that there is a single rtx representing the stack pointer,
1319 namely stack_pointer_rtx, and uses == to recognize it. */
1320 addr = stack_pointer_rtx;
1322 if (byte_count != 0)
1323 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1325 dwarf_set[n_dwarfs++] =
1326 gen_rtx_SET (VOIDmode,
1327 gen_rtx_MEM (mode, addr),
1328 gen_rtx_REG (mode, pushm_info[i].reg1));
1329 F (dwarf_set[n_dwarfs - 1]);
1332 byte_count += bytes;
/* Interrupt handlers record the pushm mask separately (the mask is
   consumed elsewhere; note reg_mask and byte_count are reset here and
   byte_count is then re-accumulated for the mem0..mem7 saves).  */
1335 if (cfun->machine->is_interrupt)
1337 cfun->machine->intr_pushm = reg_mask & 0xfe;
1338 reg_mask = 0;
1339 byte_count = 0;
1342 if (cfun->machine->is_interrupt)
1343 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1344 if (need_to_save (i))
1346 byte_count += 2;
1347 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
/* Emit the pushm with a REG_FRAME_RELATED_EXPR note describing the
   stack-pointer adjustment and each individual register save.  */
1350 if (ppt == PP_pushm && byte_count)
1352 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1353 rtx pushm;
1355 if (reg_mask)
1357 XVECEXP (note, 0, 0)
1358 = gen_rtx_SET (VOIDmode,
1359 stack_pointer_rtx,
1360 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1361 stack_pointer_rtx,
1362 GEN_INT (-byte_count)));
1363 F (XVECEXP (note, 0, 0));
1365 for (i = 0; i < n_dwarfs; i++)
1366 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1368 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1370 REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1371 REG_NOTES (pushm));
/* Interrupt handlers additionally push mem0..mem7 individually.  */
1374 if (cfun->machine->is_interrupt)
1375 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1376 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1378 if (TARGET_A16)
1379 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1380 else
1381 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1382 F (pushm);
/* Restore path: pop the mem registers first (reverse of push order),
   then popm the general registers.  */
1385 if (ppt == PP_popm && byte_count)
1387 if (cfun->machine->is_interrupt)
1388 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1389 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1391 if (TARGET_A16)
1392 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1393 else
1394 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1396 if (reg_mask)
1397 emit_insn (gen_popm (GEN_INT (reg_mask)));
/* Size of the save set in bytes — used by PP_justcount callers such
   as m32c_initial_elimination_offset.  */
1400 return byte_count;
1403 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1404 diagrams our call frame. */
1406 m32c_initial_elimination_offset (int from, int to)
1408 int ofs = 0;
1410 if (from == AP_REGNO)
1412 if (TARGET_A16)
1413 ofs += 5;
1414 else
1415 ofs += 8;
1418 if (to == SP_REGNO)
1420 ofs += m32c_pushm_popm (PP_justcount);
1421 ofs += get_frame_size ();
1424 /* Account for push rounding. */
1425 if (TARGET_A24)
1426 ofs = (ofs + 1) & ~1;
1427 #if DEBUG0
1428 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1429 to, ofs);
1430 #endif
1431 return ofs;
1434 /* Passing Function Arguments on the Stack */
1436 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1437 M32C has word stacks. */
1439 m32c_push_rounding (int n)
1441 if (TARGET_R8C || TARGET_M16C)
1442 return n;
1443 return (n + 1) & ~1;
1446 /* Passing Arguments in Registers */
1448 /* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1449 partly on stack. If our function returns a struct, a pointer to a
1450 buffer for it is at the top of the stack (last thing pushed). The
1451 first few real arguments may be in registers as follows:
1453 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1454 arg2 in r2 if it's HI (else pushed on stack)
1455 rest on stack
1456 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1457 rest on stack
1459 Structs are not passed in registers, even if they fit. Only
1460 integer and pointer types are passed in registers.
1462 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1463 r2 if it fits. */
1465 m32c_function_arg (CUMULATIVE_ARGS * ca,
1466 enum machine_mode mode, tree type, int named)
1468 /* Can return a reg, parallel, or 0 for stack */
1469 rtx rv = NULL_RTX;
1470 #if DEBUG0
1471 fprintf (stderr, "func_arg %d (%s, %d)\n",
1472 ca->parm_num, mode_name[mode], named);
1473 debug_tree (type);
1474 #endif
1476 if (mode == VOIDmode)
1477 return GEN_INT (0);
1479 if (ca->force_mem || !named)
1481 #if DEBUG0
1482 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1483 named);
1484 #endif
1485 return NULL_RTX;
1488 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1489 return NULL_RTX;
1491 if (type && AGGREGATE_TYPE_P (type))
1492 return NULL_RTX;
1494 switch (ca->parm_num)
1496 case 1:
1497 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1498 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1499 break;
1501 case 2:
1502 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1503 rv = gen_rtx_REG (mode, R2_REGNO);
1504 break;
1507 #if DEBUG0
1508 debug_rtx (rv);
1509 #endif
1510 return rv;
1513 #undef TARGET_PASS_BY_REFERENCE
1514 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1515 static bool
1516 m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1517 enum machine_mode mode ATTRIBUTE_UNUSED,
1518 const_tree type ATTRIBUTE_UNUSED,
1519 bool named ATTRIBUTE_UNUSED)
1521 return 0;
1524 /* Implements INIT_CUMULATIVE_ARGS. */
1525 void
1526 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1527 tree fntype,
1528 rtx libname ATTRIBUTE_UNUSED,
1529 tree fndecl,
1530 int n_named_args ATTRIBUTE_UNUSED)
1532 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1533 ca->force_mem = 1;
1534 else
1535 ca->force_mem = 0;
1536 ca->parm_num = 1;
1539 /* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1540 returning structures, so we always reset that. Otherwise, we only
1541 need to know the sequence number of the argument to know what to do
1542 with it. */
1543 void
1544 m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1545 enum machine_mode mode ATTRIBUTE_UNUSED,
1546 tree type ATTRIBUTE_UNUSED,
1547 int named ATTRIBUTE_UNUSED)
1549 if (ca->force_mem)
1550 ca->force_mem = 0;
1551 else
1552 ca->parm_num++;
1555 /* Implements FUNCTION_ARG_REGNO_P. */
1557 m32c_function_arg_regno_p (int r)
1559 if (TARGET_A24)
1560 return (r == R0_REGNO);
1561 return (r == R1_REGNO || r == R2_REGNO);
1564 /* HImode and PSImode are the two "native" modes as far as GCC is
1565 concerned, but the chips also support a 32-bit mode which is used
1566 for some opcodes in R8C/M16C and for reset vectors and such. */
1567 #undef TARGET_VALID_POINTER_MODE
1568 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1569 static bool
1570 m32c_valid_pointer_mode (enum machine_mode mode)
1572 if (mode == HImode
1573 || mode == PSImode
1574 || mode == SImode
1576 return 1;
1577 return 0;
1580 /* How Scalar Function Values Are Returned */
1582 /* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1583 combination of registers starting there (r2r0 for longs, r3r1r2r0
1584 for long long, r3r2r1r0 for doubles), except that that ABI
1585 currently doesn't work because it ends up using all available
1586 general registers and gcc often can't compile it. So, instead, we
1587 return anything bigger than 16 bits in "mem0" (effectively, a
1588 memory location). */
1590 m32c_libcall_value (enum machine_mode mode)
1592 /* return reg or parallel */
/* The whole multi-register return ABI below is disabled — see the
   FIXME; only the two returns at the bottom are live code.  */
1593 #if 0
1594 /* FIXME: GCC has difficulty returning large values in registers,
1595 because that ties up most of the general registers and gives the
1596 register allocator little to work with. Until we can resolve
1597 this, large values are returned in memory. */
1598 if (mode == DFmode)
1600 rtx rv;
1602 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1603 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1604 gen_rtx_REG (HImode,
1605 R0_REGNO),
1606 GEN_INT (0));
1607 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1608 gen_rtx_REG (HImode,
1609 R1_REGNO),
1610 GEN_INT (2));
1611 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1612 gen_rtx_REG (HImode,
1613 R2_REGNO),
1614 GEN_INT (4));
1615 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1616 gen_rtx_REG (HImode,
1617 R3_REGNO),
1618 GEN_INT (6));
1619 return rv;
1622 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1624 rtx rv;
1626 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1627 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1628 gen_rtx_REG (mode,
1629 R0_REGNO),
1630 GEN_INT (0));
1631 return rv;
1633 #endif
/* Live code: anything wider than 16 bits is "returned" in the
   memory-backed mem0 pseudo register; 16 bits or less goes in $r0.  */
1635 if (GET_MODE_SIZE (mode) > 2)
1636 return gen_rtx_REG (mode, MEM0_REGNO);
1637 return gen_rtx_REG (mode, R0_REGNO);
1640 /* Implements FUNCTION_VALUE. Functions and libcalls have the same
1641 conventions. */
1643 m32c_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
1645 /* return reg or parallel */
1646 const enum machine_mode mode = TYPE_MODE (valtype);
1647 return m32c_libcall_value (mode);
1650 /* How Large Values Are Returned */
1652 /* We return structures by pushing the address on the stack, even if
1653 we use registers for the first few "real" arguments. */
1654 #undef TARGET_STRUCT_VALUE_RTX
1655 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1656 static rtx
1657 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1658 int incoming ATTRIBUTE_UNUSED)
1660 return 0;
1663 /* Function Entry and Exit */
1665 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1667 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1669 if (cfun->machine->is_interrupt)
1670 return 1;
1671 return 0;
1674 /* Implementing the Varargs Macros */
1676 #undef TARGET_STRICT_ARGUMENT_NAMING
1677 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1678 static bool
1679 m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1681 return 1;
1684 /* Trampolines for Nested Functions */
1687 m16c:
1688 1 0000 75C43412 mov.w #0x1234,a0
1689 2 0004 FC000000 jmp.a label
1691 m32c:
1692 1 0000 BC563412 mov.l:s #0x123456,a0
1693 2 0004 CC000000 jmp.a label
1696 /* Implements TRAMPOLINE_SIZE. */
1698 m32c_trampoline_size (void)
1700 /* Allocate extra space so we can avoid the messy shifts when we
1701 initialize the trampoline; we just write past the end of the
1702 opcode. */
1703 return TARGET_A16 ? 8 : 10;
/* Implements TRAMPOLINE_ALIGNMENT.  */
int
m32c_trampoline_alignment (void)
{
  /* Word alignment suffices for the trampoline code.  */
  return 2;
}
1713 /* Implements INITIALIZE_TRAMPOLINE. */
1714 void
1715 m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1717 #define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1718 if (TARGET_A16)
1720 /* Note: we subtract a "word" because the moves want signed
1721 constants, not unsigned constants. */
1722 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1723 emit_move_insn (A0 (HImode, 2), chainval);
1724 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1725 /* We use 16-bit addresses here, but store the zero to turn it
1726 into a 24-bit offset. */
1727 emit_move_insn (A0 (HImode, 5), function);
1728 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1730 else
1732 /* Note that the PSI moves actually write 4 bytes. Make sure we
1733 write stuff out in the right order, and leave room for the
1734 extra byte at the end. */
1735 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1736 emit_move_insn (A0 (PSImode, 1), chainval);
1737 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1738 emit_move_insn (A0 (PSImode, 5), function);
1740 #undef A0
1743 /* Implicit Calls to Library Routines */
1745 #undef TARGET_INIT_LIBFUNCS
1746 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1747 static void
1748 m32c_init_libfuncs (void)
1750 /* We do this because the M32C has an HImode operand, but the
1751 M16C has an 8-bit operand. Since gcc looks at the match data
1752 and not the expanded rtl, we have to reset the optab so that
1753 the right modes are found. */
1754 if (TARGET_A24)
1756 optab_handler (cstore_optab, QImode)->insn_code = CODE_FOR_cstoreqi4_24;
1757 optab_handler (cstore_optab, HImode)->insn_code = CODE_FOR_cstorehi4_24;
1758 optab_handler (cstore_optab, PSImode)->insn_code = CODE_FOR_cstorepsi4_24;
1762 /* Addressing Modes */
1764 /* The r8c/m32c family supports a wide range of non-orthogonal
1765 addressing modes, including the ability to double-indirect on *some*
1766 of them. Not all insns support all modes, either, but we rely on
1767 predicates and constraints to deal with that. */
1768 #undef TARGET_LEGITIMATE_ADDRESS_P
1769 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1770 bool
1771 m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1773 int mode_adjust;
/* Constant addresses are always legitimate.  */
1774 if (CONSTANT_P (x))
1775 return 1;
1777 /* Wide references to memory will be split after reload, so we must
1778 ensure that all parts of such splits remain legitimate
1779 addresses. */
1780 mode_adjust = GET_MODE_SIZE (mode) - 1;
1782 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
/* Auto-modify forms are only allowed on the stack pointer
   (push/pop-style access).  */
1783 if (GET_CODE (x) == PRE_DEC
1784 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1786 return (GET_CODE (XEXP (x, 0)) == REG
1787 && REGNO (XEXP (x, 0)) == SP_REGNO);
1790 #if 0
1791 /* This is the double indirection detection, but it currently
1792 doesn't work as cleanly as this code implies, so until we've had
1793 a chance to debug it, leave it disabled. */
1794 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1796 #if DEBUG_DOUBLE
1797 fprintf (stderr, "double indirect\n");
1798 #endif
1799 x = XEXP (x, 0);
1801 #endif
/* Classify X's shape; encode_pattern presumably fills the pattern /
   patternr[] buffers consulted by RTX_IS below (defined earlier in
   this file).  */
1803 encode_pattern (x);
/* Bare register.  */
1804 if (RTX_IS ("r"))
1806 /* Most indexable registers can be used without displacements,
1807 although some of them will be emitted with an explicit zero
1808 to please the assembler. */
1809 switch (REGNO (patternr[0]))
1811 case A0_REGNO:
1812 case A1_REGNO:
1813 case SB_REGNO:
1814 case FB_REGNO:
1815 case SP_REGNO:
1816 return 1;
1818 default:
1819 if (IS_PSEUDO (patternr[0], strict))
1820 return 1;
1821 return 0;
/* Register plus constant displacement; allowed ranges depend on both
   the base register and the chip family.  */
1824 if (RTX_IS ("+ri"))
1826 /* This is more interesting, because different base registers
1827 allow for different displacements - both range and signedness
1828 - and it differs from chip series to chip series too. */
1829 int rn = REGNO (patternr[1]);
1830 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1831 switch (rn)
1833 case A0_REGNO:
1834 case A1_REGNO:
1835 case SB_REGNO:
1836 /* The syntax only allows positive offsets, but when the
1837 offsets span the entire memory range, we can simulate
1838 negative offsets by wrapping. */
1839 if (TARGET_A16)
1840 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1841 if (rn == SB_REGNO)
1842 return (offs >= 0 && offs <= 65535 - mode_adjust);
1843 /* A0 or A1 */
1844 return (offs >= -16777216 && offs <= 16777215);
1846 case FB_REGNO:
1847 if (TARGET_A16)
1848 return (offs >= -128 && offs <= 127 - mode_adjust);
1849 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1851 case SP_REGNO:
1852 return (offs >= -128 && offs <= 127 - mode_adjust);
1854 default:
1855 if (IS_PSEUDO (patternr[1], strict))
1856 return 1;
1857 return 0;
/* Register plus symbolic (possibly plus constant) displacement; the
   final offset isn't known here, so only wide-range bases qualify.  */
1860 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1862 rtx reg = patternr[1];
1864 /* We don't know where the symbol is, so only allow base
1865 registers which support displacements spanning the whole
1866 address range. */
1867 switch (REGNO (reg))
1869 case A0_REGNO:
1870 case A1_REGNO:
1871 /* $sb needs a secondary reload, but since it's involved in
1872 memory address reloads too, we don't deal with it very
1873 well. */
1874 /* case SB_REGNO: */
1875 return 1;
1876 default:
1877 if (IS_PSEUDO (reg, strict))
1878 return 1;
1879 return 0;
/* No other shape is a legitimate address.  */
1882 return 0;
1885 /* Implements REG_OK_FOR_BASE_P. */
1887 m32c_reg_ok_for_base_p (rtx x, int strict)
1889 if (GET_CODE (x) != REG)
1890 return 0;
1891 switch (REGNO (x))
1893 case A0_REGNO:
1894 case A1_REGNO:
1895 case SB_REGNO:
1896 case FB_REGNO:
1897 case SP_REGNO:
1898 return 1;
1899 default:
1900 if (IS_PSEUDO (x, strict))
1901 return 1;
1902 return 0;
1906 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1907 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1908 like this:
1909 EB 4B FF mova -128[$fb],$a0
1910 D8 0C FF FF mov.w:Q #0,-1[$a0]
1912 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1913 displacements:
1914 7B F4 stc $fb,$a0
1915 77 54 00 01 sub #256,$a0
1916 D8 08 01 mov.w:Q #0,1[$a0]
1918 If we don't offset (i.e. offset by zero), we end up with:
1919 7B F4 stc $fb,$a0
1920 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1922 We have to subtract *something* so that we have a PLUS rtx to mark
1923 that we've done this reload. The -128 offset will never result in
1924 an 8-bit aN offset, and the payoff for the second case is five
1925 loads *if* those loads are within 256 bytes of the other end of the
1926 frame, so the third case seems best. Note that we subtract the
1927 zero, but detect that in the addhi3 pattern. */
/* Offset subtracted from $fb when copying it into an address
   register: zero, per the analysis above (used via -BIG_FB_ADJ in
   m32c_legitimize_reload_address).  */
1929 #define BIG_FB_ADJ 0
1931 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1932 worry about is frame base offsets, as $fb has a limited
1933 displacement range. We deal with this by attempting to reload $fb
1934 itself into an address register; that seems to result in the best
1935 code. */
1936 #undef TARGET_LEGITIMIZE_ADDRESS
1937 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1938 static rtx
1939 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1940 enum machine_mode mode)
1942 #if DEBUG0
1943 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1944 debug_rtx (x);
1945 fprintf (stderr, "\n");
1946 #endif
1948 if (GET_CODE (x) == PLUS
1949 && GET_CODE (XEXP (x, 0)) == REG
1950 && REGNO (XEXP (x, 0)) == FB_REGNO
1951 && GET_CODE (XEXP (x, 1)) == CONST_INT
1952 && (INTVAL (XEXP (x, 1)) < -128
1953 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1955 /* reload FB to A_REGS */
1956 rtx temp = gen_reg_rtx (Pmode);
1957 x = copy_rtx (x);
1958 emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1959 XEXP (x, 0) = temp;
1962 return x;
1965 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1967 m32c_legitimize_reload_address (rtx * x,
1968 enum machine_mode mode,
1969 int opnum,
1970 int type, int ind_levels ATTRIBUTE_UNUSED)
1972 #if DEBUG0
1973 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1974 mode_name[mode]);
1975 debug_rtx (*x);
1976 #endif
1978 /* At one point, this function tried to get $fb copied to an address
1979 register, which in theory would maximize sharing, but gcc was
1980 *also* still trying to reload the whole address, and we'd run out
1981 of address registers. So we let gcc do the naive (but safe)
1982 reload instead, when the above function doesn't handle it for
1985 The code below is a second attempt at the above. */
/* Case 1: ($fb + out-of-range offset).  Rewrite as
   (($fb - BIG_FB_ADJ) + adjusted offset) and ask reload to put the
   inner sum in an address register; BIG_FB_ADJ is currently zero.  */
1987 if (GET_CODE (*x) == PLUS
1988 && GET_CODE (XEXP (*x, 0)) == REG
1989 && REGNO (XEXP (*x, 0)) == FB_REGNO
1990 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1991 && (INTVAL (XEXP (*x, 1)) < -128
1992 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1994 rtx sum;
1995 int offset = INTVAL (XEXP (*x, 1));
1996 int adjustment = -BIG_FB_ADJ;
1998 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1999 GEN_INT (adjustment));
2000 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
/* NOTE(review): RELOAD_OTHER is weakened to
   RELOAD_FOR_OTHER_ADDRESS here; the reason isn't documented in this
   file — confirm against reload internals.  */
2001 if (type == RELOAD_OTHER)
2002 type = RELOAD_FOR_OTHER_ADDRESS;
2003 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2004 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2005 type);
2006 return 1;
/* Case 2: already-split (($fb + adj) + offset) addresses — reload
   the inner ($fb + adj) sum into an address register.  */
2009 if (GET_CODE (*x) == PLUS
2010 && GET_CODE (XEXP (*x, 0)) == PLUS
2011 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2012 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2013 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2014 && GET_CODE (XEXP (*x, 1)) == CONST_INT
2017 if (type == RELOAD_OTHER)
2018 type = RELOAD_FOR_OTHER_ADDRESS;
2019 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2020 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2021 type);
2022 return 1;
/* Anything else: let reload handle it the default way.  */
2025 return 0;
2028 /* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2029 so we can allow anything. */
2031 m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2033 return 1;
2037 /* Condition Code Status */
2039 #undef TARGET_FIXED_CONDITION_CODE_REGS
2040 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2041 static bool
2042 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2044 *p1 = FLG_REGNO;
2045 *p2 = INVALID_REGNUM;
2046 return true;
2049 /* Describing Relative Costs of Operations */
2051 /* Implements REGISTER_MOVE_COST. We make impossible moves
2052 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2053 no opcodes to do that). We also discourage use of mem* registers
2054 since they're really memory. */
2056 m32c_register_move_cost (enum machine_mode mode, int from, int to)
2058 int cost = COSTS_N_INSNS (3);
2059 int cc = class_contents[from][0] | class_contents[to][0];
2060 /* FIXME: pick real values, but not 2 for now. */
2061 if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2063 if (!(cc & ~class_contents[R23_REGS][0]))
2064 cost = COSTS_N_INSNS (1000);
2065 else
2066 cost = COSTS_N_INSNS (80);
2069 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2070 cost = COSTS_N_INSNS (1000);
2072 if (classes_intersect (from, CR_REGS))
2073 cost += COSTS_N_INSNS (5);
2075 if (classes_intersect (to, CR_REGS))
2076 cost += COSTS_N_INSNS (5);
2078 if (from == MEM_REGS || to == MEM_REGS)
2079 cost += COSTS_N_INSNS (50);
2080 else if (classes_intersect (from, MEM_REGS)
2081 || classes_intersect (to, MEM_REGS))
2082 cost += COSTS_N_INSNS (10);
2084 #if DEBUG0
2085 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2086 mode_name[mode], class_names[from], class_names[to], cost);
2087 #endif
2088 return cost;
2091 /* Implements MEMORY_MOVE_COST. */
2093 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2094 int reg_class ATTRIBUTE_UNUSED,
2095 int in ATTRIBUTE_UNUSED)
2097 /* FIXME: pick real values. */
2098 return COSTS_N_INSNS (10);
2101 /* Here we try to describe when we use multiple opcodes for one RTX so
2102 that gcc knows when to use them. */
2103 #undef TARGET_RTX_COSTS
2104 #define TARGET_RTX_COSTS m32c_rtx_costs
2105 static bool
2106 m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2107 bool speed ATTRIBUTE_UNUSED)
2109 switch (code)
2111 case REG:
2112 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2113 *total += COSTS_N_INSNS (500);
2114 else
2115 *total += COSTS_N_INSNS (1);
2116 return true;
2118 case ASHIFT:
2119 case LSHIFTRT:
2120 case ASHIFTRT:
2121 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2123 /* mov.b r1l, r1h */
2124 *total += COSTS_N_INSNS (1);
2125 return true;
2127 if (INTVAL (XEXP (x, 1)) > 8
2128 || INTVAL (XEXP (x, 1)) < -8)
2130 /* mov.b #N, r1l */
2131 /* mov.b r1l, r1h */
2132 *total += COSTS_N_INSNS (2);
2133 return true;
2135 return true;
2137 case LE:
2138 case LEU:
2139 case LT:
2140 case LTU:
2141 case GT:
2142 case GTU:
2143 case GE:
2144 case GEU:
2145 case NE:
2146 case EQ:
2147 if (outer_code == SET)
2149 *total += COSTS_N_INSNS (2);
2150 return true;
2152 break;
2154 case ZERO_EXTRACT:
2156 rtx dest = XEXP (x, 0);
2157 rtx addr = XEXP (dest, 0);
2158 switch (GET_CODE (addr))
2160 case CONST_INT:
2161 *total += COSTS_N_INSNS (1);
2162 break;
2163 case SYMBOL_REF:
2164 *total += COSTS_N_INSNS (3);
2165 break;
2166 default:
2167 *total += COSTS_N_INSNS (2);
2168 break;
2170 return true;
2172 break;
2174 default:
2175 /* Reasonable default. */
2176 if (TARGET_A16 && GET_MODE(x) == SImode)
2177 *total += COSTS_N_INSNS (2);
2178 break;
2180 return false;
2183 #undef TARGET_ADDRESS_COST
2184 #define TARGET_ADDRESS_COST m32c_address_cost
2185 static int
2186 m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2188 int i;
2189 /* fprintf(stderr, "\naddress_cost\n");
2190 debug_rtx(addr);*/
2191 switch (GET_CODE (addr))
2193 case CONST_INT:
2194 i = INTVAL (addr);
2195 if (i == 0)
2196 return COSTS_N_INSNS(1);
2197 if (0 < i && i <= 255)
2198 return COSTS_N_INSNS(2);
2199 if (0 < i && i <= 65535)
2200 return COSTS_N_INSNS(3);
2201 return COSTS_N_INSNS(4);
2202 case SYMBOL_REF:
2203 return COSTS_N_INSNS(4);
2204 case REG:
2205 return COSTS_N_INSNS(1);
2206 case PLUS:
2207 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2209 i = INTVAL (XEXP (addr, 1));
2210 if (i == 0)
2211 return COSTS_N_INSNS(1);
2212 if (0 < i && i <= 255)
2213 return COSTS_N_INSNS(2);
2214 if (0 < i && i <= 65535)
2215 return COSTS_N_INSNS(3);
2217 return COSTS_N_INSNS(4);
2218 default:
2219 return 0;
2223 /* Defining the Output Assembler Language */
2225 /* The Overall Framework of an Assembler File */
2227 #undef TARGET_HAVE_NAMED_SECTIONS
2228 #define TARGET_HAVE_NAMED_SECTIONS true
2230 /* Output of Data */
2232 /* We may have 24 bit sizes, which is the native address size.
2233 Currently unused, but provided for completeness. */
2234 #undef TARGET_ASM_INTEGER
2235 #define TARGET_ASM_INTEGER m32c_asm_integer
2236 static bool
2237 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2239 switch (size)
2241 case 3:
2242 fprintf (asm_out_file, "\t.3byte\t");
2243 output_addr_const (asm_out_file, x);
2244 fputc ('\n', asm_out_file);
2245 return true;
2246 case 4:
2247 if (GET_CODE (x) == SYMBOL_REF)
2249 fprintf (asm_out_file, "\t.long\t");
2250 output_addr_const (asm_out_file, x);
2251 fputc ('\n', asm_out_file);
2252 return true;
2254 break;
2256 return default_assemble_integer (x, size, aligned_p);
2259 /* Output of Assembler Instructions */
2261 /* We use a lookup table because the addressing modes are non-orthogonal. */
/* Each entry pairs a PRINT_OPERAND code letter (0 = none) and an
   operand shape (as produced by encode_pattern) with an output
   format.  In the format, digits select the corresponding patternr[]
   entry for output and other characters are emitted (m32c_print_operand
   walks the format exactly this way; '\\' quotes the next character).  */
2263 static struct
2265 char code;
2266 char const *pattern;
2267 char const *format;
2269 const conversions[] = {
2270 { 0, "r", "0" },
2272 { 0, "mr", "z[1]" },
2273 { 0, "m+ri", "3[2]" },
2274 { 0, "m+rs", "3[2]" },
2275 { 0, "m+r+si", "4+5[2]" },
2276 { 0, "ms", "1" },
2277 { 0, "mi", "1" },
2278 { 0, "m+si", "2+3" },
2280 { 0, "mmr", "[z[2]]" },
2281 { 0, "mm+ri", "[4[3]]" },
2282 { 0, "mm+rs", "[4[3]]" },
2283 { 0, "mm+r+si", "[5+6[3]]" },
2284 { 0, "mms", "[[2]]" },
2285 { 0, "mmi", "[[2]]" },
2286 { 0, "mm+si", "[4[3]]" },
2288 { 0, "i", "#0" },
2289 { 0, "s", "#0" },
2290 { 0, "+si", "#1+2" },
2291 { 0, "l", "#0" },
2293 { 'l', "l", "0" },
2294 { 'd', "i", "0" },
2295 { 'd', "s", "0" },
2296 { 'd', "+si", "1+2" },
2297 { 'D', "i", "0" },
2298 { 'D', "s", "0" },
2299 { 'D', "+si", "1+2" },
2300 { 'x', "i", "#0" },
2301 { 'X', "i", "#0" },
2302 { 'm', "i", "#0" },
2303 { 'b', "i", "#0" },
2304 { 'B', "i", "0" },
2305 { 'p', "i", "0" },
/* Sentinel terminating the table scan in m32c_print_operand.  */
2307 { 0, 0, 0 }
2310 /* This is in order according to the bitfield that pushm/popm use. */
/* Index 0 ("fb") corresponds to the 0x01 bit and index 7 ("r0") to
   the 0x80 bit, matching the pushm_info table above.  */
2311 static char const *pushm_regs[] = {
2312 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2315 /* Implements PRINT_OPERAND. */
2316 void
2317 m32c_print_operand (FILE * file, rtx x, int code)
2319 int i, j, b;
2320 const char *comma;
2321 HOST_WIDE_INT ival;
2322 int unsigned_const = 0;
2323 int force_sign;
2325 /* Multiplies; constants are converted to sign-extended format but
2326 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2327 need. */
2328 if (code == 'u')
2330 unsigned_const = 2;
2331 code = 0;
2333 if (code == 'U')
2335 unsigned_const = 1;
2336 code = 0;
2338 /* This one is only for debugging; you can put it in a pattern to
2339 force this error. */
2340 if (code == '!')
2342 fprintf (stderr, "dj: unreviewed pattern:");
2343 if (current_output_insn)
2344 debug_rtx (current_output_insn);
2345 gcc_unreachable ();
2347 /* PSImode operations are either .w or .l depending on the target. */
2348 if (code == '&')
2350 if (TARGET_A16)
2351 fprintf (file, "w");
2352 else
2353 fprintf (file, "l");
2354 return;
2356 /* Inverted conditionals. */
2357 if (code == 'C')
2359 switch (GET_CODE (x))
2361 case LE:
2362 fputs ("gt", file);
2363 break;
2364 case LEU:
2365 fputs ("gtu", file);
2366 break;
2367 case LT:
2368 fputs ("ge", file);
2369 break;
2370 case LTU:
2371 fputs ("geu", file);
2372 break;
2373 case GT:
2374 fputs ("le", file);
2375 break;
2376 case GTU:
2377 fputs ("leu", file);
2378 break;
2379 case GE:
2380 fputs ("lt", file);
2381 break;
2382 case GEU:
2383 fputs ("ltu", file);
2384 break;
2385 case NE:
2386 fputs ("eq", file);
2387 break;
2388 case EQ:
2389 fputs ("ne", file);
2390 break;
2391 default:
2392 gcc_unreachable ();
2394 return;
2396 /* Regular conditionals. */
2397 if (code == 'c')
2399 switch (GET_CODE (x))
2401 case LE:
2402 fputs ("le", file);
2403 break;
2404 case LEU:
2405 fputs ("leu", file);
2406 break;
2407 case LT:
2408 fputs ("lt", file);
2409 break;
2410 case LTU:
2411 fputs ("ltu", file);
2412 break;
2413 case GT:
2414 fputs ("gt", file);
2415 break;
2416 case GTU:
2417 fputs ("gtu", file);
2418 break;
2419 case GE:
2420 fputs ("ge", file);
2421 break;
2422 case GEU:
2423 fputs ("geu", file);
2424 break;
2425 case NE:
2426 fputs ("ne", file);
2427 break;
2428 case EQ:
2429 fputs ("eq", file);
2430 break;
2431 default:
2432 gcc_unreachable ();
2434 return;
2436 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2437 operand. */
2438 if (code == 'h' && GET_MODE (x) == SImode)
2440 x = m32c_subreg (HImode, x, SImode, 0);
2441 code = 0;
2443 if (code == 'H' && GET_MODE (x) == SImode)
2445 x = m32c_subreg (HImode, x, SImode, 2);
2446 code = 0;
2448 if (code == 'h' && GET_MODE (x) == HImode)
2450 x = m32c_subreg (QImode, x, HImode, 0);
2451 code = 0;
2453 if (code == 'H' && GET_MODE (x) == HImode)
2455 /* We can't actually represent this as an rtx. Do it here. */
2456 if (GET_CODE (x) == REG)
2458 switch (REGNO (x))
2460 case R0_REGNO:
2461 fputs ("r0h", file);
2462 return;
2463 case R1_REGNO:
2464 fputs ("r1h", file);
2465 return;
2466 default:
2467 gcc_unreachable();
2470 /* This should be a MEM. */
2471 x = m32c_subreg (QImode, x, HImode, 1);
2472 code = 0;
2474 /* This is for BMcond, which always wants word register names. */
2475 if (code == 'h' && GET_MODE (x) == QImode)
2477 if (GET_CODE (x) == REG)
2478 x = gen_rtx_REG (HImode, REGNO (x));
2479 code = 0;
2481 /* 'x' and 'X' need to be ignored for non-immediates. */
2482 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2483 code = 0;
2485 encode_pattern (x);
2486 force_sign = 0;
2487 for (i = 0; conversions[i].pattern; i++)
2488 if (conversions[i].code == code
2489 && streq (conversions[i].pattern, pattern))
2491 for (j = 0; conversions[i].format[j]; j++)
2492 /* backslash quotes the next character in the output pattern. */
2493 if (conversions[i].format[j] == '\\')
2495 fputc (conversions[i].format[j + 1], file);
2496 j++;
2498 /* Digits in the output pattern indicate that the
2499 corresponding RTX is to be output at that point. */
2500 else if (ISDIGIT (conversions[i].format[j]))
2502 rtx r = patternr[conversions[i].format[j] - '0'];
2503 switch (GET_CODE (r))
2505 case REG:
2506 fprintf (file, "%s",
2507 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2508 break;
2509 case CONST_INT:
2510 switch (code)
2512 case 'b':
2513 case 'B':
2515 int v = INTVAL (r);
2516 int i = (int) exact_log2 (v);
2517 if (i == -1)
2518 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2519 if (i == -1)
2520 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2521 /* Bit position. */
2522 fprintf (file, "%d", i);
2524 break;
2525 case 'x':
2526 /* Unsigned byte. */
2527 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2528 INTVAL (r) & 0xff);
2529 break;
2530 case 'X':
2531 /* Unsigned word. */
2532 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2533 INTVAL (r) & 0xffff);
2534 break;
2535 case 'p':
2536 /* pushm and popm encode a register set into a single byte. */
2537 comma = "";
2538 for (b = 7; b >= 0; b--)
2539 if (INTVAL (r) & (1 << b))
2541 fprintf (file, "%s%s", comma, pushm_regs[b]);
2542 comma = ",";
2544 break;
2545 case 'm':
2546 /* "Minus". Output -X */
2547 ival = (-INTVAL (r) & 0xffff);
2548 if (ival & 0x8000)
2549 ival = ival - 0x10000;
2550 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2551 break;
2552 default:
2553 ival = INTVAL (r);
2554 if (conversions[i].format[j + 1] == '[' && ival < 0)
2556 /* We can simulate negative displacements by
2557 taking advantage of address space
2558 wrapping when the offset can span the
2559 entire address range. */
2560 rtx base =
2561 patternr[conversions[i].format[j + 2] - '0'];
2562 if (GET_CODE (base) == REG)
2563 switch (REGNO (base))
2565 case A0_REGNO:
2566 case A1_REGNO:
2567 if (TARGET_A24)
2568 ival = 0x1000000 + ival;
2569 else
2570 ival = 0x10000 + ival;
2571 break;
2572 case SB_REGNO:
2573 if (TARGET_A16)
2574 ival = 0x10000 + ival;
2575 break;
2578 else if (code == 'd' && ival < 0 && j == 0)
2579 /* The "mova" opcode is used to do addition by
2580 computing displacements, but again, we need
2581 displacements to be unsigned *if* they're
2582 the only component of the displacement
2583 (i.e. no "symbol-4" type displacement). */
2584 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2586 if (conversions[i].format[j] == '0')
2588 /* More conversions to unsigned. */
2589 if (unsigned_const == 2)
2590 ival &= 0xffff;
2591 if (unsigned_const == 1)
2592 ival &= 0xff;
2594 if (streq (conversions[i].pattern, "mi")
2595 || streq (conversions[i].pattern, "mmi"))
2597 /* Integers used as addresses are unsigned. */
2598 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2600 if (force_sign && ival >= 0)
2601 fputc ('+', file);
2602 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2603 break;
2605 break;
2606 case CONST_DOUBLE:
2607 /* We don't have const_double constants. If it
2608 happens, make it obvious. */
2609 fprintf (file, "[const_double 0x%lx]",
2610 (unsigned long) CONST_DOUBLE_HIGH (r));
2611 break;
2612 case SYMBOL_REF:
2613 assemble_name (file, XSTR (r, 0));
2614 break;
2615 case LABEL_REF:
2616 output_asm_label (r);
2617 break;
2618 default:
2619 fprintf (stderr, "don't know how to print this operand:");
2620 debug_rtx (r);
2621 gcc_unreachable ();
2624 else
2626 if (conversions[i].format[j] == 'z')
2628 /* Some addressing modes *must* have a displacement,
2629 so insert a zero here if needed. */
2630 int k;
2631 for (k = j + 1; conversions[i].format[k]; k++)
2632 if (ISDIGIT (conversions[i].format[k]))
2634 rtx reg = patternr[conversions[i].format[k] - '0'];
2635 if (GET_CODE (reg) == REG
2636 && (REGNO (reg) == SB_REGNO
2637 || REGNO (reg) == FB_REGNO
2638 || REGNO (reg) == SP_REGNO))
2639 fputc ('0', file);
2641 continue;
2643 /* Signed displacements off symbols need to have signs
2644 blended cleanly. */
2645 if (conversions[i].format[j] == '+'
2646 && (!code || code == 'D' || code == 'd')
2647 && ISDIGIT (conversions[i].format[j + 1])
2648 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2649 == CONST_INT))
2651 force_sign = 1;
2652 continue;
2654 fputc (conversions[i].format[j], file);
2656 break;
2658 if (!conversions[i].pattern)
2660 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2661 pattern);
2662 debug_rtx (x);
2663 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2666 return;
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
   above for descriptions of what these do.  */
int
m32c_print_operand_punct_valid_p (int c)
{
  /* Only `&' (PSImode size suffix) and `!' are punctuation codes.  */
  return (c == '&' || c == '!') ? 1 : 0;
}
2679 /* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2680 void
2681 m32c_print_operand_address (FILE * stream, rtx address)
2683 if (GET_CODE (address) == MEM)
2684 address = XEXP (address, 0);
2685 else
2686 /* cf: gcc.dg/asm-4.c. */
2687 gcc_assert (GET_CODE (address) == REG);
2689 m32c_print_operand (stream, address, 0);
2692 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2693 differently than general registers. */
2694 void
2695 m32c_output_reg_push (FILE * s, int regno)
2697 if (regno == FLG_REGNO)
2698 fprintf (s, "\tpushc\tflg\n");
2699 else
2700 fprintf (s, "\tpush.%c\t%s\n",
2701 " bwll"[reg_push_size (regno)], reg_names[regno]);
2704 /* Likewise for ASM_OUTPUT_REG_POP. */
2705 void
2706 m32c_output_reg_pop (FILE * s, int regno)
2708 if (regno == FLG_REGNO)
2709 fprintf (s, "\tpopc\tflg\n");
2710 else
2711 fprintf (s, "\tpop.%c\t%s\n",
2712 " bwll"[reg_push_size (regno)], reg_names[regno]);
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  The whole expansion is parenthesized so the macro is
   safe to use inside larger expressions (the ternary operators would
   otherwise bind surprisingly).  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl) ? TYPE_ATTRIBUTES (decl)		\
   : DECL_ATTRIBUTES (decl)				\
   ? DECL_ATTRIBUTES (decl)				\
   : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2725 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2726 static int
2727 interrupt_p (tree node ATTRIBUTE_UNUSED)
2729 tree list = M32C_ATTRIBUTES (node);
2730 while (list)
2732 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2733 return 1;
2734 list = TREE_CHAIN (list);
2736 return 0;
/* Attribute handler for "interrupt".  The attribute takes no
   arguments, so there is nothing to validate here; accept it as-is
   and leave *NO_ADD_ATTRS alone.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2749 /* Returns TRUE if given tree has the "function_vector" attribute. */
2751 m32c_special_page_vector_p (tree func)
2753 tree list;
2755 if (TREE_CODE (func) != FUNCTION_DECL)
2756 return 0;
2758 list = M32C_ATTRIBUTES (func);
2759 while (list)
2761 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2762 return 1;
2763 list = TREE_CHAIN (list);
2765 return 0;
/* Attribute handler for "function_vector".  Validates the context
   and the single integer argument; on any problem a warning is
   issued and *NO_ADD_ATTRS is set so the attribute is dropped.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
			 tree name ATTRIBUTE_UNUSED,
			 tree args ATTRIBUTE_UNUSED,
			 int flags ATTRIBUTE_UNUSED,
			 bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
	       "%qE attribute is not supported for R8C target",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
	       "%qE attribute applies only to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
	   || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 18 to 255",
	       name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
/* If the function is assigned the attribute 'function_vector', it
   returns the function vector number, otherwise returns zero.  */
int
current_function_special_page_vector (rtx x)
{
  int num;

  /* Only SYMBOL_REFs that were flagged as function-vector functions
     when their declaration was encoded can have the attribute.  */
  if ((GET_CODE(x) == SYMBOL_REF)
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      tree list;
      tree t = SYMBOL_REF_DECL (x);

      if (TREE_CODE (t) != FUNCTION_DECL)
	return 0;

      list = M32C_ATTRIBUTES (t);
      while (list)
	{
	  if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
	    {
	      /* The attribute's single argument is the vector number
		 (validated to 18..255 by function_vector_handler).  */
	      num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
	      return num;
	    }

	  list = TREE_CHAIN (list);
	}

      return 0;
    }
  else
    return 0;
}
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* { name, min_args, max_args, decl_required, type_required,
     function_type_required, handler }  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler},
  {"function_vector", 1, 1, true, false, false, function_vector_handler},
  {0, 0, 0, 0, 0, 0, 0}
};
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute combinations
   are considered compatible for type-merging purposes.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
/* Implements TARGET_INSERT_ATTRIBUTES.  This port adds no implicit
   attributes to any declaration.  */
static void
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
			tree * attr_ptr ATTRIBUTE_UNUSED)
{
  /* Nothing to do here.  */
}
/* Predicates */

/* This is a list of legal subregs of hard regs.  Consulted by
   m32c_illegal_subreg_p below; a subreg is legal only if a matching
   row exists and its legal_when condition holds.  */
static const struct {
  unsigned char outer_mode_size;  /* size of the subreg's mode, in bytes */
  unsigned char inner_mode_size;  /* size of the hard reg's mode, in bytes */
  unsigned char byte_mask;	  /* bit N set => SUBREG_BYTE N is legal */
  unsigned char legal_when;	  /* 1 = always; 16 = A16 targets; 24 = A24 targets */
  unsigned int regno;		  /* hard register number */
} legal_subregs[] = {
  /* QI subregs of HI registers.  */
  {1, 2, 0x03, 1, R0_REGNO},	/* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO},	/* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  /* QI subregs of SI registers.  */
  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  /* HI subregs of SI registers.  */
  {2, 4, 0x05, 1, R0_REGNO},	/* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO},	/* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO},	/* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO},	/* a1 a0 */

  /* SI subregs of DI registers.  */
  {4, 8, 0x55, 1, R0_REGNO},	/* r3 r1 r2 r0 */
};
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size subregs are just mode changes, always OK.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Only the low hard registers are restricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset to the single-bit form used by
     legal_subregs[].byte_mask.  */
  offset = (1 << offset);

  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    /* Unconditionally legal.  */
	    return false;
	  case 16:
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  /* No matching table row: the subreg is not supported.  */
  return true;
}
/* Returns TRUE if we support a move between the first two operands.
   At the moment, we just want to discourage mem to mem moves until
   after reload, because reload has a hard time with our limited
   number of address registers, and we can get into a situation where
   we need three of them when we only have two.  */
bool
m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx op0 = operands[0];
  rtx op1 = operands[1];

  /* The restriction below is only needed on the A16 chips.  */
  if (TARGET_A24)
    return true;

#define DEBUG_MOV_OK 0
#if DEBUG_MOV_OK
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
  debug_rtx (op0);
  debug_rtx (op1);
#endif

  /* Look through subregs to the underlying operands.  */
  if (GET_CODE (op0) == SUBREG)
    op0 = XEXP (op0, 0);
  if (GET_CODE (op1) == SUBREG)
    op1 = XEXP (op1, 0);

  if (GET_CODE (op0) == MEM
      && GET_CODE (op1) == MEM
      && ! reload_completed)
    {
#if DEBUG_MOV_OK
      fprintf (stderr, " - no, mem to mem\n");
#endif
      return false;
    }

#if DEBUG_MOV_OK
  fprintf (stderr, " - ok\n");
#endif
  return true;
}
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.
   On success, operands[4] (the combined SImode destination) and
   operands[5] (the combined immediate) are filled in.  */
bool
m32c_immd_dbl_mov (rtx * operands,
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
  const char *str1;
  const char *str2;

  /* Case 1: first store to a bare symbol, second to symbol+const.  */
  if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
      && MEM_SCALAR_P (operands[0])
      && !MEM_IN_STRUCT_P (operands[0])
      && GET_CODE (XEXP (operands[2], 0)) == CONST
      && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
      && MEM_SCALAR_P (operands[2])
      && !MEM_IN_STRUCT_P (operands[2]))
    flag = 1;

  /* Case 2: both stores to symbol+const addresses; the first offset
     must be 4-byte aligned.  */
  else if (GET_CODE (XEXP (operands[0], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
	   && GET_CODE (XEXP (operands[2], 0)) == CONST
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 2;

  /* Case 3: both stores are frame-base (FB) relative with constant
     offsets; the first offset must be 4-byte aligned.  */
  else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
	   && REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
	   && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
	   && MEM_SCALAR_P (operands[0])
	   && !MEM_IN_STRUCT_P (operands[0])
	   && !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
	   && REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
	   && GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
	   && MEM_SCALAR_P (operands[2])
	   && !MEM_IN_STRUCT_P (operands[2]))
    flag = 3;

  else
    return false;

  switch (flag)
    {
    case 1:
      /* Both stores must name the same symbol.  */
      str1 = XSTR (XEXP (operands[0], 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp (str1, str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 2:
      str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
      if (strcmp(str1,str2) == 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    case 3:
      /* The second store must land exactly 2 bytes past the first.
	 offsetsign is the sign bit of offset1; NOTE(review): the test
	 also requires offset1 to be negative — confirm that is
	 intentional rather than an inverted condition.  */
      offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
      offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
      offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
      if (((offset2-offset1) == 2) && offsetsign != 0)
	okflag = 1;
      else
	okflag = 0;
      break;
    default:
      okflag = 0;
    }

  if (okflag == 1)
    {
      HOST_WIDE_INT val;
      operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));

      /* Combine the two HImode immediates into one SImode value,
	 second word in the high half.  */
      val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
      operands[5] = gen_rtx_CONST_INT (VOIDmode, val);

      return true;
    }

  return false;
}
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Returns an OUTER-mode rtx referring to the
   requested BYTE offset of X (which has mode INNER).  */
static rtx
m32c_subreg (enum machine_mode outer,
	     rtx x, enum machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Anything that is not a hard register we care about goes through
     the generic subreg machinery.  */
  if (GET_CODE (x) != REG)
    return simplify_gen_subreg (outer, x, inner, byte);

  r = REGNO (x);
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  if (nr == -1)
    {
      /* An unmapped combination is a backend bug; dump and abort.  */
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  Returns nonzero if the move was fully
   emitted here, zero if the caller should emit it.  */
int
m32c_prepare_move (rtx * operands, enum machine_mode mode)
{
  /* A16 chips cannot move PSImode in one instruction; split now.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      /* Perform the PRE_MODIFY address update as an explicit insn,
	 then store through the plain register address.  */
      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-mem moves by forcing the source into a register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* r8c/m16c (A16) have no PSImode moves at all.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop; we can't pop SImode.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split.  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* Each mode splits into two submode parts.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
    split_all = 3;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* High part first, so the words end up in memory order.  */
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit in whichever order avoids clobbering a source before
	 it is read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
   the like.  For the R8C they expect one of the addresses to be in
   R1L:An so we need to arrange for that.  Otherwise, it's just a
   matter of picking out the operands we want and emitting the right
   pattern for them.  All these expanders, which correspond to
   patterns in blkmov.md, must return nonzero if they expand the insn,
   or zero if they should FAIL.  */

/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Scratch outputs: the opcode clobbers its address and count.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* The pattern needs the address in a pseudo register.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a word and emit the
	 word-sized variant with half the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Scratch outputs: the opcode clobbers its addresses and count.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case (byte-at-a-time).  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
/* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
   the copy, which should point to the NUL at the end of the string,
   $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
   Since our opcode leaves the destination pointing *after* the NUL,
   we must emit an adjustment.  */
int
m32c_expand_movstr(rtx *operands)
{
  rtx desta, srca;
  rtx desto, srco;

  desta = XEXP (operands[1], 0);
  srca = XEXP (operands[2], 0);

  /* Scratch outputs for the updated addresses.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  emit_insn (gen_movstr_op (desto, srco, desta, srca));
  /* desto ends up being a1, which allows this type of add through MOVA. */
  emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));

  return 1;
}
/* This is a strcmp() opcode.  $0 is the destination (HI) which holds
   <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
   $2 is the other (MEM:BLK).  We must do the comparison, and then
   convert the flags to a signed integer result.  */
int
m32c_expand_cmpstr(rtx *operands)
{
  rtx src1a, src2a;

  src1a = XEXP (operands[1], 0);
  src2a = XEXP (operands[2], 0);

  if (GET_CODE (src1a) != REG
      || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
    src1a = copy_to_mode_reg (Pmode, src1a);

  if (GET_CODE (src2a) != REG
      || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
    src2a = copy_to_mode_reg (Pmode, src2a);

  /* The compare sets the flags; cond_to_int materializes them as the
     signed <=>0 result in operands[0].  */
  emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
  emit_insn (gen_cond_to_int (operands[0]));

  return 1;
}
/* Signature shared by all the gen_* shift-pattern generators.  */
typedef rtx (*shift_gen_func)(rtx, rtx, rtx);

/* Map a (MODE, CODE) pair to the generator function for the matching
   shift pattern.  CODE is ASHIFT, ASHIFTRT or LSHIFTRT; SImode picks
   a different pattern for A16 vs A24 chips.  */
static shift_gen_func
shift_gen_func_for (int mode, int code)
{
#define GFF(m,c,f) if (mode == m && code == c) return f
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
#undef GFF
  /* No pattern exists for this combination — caller error.  */
  gcc_unreachable ();
}
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   OPERANDS are the operands of the original shift pattern, SCALE is
   +1 for a left shift and -1 for a right shift, SHIFT_CODE is the rtx
   code of the shift.  Returns 1 when the shift has been fully emitted
   here, 0 when the caller should still emit its own pattern (with
   operands[2] replaced by the prepared count).  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit as many maximum-width shifts as needed.
	 On A24, PSI/SI shifts accept counts up to 32; otherwise the
	 hardware limit is 8.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      /* A negative COUNT encodes "shift the other way" for the single
	 signed-count hardware shift.  */
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      /* Residual shift, |count| <= maxc.  */
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: get it into a QImode value we may safely modify.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this.  */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	 1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	 1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      /* NOTE(review): LREF is built but never used below — the
	 cbranch takes LABEL directly.  Looks dead; confirm.  */
      rtx label, lref, insn, tempvar;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      lref = gen_rtx_LABEL_REF (VOIDmode, label);
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      /* The final shift is in-place on operands[0].  */
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Let the caller's pattern do the shift with the prepared count.  */
  operands[2] = temp;
  return 0;
}
/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale /* i as SI */;
  rtx temp2; /* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  /* Widen the multiplier to SImode: zero-extend a PSImode value,
     or load a constant directly into an SImode register.  */
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  /* Widen the multiplicand, multiply in SImode (possibly via a
     library call), and truncate the product back to PSImode.  */
  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
3689 /* Pattern Output Functions */
3692 m32c_expand_movcc (rtx *operands)
3694 rtx rel = operands[1];
3695 rtx cmp;
3697 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3698 return 1;
3699 if (GET_CODE (operands[2]) != CONST_INT
3700 || GET_CODE (operands[3]) != CONST_INT)
3701 return 1;
3702 if (GET_CODE (rel) == NE)
3704 rtx tmp = operands[2];
3705 operands[2] = operands[3];
3706 operands[3] = tmp;
3707 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3710 emit_move_insn (operands[0],
3711 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3712 rel,
3713 operands[2],
3714 operands[3]));
3715 return 0;
3718 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3720 m32c_expand_insv (rtx *operands)
3722 rtx op0, src0, p;
3723 int mask;
3725 if (INTVAL (operands[1]) != 1)
3726 return 1;
3728 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3729 if (GET_CODE (operands[3]) != CONST_INT)
3730 return 1;
3731 if (INTVAL (operands[3]) != 0
3732 && INTVAL (operands[3]) != 1
3733 && INTVAL (operands[3]) != -1)
3734 return 1;
3736 mask = 1 << INTVAL (operands[2]);
3738 op0 = operands[0];
3739 if (GET_CODE (op0) == SUBREG
3740 && SUBREG_BYTE (op0) == 0)
3742 rtx sub = SUBREG_REG (op0);
3743 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3744 op0 = sub;
3747 if (!can_create_pseudo_p ()
3748 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3749 src0 = op0;
3750 else
3752 src0 = gen_reg_rtx (GET_MODE (op0));
3753 emit_move_insn (src0, op0);
3756 if (GET_MODE (op0) == HImode
3757 && INTVAL (operands[2]) >= 8
3758 && GET_MODE (op0) == MEM)
3760 /* We are little endian. */
3761 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3762 MEM_COPY_ATTRIBUTES (new_mem, op0);
3763 mask >>= 8;
3766 /* First, we generate a mask with the correct polarity. If we are
3767 storing a zero, we want an AND mask, so invert it. */
3768 if (INTVAL (operands[3]) == 0)
3770 /* Storing a zero, use an AND mask */
3771 if (GET_MODE (op0) == HImode)
3772 mask ^= 0xffff;
3773 else
3774 mask ^= 0xff;
3776 /* Now we need to properly sign-extend the mask in case we need to
3777 fall back to an AND or OR opcode. */
3778 if (GET_MODE (op0) == HImode)
3780 if (mask & 0x8000)
3781 mask -= 0x10000;
3783 else
3785 if (mask & 0x80)
3786 mask -= 0x100;
3789 switch ( (INTVAL (operands[3]) ? 4 : 0)
3790 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3791 + (TARGET_A24 ? 1 : 0))
3793 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3794 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3795 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3796 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3797 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3798 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3799 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3800 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3801 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3804 emit_insn (p);
3805 return 0;
3808 const char *
3809 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3811 static char buf[30];
3812 if (GET_CODE (operands[0]) == REG
3813 && REGNO (operands[0]) == R0_REGNO)
3815 if (code == EQ)
3816 return "stzx\t#1,#0,r0l";
3817 if (code == NE)
3818 return "stzx\t#0,#1,r0l";
3820 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3821 return buf;
3824 /* Encode symbol attributes of a SYMBOL_REF into its
3825 SYMBOL_REF_FLAGS. */
3826 static void
3827 m32c_encode_section_info (tree decl, rtx rtl, int first)
3829 int extra_flags = 0;
3831 default_encode_section_info (decl, rtl, first);
3832 if (TREE_CODE (decl) == FUNCTION_DECL
3833 && m32c_special_page_vector_p (decl))
3835 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3837 if (extra_flags)
3838 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx saved_first, saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Temporarily install the outermost (function-level) insn list so
     leaf_function_p examines the whole function rather than whatever
     sequence happens to be open right now.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  /* Restore the previously open sequence.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
3870 /* Returns TRUE if the current function needs to use the ENTER/EXIT
3871 opcodes. If the function doesn't need the frame base or stack
3872 pointer, it can use the simpler RTS opcode. */
3873 static bool
3874 m32c_function_needs_enter (void)
3876 rtx insn;
3877 struct sequence_stack *seq;
3878 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3879 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3881 insn = get_insns ();
3882 for (seq = crtl->emit.sequence_stack;
3883 seq;
3884 insn = seq->first, seq = seq->next);
3886 while (insn)
3888 if (reg_mentioned_p (sp, insn))
3889 return true;
3890 if (reg_mentioned_p (fb, insn))
3891 return true;
3892 insn = NEXT_INSN (insn);
3894 return false;
3897 /* Mark all the subexpressions of the PARALLEL rtx PAR as
3898 frame-related. Return PAR.
3900 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3901 PARALLEL rtx other than the first if they do not have the
3902 FRAME_RELATED flag set on them. So this function is handy for
3903 marking up 'enter' instructions. */
3904 static rtx
3905 m32c_all_frame_related (rtx par)
3907 int len = XVECLEN (par, 0);
3908 int i;
3910 for (i = 0; i < len; i++)
3911 F (XVECEXP (par, 0, i));
3913 return par;
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }

  /* PP_justcount only counts; nothing is emitted.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupt handlers additionally save their register set with
     PUSHM.  */
  if (interrupt_p (cfun->decl))
    emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* No frame, no interrupt, no SP/FB use: the cheap RTS return works.  */
  if (frame_size == 0
      && !cfun->machine->is_interrupt
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (frame_size > 254)
    {
      /* ENTER can only reserve up to 254 bytes here; the remainder is
	 reserved with an explicit SP adjustment below.  */
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  /* This just emits a comment into the .s file for debugging.  */
  if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* Manually unwind the frame: sp = fb (via a0), pop fb, pop the
	 interrupt-saved registers, then return with REIT.  */
      emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
		      gen_rtx_REG (spmode, FP_REGNO));
      emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
		      gen_rtx_REG (spmode, A0_REGNO));
      if (TARGET_A16)
	emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
      else
	emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
      emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
      if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
  emit_barrier ();
}
/* Emit the exception-handling epilogue: jump to the libgcc helper
   that unwinds and returns.  RET_ADDR is the address to return to.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
  emit_barrier ();
}
4026 /* Indicate which flags must be properly set for a given conditional. */
4027 static int
4028 flags_needed_for_conditional (rtx cond)
4030 switch (GET_CODE (cond))
4032 case LE:
4033 case GT:
4034 return FLAGS_OSZ;
4035 case LEU:
4036 case GTU:
4037 return FLAGS_ZC;
4038 case LT:
4039 case GE:
4040 return FLAGS_OS;
4041 case LTU:
4042 case GEU:
4043 return FLAGS_C;
4044 case EQ:
4045 case NE:
4046 return FLAGS_Z;
4047 default:
4048 return FLAGS_N;
/* Set to 1 for verbose tracing of the redundant-compare analysis.  */
#define DEBUG_CMP 0

/* Returns true if a compare insn is redundant because it would only
   set flags that are already set correctly.  CMP is the compare insn,
   OPERANDS its operands.  Looks forward to find the conditional that
   consumes the flags, and backward to find the insn that last set
   them.  */
static bool
m32c_compare_redundant (rtx cmp, rtx *operands)
{
  int flags_needed;
  int pflags;
  rtx prev, pp, next;
  /* NOTE(review): OP2 is assigned below but never read — looks
     dead; confirm.  */
  rtx op0, op1, op2;
#if DEBUG_CMP
  int prev_icode, i;
#endif

  op0 = operands[0];
  op1 = operands[1];
  op2 = operands[2];

#if DEBUG_CMP
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
  debug_rtx(cmp);
  for (i=0; i<2; i++)
    {
      fprintf(stderr, "operands[%d] = ", i);
      debug_rtx(operands[i]);
    }
#endif

  /* Find the insn that consumes the flags the compare would set.  */
  next = next_nonnote_insn (cmp);
  if (!next || !INSN_P (next))
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by insn\n");
      debug_rtx(next);
#endif
      return false;
    }
  if (GET_CODE (PATTERN (next)) == SET
      && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
    {
      /* Conditional branch/move: the condition is the first arm of
	 the IF_THEN_ELSE.  */
      next = XEXP (XEXP (PATTERN (next), 1), 0);
    }
  else if (GET_CODE (PATTERN (next)) == SET)
    {
      /* If this is a conditional, flags_needed will be something
	 other than FLAGS_N, which we test below.  */
      next = XEXP (PATTERN (next), 1);
    }
  else
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }
#if DEBUG_CMP
  fprintf(stderr, "conditional is: ");
  debug_rtx(next);
#endif

  flags_needed = flags_needed_for_conditional (next);
  if (flags_needed == FLAGS_N)
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }

  /* Compare doesn't set overflow and carry the same way that
     arithmetic instructions do, so we can't replace those.  */
  if (flags_needed & FLAGS_OC)
    return false;

  /* Scan backward for the last flag-setting insn, bailing out on
     anything that makes the flags (or op0) unreliable.  */
  prev = cmp;
  do {
    prev = prev_nonnote_insn (prev);
    if (!prev)
      {
#if DEBUG_CMP
	fprintf(stderr, "No previous insn.\n");
#endif
	return false;
      }
    if (!INSN_P (prev))
      {
#if DEBUG_CMP
	fprintf(stderr, "Previous insn is a non-insn.\n");
#endif
	return false;
      }
    pp = PATTERN (prev);
    if (GET_CODE (pp) != SET)
      {
#if DEBUG_CMP
	fprintf(stderr, "Previous insn is not a SET.\n");
#endif
	return false;
      }
    pflags = get_attr_flags (prev);

    /* Looking up attributes of previous insns corrupted the recog
       tables.  */
    INSN_UID (cmp) = -1;
    recog (PATTERN (cmp), cmp, 0);

    /* An intervening non-flag-setting insn that touches op0 means the
       old flags no longer describe op0's value.  */
    if (pflags == FLAGS_N
	&& reg_mentioned_p (op0, pp))
      {
#if DEBUG_CMP
	fprintf(stderr, "intermediate non-flags insn uses op:\n");
	debug_rtx(prev);
#endif
	return false;
      }
  } while (pflags == FLAGS_N);
#if DEBUG_CMP
  fprintf(stderr, "previous flag-setting insn:\n");
  debug_rtx(prev);
  debug_rtx(pp);
#endif

  if (GET_CODE (pp) == SET
      && GET_CODE (XEXP (pp, 0)) == REG
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
    {
      /* Adjacent cbranches must have the same operands to be
	 redundant.  */
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
#if DEBUG_CMP
      fprintf(stderr, "adjacent cbranches\n");
      debug_rtx(pop0);
      debug_rtx(pop1);
#endif
      if (rtx_equal_p (op0, pop0)
	  && rtx_equal_p (op1, pop1))
	return true;
#if DEBUG_CMP
      fprintf(stderr, "prev cmp not same\n");
#endif
      return false;
    }

  /* Else the previous insn must be a SET, with either the source or
     dest equal to operands[0], and operands[1] must be zero.  */

  if (!rtx_equal_p (op1, const0_rtx))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[1] not const0_rtx\n");
#endif
      return false;
    }
  if (GET_CODE (pp) != SET)
    {
#if DEBUG_CMP
      fprintf (stderr, "pp not set\n");
#endif
      return false;
    }
  if (!rtx_equal_p (op0, SET_SRC (pp))
      && !rtx_equal_p (op0, SET_DEST (pp)))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[0] not found in set\n");
#endif
      return false;
    }

  /* Redundant only if the earlier insn set every flag the conditional
     needs.  */
#if DEBUG_CMP
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
#endif
  if ((pflags & flags_needed) == flags_needed)
    return true;

  return false;
}
/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx insn, rtx *operands)
{
  /* TEMPL starts with a ';' so that returning it unmodified yields a
     commented-out instruction; skipping the first byte yields the
     live one.  Static buffer: valid until the next call.  */
  static char templ[] = ";cmp.b\t%1,%0";
  /*                           ^ 5  */

  /* Patch the size suffix (b/w/l) to match the operand width.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ);
#endif
  return templ + 1;
}
/* Register the section-info hook defined above.  */
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* The Global `targetm' Variable. */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Generated garbage-collector roots for this file.  */
#include "gt-m32c.h"