Turn HARD_REGNO_MODE_OK into a target hook
[official-gcc.git] / gcc / config / m32c / m32c.c
blob 39d1fd8018ccc1e5e029b4ec5fb7872af01ca01c
1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "attribs.h"
30 #include "df.h"
31 #include "memmodel.h"
32 #include "tm_p.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "insn-attr.h"
40 #include "flags.h"
41 #include "reload.h"
42 #include "stor-layout.h"
43 #include "varasm.h"
44 #include "calls.h"
45 #include "explow.h"
46 #include "expr.h"
47 #include "tm-constrs.h"
48 #include "builtins.h"
50 /* This file should be included last. */
51 #include "target-def.h"
53 /* Prototypes */
55 /* Used by m32c_pushm_popm. */
56 typedef enum
58 PP_pushm,
59 PP_popm,
60 PP_justcount
61 } Push_Pop_Type;
63 static bool m32c_function_needs_enter (void);
64 static tree interrupt_handler (tree *, tree, tree, int, bool *);
65 static tree function_vector_handler (tree *, tree, tree, int, bool *);
66 static int interrupt_p (tree node);
67 static int bank_switch_p (tree node);
68 static int fast_interrupt_p (tree node);
69 static int interrupt_p (tree node);
70 static bool m32c_asm_integer (rtx, unsigned int, int);
71 static int m32c_comp_type_attributes (const_tree, const_tree);
72 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
73 static struct machine_function *m32c_init_machine_status (void);
74 static void m32c_insert_attributes (tree, tree *);
75 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
76 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
77 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
78 const_tree, bool);
79 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
80 const_tree, bool);
81 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
82 const_tree, bool);
83 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
84 static int m32c_pushm_popm (Push_Pop_Type);
85 static bool m32c_strict_argument_naming (cumulative_args_t);
86 static rtx m32c_struct_value_rtx (tree, int);
87 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
88 static int need_to_save (int);
89 static rtx m32c_function_value (const_tree, const_tree, bool);
90 static rtx m32c_libcall_value (machine_mode, const_rtx);
92 /* Returns true if an address is specified, else false. */
93 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
95 static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);
97 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
99 #define streq(a,b) (strcmp ((a), (b)) == 0)
101 /* Internal support routines */
103 /* Debugging statements are tagged with DEBUG0 only so that they can
104 be easily enabled individually, by replacing the '0' with '1' as
105 needed. */
106 #define DEBUG0 0
107 #define DEBUG1 1
109 #if DEBUG0
110 #include "print-tree.h"
111 /* This is needed by some of the commented-out debug statements
112 below. */
113 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
114 #endif
115 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
117 /* These are all to support encode_pattern(). */
118 static char pattern[30], *patternp;
119 static GTY(()) rtx patternr[30];
120 #define RTX_IS(x) (streq (pattern, x))
122 /* Some macros to simplify the logic throughout this file. */
123 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
124 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
126 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
127 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
129 static int
130 far_addr_space_p (rtx x)
132 if (GET_CODE (x) != MEM)
133 return 0;
134 #if DEBUG0
135 fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
136 fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
137 #endif
138 return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
141 /* We do most RTX matching by converting the RTX into a string, and
142 using string compares. This vastly simplifies the logic in many of
143 the functions in this file.
145 On exit, pattern[] has the encoded string (use RTX_IS("...") to
146 compare it) and patternr[] has pointers to the nodes in the RTX
147 corresponding to each character in the encoded string. The latter
148 is mostly used by print_operand().
150 Unrecognized patterns have '?' in them; this shows up when the
151 assembler complains about syntax errors.
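/* Illustrative examples, not part of the original sources: a bare
   register encodes as "r"; (mem (reg ...)) as "mr"; a MEM of a base
   register plus a constant displacement as "m+ri"; and a MEM of a
   SYMBOL_REF as "ms".  These are exactly the strings that the
   RTX_IS() tests later in this file compare against.  */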
154 static void
155 encode_pattern_1 (rtx x)
157 int i;
159 if (patternp == pattern + sizeof (pattern) - 2)
161 patternp[-1] = '?';
162 return;
165 patternr[patternp - pattern] = x;
167 switch (GET_CODE (x))
169 case REG:
170 *patternp++ = 'r';
171 break;
172 case SUBREG:
173 if (GET_MODE_SIZE (GET_MODE (x)) !=
174 GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
175 *patternp++ = 'S';
176 if (GET_MODE (x) == PSImode
177 && GET_CODE (XEXP (x, 0)) == REG)
178 *patternp++ = 'S';
179 encode_pattern_1 (XEXP (x, 0));
180 break;
181 case MEM:
182 *patternp++ = 'm';
183 /* FALLTHRU */
184 case CONST:
185 encode_pattern_1 (XEXP (x, 0));
186 break;
187 case SIGN_EXTEND:
188 *patternp++ = '^';
189 *patternp++ = 'S';
190 encode_pattern_1 (XEXP (x, 0));
191 break;
192 case ZERO_EXTEND:
193 *patternp++ = '^';
194 *patternp++ = 'Z';
195 encode_pattern_1 (XEXP (x, 0));
196 break;
197 case PLUS:
198 *patternp++ = '+';
199 encode_pattern_1 (XEXP (x, 0));
200 encode_pattern_1 (XEXP (x, 1));
201 break;
202 case PRE_DEC:
203 *patternp++ = '>';
204 encode_pattern_1 (XEXP (x, 0));
205 break;
206 case POST_INC:
207 *patternp++ = '<';
208 encode_pattern_1 (XEXP (x, 0));
209 break;
210 case LO_SUM:
211 *patternp++ = 'L';
212 encode_pattern_1 (XEXP (x, 0));
213 encode_pattern_1 (XEXP (x, 1));
214 break;
215 case HIGH:
216 *patternp++ = 'H';
217 encode_pattern_1 (XEXP (x, 0));
218 break;
219 case SYMBOL_REF:
220 *patternp++ = 's';
221 break;
222 case LABEL_REF:
223 *patternp++ = 'l';
224 break;
225 case CODE_LABEL:
226 *patternp++ = 'c';
227 break;
228 case CONST_INT:
229 case CONST_DOUBLE:
230 *patternp++ = 'i';
231 break;
232 case UNSPEC:
233 *patternp++ = 'u';
234 *patternp++ = '0' + XCINT (x, 1, UNSPEC);
235 for (i = 0; i < XVECLEN (x, 0); i++)
236 encode_pattern_1 (XVECEXP (x, 0, i));
237 break;
238 case USE:
239 *patternp++ = 'U';
240 break;
241 case PARALLEL:
242 *patternp++ = '|';
243 for (i = 0; i < XVECLEN (x, 0); i++)
244 encode_pattern_1 (XVECEXP (x, 0, i));
245 break;
246 case EXPR_LIST:
247 *patternp++ = 'E';
248 encode_pattern_1 (XEXP (x, 0));
249 if (XEXP (x, 1))
250 encode_pattern_1 (XEXP (x, 1));
251 break;
252 default:
253 *patternp++ = '?';
254 #if DEBUG0
255 fprintf (stderr, "can't encode pattern %s\n",
256 GET_RTX_NAME (GET_CODE (x)));
257 debug_rtx (x);
258 #endif
259 break;
263 static void
264 encode_pattern (rtx x)
266 patternp = pattern;
267 encode_pattern_1 (x);
268 *patternp = 0;
271 /* Since register names indicate the mode they're used in, we need a
272 way to determine which name to refer to the register with. Called
273 by print_operand(). */
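/* For example (added for illustration): R0_REGNO asked for in QImode
   yields "r0l", in SImode the pair name "r2r0", and in HImode it
   falls through to reg_names[] (normally "r0").  */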
275 static const char *
276 reg_name_with_mode (int regno, machine_mode mode)
278 int mlen = GET_MODE_SIZE (mode);
279 if (regno == R0_REGNO && mlen == 1)
280 return "r0l";
281 if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
282 return "r2r0";
283 if (regno == R0_REGNO && mlen == 6)
284 return "r2r1r0";
285 if (regno == R0_REGNO && mlen == 8)
286 return "r3r1r2r0";
287 if (regno == R1_REGNO && mlen == 1)
288 return "r1l";
289 if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
290 return "r3r1";
291 if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
292 return "a1a0";
293 return reg_names[regno];
296 /* How many bytes a register uses on stack when it's pushed. We need
297 to know this because the push opcode needs to explicitly indicate
298 the size of the register, even though the name of the register
299 already tells it that. Used by m32c_output_reg_{push,pop}, which
300 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
302 static int
303 reg_push_size (int regno)
305 switch (regno)
307 case R0_REGNO:
308 case R1_REGNO:
309 return 2;
310 case R2_REGNO:
311 case R3_REGNO:
312 case FLG_REGNO:
313 return 2;
314 case A0_REGNO:
315 case A1_REGNO:
316 case SB_REGNO:
317 case FB_REGNO:
318 case SP_REGNO:
319 if (TARGET_A16)
320 return 2;
321 else
322 return 3;
323 default:
324 gcc_unreachable ();
328 /* Given two register classes, find the largest intersection between
329 them. If there is no intersection, return RETURNED_IF_EMPTY
330 instead. */
331 static reg_class_t
332 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
333 reg_class_t returned_if_empty)
335 HARD_REG_SET cc;
336 int i;
337 reg_class_t best = NO_REGS;
338 unsigned int best_size = 0;
340 if (original_class == limiting_class)
341 return original_class;
343 cc = reg_class_contents[original_class];
344 AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
346 for (i = 0; i < LIM_REG_CLASSES; i++)
348 if (hard_reg_set_subset_p (reg_class_contents[i], cc))
349 if (best_size < reg_class_size[i])
351 best = (reg_class_t) i;
352 best_size = reg_class_size[i];
356 if (best == NO_REGS)
357 return returned_if_empty;
358 return best;
361 /* Used by m32c_register_move_cost to determine if a move is
362 impossibly expensive. */
363 static bool
364 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
366 /* Cache the results: 0=untested 1=no 2=yes */
367 static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
369 if (results[(int) rclass][mode] == 0)
371 int r;
372 results[rclass][mode] = 1;
373 for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
374 if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
375 && m32c_hard_regno_mode_ok (r, mode))
377 results[rclass][mode] = 2;
378 break;
382 #if DEBUG0
383 fprintf (stderr, "class %s can hold %s? %s\n",
384 class_names[(int) rclass], mode_name[mode],
385 (results[rclass][mode] == 2) ? "yes" : "no");
386 #endif
387 return results[(int) rclass][mode] == 2;
390 /* Run-time Target Specification. */
392 /* Memregs are memory locations that gcc treats like general
393 registers, as there are a limited number of true registers and the
394 m32c families can use memory in most places that registers can be
395 used.
397 However, since memory accesses are more expensive than registers,
398 we allow the user to limit the number of memregs available, in
399 order to try to persuade gcc to try harder to use real registers.
401 Memregs are provided by lib1funcs.S.
404 int ok_to_change_target_memregs = TRUE;
406 /* Implements TARGET_OPTION_OVERRIDE. */
408 #undef TARGET_OPTION_OVERRIDE
409 #define TARGET_OPTION_OVERRIDE m32c_option_override
411 static void
412 m32c_option_override (void)
414 /* We limit memregs to 0..16, and provide a default. */
415 if (global_options_set.x_target_memregs)
417 if (target_memregs < 0 || target_memregs > 16)
418 error ("invalid target memregs value '%d'", target_memregs);
420 else
421 target_memregs = 16;
423 if (TARGET_A24)
424 flag_ivopts = 0;
426 /* This target defaults to strict volatile bitfields. */
427 if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
428 flag_strict_volatile_bitfields = 1;
430 /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
431 This is always worse than an absolute call. */
432 if (TARGET_A16)
433 flag_no_function_cse = 1;
435 /* This wants to put insns between compares and their jumps. */
436 /* FIXME: The right solution is to properly trace the flags register
437 values, but that is too much work for stage 4. */
438 flag_combine_stack_adjustments = 0;
441 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
442 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
444 static void
445 m32c_override_options_after_change (void)
447 if (TARGET_A16)
448 flag_no_function_cse = 1;
451 /* Defining data structures for per-function information */
453 /* The usual; we set up our machine_function data. */
454 static struct machine_function *
455 m32c_init_machine_status (void)
457 return ggc_cleared_alloc<machine_function> ();
460 /* Implements INIT_EXPANDERS. We just set up to call the above
461 function. */
462 void
463 m32c_init_expanders (void)
465 init_machine_status = m32c_init_machine_status;
468 /* Storage Layout */
470 /* Register Basics */
472 /* Basic Characteristics of Registers */
474 /* Whether a mode fits in a register is complex enough to warrant a
475 table. */
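/* (Clarifying note, not in the original sources: each column gives
   the number of consecutive hard registers needed to hold a value of
   roughly QI/HI/PSI/SI/DI size starting at that register, as read by
   m32c_hard_regno_nregs_1() below; zero means the mode does not fit
   there at all.)  */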
476 static struct
478 char qi_regs;
479 char hi_regs;
480 char pi_regs;
481 char si_regs;
482 char di_regs;
483 } nregs_table[FIRST_PSEUDO_REGISTER] =
485 { 1, 1, 2, 2, 4 }, /* r0 */
486 { 0, 1, 0, 0, 0 }, /* r2 */
487 { 1, 1, 2, 2, 0 }, /* r1 */
488 { 0, 1, 0, 0, 0 }, /* r3 */
489 { 0, 1, 1, 0, 0 }, /* a0 */
490 { 0, 1, 1, 0, 0 }, /* a1 */
491 { 0, 1, 1, 0, 0 }, /* sb */
492 { 0, 1, 1, 0, 0 }, /* fb */
493 { 0, 1, 1, 0, 0 }, /* sp */
494 { 1, 1, 1, 0, 0 }, /* pc */
495 { 0, 0, 0, 0, 0 }, /* fl */
496 { 1, 1, 1, 0, 0 }, /* ap */
497 { 1, 1, 2, 2, 4 }, /* mem0 */
498 { 1, 1, 2, 2, 4 }, /* mem1 */
499 { 1, 1, 2, 2, 4 }, /* mem2 */
500 { 1, 1, 2, 2, 4 }, /* mem3 */
501 { 1, 1, 2, 2, 4 }, /* mem4 */
502 { 1, 1, 2, 2, 0 }, /* mem5 */
503 { 1, 1, 2, 2, 0 }, /* mem6 */
504 { 1, 1, 0, 0, 0 }, /* mem7 */
507 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE. We adjust the number
508 of available memregs, and select which registers need to be preserved
509 across calls based on the chip family. */
511 #undef TARGET_CONDITIONAL_REGISTER_USAGE
512 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
513 void
514 m32c_conditional_register_usage (void)
516 int i;
518 if (0 <= target_memregs && target_memregs <= 16)
520 /* The command line option is bytes, but our "registers" are
521 16-bit words. */
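/* (Example, added for clarity: a memregs value of 9 leaves
   (9+1)/2 = 5 usable memreg words, so mem5..mem7 become fixed
   below and are removed from MEM_REGS.)  */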
522 for (i = (target_memregs+1)/2; i < 8; i++)
524 fixed_regs[MEM0_REGNO + i] = 1;
525 CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
529 /* M32CM and M32C preserve more registers across function calls. */
530 if (TARGET_A24)
532 call_used_regs[R1_REGNO] = 0;
533 call_used_regs[R2_REGNO] = 0;
534 call_used_regs[R3_REGNO] = 0;
535 call_used_regs[A0_REGNO] = 0;
536 call_used_regs[A1_REGNO] = 0;
540 /* How Values Fit in Registers */
542 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
543 different registers are different sizes from each other, *and* may
544 be different sizes in different chip families. */
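/* (Illustration, not from the original sources: per nregs_table,
   SImode starting at r0 takes 2 registers (the r2r0 pair) on every
   family, while SImode in a0 only works on the 16-bit parts, where
   the special case below returns 2 for the a1a0 pair.)  */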
545 static int
546 m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
548 if (regno == FLG_REGNO && mode == CCmode)
549 return 1;
550 if (regno >= FIRST_PSEUDO_REGISTER)
551 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
553 if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
554 return (GET_MODE_SIZE (mode) + 1) / 2;
556 if (GET_MODE_SIZE (mode) <= 1)
557 return nregs_table[regno].qi_regs;
558 if (GET_MODE_SIZE (mode) <= 2)
559 return nregs_table[regno].hi_regs;
560 if (regno == A0_REGNO && mode == SImode && TARGET_A16)
561 return 2;
562 if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
563 return nregs_table[regno].pi_regs;
564 if (GET_MODE_SIZE (mode) <= 4)
565 return nregs_table[regno].si_regs;
566 if (GET_MODE_SIZE (mode) <= 8)
567 return nregs_table[regno].di_regs;
568 return 0;
572 m32c_hard_regno_nregs (int regno, machine_mode mode)
574 int rv = m32c_hard_regno_nregs_1 (regno, mode);
575 return rv ? rv : 1;
578 /* Implement TARGET_HARD_REGNO_MODE_OK. The above function does the work
579 already; just test its return value. */
580 static bool
581 m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
583 return m32c_hard_regno_nregs_1 (regno, mode) != 0;
586 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
587 registers are all different sizes. However, since most modes are
588 bigger than our registers anyway, it's easier to implement this
589 function that way, leaving QImode as the only unique case. */
591 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
593 if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
594 return 1;
596 #if 0
597 if (m1 == QImode || m2 == QImode)
598 return 0;
599 #endif
601 return 1;
604 /* Register Classes */
606 /* Implements REGNO_REG_CLASS. */
607 enum reg_class
608 m32c_regno_reg_class (int regno)
610 switch (regno)
612 case R0_REGNO:
613 return R0_REGS;
614 case R1_REGNO:
615 return R1_REGS;
616 case R2_REGNO:
617 return R2_REGS;
618 case R3_REGNO:
619 return R3_REGS;
620 case A0_REGNO:
621 return A0_REGS;
622 case A1_REGNO:
623 return A1_REGS;
624 case SB_REGNO:
625 return SB_REGS;
626 case FB_REGNO:
627 return FB_REGS;
628 case SP_REGNO:
629 return SP_REGS;
630 case FLG_REGNO:
631 return FLG_REGS;
632 default:
633 if (IS_MEM_REGNO (regno))
634 return MEM_REGS;
635 return ALL_REGS;
639 /* Implements REGNO_OK_FOR_BASE_P. */
641 m32c_regno_ok_for_base_p (int regno)
643 if (regno == A0_REGNO
644 || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
645 return 1;
646 return 0;
649 /* Implements TARGET_PREFERRED_RELOAD_CLASS. In general, prefer general
650 registers of the appropriate size. */
652 #undef TARGET_PREFERRED_RELOAD_CLASS
653 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
655 static reg_class_t
656 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
658 reg_class_t newclass = rclass;
660 #if DEBUG0
661 fprintf (stderr, "\npreferred_reload_class for %s is ",
662 class_names[rclass]);
663 #endif
664 if (rclass == NO_REGS)
665 rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
667 if (reg_classes_intersect_p (rclass, CR_REGS))
669 switch (GET_MODE (x))
671 case E_QImode:
672 newclass = HL_REGS;
673 break;
674 default:
675 /* newclass = HI_REGS; */
676 break;
680 else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
681 newclass = SI_REGS;
682 else if (GET_MODE_SIZE (GET_MODE (x)) > 4
683 && ! reg_class_subset_p (R03_REGS, rclass))
684 newclass = DI_REGS;
686 rclass = reduce_class (rclass, newclass, rclass);
688 if (GET_MODE (x) == QImode)
689 rclass = reduce_class (rclass, HL_REGS, rclass);
691 #if DEBUG0
692 fprintf (stderr, "%s\n", class_names[rclass]);
693 debug_rtx (x);
695 if (GET_CODE (x) == MEM
696 && GET_CODE (XEXP (x, 0)) == PLUS
697 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
698 fprintf (stderr, "Glorm!\n");
699 #endif
700 return rclass;
703 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
705 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
706 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
708 static reg_class_t
709 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
711 return m32c_preferred_reload_class (x, rclass);
714 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
715 address registers for reloads since they're needed for address
716 reloads. */
718 m32c_limit_reload_class (machine_mode mode, int rclass)
720 #if DEBUG0
721 fprintf (stderr, "limit_reload_class for %s: %s ->",
722 mode_name[mode], class_names[rclass]);
723 #endif
725 if (mode == QImode)
726 rclass = reduce_class (rclass, HL_REGS, rclass);
727 else if (mode == HImode)
728 rclass = reduce_class (rclass, HI_REGS, rclass);
729 else if (mode == SImode)
730 rclass = reduce_class (rclass, SI_REGS, rclass);
732 if (rclass != A_REGS)
733 rclass = reduce_class (rclass, DI_REGS, rclass);
735 #if DEBUG0
736 fprintf (stderr, " %s\n", class_names[rclass]);
737 #endif
738 return rclass;
741 /* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be reloaded in
742 r0 or r1, as those are the only real QImode registers. CR regs get
743 reloaded through appropriately sized general or address
744 registers. */
746 m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
748 int cc = class_contents[rclass][0];
749 #if DEBUG0
750 fprintf (stderr, "\nsecondary reload class %s %s\n",
751 class_names[rclass], mode_name[mode]);
752 debug_rtx (x);
753 #endif
754 if (mode == QImode
755 && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
756 return QI_REGS;
757 if (reg_classes_intersect_p (rclass, CR_REGS)
758 && GET_CODE (x) == REG
759 && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
760 return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
761 return NO_REGS;
764 /* Implements TARGET_CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
765 reloads. */
767 #undef TARGET_CLASS_LIKELY_SPILLED_P
768 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
770 static bool
771 m32c_class_likely_spilled_p (reg_class_t regclass)
773 if (regclass == A_REGS)
774 return true;
776 return (reg_class_size[(int) regclass] == 1);
779 /* Implements TARGET_CLASS_MAX_NREGS. We calculate this according to its
780 documented meaning, to avoid potential inconsistencies with actual
781 class definitions. */
783 #undef TARGET_CLASS_MAX_NREGS
784 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
786 static unsigned char
787 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
789 int rn;
790 unsigned char max = 0;
792 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
793 if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
795 unsigned char n = m32c_hard_regno_nregs (rn, mode);
796 if (max < n)
797 max = n;
799 return max;
802 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
803 QI (r0l, r1l) because the chip doesn't support QI ops on other
804 registers (well, it does on a0/a1 but if we let gcc do that, reload
805 suffers). Otherwise, we allow changes to larger modes. */
807 m32c_cannot_change_mode_class (machine_mode from,
808 machine_mode to, int rclass)
810 int rn;
811 #if DEBUG0
812 fprintf (stderr, "cannot change from %s to %s in %s\n",
813 mode_name[from], mode_name[to], class_names[rclass]);
814 #endif
816 /* If the larger mode isn't allowed in any of these registers, we
817 can't allow the change. */
818 for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
819 if (class_contents[rclass][0] & (1 << rn))
820 if (! m32c_hard_regno_mode_ok (rn, to))
821 return 1;
823 if (to == QImode)
824 return (class_contents[rclass][0] & 0x1ffa);
826 if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
827 && GET_MODE_SIZE (from) > 1)
828 return 0;
829 if (GET_MODE_SIZE (from) > 2) /* all other regs */
830 return 0;
832 return 1;
835 /* Helpers for the rest of the file. */
836 /* TRUE if the rtx is a REG rtx for the given register. */
837 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
838 && REGNO (rtx) == regno)
839 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
840 base register in address calculations (hence the "strict"
841 argument). */
842 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
843 && (REGNO (rtx) == AP_REGNO \
844 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
846 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
848 /* Implements matching for constraints (see next function too). 'S' is
849 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
850 call return values. */
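/* (Added cross-reference: the pattern strings tested below come from
   encode_pattern() above, so for instance "m+ri" is a MEM of a base
   register plus a constant displacement and "ms" is a MEM of a
   symbol.)  */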
851 bool
852 m32c_matches_constraint_p (rtx value, int constraint)
854 encode_pattern (value);
856 switch (constraint) {
857 case CONSTRAINT_SF:
858 return (far_addr_space_p (value)
859 && ((RTX_IS ("mr")
860 && A0_OR_PSEUDO (patternr[1])
861 && GET_MODE (patternr[1]) == SImode)
862 || (RTX_IS ("m+^Sri")
863 && A0_OR_PSEUDO (patternr[4])
864 && GET_MODE (patternr[4]) == HImode)
865 || (RTX_IS ("m+^Srs")
866 && A0_OR_PSEUDO (patternr[4])
867 && GET_MODE (patternr[4]) == HImode)
868 || (RTX_IS ("m+^S+ris")
869 && A0_OR_PSEUDO (patternr[5])
870 && GET_MODE (patternr[5]) == HImode)
871 || RTX_IS ("ms")));
872 case CONSTRAINT_Sd:
874 /* This is the common "src/dest" address */
875 rtx r;
876 if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
877 return true;
878 if (RTX_IS ("ms") || RTX_IS ("m+si"))
879 return true;
880 if (RTX_IS ("m++rii"))
882 if (REGNO (patternr[3]) == FB_REGNO
883 && INTVAL (patternr[4]) == 0)
884 return true;
886 if (RTX_IS ("mr"))
887 r = patternr[1];
888 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
889 r = patternr[2];
890 else
891 return false;
892 if (REGNO (r) == SP_REGNO)
893 return false;
894 return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
896 case CONSTRAINT_Sa:
898 rtx r;
899 if (RTX_IS ("mr"))
900 r = patternr[1];
901 else if (RTX_IS ("m+ri"))
902 r = patternr[2];
903 else
904 return false;
905 return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
907 case CONSTRAINT_Si:
908 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
909 case CONSTRAINT_Ss:
910 return ((RTX_IS ("mr")
911 && (IS_REG (patternr[1], SP_REGNO)))
912 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
913 case CONSTRAINT_Sf:
914 return ((RTX_IS ("mr")
915 && (IS_REG (patternr[1], FB_REGNO)))
916 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
917 case CONSTRAINT_Sb:
918 return ((RTX_IS ("mr")
919 && (IS_REG (patternr[1], SB_REGNO)))
920 || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
921 case CONSTRAINT_Sp:
922 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
923 return (RTX_IS ("mi")
924 && !(INTVAL (patternr[1]) & ~0x1fff));
925 case CONSTRAINT_S1:
926 return r1h_operand (value, QImode);
927 case CONSTRAINT_Rpa:
928 return GET_CODE (value) == PARALLEL;
929 default:
930 return false;
934 /* STACK AND CALLING */
936 /* Frame Layout */
938 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
939 (yes, THREE bytes) onto the stack for the return address, but we
940 don't support pointers bigger than 16 bits on those chips. This
941 will likely wreak havoc with exception unwinding. FIXME. */
943 m32c_return_addr_rtx (int count)
945 machine_mode mode;
946 int offset;
947 rtx ra_mem;
949 if (count)
950 return NULL_RTX;
951 /* we want 2[$fb] */
953 if (TARGET_A24)
955 /* It's four bytes */
956 mode = PSImode;
957 offset = 4;
959 else
961 /* FIXME: it's really 3 bytes */
962 mode = HImode;
963 offset = 2;
966 ra_mem =
967 gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
968 offset));
969 return copy_to_mode_reg (mode, ra_mem);
972 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
974 m32c_incoming_return_addr_rtx (void)
976 /* we want [sp] */
977 return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
980 /* Exception Handling Support */
982 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
983 pointers. */
985 m32c_eh_return_data_regno (int n)
987 switch (n)
989 case 0:
990 return MEM0_REGNO;
991 case 1:
992 return MEM0_REGNO+4;
993 default:
994 return INVALID_REGNUM;
998 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
999 m32c_emit_eh_epilogue. */
1001 m32c_eh_return_stackadj_rtx (void)
1003 if (!cfun->machine->eh_stack_adjust)
1005 rtx sa;
1007 sa = gen_rtx_REG (Pmode, R0_REGNO);
1008 cfun->machine->eh_stack_adjust = sa;
1010 return cfun->machine->eh_stack_adjust;
1013 /* Registers That Address the Stack Frame */
1015 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1016 the original spec called for dwarf numbers to vary with register
1017 width as well, for example, r0l, r0, and r2r0 would each have
1018 different dwarf numbers. GCC doesn't support this, and we don't do
1019 it, and gdb seems to like it this way anyway. */
1020 unsigned int
1021 m32c_dwarf_frame_regnum (int n)
1023 switch (n)
1025 case R0_REGNO:
1026 return 5;
1027 case R1_REGNO:
1028 return 6;
1029 case R2_REGNO:
1030 return 7;
1031 case R3_REGNO:
1032 return 8;
1033 case A0_REGNO:
1034 return 9;
1035 case A1_REGNO:
1036 return 10;
1037 case FB_REGNO:
1038 return 11;
1039 case SB_REGNO:
1040 return 19;
1042 case SP_REGNO:
1043 return 12;
1044 case PC_REGNO:
1045 return 13;
1046 default:
1047 return DWARF_FRAME_REGISTERS + 1;
1051 /* The frame looks like this:
1053 ap -> +------------------------------
1054 | Return address (3 or 4 bytes)
1055 | Saved FB (2 or 4 bytes)
1056 fb -> +------------------------------
1057 | local vars
1058 | register saves fb
1059 | through r0 as needed
1060 sp -> +------------------------------
1063 /* We use this to wrap all emitted insns in the prologue. */
1064 static rtx
1065 F (rtx x)
1067 RTX_FRAME_RELATED_P (x) = 1;
1068 return x;
1071 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1072 how much the stack pointer moves for each, for each cpu family. */
1073 static struct
1075 int reg1;
1076 int bit;
1077 int a16_bytes;
1078 int a24_bytes;
1079 } pushm_info[] =
1081 /* These are in reverse push (nearest-to-sp) order. */
1082 { R0_REGNO, 0x80, 2, 2 },
1083 { R1_REGNO, 0x40, 2, 2 },
1084 { R2_REGNO, 0x20, 2, 2 },
1085 { R3_REGNO, 0x10, 2, 2 },
1086 { A0_REGNO, 0x08, 2, 4 },
1087 { A1_REGNO, 0x04, 2, 4 },
1088 { SB_REGNO, 0x02, 2, 4 },
1089 { FB_REGNO, 0x01, 2, 4 }
1092 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1094 /* Returns TRUE if we need to save/restore the given register. We
1095 save everything for exception handlers, so that any register can be
1096 unwound. For interrupt handlers, we save everything if the handler
1097 calls something else (because we don't know what *that* function
1098 might do), but try to be a bit smarter if the handler is a leaf
1099 function. We always save $a0, though, because we use that in the
1100 epilogue to copy $fb to $sp. */
1101 static int
1102 need_to_save (int regno)
1104 if (fixed_regs[regno])
1105 return 0;
1106 if (crtl->calls_eh_return)
1107 return 1;
1108 if (regno == FP_REGNO)
1109 return 0;
1110 if (cfun->machine->is_interrupt
1111 && (!cfun->machine->is_leaf
1112 || (regno == A0_REGNO
1113 && m32c_function_needs_enter ())
1115 return 1;
1116 if (df_regs_ever_live_p (regno)
1117 && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1118 return 1;
1119 return 0;
1122 /* This function contains all the intelligence about saving and
1123 restoring registers. It always figures out the register save set.
1124 When called with PP_justcount, it merely returns the size of the
1125 save set (for eliminating the frame pointer, for example). When
1126 called with PP_pushm or PP_popm, it emits the appropriate
1127 instructions for saving (pushm) or restoring (popm) the
1128 registers. */
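/* (Worked example, added for illustration: if only r0 and fb need
   saving, the PUSHM/POPM mask built below is 0x80 | 0x01 = 0x81 per
   pushm_info, and the A16 stack cost is 2 + 2 = 4 bytes.)  */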
1129 static int
1130 m32c_pushm_popm (Push_Pop_Type ppt)
1132 int reg_mask = 0;
1133 int byte_count = 0, bytes;
1134 int i;
1135 rtx dwarf_set[PUSHM_N];
1136 int n_dwarfs = 0;
1137 int nosave_mask = 0;
1139 if (crtl->return_rtx
1140 && GET_CODE (crtl->return_rtx) == PARALLEL
1141 && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1143 rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1144 rtx rv = XEXP (exp, 0);
1145 int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1147 if (rv_bytes > 2)
1148 nosave_mask |= 0x20; /* PSI, SI */
1149 else
1150 nosave_mask |= 0xf0; /* DF */
1151 if (rv_bytes > 4)
1152 nosave_mask |= 0x50; /* DI */
1155 for (i = 0; i < (int) PUSHM_N; i++)
1157 /* Skip if the register doesn't need saving. */
1158 if (!need_to_save (pushm_info[i].reg1))
1159 continue;
1161 if (pushm_info[i].bit & nosave_mask)
1162 continue;
1164 reg_mask |= pushm_info[i].bit;
1165 bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1167 if (ppt == PP_pushm)
1169 machine_mode mode = (bytes == 2) ? HImode : SImode;
1170 rtx addr;
1172 /* Always use stack_pointer_rtx instead of calling
1173 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1174 that there is a single rtx representing the stack pointer,
1175 namely stack_pointer_rtx, and uses == to recognize it. */
1176 addr = stack_pointer_rtx;
1178 if (byte_count != 0)
1179 addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1181 dwarf_set[n_dwarfs++] =
1182 gen_rtx_SET (gen_rtx_MEM (mode, addr),
1183 gen_rtx_REG (mode, pushm_info[i].reg1));
1184 F (dwarf_set[n_dwarfs - 1]);
1187 byte_count += bytes;
1190 if (cfun->machine->is_interrupt)
1192 cfun->machine->intr_pushm = reg_mask & 0xfe;
1193 reg_mask = 0;
1194 byte_count = 0;
1197 if (cfun->machine->is_interrupt)
1198 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1199 if (need_to_save (i))
1201 byte_count += 2;
1202 cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1205 if (ppt == PP_pushm && byte_count)
1207 rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1208 rtx pushm;
1210 if (reg_mask)
1212 XVECEXP (note, 0, 0)
1213 = gen_rtx_SET (stack_pointer_rtx,
1214 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1215 stack_pointer_rtx,
1216 GEN_INT (-byte_count)));
1217 F (XVECEXP (note, 0, 0));
1219 for (i = 0; i < n_dwarfs; i++)
1220 XVECEXP (note, 0, i + 1) = dwarf_set[i];
1222 pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1224 add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1227 if (cfun->machine->is_interrupt)
1228 for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1229 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1231 if (TARGET_A16)
1232 pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1233 else
1234 pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1235 F (pushm);
1238 if (ppt == PP_popm && byte_count)
1240 if (cfun->machine->is_interrupt)
1241 for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1242 if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1244 if (TARGET_A16)
1245 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1246 else
1247 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1249 if (reg_mask)
1250 emit_insn (gen_popm (GEN_INT (reg_mask)));
1253 return byte_count;
1256 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1257 diagrams our call frame. */
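/* (Worked example, added for illustration: eliminating $ap to $sp on
   a 16-bit part starts at 5 bytes (the 3-byte return address plus
   the 2-byte saved $fb), then adds the pushm save area and the frame
   size; only TARGET_A24 rounds the result up to a word.)  */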
1259 m32c_initial_elimination_offset (int from, int to)
1261 int ofs = 0;
1263 if (from == AP_REGNO)
1265 if (TARGET_A16)
1266 ofs += 5;
1267 else
1268 ofs += 8;
1271 if (to == SP_REGNO)
1273 ofs += m32c_pushm_popm (PP_justcount);
1274 ofs += get_frame_size ();
1277 /* Account for push rounding. */
1278 if (TARGET_A24)
1279 ofs = (ofs + 1) & ~1;
1280 #if DEBUG0
1281 fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1282 to, ofs);
1283 #endif
1284 return ofs;
1287 /* Passing Function Arguments on the Stack */
1289 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1290 M32C has word stacks. */
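/* (E.g., for illustration: pushing a single byte reserves 1 byte on
   the R8C/M16C byte stack but is rounded up to 2 bytes on M32C.)  */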
1291 unsigned int
1292 m32c_push_rounding (int n)
1294 if (TARGET_R8C || TARGET_M16C)
1295 return n;
1296 return (n + 1) & ~1;
1299 /* Passing Arguments in Registers */
1301 /* Implements TARGET_FUNCTION_ARG. Arguments are passed partly in
1302 registers, partly on stack. If our function returns a struct, a
1303 pointer to a buffer for it is at the top of the stack (last thing
1304 pushed). The first few real arguments may be in registers as
1305 follows:
1307 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1308 arg2 in r2 if it's HI (else pushed on stack)
1309 rest on stack
1310 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1311 rest on stack
1313 Structs are not passed in registers, even if they fit. Only
1314 integer and pointer types are passed in registers.
1316 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1317 r2 if it fits. */
1318 #undef TARGET_FUNCTION_ARG
1319 #define TARGET_FUNCTION_ARG m32c_function_arg
1320 static rtx
1321 m32c_function_arg (cumulative_args_t ca_v,
1322 machine_mode mode, const_tree type, bool named)
1324 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1326 /* Can return a reg, parallel, or 0 for stack */
1327 rtx rv = NULL_RTX;
1328 #if DEBUG0
1329 fprintf (stderr, "func_arg %d (%s, %d)\n",
1330 ca->parm_num, mode_name[mode], named);
1331 debug_tree ((tree)type);
1332 #endif
1334 if (mode == VOIDmode)
1335 return GEN_INT (0);
1337 if (ca->force_mem || !named)
1339 #if DEBUG0
1340 fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1341 named);
1342 #endif
1343 return NULL_RTX;
1346 if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1347 return NULL_RTX;
1349 if (type && AGGREGATE_TYPE_P (type))
1350 return NULL_RTX;
1352 switch (ca->parm_num)
1354 case 1:
1355 if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1356 rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1357 break;
1359 case 2:
1360 if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1361 rv = gen_rtx_REG (mode, R2_REGNO);
1362 break;
1365 #if DEBUG0
1366 debug_rtx (rv);
1367 #endif
1368 return rv;
1371 #undef TARGET_PASS_BY_REFERENCE
1372 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1373 static bool
1374 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1375 machine_mode mode ATTRIBUTE_UNUSED,
1376 const_tree type ATTRIBUTE_UNUSED,
1377 bool named ATTRIBUTE_UNUSED)
1379 return 0;
1382 /* Implements INIT_CUMULATIVE_ARGS. */
1383 void
1384 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1385 tree fntype,
1386 rtx libname ATTRIBUTE_UNUSED,
1387 tree fndecl,
1388 int n_named_args ATTRIBUTE_UNUSED)
1390 if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1391 ca->force_mem = 1;
1392 else
1393 ca->force_mem = 0;
1394 ca->parm_num = 1;
1397 /* Implements TARGET_FUNCTION_ARG_ADVANCE. force_mem is set for
1398 functions returning structures, so we always reset that. Otherwise,
1399 we only need to know the sequence number of the argument to know what
1400 to do with it. */
1401 #undef TARGET_FUNCTION_ARG_ADVANCE
1402 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1403 static void
1404 m32c_function_arg_advance (cumulative_args_t ca_v,
1405 machine_mode mode ATTRIBUTE_UNUSED,
1406 const_tree type ATTRIBUTE_UNUSED,
1407 bool named ATTRIBUTE_UNUSED)
1409 CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1411 if (ca->force_mem)
1412 ca->force_mem = 0;
1413 else
1414 ca->parm_num++;
1417 /* Implements TARGET_FUNCTION_ARG_BOUNDARY. */
1418 #undef TARGET_FUNCTION_ARG_BOUNDARY
1419 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1420 static unsigned int
1421 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1422 const_tree type ATTRIBUTE_UNUSED)
1424 return (TARGET_A16 ? 8 : 16);
1427 /* Implements FUNCTION_ARG_REGNO_P. */
1429 m32c_function_arg_regno_p (int r)
1431 if (TARGET_A24)
1432 return (r == R0_REGNO);
1433 return (r == R1_REGNO || r == R2_REGNO);
1436 /* HImode and PSImode are the two "native" modes as far as GCC is
1437 concerned, but the chips also support a 32-bit mode which is used
1438 for some opcodes in R8C/M16C and for reset vectors and such. */
1439 #undef TARGET_VALID_POINTER_MODE
1440 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1441 static bool
1442 m32c_valid_pointer_mode (scalar_int_mode mode)
1444 if (mode == HImode
1445 || mode == PSImode
1446 || mode == SImode
1448 return 1;
1449 return 0;
1452 /* How Scalar Function Values Are Returned */
1454 /* Implements TARGET_LIBCALL_VALUE. Most values are returned in $r0, or some
1455 combination of registers starting there (r2r0 for longs, r3r1r2r0
1456 for long long, r3r2r1r0 for doubles), except that that ABI
1457 currently doesn't work because it ends up using all available
1458 general registers and gcc often can't compile it. So, instead, we
1459 return anything bigger than 16 bits in "mem0" (effectively, a
1460 memory location). */
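/* (Concrete consequence, noted for illustration: QImode and HImode
   results come back in r0, while anything wider, SImode for
   instance, comes back in mem0 rather than the r2r0 pair described
   above.)  */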
1462 #undef TARGET_LIBCALL_VALUE
1463 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1465 static rtx
1466 m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1468 /* return reg or parallel */
1469 #if 0
1470 /* FIXME: GCC has difficulty returning large values in registers,
1471 because that ties up most of the general registers and gives the
1472 register allocator little to work with. Until we can resolve
1473 this, large values are returned in memory. */
1474 if (mode == DFmode)
1476 rtx rv;
1478 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1479 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1480 gen_rtx_REG (HImode,
1481 R0_REGNO),
1482 GEN_INT (0));
1483 XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1484 gen_rtx_REG (HImode,
1485 R1_REGNO),
1486 GEN_INT (2));
1487 XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1488 gen_rtx_REG (HImode,
1489 R2_REGNO),
1490 GEN_INT (4));
1491 XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1492 gen_rtx_REG (HImode,
1493 R3_REGNO),
1494 GEN_INT (6));
1495 return rv;
1498 if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1500 rtx rv;
1502 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1503 XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1504 gen_rtx_REG (mode,
1505 R0_REGNO),
1506 GEN_INT (0));
1507 return rv;
1509 #endif
1511 if (GET_MODE_SIZE (mode) > 2)
1512 return gen_rtx_REG (mode, MEM0_REGNO);
1513 return gen_rtx_REG (mode, R0_REGNO);
1516 /* Implements TARGET_FUNCTION_VALUE. Functions and libcalls have the same
1517 conventions. */
1519 #undef TARGET_FUNCTION_VALUE
1520 #define TARGET_FUNCTION_VALUE m32c_function_value
1522 static rtx
1523 m32c_function_value (const_tree valtype,
1524 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1525 bool outgoing ATTRIBUTE_UNUSED)
1527 /* return reg or parallel */
1528 const machine_mode mode = TYPE_MODE (valtype);
1529 return m32c_libcall_value (mode, NULL_RTX);
1532 /* Implements TARGET_FUNCTION_VALUE_REGNO_P. */
1534 #undef TARGET_FUNCTION_VALUE_REGNO_P
1535 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1537 static bool
1538 m32c_function_value_regno_p (const unsigned int regno)
1540 return (regno == R0_REGNO || regno == MEM0_REGNO);
1543 /* How Large Values Are Returned */
1545 /* We return structures by pushing the address on the stack, even if
1546 we use registers for the first few "real" arguments. */
1547 #undef TARGET_STRUCT_VALUE_RTX
1548 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1549 static rtx
1550 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1551 int incoming ATTRIBUTE_UNUSED)
1553 return 0;
1556 /* Function Entry and Exit */
1558 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1560 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1562 if (cfun->machine->is_interrupt)
1563 return 1;
1564 return 0;
1567 /* Implementing the Varargs Macros */
1569 #undef TARGET_STRICT_ARGUMENT_NAMING
1570 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1571 static bool
1572 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1574 return 1;
1577 /* Trampolines for Nested Functions */
1580 m16c:
1581 1 0000 75C43412 mov.w #0x1234,a0
1582 2 0004 FC000000 jmp.a label
1584 m32c:
1585 1 0000 BC563412 mov.l:s #0x123456,a0
1586 2 0004 CC000000 jmp.a label
1589 /* Implements TRAMPOLINE_SIZE. */
1591 m32c_trampoline_size (void)
1593 /* Allocate extra space so we can avoid the messy shifts when we
1594 initialize the trampoline; we just write past the end of the
1595 opcode. */
1596 return TARGET_A16 ? 8 : 10;
1599 /* Implements TRAMPOLINE_ALIGNMENT. */
1601 m32c_trampoline_alignment (void)
1603 return 2;
1606 /* Implements TARGET_TRAMPOLINE_INIT. */
1608 #undef TARGET_TRAMPOLINE_INIT
1609 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1610 static void
1611 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1613 rtx function = XEXP (DECL_RTL (fndecl), 0);
1615 #define A0(m,i) adjust_address (m_tramp, m, i)
1616 if (TARGET_A16)
1618 /* Note: we subtract a "word" because the moves want signed
1619 constants, not unsigned constants. */
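/* (Added note: assuming the little-endian byte order of these
   parts, this HImode store lays down 0x75, 0xc4, i.e. the "75C4"
   mov.w opcode bytes shown in the trampoline listing above.)  */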
1620 emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1621 emit_move_insn (A0 (HImode, 2), chainval);
1622 emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1623 /* We use 16-bit addresses here, but store the zero to turn it
1624 into a 24-bit offset. */
1625 emit_move_insn (A0 (HImode, 5), function);
1626 emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1628 else
1630 /* Note that the PSI moves actually write 4 bytes. Make sure we
1631 write stuff out in the right order, and leave room for the
1632 extra byte at the end. */
1633 emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1634 emit_move_insn (A0 (PSImode, 1), chainval);
1635 emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1636 emit_move_insn (A0 (PSImode, 5), function);
1638 #undef A0
1641 #undef TARGET_LRA_P
1642 #define TARGET_LRA_P hook_bool_void_false
1644 /* Addressing Modes */
1646 /* The r8c/m32c family supports a wide range of non-orthogonal
1647 addressing modes, including the ability to double-indirect on *some*
1648 of them. Not all insns support all modes, either, but we rely on
1649 predicates and constraints to deal with that. */
1650 #undef TARGET_LEGITIMATE_ADDRESS_P
1651 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1652 bool
1653 m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1655 int mode_adjust;
1656 if (CONSTANT_P (x))
1657 return 1;
1659 if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1660 return 0;
1661 if (TARGET_A24 && GET_MODE (x) != PSImode)
1662 return 0;
1664 /* Wide references to memory will be split after reload, so we must
1665 ensure that all parts of such splits remain legitimate
1666 addresses. */
1667 mode_adjust = GET_MODE_SIZE (mode) - 1;
1669 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1670 if (GET_CODE (x) == PRE_DEC
1671 || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1673 return (GET_CODE (XEXP (x, 0)) == REG
1674 && REGNO (XEXP (x, 0)) == SP_REGNO);
1677 #if 0
1678 /* This is the double indirection detection, but it currently
1679 doesn't work as cleanly as this code implies, so until we've had
1680 a chance to debug it, leave it disabled. */
1681 if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1683 #if DEBUG_DOUBLE
1684 fprintf (stderr, "double indirect\n");
1685 #endif
1686 x = XEXP (x, 0);
1688 #endif
1690 encode_pattern (x);
1691 if (RTX_IS ("r"))
1693 /* Most indexable registers can be used without displacements,
1694 although some of them will be emitted with an explicit zero
1695 to please the assembler. */
1696 switch (REGNO (patternr[0]))
1698 case A1_REGNO:
1699 case SB_REGNO:
1700 case FB_REGNO:
1701 case SP_REGNO:
1702 if (TARGET_A16 && GET_MODE (x) == SImode)
1703 return 0;
1704 /* FALLTHRU */
1705 case A0_REGNO:
1706 return 1;
1708 default:
1709 if (IS_PSEUDO (patternr[0], strict))
1710 return 1;
1711 return 0;
1715 if (TARGET_A16 && GET_MODE (x) == SImode)
1716 return 0;
1718 if (RTX_IS ("+ri"))
1720 /* This is more interesting, because different base registers
1721 allow for different displacements - both range and signedness
1722 - and it differs from chip series to chip series too. */
1723 int rn = REGNO (patternr[1]);
1724 HOST_WIDE_INT offs = INTVAL (patternr[2]);
1725 switch (rn)
1727 case A0_REGNO:
1728 case A1_REGNO:
1729 case SB_REGNO:
1730 /* The syntax only allows positive offsets, but when the
1731 offsets span the entire memory range, we can simulate
1732 negative offsets by wrapping. */
1733 if (TARGET_A16)
1734 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1735 if (rn == SB_REGNO)
1736 return (offs >= 0 && offs <= 65535 - mode_adjust);
1737 /* A0 or A1 */
1738 return (offs >= -16777216 && offs <= 16777215);
1740 case FB_REGNO:
1741 if (TARGET_A16)
1742 return (offs >= -128 && offs <= 127 - mode_adjust);
1743 return (offs >= -65536 && offs <= 65535 - mode_adjust);
1745 case SP_REGNO:
1746 return (offs >= -128 && offs <= 127 - mode_adjust);
1748 default:
1749 if (IS_PSEUDO (patternr[1], strict))
1750 return 1;
1751 return 0;
1754 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1756 rtx reg = patternr[1];
1758 /* We don't know where the symbol is, so only allow base
1759 registers which support displacements spanning the whole
1760 address range. */
1761 switch (REGNO (reg))
1763 case A0_REGNO:
1764 case A1_REGNO:
1765 /* $sb needs a secondary reload, but since it's involved in
1766 memory address reloads too, we don't deal with it very
1767 well. */
1768 /* case SB_REGNO: */
1769 return 1;
1770 default:
1771 if (GET_CODE (reg) == SUBREG)
1772 return 0;
1773 if (IS_PSEUDO (reg, strict))
1774 return 1;
1775 return 0;
1778 return 0;
1781 /* Implements REG_OK_FOR_BASE_P. */
1783 m32c_reg_ok_for_base_p (rtx x, int strict)
1785 if (GET_CODE (x) != REG)
1786 return 0;
1787 switch (REGNO (x))
1789 case A0_REGNO:
1790 case A1_REGNO:
1791 case SB_REGNO:
1792 case FB_REGNO:
1793 case SP_REGNO:
1794 return 1;
1795 default:
1796 if (IS_PSEUDO (x, strict))
1797 return 1;
1798 return 0;
1802 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1803 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1804 like this:
1805 EB 4B FF mova -128[$fb],$a0
1806 D8 0C FF FF mov.w:Q #0,-1[$a0]
1808 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1809 displacements:
1810 7B F4 stc $fb,$a0
1811 77 54 00 01 sub #256,$a0
1812 D8 08 01 mov.w:Q #0,1[$a0]
1814 If we don't offset (i.e. offset by zero), we end up with:
1815 7B F4 stc $fb,$a0
1816 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1818 We have to subtract *something* so that we have a PLUS rtx to mark
1819 that we've done this reload. The -128 offset will never result in
1820 an 8-bit aN offset, and the payoff for the second case is five
1821 loads *if* those loads are within 256 bytes of the other end of the
1822 frame, so the third case seems best. Note that we subtract the
1823 zero, but detect that in the addhi3 pattern. */
1825 #define BIG_FB_ADJ 0
1827 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1828 worry about is frame base offsets, as $fb has a limited
1829 displacement range. We deal with this by attempting to reload $fb
1830 itself into an address register; that seems to result in the best
1831 code. */
1832 #undef TARGET_LEGITIMIZE_ADDRESS
1833 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1834 static rtx
1835 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1836 machine_mode mode)
1838 #if DEBUG0
1839 fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1840 debug_rtx (x);
1841 fprintf (stderr, "\n");
1842 #endif
1844 if (GET_CODE (x) == PLUS
1845 && GET_CODE (XEXP (x, 0)) == REG
1846 && REGNO (XEXP (x, 0)) == FB_REGNO
1847 && GET_CODE (XEXP (x, 1)) == CONST_INT
1848 && (INTVAL (XEXP (x, 1)) < -128
1849 || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1851 /* reload FB to A_REGS */
1852 rtx temp = gen_reg_rtx (Pmode);
1853 x = copy_rtx (x);
1854 emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1855 XEXP (x, 0) = temp;
1858 return x;
1861 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1863 m32c_legitimize_reload_address (rtx * x,
1864 machine_mode mode,
1865 int opnum,
1866 int type, int ind_levels ATTRIBUTE_UNUSED)
1868 #if DEBUG0
1869 fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1870 mode_name[mode]);
1871 debug_rtx (*x);
1872 #endif
1874 /* At one point, this function tried to get $fb copied to an address
1875 register, which in theory would maximize sharing, but gcc was
1876 *also* still trying to reload the whole address, and we'd run out
1877 of address registers. So we let gcc do the naive (but safe)
1878 reload instead, when the above function doesn't handle it for us.
1881 The code below is a second attempt at the above. */
1883 if (GET_CODE (*x) == PLUS
1884 && GET_CODE (XEXP (*x, 0)) == REG
1885 && REGNO (XEXP (*x, 0)) == FB_REGNO
1886 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1887 && (INTVAL (XEXP (*x, 1)) < -128
1888 || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1890 rtx sum;
1891 int offset = INTVAL (XEXP (*x, 1));
1892 int adjustment = -BIG_FB_ADJ;
1894 sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1895 GEN_INT (adjustment));
1896 *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1897 if (type == RELOAD_OTHER)
1898 type = RELOAD_FOR_OTHER_ADDRESS;
1899 push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1900 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1901 (enum reload_type) type);
1902 return 1;
1905 if (GET_CODE (*x) == PLUS
1906 && GET_CODE (XEXP (*x, 0)) == PLUS
1907 && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1908 && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1909 && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1910 && GET_CODE (XEXP (*x, 1)) == CONST_INT
1913 if (type == RELOAD_OTHER)
1914 type = RELOAD_FOR_OTHER_ADDRESS;
1915 push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1916 A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1917 (enum reload_type) type);
1918 return 1;
1921 if (TARGET_A24 && GET_MODE (*x) == PSImode)
1923 push_reload (*x, NULL_RTX, x, NULL,
1924 A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1925 (enum reload_type) type);
1926 return 1;
1929 return 0;
1932 /* Return the appropriate mode for a named address pointer. */
1933 #undef TARGET_ADDR_SPACE_POINTER_MODE
1934 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1935 static scalar_int_mode
1936 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1938 switch (addrspace)
1940 case ADDR_SPACE_GENERIC:
1941 return TARGET_A24 ? PSImode : HImode;
1942 case ADDR_SPACE_FAR:
1943 return SImode;
1944 default:
1945 gcc_unreachable ();
1949 /* Return the appropriate mode for a named address address. */
1950 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1951 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1952 static scalar_int_mode
1953 m32c_addr_space_address_mode (addr_space_t addrspace)
1955 switch (addrspace)
1957 case ADDR_SPACE_GENERIC:
1958 return TARGET_A24 ? PSImode : HImode;
1959 case ADDR_SPACE_FAR:
1960 return SImode;
1961 default:
1962 gcc_unreachable ();
1966 /* Like m32c_legitimate_address_p, except with named addresses. */
1967 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1968 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1969 m32c_addr_space_legitimate_address_p
1970 static bool
1971 m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
1972 bool strict, addr_space_t as)
1974 if (as == ADDR_SPACE_FAR)
1976 if (TARGET_A24)
1977 return 0;
1978 encode_pattern (x);
1979 if (RTX_IS ("r"))
1981 if (GET_MODE (x) != SImode)
1982 return 0;
1983 switch (REGNO (patternr[0]))
1985 case A0_REGNO:
1986 return 1;
1988 default:
1989 if (IS_PSEUDO (patternr[0], strict))
1990 return 1;
1991 return 0;
1994 if (RTX_IS ("+^Sri"))
1996 int rn = REGNO (patternr[3]);
1997 HOST_WIDE_INT offs = INTVAL (patternr[4]);
1998 if (GET_MODE (patternr[3]) != HImode)
1999 return 0;
2000 switch (rn)
2002 case A0_REGNO:
2003 return (offs >= 0 && offs <= 0xfffff);
2005 default:
2006 if (IS_PSEUDO (patternr[3], strict))
2007 return 1;
2008 return 0;
2011 if (RTX_IS ("+^Srs"))
2013 int rn = REGNO (patternr[3]);
2014 if (GET_MODE (patternr[3]) != HImode)
2015 return 0;
2016 switch (rn)
2018 case A0_REGNO:
2019 return 1;
2021 default:
2022 if (IS_PSEUDO (patternr[3], strict))
2023 return 1;
2024 return 0;
2027 if (RTX_IS ("+^S+ris"))
2029 int rn = REGNO (patternr[4]);
2030 if (GET_MODE (patternr[4]) != HImode)
2031 return 0;
2032 switch (rn)
2034 case A0_REGNO:
2035 return 1;
2037 default:
2038 if (IS_PSEUDO (patternr[4], strict))
2039 return 1;
2040 return 0;
2043 if (RTX_IS ("s"))
2045 return 1;
2047 return 0;
2050 else if (as != ADDR_SPACE_GENERIC)
2051 gcc_unreachable ();
2053 return m32c_legitimate_address_p (mode, x, strict);
2056 /* Like m32c_legitimate_address, except with named address support. */
2057 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2058 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2059 static rtx
2060 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2061 addr_space_t as)
2063 if (as != ADDR_SPACE_GENERIC)
2065 #if DEBUG0
2066 fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2067 debug_rtx (x);
2068 fprintf (stderr, "\n");
2069 #endif
2071 if (GET_CODE (x) != REG)
2073 x = force_reg (SImode, x);
2075 return x;
2078 return m32c_legitimize_address (x, oldx, mode);
2081 /* Determine if one named address space is a subset of another. */
2082 #undef TARGET_ADDR_SPACE_SUBSET_P
2083 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2084 static bool
2085 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2087 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2088 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2090 if (subset == superset)
2091 return true;
2093 else
2094 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2097 #undef TARGET_ADDR_SPACE_CONVERT
2098 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2099 /* Convert from one address space to another. */
2100 static rtx
2101 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2103 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2104 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2105 rtx result;
2107 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2108 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2110 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2112 /* This is unpredictable, as we're truncating off usable address
2113 bits. */
2115 result = gen_reg_rtx (HImode);
2116 emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2117 return result;
2119 else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2121 /* This always works. */
2122 result = gen_reg_rtx (SImode);
2123 emit_insn (gen_zero_extendhisi2 (result, op));
2124 return result;
2126 else
2127 gcc_unreachable ();
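/* Editorial sketch (assumption, not from the original source): at the C
   level the two conversions above correspond to casts such as

       char __far *fp = np;        zero-extend HImode up to SImode
       char *np2 = (char *) fp;    truncate SImode down to HImode

   where np is a generic "char *"; the far-to-generic direction is only
   meaningful when the object actually lives in the low 64K.  */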
2130 /* Condition Code Status */
2132 #undef TARGET_FIXED_CONDITION_CODE_REGS
2133 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2134 static bool
2135 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2137 *p1 = FLG_REGNO;
2138 *p2 = INVALID_REGNUM;
2139 return true;
2142 /* Describing Relative Costs of Operations */
2144 /* Implements TARGET_REGISTER_MOVE_COST. We make impossible moves
2145 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2146 no opcodes to do that). We also discourage use of mem* registers
2147 since they're really memory. */
2149 #undef TARGET_REGISTER_MOVE_COST
2150 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2152 static int
2153 m32c_register_move_cost (machine_mode mode, reg_class_t from,
2154 reg_class_t to)
2156 int cost = COSTS_N_INSNS (3);
2157 HARD_REG_SET cc;
2159 /* FIXME: pick real values, but not 2 for now. */
2160 COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2161 IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2163 if (mode == QImode
2164 && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2166 if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2167 cost = COSTS_N_INSNS (1000);
2168 else
2169 cost = COSTS_N_INSNS (80);
2172 if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2173 cost = COSTS_N_INSNS (1000);
2175 if (reg_classes_intersect_p (from, CR_REGS))
2176 cost += COSTS_N_INSNS (5);
2178 if (reg_classes_intersect_p (to, CR_REGS))
2179 cost += COSTS_N_INSNS (5);
2181 if (from == MEM_REGS || to == MEM_REGS)
2182 cost += COSTS_N_INSNS (50);
2183 else if (reg_classes_intersect_p (from, MEM_REGS)
2184 || reg_classes_intersect_p (to, MEM_REGS))
2185 cost += COSTS_N_INSNS (10);
2187 #if DEBUG0
2188 fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2189 mode_name[mode], class_names[(int) from], class_names[(int) to],
2190 cost);
2191 #endif
2192 return cost;
2195 /* Implements TARGET_MEMORY_MOVE_COST. */
2197 #undef TARGET_MEMORY_MOVE_COST
2198 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2200 static int
2201 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2202 reg_class_t rclass ATTRIBUTE_UNUSED,
2203 bool in ATTRIBUTE_UNUSED)
2205 /* FIXME: pick real values. */
2206 return COSTS_N_INSNS (10);
2209 /* Here we try to describe when we use multiple opcodes for one RTX so
2210 that gcc knows when to use them. */
2211 #undef TARGET_RTX_COSTS
2212 #define TARGET_RTX_COSTS m32c_rtx_costs
2213 static bool
2214 m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2215 int opno ATTRIBUTE_UNUSED,
2216 int *total, bool speed ATTRIBUTE_UNUSED)
2218 int code = GET_CODE (x);
2219 switch (code)
2221 case REG:
2222 if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2223 *total += COSTS_N_INSNS (500);
2224 else
2225 *total += COSTS_N_INSNS (1);
2226 return true;
2228 case ASHIFT:
2229 case LSHIFTRT:
2230 case ASHIFTRT:
2231 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2233 /* mov.b r1l, r1h */
2234 *total += COSTS_N_INSNS (1);
2235 return true;
2237 if (INTVAL (XEXP (x, 1)) > 8
2238 || INTVAL (XEXP (x, 1)) < -8)
2240 /* mov.b #N, r1l */
2241 /* mov.b r1l, r1h */
2242 *total += COSTS_N_INSNS (2);
2243 return true;
2245 return true;
2247 case LE:
2248 case LEU:
2249 case LT:
2250 case LTU:
2251 case GT:
2252 case GTU:
2253 case GE:
2254 case GEU:
2255 case NE:
2256 case EQ:
2257 if (outer_code == SET)
2259 *total += COSTS_N_INSNS (2);
2260 return true;
2262 break;
2264 case ZERO_EXTRACT:
2266 rtx dest = XEXP (x, 0);
2267 rtx addr = XEXP (dest, 0);
2268 switch (GET_CODE (addr))
2270 case CONST_INT:
2271 *total += COSTS_N_INSNS (1);
2272 break;
2273 case SYMBOL_REF:
2274 *total += COSTS_N_INSNS (3);
2275 break;
2276 default:
2277 *total += COSTS_N_INSNS (2);
2278 break;
2280 return true;
2282 break;
2284 default:
2285 /* Reasonable default. */
2286 if (TARGET_A16 && mode == SImode)
2287 *total += COSTS_N_INSNS (2);
2288 break;
2290 return false;
2293 #undef TARGET_ADDRESS_COST
2294 #define TARGET_ADDRESS_COST m32c_address_cost
2295 static int
2296 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2297 addr_space_t as ATTRIBUTE_UNUSED,
2298 bool speed ATTRIBUTE_UNUSED)
2300 int i;
2301 /* fprintf(stderr, "\naddress_cost\n");
2302 debug_rtx(addr);*/
2303 switch (GET_CODE (addr))
2305 case CONST_INT:
2306 i = INTVAL (addr);
2307 if (i == 0)
2308 return COSTS_N_INSNS(1);
2309 if (0 < i && i <= 255)
2310 return COSTS_N_INSNS(2);
2311 if (0 < i && i <= 65535)
2312 return COSTS_N_INSNS(3);
2313 return COSTS_N_INSNS(4);
2314 case SYMBOL_REF:
2315 return COSTS_N_INSNS(4);
2316 case REG:
2317 return COSTS_N_INSNS(1);
2318 case PLUS:
2319 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2321 i = INTVAL (XEXP (addr, 1));
2322 if (i == 0)
2323 return COSTS_N_INSNS(1);
2324 if (0 < i && i <= 255)
2325 return COSTS_N_INSNS(2);
2326 if (0 < i && i <= 65535)
2327 return COSTS_N_INSNS(3);
2329 return COSTS_N_INSNS(4);
2330 default:
2331 return 0;
2335 /* Defining the Output Assembler Language */
2337 /* Output of Data */
2339 /* We may emit 24-bit values, 24 bits being the native address size.
2340 Currently unused, but provided for completeness. */
2341 #undef TARGET_ASM_INTEGER
2342 #define TARGET_ASM_INTEGER m32c_asm_integer
2343 static bool
2344 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2346 switch (size)
2348 case 3:
2349 fprintf (asm_out_file, "\t.3byte\t");
2350 output_addr_const (asm_out_file, x);
2351 fputc ('\n', asm_out_file);
2352 return true;
2353 case 4:
2354 if (GET_CODE (x) == SYMBOL_REF)
2356 fprintf (asm_out_file, "\t.long\t");
2357 output_addr_const (asm_out_file, x);
2358 fputc ('\n', asm_out_file);
2359 return true;
2361 break;
2363 return default_assemble_integer (x, size, aligned_p);
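/* Editorial example (assumed assembler output): a 3-byte initializer,
   e.g. an address-sized constant in a table, would be emitted by the
   case above as

       .3byte  _some_symbol

   4-byte SYMBOL_REFs go out via ".long", and everything else falls
   back to default_assemble_integer.  */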
2366 /* Output of Assembler Instructions */
2368 /* We use a lookup table because the addressing modes are non-orthogonal. */
2370 static struct
2372 char code;
2373 char const *pattern;
2374 char const *format;
2376 const conversions[] = {
2377 { 0, "r", "0" },
2379 { 0, "mr", "z[1]" },
2380 { 0, "m+ri", "3[2]" },
2381 { 0, "m+rs", "3[2]" },
2382 { 0, "m+^Zrs", "5[4]" },
2383 { 0, "m+^Zri", "5[4]" },
2384 { 0, "m+^Z+ris", "7+6[5]" },
2385 { 0, "m+^Srs", "5[4]" },
2386 { 0, "m+^Sri", "5[4]" },
2387 { 0, "m+^S+ris", "7+6[5]" },
2388 { 0, "m+r+si", "4+5[2]" },
2389 { 0, "ms", "1" },
2390 { 0, "mi", "1" },
2391 { 0, "m+si", "2+3" },
2393 { 0, "mmr", "[z[2]]" },
2394 { 0, "mm+ri", "[4[3]]" },
2395 { 0, "mm+rs", "[4[3]]" },
2396 { 0, "mm+r+si", "[5+6[3]]" },
2397 { 0, "mms", "[[2]]" },
2398 { 0, "mmi", "[[2]]" },
2399 { 0, "mm+si", "[4[3]]" },
2401 { 0, "i", "#0" },
2402 { 0, "s", "#0" },
2403 { 0, "+si", "#1+2" },
2404 { 0, "l", "#0" },
2406 { 'l', "l", "0" },
2407 { 'd', "i", "0" },
2408 { 'd', "s", "0" },
2409 { 'd', "+si", "1+2" },
2410 { 'D', "i", "0" },
2411 { 'D', "s", "0" },
2412 { 'D', "+si", "1+2" },
2413 { 'x', "i", "#0" },
2414 { 'X', "i", "#0" },
2415 { 'm', "i", "#0" },
2416 { 'b', "i", "#0" },
2417 { 'B', "i", "0" },
2418 { 'p', "i", "0" },
2420 { 0, 0, 0 }
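/* Editorial note (derived from m32c_print_operand below): digits in the
   "format" string index patternr[], so a frame-local operand such as
   (mem (plus (reg fb) (const_int -4))) encodes as "m+ri", selects the
   template "3[2]", and prints as "-4[fb]".  */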
2423 /* This is ordered according to the bitfield that pushm/popm use. */
2424 static char const *pushm_regs[] = {
2425 "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2428 /* Implements TARGET_PRINT_OPERAND. */
2430 #undef TARGET_PRINT_OPERAND
2431 #define TARGET_PRINT_OPERAND m32c_print_operand
2433 static void
2434 m32c_print_operand (FILE * file, rtx x, int code)
2436 int i, j, b;
2437 const char *comma;
2438 HOST_WIDE_INT ival;
2439 int unsigned_const = 0;
2440 int force_sign;
2442 /* Multiplies; constants are converted to sign-extended format but
2443 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2444 need. */
2445 if (code == 'u')
2447 unsigned_const = 2;
2448 code = 0;
2450 if (code == 'U')
2452 unsigned_const = 1;
2453 code = 0;
2455 /* This one is only for debugging; you can put it in a pattern to
2456 force this error. */
2457 if (code == '!')
2459 fprintf (stderr, "dj: unreviewed pattern:");
2460 if (current_output_insn)
2461 debug_rtx (current_output_insn);
2462 gcc_unreachable ();
2464 /* PSImode operations are either .w or .l depending on the target. */
2465 if (code == '&')
2467 if (TARGET_A16)
2468 fprintf (file, "w");
2469 else
2470 fprintf (file, "l");
2471 return;
2473 /* Inverted conditionals. */
2474 if (code == 'C')
2476 switch (GET_CODE (x))
2478 case LE:
2479 fputs ("gt", file);
2480 break;
2481 case LEU:
2482 fputs ("gtu", file);
2483 break;
2484 case LT:
2485 fputs ("ge", file);
2486 break;
2487 case LTU:
2488 fputs ("geu", file);
2489 break;
2490 case GT:
2491 fputs ("le", file);
2492 break;
2493 case GTU:
2494 fputs ("leu", file);
2495 break;
2496 case GE:
2497 fputs ("lt", file);
2498 break;
2499 case GEU:
2500 fputs ("ltu", file);
2501 break;
2502 case NE:
2503 fputs ("eq", file);
2504 break;
2505 case EQ:
2506 fputs ("ne", file);
2507 break;
2508 default:
2509 gcc_unreachable ();
2511 return;
2513 /* Regular conditionals. */
2514 if (code == 'c')
2516 switch (GET_CODE (x))
2518 case LE:
2519 fputs ("le", file);
2520 break;
2521 case LEU:
2522 fputs ("leu", file);
2523 break;
2524 case LT:
2525 fputs ("lt", file);
2526 break;
2527 case LTU:
2528 fputs ("ltu", file);
2529 break;
2530 case GT:
2531 fputs ("gt", file);
2532 break;
2533 case GTU:
2534 fputs ("gtu", file);
2535 break;
2536 case GE:
2537 fputs ("ge", file);
2538 break;
2539 case GEU:
2540 fputs ("geu", file);
2541 break;
2542 case NE:
2543 fputs ("ne", file);
2544 break;
2545 case EQ:
2546 fputs ("eq", file);
2547 break;
2548 default:
2549 gcc_unreachable ();
2551 return;
2553 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2554 operand. */
2555 if (code == 'h' && GET_MODE (x) == SImode)
2557 x = m32c_subreg (HImode, x, SImode, 0);
2558 code = 0;
2560 if (code == 'H' && GET_MODE (x) == SImode)
2562 x = m32c_subreg (HImode, x, SImode, 2);
2563 code = 0;
2565 if (code == 'h' && GET_MODE (x) == HImode)
2567 x = m32c_subreg (QImode, x, HImode, 0);
2568 code = 0;
2570 if (code == 'H' && GET_MODE (x) == HImode)
2572 /* We can't actually represent this as an rtx. Do it here. */
2573 if (GET_CODE (x) == REG)
2575 switch (REGNO (x))
2577 case R0_REGNO:
2578 fputs ("r0h", file);
2579 return;
2580 case R1_REGNO:
2581 fputs ("r1h", file);
2582 return;
2583 default:
2584 gcc_unreachable();
2587 /* This should be a MEM. */
2588 x = m32c_subreg (QImode, x, HImode, 1);
2589 code = 0;
2591 /* This is for BMcond, which always wants word register names. */
2592 if (code == 'h' && GET_MODE (x) == QImode)
2594 if (GET_CODE (x) == REG)
2595 x = gen_rtx_REG (HImode, REGNO (x));
2596 code = 0;
2598 /* 'x' and 'X' need to be ignored for non-immediates. */
2599 if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2600 code = 0;
2602 encode_pattern (x);
2603 force_sign = 0;
2604 for (i = 0; conversions[i].pattern; i++)
2605 if (conversions[i].code == code
2606 && streq (conversions[i].pattern, pattern))
2608 for (j = 0; conversions[i].format[j]; j++)
2609 /* backslash quotes the next character in the output pattern. */
2610 if (conversions[i].format[j] == '\\')
2612 fputc (conversions[i].format[j + 1], file);
2613 j++;
2615 /* Digits in the output pattern indicate that the
2616 corresponding RTX is to be output at that point. */
2617 else if (ISDIGIT (conversions[i].format[j]))
2619 rtx r = patternr[conversions[i].format[j] - '0'];
2620 switch (GET_CODE (r))
2622 case REG:
2623 fprintf (file, "%s",
2624 reg_name_with_mode (REGNO (r), GET_MODE (r)));
2625 break;
2626 case CONST_INT:
2627 switch (code)
2629 case 'b':
2630 case 'B':
2632 int v = INTVAL (r);
2633 int i = (int) exact_log2 (v);
2634 if (i == -1)
2635 i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2636 if (i == -1)
2637 i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2638 /* Bit position. */
2639 fprintf (file, "%d", i);
2641 break;
2642 case 'x':
2643 /* Unsigned byte. */
2644 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2645 INTVAL (r) & 0xff);
2646 break;
2647 case 'X':
2648 /* Unsigned word. */
2649 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2650 INTVAL (r) & 0xffff);
2651 break;
2652 case 'p':
2653 /* pushm and popm encode a register set into a single byte. */
2654 comma = "";
2655 for (b = 7; b >= 0; b--)
2656 if (INTVAL (r) & (1 << b))
2658 fprintf (file, "%s%s", comma, pushm_regs[b]);
2659 comma = ",";
2661 break;
2662 case 'm':
2663 /* "Minus". Output -X */
2664 ival = (-INTVAL (r) & 0xffff);
2665 if (ival & 0x8000)
2666 ival = ival - 0x10000;
2667 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2668 break;
2669 default:
2670 ival = INTVAL (r);
2671 if (conversions[i].format[j + 1] == '[' && ival < 0)
2673 /* We can simulate negative displacements by
2674 taking advantage of address space
2675 wrapping when the offset can span the
2676 entire address range. */
2677 rtx base =
2678 patternr[conversions[i].format[j + 2] - '0'];
2679 if (GET_CODE (base) == REG)
2680 switch (REGNO (base))
2682 case A0_REGNO:
2683 case A1_REGNO:
2684 if (TARGET_A24)
2685 ival = 0x1000000 + ival;
2686 else
2687 ival = 0x10000 + ival;
2688 break;
2689 case SB_REGNO:
2690 if (TARGET_A16)
2691 ival = 0x10000 + ival;
2692 break;
2695 else if (code == 'd' && ival < 0 && j == 0)
2696 /* The "mova" opcode is used to do addition by
2697 computing displacements, but again, we need
2698 displacements to be unsigned *if* they're
2699 the only component of the displacement
2700 (i.e. no "symbol-4" type displacement). */
2701 ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2703 if (conversions[i].format[j] == '0')
2705 /* More conversions to unsigned. */
2706 if (unsigned_const == 2)
2707 ival &= 0xffff;
2708 if (unsigned_const == 1)
2709 ival &= 0xff;
2711 if (streq (conversions[i].pattern, "mi")
2712 || streq (conversions[i].pattern, "mmi"))
2714 /* Integers used as addresses are unsigned. */
2715 ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2717 if (force_sign && ival >= 0)
2718 fputc ('+', file);
2719 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2720 break;
2722 break;
2723 case CONST_DOUBLE:
2724 /* We don't have const_double constants. If it
2725 happens, make it obvious. */
2726 fprintf (file, "[const_double 0x%lx]",
2727 (unsigned long) CONST_DOUBLE_HIGH (r));
2728 break;
2729 case SYMBOL_REF:
2730 assemble_name (file, XSTR (r, 0));
2731 break;
2732 case LABEL_REF:
2733 output_asm_label (r);
2734 break;
2735 default:
2736 fprintf (stderr, "don't know how to print this operand:");
2737 debug_rtx (r);
2738 gcc_unreachable ();
2741 else
2743 if (conversions[i].format[j] == 'z')
2745 /* Some addressing modes *must* have a displacement,
2746 so insert a zero here if needed. */
2747 int k;
2748 for (k = j + 1; conversions[i].format[k]; k++)
2749 if (ISDIGIT (conversions[i].format[k]))
2751 rtx reg = patternr[conversions[i].format[k] - '0'];
2752 if (GET_CODE (reg) == REG
2753 && (REGNO (reg) == SB_REGNO
2754 || REGNO (reg) == FB_REGNO
2755 || REGNO (reg) == SP_REGNO))
2756 fputc ('0', file);
2758 continue;
2760 /* Signed displacements off symbols need to have signs
2761 blended cleanly. */
2762 if (conversions[i].format[j] == '+'
2763 && (!code || code == 'D' || code == 'd')
2764 && ISDIGIT (conversions[i].format[j + 1])
2765 && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2766 == CONST_INT))
2768 force_sign = 1;
2769 continue;
2771 fputc (conversions[i].format[j], file);
2773 break;
2775 if (!conversions[i].pattern)
2777 fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2778 pattern);
2779 debug_rtx (x);
2780 fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2783 return;
2786 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2788 See m32c_print_operand above for descriptions of what these do. */
2790 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2791 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2793 static bool
2794 m32c_print_operand_punct_valid_p (unsigned char c)
2796 if (c == '&' || c == '!')
2797 return true;
2799 return false;
2802 /* Implements TARGET_PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2804 #undef TARGET_PRINT_OPERAND_ADDRESS
2805 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2807 static void
2808 m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
2810 if (GET_CODE (address) == MEM)
2811 address = XEXP (address, 0);
2812 else
2813 /* cf: gcc.dg/asm-4.c. */
2814 gcc_assert (GET_CODE (address) == REG);
2816 m32c_print_operand (stream, address, 0);
2819 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2820 differently than general registers. */
2821 void
2822 m32c_output_reg_push (FILE * s, int regno)
2824 if (regno == FLG_REGNO)
2825 fprintf (s, "\tpushc\tflg\n");
2826 else
2827 fprintf (s, "\tpush.%c\t%s\n",
2828 " bwll"[reg_push_size (regno)], reg_names[regno]);
2831 /* Likewise for ASM_OUTPUT_REG_POP. */
2832 void
2833 m32c_output_reg_pop (FILE * s, int regno)
2835 if (regno == FLG_REGNO)
2836 fprintf (s, "\tpopc\tflg\n");
2837 else
2838 fprintf (s, "\tpop.%c\t%s\n",
2839 " bwll"[reg_push_size (regno)], reg_names[regno]);
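/* Editorial example (assumed output): pushing and later popping r1
   (HImode, so reg_push_size () is 2) and the flag register would emit

       push.w  r1
       pushc   flg
       ...
       popc    flg
       pop.w   r1

   since FLG always goes through pushc/popc rather than push/pop.  */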
2842 /* Defining target-specific uses of `__attribute__' */
2844 /* Used to simplify the logic below. Find the attributes wherever
2845 they may be. */
2846 #define M32C_ATTRIBUTES(decl) \
2847 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2848 : DECL_ATTRIBUTES (decl) \
2849 ? (DECL_ATTRIBUTES (decl)) \
2850 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2852 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2853 static int
2854 interrupt_p (tree node ATTRIBUTE_UNUSED)
2856 tree list = M32C_ATTRIBUTES (node);
2857 while (list)
2859 if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2860 return 1;
2861 list = TREE_CHAIN (list);
2863 return fast_interrupt_p (node);
2866 /* Returns TRUE if the given tree has the "bank_switch" attribute. */
2867 static int
2868 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2870 tree list = M32C_ATTRIBUTES (node);
2871 while (list)
2873 if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2874 return 1;
2875 list = TREE_CHAIN (list);
2877 return 0;
2880 /* Returns TRUE if the given tree has the "fast_interrupt" attribute. */
2881 static int
2882 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2884 tree list = M32C_ATTRIBUTES (node);
2885 while (list)
2887 if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2888 return 1;
2889 list = TREE_CHAIN (list);
2891 return 0;
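/* Editorial example (user-level usage, not from the original source):
   the predicates above test attributes written as, e.g.,

       void timer_isr (void) __attribute__((interrupt));
       void fast_isr (void) __attribute__((fast_interrupt));
       void bank_isr (void) __attribute__((interrupt, bank_switch));

   Note that interrupt_p () also returns nonzero for fast_interrupt
   functions via its tail call to fast_interrupt_p ().  */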
2894 static tree
2895 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2896 tree name ATTRIBUTE_UNUSED,
2897 tree args ATTRIBUTE_UNUSED,
2898 int flags ATTRIBUTE_UNUSED,
2899 bool * no_add_attrs ATTRIBUTE_UNUSED)
2901 return NULL_TREE;
2904 /* Returns TRUE if the given tree has the "function_vector" attribute. */
2906 m32c_special_page_vector_p (tree func)
2908 tree list;
2910 if (TREE_CODE (func) != FUNCTION_DECL)
2911 return 0;
2913 list = M32C_ATTRIBUTES (func);
2914 while (list)
2916 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2917 return 1;
2918 list = TREE_CHAIN (list);
2920 return 0;
2923 static tree
2924 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2925 tree name ATTRIBUTE_UNUSED,
2926 tree args ATTRIBUTE_UNUSED,
2927 int flags ATTRIBUTE_UNUSED,
2928 bool * no_add_attrs ATTRIBUTE_UNUSED)
2930 if (TARGET_R8C)
2932 /* The attribute is not supported for the R8C target. */
2933 warning (OPT_Wattributes,
2934 "%qE attribute is not supported for R8C target",
2935 name);
2936 *no_add_attrs = true;
2938 else if (TREE_CODE (*node) != FUNCTION_DECL)
2940 /* The attribute must be applied to functions only. */
2941 warning (OPT_Wattributes,
2942 "%qE attribute applies only to functions",
2943 name);
2944 *no_add_attrs = true;
2946 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2948 /* The argument must be a constant integer. */
2949 warning (OPT_Wattributes,
2950 "%qE attribute argument not an integer constant",
2951 name);
2952 *no_add_attrs = true;
2954 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2955 || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2957 /* The argument value must be between 18 and 255. */
2958 warning (OPT_Wattributes,
2959 "%qE attribute argument should be between 18 and 255",
2960 name);
2961 *no_add_attrs = true;
2963 return NULL_TREE;
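/* Editorial example (user-level usage, assumption): a function routed
   through the special page vector table would be declared as

       void handler (void) __attribute__((function_vector (0x20)));

   with the argument limited to 18..255 by the checks above.  */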
2966 /* If the function is assigned the 'function_vector' attribute, return
2967 its vector number; otherwise return zero. */
2969 current_function_special_page_vector (rtx x)
2971 int num;
2973 if ((GET_CODE(x) == SYMBOL_REF)
2974 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2976 tree list;
2977 tree t = SYMBOL_REF_DECL (x);
2979 if (TREE_CODE (t) != FUNCTION_DECL)
2980 return 0;
2982 list = M32C_ATTRIBUTES (t);
2983 while (list)
2985 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2987 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2988 return num;
2991 list = TREE_CHAIN (list);
2994 return 0;
2996 else
2997 return 0;
3000 #undef TARGET_ATTRIBUTE_TABLE
3001 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
3002 static const struct attribute_spec m32c_attribute_table[] = {
3003 {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
3004 {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
3005 {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
3006 {"function_vector", 1, 1, true, false, false, function_vector_handler,
3007 false},
3008 {0, 0, 0, 0, 0, 0, 0, false}
3011 #undef TARGET_COMP_TYPE_ATTRIBUTES
3012 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3013 static int
3014 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3015 const_tree type2 ATTRIBUTE_UNUSED)
3017 /* 0=incompatible 1=compatible 2=warning */
3018 return 1;
3021 #undef TARGET_INSERT_ATTRIBUTES
3022 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3023 static void
3024 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3025 tree * attr_ptr ATTRIBUTE_UNUSED)
3027 unsigned addr;
3028 /* See if we need to make #pragma address variables volatile. */
3030 if (TREE_CODE (node) == VAR_DECL)
3032 const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3033 if (m32c_get_pragma_address (name, &addr))
3035 TREE_THIS_VOLATILE (node) = true;
3040 /* Hash table of pragma info. */
3041 static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
3043 void
3044 m32c_note_pragma_address (const char *varname, unsigned address)
3046 if (!pragma_htab)
3047 pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
3049 const char *name = ggc_strdup (varname);
3050 unsigned int *slot = &pragma_htab->get_or_insert (name);
3051 *slot = address;
3054 static bool
3055 m32c_get_pragma_address (const char *varname, unsigned *address)
3057 if (!pragma_htab)
3058 return false;
3060 unsigned int *slot = pragma_htab->get (varname);
3061 if (slot)
3063 *address = *slot;
3064 return true;
3066 return false;
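/* Editorial example (assumed pragma syntax): the address recorded by
   m32c_note_pragma_address pins a variable to a fixed location, e.g.

       #pragma ADDRESS port0 0x3b0
       unsigned char port0;

   m32c_insert_attributes above then makes such variables volatile, and
   m32c_output_aligned_common below emits "port0 = 0x03b0" instead of a
   .comm directive.  */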
3069 void
3070 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3071 const char *name,
3072 int size, int align, int global)
3074 unsigned address;
3076 if (m32c_get_pragma_address (name, &address))
3078 /* We never output these as global. */
3079 assemble_name (stream, name);
3080 fprintf (stream, " = 0x%04x\n", address);
3081 return;
3083 if (!global)
3085 fprintf (stream, "\t.local\t");
3086 assemble_name (stream, name);
3087 fprintf (stream, "\n");
3089 fprintf (stream, "\t.comm\t");
3090 assemble_name (stream, name);
3091 fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3094 /* Predicates */
3096 /* This is a list of legal subregs of hard regs. */
3097 static const struct {
3098 unsigned char outer_mode_size;
3099 unsigned char inner_mode_size;
3100 unsigned char byte_mask;
3101 unsigned char legal_when;
3102 unsigned int regno;
3103 } legal_subregs[] = {
3104 {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3105 {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3106 {1, 2, 0x01, 1, A0_REGNO},
3107 {1, 2, 0x01, 1, A1_REGNO},
3109 {1, 4, 0x01, 1, A0_REGNO},
3110 {1, 4, 0x01, 1, A1_REGNO},
3112 {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3113 {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3114 {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3115 {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3116 {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3118 {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
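/* Editorial example (derived from the table above): QImode subregs of r0
   are legal at bytes 0 and 1 (r0l/r0h), but a QImode subreg of a0 is
   only legal at byte 0, so m32c_illegal_subreg_p () below rejects
   (subreg:QI (reg:HI a0) 1).  */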
3121 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3122 support. We also bail on MEMs with illegal addresses. */
3123 bool
3124 m32c_illegal_subreg_p (rtx op)
3126 int offset;
3127 unsigned int i;
3128 machine_mode src_mode, dest_mode;
3130 if (GET_CODE (op) == MEM
3131 && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3133 return true;
3136 if (GET_CODE (op) != SUBREG)
3137 return false;
3139 dest_mode = GET_MODE (op);
3140 offset = SUBREG_BYTE (op);
3141 op = SUBREG_REG (op);
3142 src_mode = GET_MODE (op);
3144 if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3145 return false;
3146 if (GET_CODE (op) != REG)
3147 return false;
3148 if (REGNO (op) >= MEM0_REGNO)
3149 return false;
3151 offset = (1 << offset);
3153 for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3154 if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3155 && legal_subregs[i].regno == REGNO (op)
3156 && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3157 && legal_subregs[i].byte_mask & offset)
3159 switch (legal_subregs[i].legal_when)
3161 case 1:
3162 return false;
3163 case 16:
3164 if (TARGET_A16)
3165 return false;
3166 break;
3167 case 24:
3168 if (TARGET_A24)
3169 return false;
3170 break;
3173 return true;
3176 /* Returns TRUE if we support a move between the first two operands.
3177 At the moment, we just want to discourage mem to mem moves until
3178 after reload, because reload has a hard time with our limited
3179 number of address registers, and we can get into a situation where
3180 we need three of them when we only have two. */
3181 bool
3182 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3184 rtx op0 = operands[0];
3185 rtx op1 = operands[1];
3187 if (TARGET_A24)
3188 return true;
3190 #define DEBUG_MOV_OK 0
3191 #if DEBUG_MOV_OK
3192 fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3193 debug_rtx (op0);
3194 debug_rtx (op1);
3195 #endif
3197 if (GET_CODE (op0) == SUBREG)
3198 op0 = XEXP (op0, 0);
3199 if (GET_CODE (op1) == SUBREG)
3200 op1 = XEXP (op1, 0);
3202 if (GET_CODE (op0) == MEM
3203 && GET_CODE (op1) == MEM
3204 && ! reload_completed)
3206 #if DEBUG_MOV_OK
3207 fprintf (stderr, " - no, mem to mem\n");
3208 #endif
3209 return false;
3212 #if DEBUG_MOV_OK
3213 fprintf (stderr, " - ok\n");
3214 #endif
3215 return true;
3218 /* Returns TRUE if two consecutive HImode mov instructions, generated
3219 for moving an immediate into a double-word variable location, can be
3220 combined into a single SImode mov instruction. */
3221 bool
3222 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3223 machine_mode mode ATTRIBUTE_UNUSED)
3225 /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3226 flags. */
3227 return false;
3230 /* Expanders */
3232 /* Subregs are non-orthogonal for us, because our registers are all
3233 different sizes. */
3234 static rtx
3235 m32c_subreg (machine_mode outer,
3236 rtx x, machine_mode inner, int byte)
3238 int r, nr = -1;
3240 /* When converting MEMs to different modes of the same size, we
3241 just rewrite them. */
3242 if (GET_CODE (x) == SUBREG
3243 && SUBREG_BYTE (x) == 0
3244 && GET_CODE (SUBREG_REG (x)) == MEM
3245 && (GET_MODE_SIZE (GET_MODE (x))
3246 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3248 rtx oldx = x;
3249 x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3250 MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3253 /* Push/pop get done as smaller push/pops. */
3254 if (GET_CODE (x) == MEM
3255 && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3256 || GET_CODE (XEXP (x, 0)) == POST_INC))
3257 return gen_rtx_MEM (outer, XEXP (x, 0));
3258 if (GET_CODE (x) == SUBREG
3259 && GET_CODE (XEXP (x, 0)) == MEM
3260 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3261 || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3262 return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3264 if (GET_CODE (x) != REG)
3266 rtx r = simplify_gen_subreg (outer, x, inner, byte);
3267 if (GET_CODE (r) == SUBREG
3268 && GET_CODE (x) == MEM
3269 && MEM_VOLATILE_P (x))
3271 /* Volatile MEMs don't get simplified, but we need them to
3272 be. We are little endian, so the subreg byte is the
3273 offset. */
3274 r = adjust_address_nv (x, outer, byte);
3276 return r;
3279 r = REGNO (x);
3280 if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3281 return simplify_gen_subreg (outer, x, inner, byte);
3283 if (IS_MEM_REGNO (r))
3284 return simplify_gen_subreg (outer, x, inner, byte);
3286 /* This is where the complexities of our register layout are
3287 described. */
3288 if (byte == 0)
3289 nr = r;
3290 else if (outer == HImode)
3292 if (r == R0_REGNO && byte == 2)
3293 nr = R2_REGNO;
3294 else if (r == R0_REGNO && byte == 4)
3295 nr = R1_REGNO;
3296 else if (r == R0_REGNO && byte == 6)
3297 nr = R3_REGNO;
3298 else if (r == R1_REGNO && byte == 2)
3299 nr = R3_REGNO;
3300 else if (r == A0_REGNO && byte == 2)
3301 nr = A1_REGNO;
3303 else if (outer == SImode)
3305 if (r == R0_REGNO && byte == 0)
3306 nr = R0_REGNO;
3307 else if (r == R0_REGNO && byte == 4)
3308 nr = R1_REGNO;
3310 if (nr == -1)
3312 fprintf (stderr, "m32c_subreg %s %s %d\n",
3313 mode_name[outer], mode_name[inner], byte);
3314 debug_rtx (x);
3315 gcc_unreachable ();
3317 return gen_rtx_REG (outer, nr);
3320 /* Used to emit move instructions. We split some moves,
3321 and avoid mem-mem moves. */
3323 m32c_prepare_move (rtx * operands, machine_mode mode)
3325 if (far_addr_space_p (operands[0])
3326 && CONSTANT_P (operands[1]))
3328 operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3330 if (TARGET_A16 && mode == PSImode)
3331 return m32c_split_move (operands, mode, 1);
3332 if ((GET_CODE (operands[0]) == MEM)
3333 && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3335 rtx pmv = XEXP (operands[0], 0);
3336 rtx dest_reg = XEXP (pmv, 0);
3337 rtx dest_mod = XEXP (pmv, 1);
3339 emit_insn (gen_rtx_SET (dest_reg, dest_mod));
3340 operands[0] = gen_rtx_MEM (mode, dest_reg);
3342 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3343 operands[1] = copy_to_mode_reg (mode, operands[1]);
3344 return 0;
3347 #define DEBUG_SPLIT 0
3349 /* Returns TRUE if the given PSImode move should be split. We split
3350 all r8c/m16c moves, since those chips don't support PSImode moves,
3351 and POP.L, since we can only *push* SImode. */
3353 m32c_split_psi_p (rtx * operands)
3355 #if DEBUG_SPLIT
3356 fprintf (stderr, "\nm32c_split_psi_p\n");
3357 debug_rtx (operands[0]);
3358 debug_rtx (operands[1]);
3359 #endif
3360 if (TARGET_A16)
3362 #if DEBUG_SPLIT
3363 fprintf (stderr, "yes, A16\n");
3364 #endif
3365 return 1;
3367 if (GET_CODE (operands[1]) == MEM
3368 && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3370 #if DEBUG_SPLIT
3371 fprintf (stderr, "yes, pop.l\n");
3372 #endif
3373 return 1;
3375 #if DEBUG_SPLIT
3376 fprintf (stderr, "no, default\n");
3377 #endif
3378 return 0;
3381 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3382 (define_expand), 1 if it is not optional (define_insn_and_split),
3383 and 3 for define_split (alternate api). */
3385 m32c_split_move (rtx * operands, machine_mode mode, int split_all)
3387 rtx s[4], d[4];
3388 int parts, si, di, rev = 0;
3389 int rv = 0, opi = 2;
3390 machine_mode submode = HImode;
3391 rtx *ops, local_ops[10];
3393 /* define_split modifies the existing operands, but the other two
3394 emit new insns. OPS is where we store the operand pairs, which
3395 we emit later. */
3396 if (split_all == 3)
3397 ops = operands;
3398 else
3399 ops = local_ops;
3401 /* Else HImode. */
3402 if (mode == DImode)
3403 submode = SImode;
3405 /* Before splitting mem-mem moves, force one operand into a
3406 register. */
3407 if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3409 #if DEBUG0
3410 fprintf (stderr, "force_reg...\n");
3411 debug_rtx (operands[1]);
3412 #endif
3413 operands[1] = force_reg (mode, operands[1]);
3414 #if DEBUG0
3415 debug_rtx (operands[1]);
3416 #endif
3419 parts = 2;
3421 #if DEBUG_SPLIT
3422 fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3423 split_all);
3424 debug_rtx (operands[0]);
3425 debug_rtx (operands[1]);
3426 #endif
3428 /* Note that split_all is not used to select the api after this
3429 point, so it's safe to set it to 3 even with define_insn. */
3430 /* None of the chips can move SI operands to sp-relative addresses,
3431 so we always split those. */
3432 if (satisfies_constraint_Ss (operands[0]))
3433 split_all = 3;
3435 if (TARGET_A16
3436 && (far_addr_space_p (operands[0])
3437 || far_addr_space_p (operands[1])))
3438 split_all |= 1;
3440 /* We don't need to split these. */
3441 if (TARGET_A24
3442 && split_all != 3
3443 && (mode == SImode || mode == PSImode)
3444 && !(GET_CODE (operands[1]) == MEM
3445 && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3446 return 0;
3448 /* First, enumerate the subregs we'll be dealing with. */
3449 for (si = 0; si < parts; si++)
3451 d[si] =
3452 m32c_subreg (submode, operands[0], mode,
3453 si * GET_MODE_SIZE (submode));
3454 s[si] =
3455 m32c_subreg (submode, operands[1], mode,
3456 si * GET_MODE_SIZE (submode));
3459 /* Split pushes by emitting a sequence of smaller pushes. */
3460 if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3462 for (si = parts - 1; si >= 0; si--)
3464 ops[opi++] = gen_rtx_MEM (submode,
3465 gen_rtx_PRE_DEC (Pmode,
3466 gen_rtx_REG (Pmode,
3467 SP_REGNO)));
3468 ops[opi++] = s[si];
3471 rv = 1;
3473 /* Likewise for pops. */
3474 else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3476 for (di = 0; di < parts; di++)
3478 ops[opi++] = d[di];
3479 ops[opi++] = gen_rtx_MEM (submode,
3480 gen_rtx_POST_INC (Pmode,
3481 gen_rtx_REG (Pmode,
3482 SP_REGNO)));
3484 rv = 1;
3486 else if (split_all)
3488 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3489 for (di = 0; di < parts - 1; di++)
3490 for (si = di + 1; si < parts; si++)
3491 if (reg_mentioned_p (d[di], s[si]))
3492 rev = 1;
3494 if (rev)
3495 for (si = 0; si < parts; si++)
3497 ops[opi++] = d[si];
3498 ops[opi++] = s[si];
3500 else
3501 for (si = parts - 1; si >= 0; si--)
3503 ops[opi++] = d[si];
3504 ops[opi++] = s[si];
3506 rv = 1;
3508 /* Now emit any moves we may have accumulated. */
3509 if (rv && split_all != 3)
3511 int i;
3512 for (i = 2; i < opi; i += 2)
3513 emit_move_insn (ops[i], ops[i + 1]);
3515 return rv;
3518 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3519 the like. For the R8C they expect one of the addresses to be in
3520 R1L:An so we need to arrange for that. Otherwise, it's just a
3521 matter of picking out the operands we want and emitting the right
3522 pattern for them. All these expanders, which correspond to
3523 patterns in blkmov.md, must return nonzero if they expand the insn,
3524 or zero if they should FAIL. */
3526 /* This is a memset() opcode. All operands are implied, so we need to
3527 arrange for them to be in the right registers. The opcode wants
3528 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3529 the count (HI), and $2 the value (QI). */
3531 m32c_expand_setmemhi(rtx *operands)
3533 rtx desta, count, val;
3534 rtx desto, counto;
3536 desta = XEXP (operands[0], 0);
3537 count = operands[1];
3538 val = operands[2];
3540 desto = gen_reg_rtx (Pmode);
3541 counto = gen_reg_rtx (HImode);
3543 if (GET_CODE (desta) != REG
3544 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3545 desta = copy_to_mode_reg (Pmode, desta);
3547 /* This looks like an arbitrary restriction, but this is by far the
3548 most common case. For counts 8..14 this actually results in
3549 smaller code with no speed penalty because the half-sized
3550 constant can be loaded with a shorter opcode. */
3551 if (GET_CODE (count) == CONST_INT
3552 && GET_CODE (val) == CONST_INT
3553 && ! (INTVAL (count) & 1)
3554 && (INTVAL (count) > 1)
3555 && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3557 unsigned v = INTVAL (val) & 0xff;
3558 v = v | (v << 8);
3559 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3560 val = copy_to_mode_reg (HImode, GEN_INT (v));
3561 if (TARGET_A16)
3562 emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3563 else
3564 emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3565 return 1;
3568 /* This is the generalized memset() case. */
3569 if (GET_CODE (val) != REG
3570 || REGNO (val) < FIRST_PSEUDO_REGISTER)
3571 val = copy_to_mode_reg (QImode, val);
3573 if (GET_CODE (count) != REG
3574 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3575 count = copy_to_mode_reg (HImode, count);
3577 if (TARGET_A16)
3578 emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3579 else
3580 emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3582 return 1;
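/* Editorial worked example (assumption, not from the original source):
   for memset (buf, 0, 10) the fast path above halves the count to 5 and
   widens the value to the HImode constant 0x0000, so the word-wide
   store opcode runs five times instead of ten; odd counts or values
   outside -8..7 fall through to the byte-wide general case.  */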
3585 /* This is a memcpy() opcode. All operands are implied, so we need to
3586 arrange for them to be in the right registers. The opcode wants
3587 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3588 is the source (MEM:BLK), and $2 the count (HI). */
3590 m32c_expand_movmemhi(rtx *operands)
3592 rtx desta, srca, count;
3593 rtx desto, srco, counto;
3595 desta = XEXP (operands[0], 0);
3596 srca = XEXP (operands[1], 0);
3597 count = operands[2];
3599 desto = gen_reg_rtx (Pmode);
3600 srco = gen_reg_rtx (Pmode);
3601 counto = gen_reg_rtx (HImode);
3603 if (GET_CODE (desta) != REG
3604 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3605 desta = copy_to_mode_reg (Pmode, desta);
3607 if (GET_CODE (srca) != REG
3608 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3609 srca = copy_to_mode_reg (Pmode, srca);
3611 /* Similar to setmem, but we don't need to check the value. */
3612 if (GET_CODE (count) == CONST_INT
3613 && ! (INTVAL (count) & 1)
3614 && (INTVAL (count) > 1))
3616 count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3617 if (TARGET_A16)
3618 emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3619 else
3620 emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3621 return 1;
3624 /* This is the generalized memcpy() case. */
3625 if (GET_CODE (count) != REG
3626 || REGNO (count) < FIRST_PSEUDO_REGISTER)
3627 count = copy_to_mode_reg (HImode, count);
3629 if (TARGET_A16)
3630 emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3631 else
3632 emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3634 return 1;
3637 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3638 the copy, which should point to the NUL at the end of the string,
3639 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3640 Since our opcode leaves the destination pointing *after* the NUL,
3641 we must emit an adjustment. */
3643 m32c_expand_movstr(rtx *operands)
3645 rtx desta, srca;
3646 rtx desto, srco;
3648 desta = XEXP (operands[1], 0);
3649 srca = XEXP (operands[2], 0);
3651 desto = gen_reg_rtx (Pmode);
3652 srco = gen_reg_rtx (Pmode);
3654 if (GET_CODE (desta) != REG
3655 || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3656 desta = copy_to_mode_reg (Pmode, desta);
3658 if (GET_CODE (srca) != REG
3659 || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3660 srca = copy_to_mode_reg (Pmode, srca);
3662 emit_insn (gen_movstr_op (desto, srco, desta, srca));
3663 /* desto ends up being a1, which allows this type of add through MOVA. */
3664 emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3666 return 1;
3669 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3670 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3671 $2 is the other (MEM:BLK). We must do the comparison, and then
3672 convert the flags to a signed integer result. */
3674 m32c_expand_cmpstr(rtx *operands)
3676 rtx src1a, src2a;
3678 src1a = XEXP (operands[1], 0);
3679 src2a = XEXP (operands[2], 0);
3681 if (GET_CODE (src1a) != REG
3682 || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3683 src1a = copy_to_mode_reg (Pmode, src1a);
3685 if (GET_CODE (src2a) != REG
3686 || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3687 src2a = copy_to_mode_reg (Pmode, src2a);
3689 emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3690 emit_insn (gen_cond_to_int (operands[0]));
3692 return 1;
3696 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3698 static shift_gen_func
3699 shift_gen_func_for (int mode, int code)
3701 #define GFF(m,c,f) if (mode == m && code == c) return f
3702 GFF(QImode, ASHIFT, gen_ashlqi3_i);
3703 GFF(QImode, ASHIFTRT, gen_ashrqi3_i);
3704 GFF(QImode, LSHIFTRT, gen_lshrqi3_i);
3705 GFF(HImode, ASHIFT, gen_ashlhi3_i);
3706 GFF(HImode, ASHIFTRT, gen_ashrhi3_i);
3707 GFF(HImode, LSHIFTRT, gen_lshrhi3_i);
3708 GFF(PSImode, ASHIFT, gen_ashlpsi3_i);
3709 GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3710 GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3711 GFF(SImode, ASHIFT, TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3712 GFF(SImode, ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3713 GFF(SImode, LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3714 #undef GFF
3715 gcc_unreachable ();
3718 /* The m32c only has one shift, but it takes a signed count. GCC
3719 doesn't want this, so we fake it by negating any shift count when
3720 we're pretending to shift the other way. Also, the shift count is
3721 limited to -8..8. It's slightly better to use two shifts for 9..15
3722 than to load the count into r1h, so we do that too. */
3724 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3726 machine_mode mode = GET_MODE (operands[0]);
3727 shift_gen_func func = shift_gen_func_for (mode, shift_code);
3728 rtx temp;
3730 if (GET_CODE (operands[2]) == CONST_INT)
3732 int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3733 int count = INTVAL (operands[2]) * scale;
3735 while (count > maxc)
3737 temp = gen_reg_rtx (mode);
3738 emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3739 operands[1] = temp;
3740 count -= maxc;
3742 while (count < -maxc)
3744 temp = gen_reg_rtx (mode);
3745 emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3746 operands[1] = temp;
3747 count += maxc;
3749 emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3750 return 1;
3753 temp = gen_reg_rtx (QImode);
3754 if (scale < 0)
3755 /* The pattern has a NEG that corresponds to this. */
3756 emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3757 else if (TARGET_A16 && mode == SImode)
3758 /* We do this because the code below may modify this; we don't
3759 want to modify the original value. */
3760 emit_move_insn (temp, operands[2]);
3761 else
3762 /* We'll only use it for the shift, no point emitting a move. */
3763 temp = operands[2];
3765 if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3767 /* The m16c has a limit of -16..16 for SI shifts, even when the
3768 shift count is in a register. Since there are so many targets
3769 of these shifts, it's better to expand the RTL here than to
3770 call a helper function.
3772 The resulting code looks something like this:
3774 cmp.b r1h,-16
3775 jge.b 1f
3776 shl.l -16,dest
3777 add.b r1h,16
3778 1f: cmp.b r1h,16
3779 jle.b 1f
3780 shl.l 16,dest
3781 sub.b r1h,16
3782 1f: shl.l r1h,dest
3784 We take advantage of the fact that "negative" shifts are
3785 undefined to skip one of the comparisons. */
3787 rtx count;
3788 rtx tempvar;
3789 rtx_insn *insn;
3791 emit_move_insn (operands[0], operands[1]);
3793 count = temp;
3794 rtx_code_label *label = gen_label_rtx ();
3795 LABEL_NUSES (label) ++;
3797 tempvar = gen_reg_rtx (mode);
3799 if (shift_code == ASHIFT)
3801 /* This is a left shift. We only need to check positive counts. */
3802 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3803 count, GEN_INT (16), label));
3804 emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3805 emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3806 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3807 emit_label_after (label, insn);
3809 else
3811 /* This is a right shift. We only need to check negative counts. */
3812 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3813 count, GEN_INT (-16), label));
3814 emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3815 emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3816 insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3817 emit_label_after (label, insn);
3819 operands[1] = operands[0];
3820 emit_insn (func (operands[0], operands[0], count));
3821 return 1;
3824 operands[2] = temp;
3825 return 0;
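/* Editorial worked example (derived from the constant-count loop above):
   with maxc == 8, an HImode left shift by 12 is emitted as a shift by 8
   into a temporary followed by a shift by 4, because a single shift
   opcode only accepts counts in the -8..8 range there.  */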
3828 /* The m32c has a limited range of operations that work on PSImode
3829 values; we have to expand to SI, do the math, and truncate back to
3830 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3831 those cases. */
3832 void
3833 m32c_expand_neg_mulpsi3 (rtx * operands)
3835 /* operands: a = b * i */
3836 rtx temp1; /* b as SI */
3837 rtx scale /* i as SI */;
3838 rtx temp2; /* a*b as SI */
3840 temp1 = gen_reg_rtx (SImode);
3841 temp2 = gen_reg_rtx (SImode);
3842 if (GET_CODE (operands[2]) != CONST_INT)
3844 scale = gen_reg_rtx (SImode);
3845 emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3847 else
3848 scale = copy_to_mode_reg (SImode, operands[2]);
3850 emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3851 temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3852 emit_insn (gen_truncsipsi2 (operands[0], temp2));
3855 /* Pattern Output Functions */
3858 m32c_expand_movcc (rtx *operands)
3860 rtx rel = operands[1];
3862 if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3863 return 1;
3864 if (GET_CODE (operands[2]) != CONST_INT
3865 || GET_CODE (operands[3]) != CONST_INT)
3866 return 1;
3867 if (GET_CODE (rel) == NE)
3869 rtx tmp = operands[2];
3870 operands[2] = operands[3];
3871 operands[3] = tmp;
3872 rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3875 emit_move_insn (operands[0],
3876 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3877 rel,
3878 operands[2],
3879 operands[3]));
3880 return 0;
3883 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3885 m32c_expand_insv (rtx *operands)
3887 rtx op0, src0, p;
3888 int mask;
3890 if (INTVAL (operands[1]) != 1)
3891 return 1;
3893 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3894 if (GET_CODE (operands[3]) != CONST_INT)
3895 return 1;
3896 if (INTVAL (operands[3]) != 0
3897 && INTVAL (operands[3]) != 1
3898 && INTVAL (operands[3]) != -1)
3899 return 1;
3901 mask = 1 << INTVAL (operands[2]);
3903 op0 = operands[0];
3904 if (GET_CODE (op0) == SUBREG
3905 && SUBREG_BYTE (op0) == 0)
3907 rtx sub = SUBREG_REG (op0);
3908 if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3909 op0 = sub;
3912 if (!can_create_pseudo_p ()
3913 || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3914 src0 = op0;
3915 else
3917 src0 = gen_reg_rtx (GET_MODE (op0));
3918 emit_move_insn (src0, op0);
3921 if (GET_MODE (op0) == HImode
3922 && INTVAL (operands[2]) >= 8
3923 && GET_CODE (op0) == MEM)
3925 /* We are little endian. */
3926 rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3927 XEXP (op0, 0), 1));
3928 MEM_COPY_ATTRIBUTES (new_mem, op0);
3929 mask >>= 8;
3932 /* First, we generate a mask with the correct polarity. If we are
3933 storing a zero, we want an AND mask, so invert it. */
3934 if (INTVAL (operands[3]) == 0)
3936 /* Storing a zero, use an AND mask */
3937 if (GET_MODE (op0) == HImode)
3938 mask ^= 0xffff;
3939 else
3940 mask ^= 0xff;
3942 /* Now we need to properly sign-extend the mask in case we need to
3943 fall back to an AND or OR opcode. */
3944 if (GET_MODE (op0) == HImode)
3946 if (mask & 0x8000)
3947 mask -= 0x10000;
3949 else
3951 if (mask & 0x80)
3952 mask -= 0x100;
3955 switch ( (INTVAL (operands[3]) ? 4 : 0)
3956 + ((GET_MODE (op0) == HImode) ? 2 : 0)
3957 + (TARGET_A24 ? 1 : 0))
3959 case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3960 case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3961 case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3962 case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3963 case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3964 case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3965 case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3966 case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3967 default: p = NULL_RTX; break; /* Not reached, but silences a warning. */
3970 emit_insn (p);
3971 return 0;
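/* Editorial worked example (derived from the code above): storing 0 into
   bit 3 of an HImode operand gives mask = 1 << 3 = 8, inverted to 0xfff7
   and sign-extended to -9, so the switch emits an AND with -9; storing 1
   keeps mask = 8 and emits an IOR, which the insn patterns can then
   match as bclr/bset.  */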
3974 const char *
3975 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3977 static char buf[30];
3978 if (GET_CODE (operands[0]) == REG
3979 && REGNO (operands[0]) == R0_REGNO)
3981 if (code == EQ)
3982 return "stzx\t#1,#0,r0l";
3983 if (code == NE)
3984 return "stzx\t#0,#1,r0l";
3986 sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3987 return buf;
3990 /* Encode symbol attributes of a SYMBOL_REF into its
3991 SYMBOL_REF_FLAGS. */
3992 static void
3993 m32c_encode_section_info (tree decl, rtx rtl, int first)
3995 int extra_flags = 0;
3997 default_encode_section_info (decl, rtl, first);
3998 if (TREE_CODE (decl) == FUNCTION_DECL
3999 && m32c_special_page_vector_p (decl))
4001 extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4003 if (extra_flags)
4004 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4007 /* Returns TRUE if the current function is a leaf, and thus we can
4008 determine which registers an interrupt function really needs to
4009 save. The logic below is mostly about finding the insn sequence
4010 that's the function, versus any sequence that might be open for the
4011 current insn. */
4012 static int
4013 m32c_leaf_function_p (void)
4015 int rv;
4017 push_topmost_sequence ();
4018 rv = leaf_function_p ();
4019 pop_topmost_sequence ();
4020 return rv;
4023 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4024 opcodes. If the function doesn't need the frame base or stack
4025 pointer, it can use the simpler RTS opcode. */
4026 static bool
4027 m32c_function_needs_enter (void)
4029 rtx_insn *insn;
4030 rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4031 rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4033 for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4034 if (NONDEBUG_INSN_P (insn))
4036 if (reg_mentioned_p (sp, insn))
4037 return true;
4038 if (reg_mentioned_p (fb, insn))
4039 return true;
4041 return false;
4044 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4045 frame-related. Return PAR.
4047 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4048 PARALLEL rtx other than the first if they do not have the
4049 FRAME_RELATED flag set on them. So this function is handy for
4050 marking up 'enter' instructions. */
4051 static rtx
4052 m32c_all_frame_related (rtx par)
4054 int len = XVECLEN (par, 0);
4055 int i;
4057 for (i = 0; i < len; i++)
4058 F (XVECEXP (par, 0, i));
4060 return par;
4063 /* Emits the prologue. See the frame layout comment earlier in this
4064 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
4065 that we manually update sp. */
4066 void
4067 m32c_emit_prologue (void)
4069 int frame_size, extra_frame_size = 0, reg_save_size;
4070 int complex_prologue = 0;
4072 cfun->machine->is_leaf = m32c_leaf_function_p ();
4073 if (interrupt_p (cfun->decl))
4075 cfun->machine->is_interrupt = 1;
4076 complex_prologue = 1;
4078 else if (bank_switch_p (cfun->decl))
4079 warning (OPT_Wattributes,
4080 "%<bank_switch%> has no effect on non-interrupt functions");
4082 reg_save_size = m32c_pushm_popm (PP_justcount);
4084 if (interrupt_p (cfun->decl))
4086 if (bank_switch_p (cfun->decl))
4087 emit_insn (gen_fset_b ());
4088 else if (cfun->machine->intr_pushm)
4089 emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4092 frame_size =
4093 m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4094 if (frame_size == 0
4095 && !m32c_function_needs_enter ())
4096 cfun->machine->use_rts = 1;
4098 if (flag_stack_usage_info)
4099 current_function_static_stack_size = frame_size;
4101 if (frame_size > 254)
4103 extra_frame_size = frame_size - 254;
4104 frame_size = 254;
4106 if (cfun->machine->use_rts == 0)
4107 F (emit_insn (m32c_all_frame_related
4108 (TARGET_A16
4109 ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4110 : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4112 if (extra_frame_size)
4114 complex_prologue = 1;
4115 if (TARGET_A16)
4116 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4117 gen_rtx_REG (HImode, SP_REGNO),
4118 GEN_INT (-extra_frame_size))));
4119 else
4120 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4121 gen_rtx_REG (PSImode, SP_REGNO),
4122 GEN_INT (-extra_frame_size))));
4125 complex_prologue += m32c_pushm_popm (PP_pushm);
4127 /* This just emits a comment into the .s file for debugging. */
4128 if (complex_prologue)
4129 emit_insn (gen_prologue_end ());
4132 /* Likewise, for the epilogue. The only exception is that, for
4133 interrupts, we must manually unwind the frame as the REIT opcode
4134 doesn't do that. */
4135 void
4136 m32c_emit_epilogue (void)
4138 int popm_count = m32c_pushm_popm (PP_justcount);
4140 /* This just emits a comment into the .s file for debugging. */
4141 if (popm_count > 0 || cfun->machine->is_interrupt)
4142 emit_insn (gen_epilogue_start ());
4144 if (popm_count > 0)
4145 m32c_pushm_popm (PP_popm);
4147 if (cfun->machine->is_interrupt)
4149 machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4151 /* REIT clears B flag and restores $fp for us, but we still
4152 have to fix up the stack. USE_RTS just means we didn't
4153 emit ENTER. */
4154 if (!cfun->machine->use_rts)
4156 emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4157 gen_rtx_REG (spmode, FP_REGNO));
4158 emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4159 gen_rtx_REG (spmode, A0_REGNO));
4160 /* We can't just add this to the POPM because it would be in
4161 the wrong order, and wouldn't fix the stack if we're bank
4162 switching. */
4163 if (TARGET_A16)
4164 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4165 else
4166 emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4168 if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4169 emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4171 /* The FREIT (Fast REturn from InTerrupt) instruction should be
4172 generated only for M32C/M32CM targets (generate the REIT
4173 instruction otherwise). */
4174 if (fast_interrupt_p (cfun->decl))
4177 /* Check if the fast_interrupt attribute is set for M32C or M32CM. */
4177 if (TARGET_A24)
4179 emit_jump_insn (gen_epilogue_freit ());
4181 /* If the fast_interrupt attribute is set for an R8C or M16C
4182 target, ignore the attribute and generate the REIT
4183 instruction instead. */
4184 else
4186 warning (OPT_Wattributes,
4187 "%<fast_interrupt%> attribute directive ignored");
4188 emit_jump_insn (gen_epilogue_reit_16 ());
4191 else if (TARGET_A16)
4192 emit_jump_insn (gen_epilogue_reit_16 ());
4193 else
4194 emit_jump_insn (gen_epilogue_reit_24 ());
4196 else if (cfun->machine->use_rts)
4197 emit_jump_insn (gen_epilogue_rts ());
4198 else if (TARGET_A16)
4199 emit_jump_insn (gen_epilogue_exitd_16 ());
4200 else
4201 emit_jump_insn (gen_epilogue_exitd_24 ());
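/* For reference, the interrupt epilogue above unwinds in this order
   (summarized from the code, not from a hardware manual): when a frame
   was set up (use_rts is clear), copy $fb to $sp through $a0 and pop the
   saved $fb; then POPM any registers the interrupt prologue pushed
   (skipped when register-bank switching was used); finally emit FREIT on
   M32C/M32CM targets or REIT elsewhere.  */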
4204 void
4205 m32c_emit_eh_epilogue (rtx ret_addr)
4207 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4208 return to. We have to fudge the stack, pop everything, pop SP
4209 (fudged), and return (fudged). This is actually easier to do in
4210 assembler, so punt to libgcc. */
4211 emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4212 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4215 /* Indicate which flags must be properly set for a given conditional. */
4216 static int
4217 flags_needed_for_conditional (rtx cond)
4219 switch (GET_CODE (cond))
4221 case LE:
4222 case GT:
4223 return FLAGS_OSZ;
4224 case LEU:
4225 case GTU:
4226 return FLAGS_ZC;
4227 case LT:
4228 case GE:
4229 return FLAGS_OS;
4230 case LTU:
4231 case GEU:
4232 return FLAGS_C;
4233 case EQ:
4234 case NE:
4235 return FLAGS_Z;
4236 default:
4237 return FLAGS_N;
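/* For example, EQ and NE only need the zero flag to be valid, while an
   unsigned LEU or GTU needs both the zero and carry flags, per the
   mapping above.  */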
4241 #define DEBUG_CMP 0
4243 /* Returns true if a compare insn is redundant because it would only
4244 set flags that are already set correctly. */
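/* A minimal illustration (hypothetical C source; it assumes the usual
   case where the compiler keeps the compared value in a register and
   the preceding arithmetic pattern records that it sets the Z flag):

     char f (char a, char b)
     {
       char c = a - b;   // the subtraction insn already sets Z
       if (c == 0)       // EQ needs only FLAGS_Z, so the cmp.b #0
         return 1;       //   the cbranch would emit is redundant
       return c;
     }

   Here op1 is const0_rtx and op0 is the SET_DEST of the previous
   flag-setting insn, so the checks below let m32c_output_compare
   comment the compare out.  */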
4245 static bool
4246 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4248 int flags_needed;
4249 int pflags;
4250 rtx_insn *prev;
4251 rtx pp, next;
4252 rtx op0, op1;
4253 #if DEBUG_CMP
4254 int prev_icode, i;
4255 #endif
4257 op0 = operands[0];
4258 op1 = operands[1];
4260 #if DEBUG_CMP
4261 fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4262 debug_rtx(cmp);
4263 for (i=0; i<2; i++)
4265 fprintf(stderr, "operands[%d] = ", i);
4266 debug_rtx(operands[i]);
4268 #endif
4270 next = next_nonnote_insn (cmp);
4271 if (!next || !INSN_P (next))
4273 #if DEBUG_CMP
4274 fprintf(stderr, "compare not followed by insn\n");
4275 debug_rtx(next);
4276 #endif
4277 return false;
4279 if (GET_CODE (PATTERN (next)) == SET
4280 && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4282 next = XEXP (XEXP (PATTERN (next), 1), 0);
4284 else if (GET_CODE (PATTERN (next)) == SET)
4286 /* If this is a conditional, flags_needed will be something
4287 other than FLAGS_N, which we test below. */
4288 next = XEXP (PATTERN (next), 1);
4290 else
4292 #if DEBUG_CMP
4293 fprintf(stderr, "compare not followed by conditional\n");
4294 debug_rtx(next);
4295 #endif
4296 return false;
4298 #if DEBUG_CMP
4299 fprintf(stderr, "conditional is: ");
4300 debug_rtx(next);
4301 #endif
4303 flags_needed = flags_needed_for_conditional (next);
4304 if (flags_needed == FLAGS_N)
4306 #if DEBUG_CMP
4307 fprintf(stderr, "compare not followed by conditional\n");
4308 debug_rtx(next);
4309 #endif
4310 return false;
4313 /* Compare doesn't set overflow and carry the same way that
4314 arithmetic instructions do, so we can't replace those. */
4315 if (flags_needed & FLAGS_OC)
4316 return false;
4318 prev = cmp;
4319 do {
4320 prev = prev_nonnote_insn (prev);
4321 if (!prev)
4323 #if DEBUG_CMP
4324 fprintf(stderr, "No previous insn.\n");
4325 #endif
4326 return false;
4328 if (!INSN_P (prev))
4330 #if DEBUG_CMP
4331 fprintf(stderr, "Previous insn is a non-insn.\n");
4332 #endif
4333 return false;
4335 pp = PATTERN (prev);
4336 if (GET_CODE (pp) != SET)
4338 #if DEBUG_CMP
4339 fprintf(stderr, "Previous insn is not a SET.\n");
4340 #endif
4341 return false;
4343 pflags = get_attr_flags (prev);
4345 /* Looking up attributes of previous insns corrupted the recog
4346 tables. */
4347 INSN_UID (cmp) = -1;
4348 recog (PATTERN (cmp), cmp, 0);
4350 if (pflags == FLAGS_N
4351 && reg_mentioned_p (op0, pp))
4353 #if DEBUG_CMP
4354 fprintf(stderr, "intermediate non-flags insn uses op:\n");
4355 debug_rtx(prev);
4356 #endif
4357 return false;
4360 /* Check for comparisons against memory - between volatiles and
4361 aliases, we just can't risk this one. */
4362 if (GET_CODE (operands[0]) == MEM
4363 || GET_CODE (operands[1]) == MEM)
4365 #if DEBUG_CMP
4366 fprintf(stderr, "comparisons with memory:\n");
4367 debug_rtx(prev);
4368 #endif
4369 return false;
4372 /* Check for PREV changing a register that's used to compute a
4373 value in CMP, even if it doesn't otherwise change flags. */
4374 if (GET_CODE (operands[0]) == REG
4375 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4377 #if DEBUG_CMP
4378 fprintf(stderr, "sub-value affected, op0:\n");
4379 debug_rtx(prev);
4380 #endif
4381 return false;
4383 if (GET_CODE (operands[1]) == REG
4384 && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4386 #if DEBUG_CMP
4387 fprintf(stderr, "sub-value affected, op1:\n");
4388 debug_rtx(prev);
4389 #endif
4390 return false;
4393 } while (pflags == FLAGS_N);
4394 #if DEBUG_CMP
4395 fprintf(stderr, "previous flag-setting insn:\n");
4396 debug_rtx(prev);
4397 debug_rtx(pp);
4398 #endif
4400 if (GET_CODE (pp) == SET
4401 && GET_CODE (XEXP (pp, 0)) == REG
4402 && REGNO (XEXP (pp, 0)) == FLG_REGNO
4403 && GET_CODE (XEXP (pp, 1)) == COMPARE)
4405 /* Adjacent cbranches must have the same operands to be
4406 redundant. */
4407 rtx pop0 = XEXP (XEXP (pp, 1), 0);
4408 rtx pop1 = XEXP (XEXP (pp, 1), 1);
4409 #if DEBUG_CMP
4410 fprintf(stderr, "adjacent cbranches\n");
4411 debug_rtx(pop0);
4412 debug_rtx(pop1);
4413 #endif
4414 if (rtx_equal_p (op0, pop0)
4415 && rtx_equal_p (op1, pop1))
4416 return true;
4417 #if DEBUG_CMP
4418 fprintf(stderr, "prev cmp not same\n");
4419 #endif
4420 return false;
4423 /* Else the previous insn must be a SET, with either the source or
4424 dest equal to operands[0], and operands[1] must be zero. */
4426 if (!rtx_equal_p (op1, const0_rtx))
4428 #if DEBUG_CMP
4429 fprintf(stderr, "operands[1] not const0_rtx\n");
4430 #endif
4431 return false;
4433 if (GET_CODE (pp) != SET)
4435 #if DEBUG_CMP
4436 fprintf (stderr, "pp not set\n");
4437 #endif
4438 return false;
4440 if (!rtx_equal_p (op0, SET_SRC (pp))
4441 && !rtx_equal_p (op0, SET_DEST (pp)))
4443 #if DEBUG_CMP
4444 fprintf(stderr, "operands[0] not found in set\n");
4445 #endif
4446 return false;
4449 #if DEBUG_CMP
4450 fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4451 #endif
4452 if ((pflags & flags_needed) == flags_needed)
4453 return true;
4455 return false;
4458 /* Return the pattern for a compare. This will be commented out if
4459 the compare is redundant, else a normal pattern is returned. Thus,
4460 the assembler output says where the compare would have been. */
4461 char *
4462 m32c_output_compare (rtx_insn *insn, rtx *operands)
4464 static char templ[] = ";cmp.b\t%1,%0";
4465 /* ^ 5 */
4467 templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4468 if (m32c_compare_redundant (insn, operands))
4470 #if DEBUG_CMP
4471 fprintf(stderr, "cbranch: cmp not needed\n");
4472 #endif
4473 return templ;
4476 #if DEBUG_CMP
4477 fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4478 #endif
4479 return templ + 1;
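/* Note on templ above: index 5 holds the size letter, so a two-byte
   (HImode) operand turns ";cmp.b" into ";cmp.w" via " bwll"[2].
   Returning templ keeps the leading ';', leaving only an assembler
   comment where the compare would have been; returning templ + 1 drops
   the ';' and emits a real compare.  */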
4482 #undef TARGET_ENCODE_SECTION_INFO
4483 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4485 /* If the frame pointer isn't used, we detect it manually. But the
4486 stack pointer doesn't have as flexible addressing as the frame
4487 pointer, so we always assume we have it. */
4489 #undef TARGET_FRAME_POINTER_REQUIRED
4490 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4492 #undef TARGET_HARD_REGNO_MODE_OK
4493 #define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
4495 /* The Global `targetm' Variable. */
4497 struct gcc_target targetm = TARGET_INITIALIZER;
4499 #include "gt-m32c.h"