1 /* Target Code for R8C/M16C/M32C
2 Copyright (C) 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by Red Hat.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
28 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
34 #include "insn-attr.h"
47 #include "target-def.h"
49 #include "langhooks.h"
55 /* Used by m32c_pushm_popm. */
63 static tree
interrupt_handler (tree
*, tree
, tree
, int, bool *);
64 static tree
function_vector_handler (tree
*, tree
, tree
, int, bool *);
65 static int interrupt_p (tree node
);
66 static bool m32c_asm_integer (rtx
, unsigned int, int);
67 static int m32c_comp_type_attributes (const_tree
, const_tree
);
68 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
69 static struct machine_function
*m32c_init_machine_status (void);
70 static void m32c_insert_attributes (tree
, tree
*);
71 static bool m32c_pass_by_reference (CUMULATIVE_ARGS
*, enum machine_mode
,
73 static bool m32c_promote_prototypes (const_tree
);
74 static int m32c_pushm_popm (Push_Pop_Type
);
75 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS
*);
76 static rtx
m32c_struct_value_rtx (tree
, int);
77 static rtx
m32c_subreg (enum machine_mode
, rtx
, enum machine_mode
, int);
78 static int need_to_save (int);
79 int current_function_special_page_vector (rtx
);
81 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
83 #define streq(a,b) (strcmp ((a), (b)) == 0)
85 /* Internal support routines */
87 /* Debugging statements are tagged with DEBUG0 only so that they can
88 be easily enabled individually, by replacing the '0' with '1' as
94 /* This is needed by some of the commented-out debug statements
96 static char const *class_names
[LIM_REG_CLASSES
] = REG_CLASS_NAMES
;
98 static int class_contents
[LIM_REG_CLASSES
][1] = REG_CLASS_CONTENTS
;
100 /* These are all to support encode_pattern(). */
101 static char pattern
[30], *patternp
;
102 static GTY(()) rtx patternr
[30];
103 #define RTX_IS(x) (streq (pattern, x))
105 /* Some macros to simplify the logic throughout this file. */
106 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
107 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
109 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
110 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
112 /* We do most RTX matching by converting the RTX into a string, and
113 using string compares. This vastly simplifies the logic in many of
114 the functions in this file.
116 On exit, pattern[] has the encoded string (use RTX_IS("...") to
117 compare it) and patternr[] has pointers to the nodes in the RTX
118 corresponding to each character in the encoded string. The latter
119 is mostly used by print_operand().
121 Unrecognized patterns have '?' in them; this shows up when the
122 assembler complains about syntax errors.
126 encode_pattern_1 (rtx x
)
130 if (patternp
== pattern
+ sizeof (pattern
) - 2)
136 patternr
[patternp
- pattern
] = x
;
138 switch (GET_CODE (x
))
144 if (GET_MODE_SIZE (GET_MODE (x
)) !=
145 GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))))
147 encode_pattern_1 (XEXP (x
, 0));
152 encode_pattern_1 (XEXP (x
, 0));
156 encode_pattern_1 (XEXP (x
, 0));
157 encode_pattern_1 (XEXP (x
, 1));
161 encode_pattern_1 (XEXP (x
, 0));
165 encode_pattern_1 (XEXP (x
, 0));
169 encode_pattern_1 (XEXP (x
, 0));
170 encode_pattern_1 (XEXP (x
, 1));
174 encode_pattern_1 (XEXP (x
, 0));
191 *patternp
++ = '0' + XCINT (x
, 1, UNSPEC
);
192 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
193 encode_pattern_1 (XVECEXP (x
, 0, i
));
200 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
201 encode_pattern_1 (XVECEXP (x
, 0, i
));
205 encode_pattern_1 (XEXP (x
, 0));
207 encode_pattern_1 (XEXP (x
, 1));
212 fprintf (stderr
, "can't encode pattern %s\n",
213 GET_RTX_NAME (GET_CODE (x
)));
222 encode_pattern (rtx x
)
225 encode_pattern_1 (x
);
229 /* Since register names indicate the mode they're used in, we need a
230 way to determine which name to refer to the register with. Called
231 by print_operand(). */
234 reg_name_with_mode (int regno
, enum machine_mode mode
)
236 int mlen
= GET_MODE_SIZE (mode
);
237 if (regno
== R0_REGNO
&& mlen
== 1)
239 if (regno
== R0_REGNO
&& (mlen
== 3 || mlen
== 4))
241 if (regno
== R0_REGNO
&& mlen
== 6)
243 if (regno
== R0_REGNO
&& mlen
== 8)
245 if (regno
== R1_REGNO
&& mlen
== 1)
247 if (regno
== R1_REGNO
&& (mlen
== 3 || mlen
== 4))
249 if (regno
== A0_REGNO
&& TARGET_A16
&& (mlen
== 3 || mlen
== 4))
251 return reg_names
[regno
];
254 /* How many bytes a register uses on stack when it's pushed. We need
255 to know this because the push opcode needs to explicitly indicate
256 the size of the register, even though the name of the register
257 already tells it that. Used by m32c_output_reg_{push,pop}, which
258 is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}. */
261 reg_push_size (int regno
)
286 static int *class_sizes
= 0;
288 /* Given two register classes, find the largest intersection between
289 them. If there is no intersection, return RETURNED_IF_EMPTY
292 reduce_class (int original_class
, int limiting_class
, int returned_if_empty
)
294 int cc
= class_contents
[original_class
][0];
295 int i
, best
= NO_REGS
;
298 if (original_class
== limiting_class
)
299 return original_class
;
304 class_sizes
= (int *) xmalloc (LIM_REG_CLASSES
* sizeof (int));
305 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
308 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
309 if (class_contents
[i
][0] & (1 << r
))
314 cc
&= class_contents
[limiting_class
][0];
315 for (i
= 0; i
< LIM_REG_CLASSES
; i
++)
317 int ic
= class_contents
[i
][0];
320 if (best_size
< class_sizes
[i
])
323 best_size
= class_sizes
[i
];
328 return returned_if_empty
;
332 /* Returns TRUE If there are any registers that exist in both register
335 classes_intersect (int class1
, int class2
)
337 return class_contents
[class1
][0] & class_contents
[class2
][0];
340 /* Used by m32c_register_move_cost to determine if a move is
341 impossibly expensive. */
343 class_can_hold_mode (int rclass
, enum machine_mode mode
)
345 /* Cache the results: 0=untested 1=no 2=yes */
346 static char results
[LIM_REG_CLASSES
][MAX_MACHINE_MODE
];
347 if (results
[rclass
][mode
] == 0)
350 results
[rclass
][mode
] = 1;
351 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; r
++)
352 if (class_contents
[rclass
][0] & (1 << r
)
353 && HARD_REGNO_MODE_OK (r
, mode
))
356 n
= HARD_REGNO_NREGS (r
, mode
);
357 for (i
= 1; i
< n
; i
++)
358 if (!(class_contents
[rclass
][0] & (1 << (r
+ i
))))
362 results
[rclass
][mode
] = 2;
368 fprintf (stderr
, "class %s can hold %s? %s\n",
369 class_names
[rclass
], mode_name
[mode
],
370 (results
[rclass
][mode
] == 2) ? "yes" : "no");
372 return results
[rclass
][mode
] == 2;
375 /* Run-time Target Specification. */
377 /* Memregs are memory locations that gcc treats like general
378 registers, as there are a limited number of true registers and the
379 m32c families can use memory in most places that registers can be
382 However, since memory accesses are more expensive than registers,
383 we allow the user to limit the number of memregs available, in
384 order to try to persuade gcc to try harder to use real registers.
386 Memregs are provided by m32c-lib1.S.
389 int target_memregs
= 16;
390 static bool target_memregs_set
= FALSE
;
391 int ok_to_change_target_memregs
= TRUE
;
393 #undef TARGET_HANDLE_OPTION
394 #define TARGET_HANDLE_OPTION m32c_handle_option
396 m32c_handle_option (size_t code
,
397 const char *arg ATTRIBUTE_UNUSED
,
398 int value ATTRIBUTE_UNUSED
)
400 if (code
== OPT_memregs_
)
402 target_memregs_set
= TRUE
;
403 target_memregs
= atoi (arg
);
408 /* Implements OVERRIDE_OPTIONS. We limit memregs to 0..16, and
409 provide a default. */
411 m32c_override_options (void)
413 if (target_memregs_set
)
415 if (target_memregs
< 0 || target_memregs
> 16)
416 error ("invalid target memregs value '%d'", target_memregs
);
422 /* Defining data structures for per-function information */
424 /* The usual; we set up our machine_function data. */
425 static struct machine_function
*
426 m32c_init_machine_status (void)
428 struct machine_function
*machine
;
430 (machine_function
*) ggc_alloc_cleared (sizeof (machine_function
));
435 /* Implements INIT_EXPANDERS. We just set up to call the above
438 m32c_init_expanders (void)
440 init_machine_status
= m32c_init_machine_status
;
445 #undef TARGET_PROMOTE_FUNCTION_RETURN
446 #define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
448 m32c_promote_function_return (const_tree fntype ATTRIBUTE_UNUSED
)
453 /* Register Basics */
455 /* Basic Characteristics of Registers */
457 /* Whether a mode fits in a register is complex enough to warrant a
466 } nregs_table
[FIRST_PSEUDO_REGISTER
] =
468 { 1, 1, 2, 2, 4 }, /* r0 */
469 { 0, 1, 0, 0, 0 }, /* r2 */
470 { 1, 1, 2, 2, 0 }, /* r1 */
471 { 0, 1, 0, 0, 0 }, /* r3 */
472 { 0, 1, 1, 0, 0 }, /* a0 */
473 { 0, 1, 1, 0, 0 }, /* a1 */
474 { 0, 1, 1, 0, 0 }, /* sb */
475 { 0, 1, 1, 0, 0 }, /* fb */
476 { 0, 1, 1, 0, 0 }, /* sp */
477 { 1, 1, 1, 0, 0 }, /* pc */
478 { 0, 0, 0, 0, 0 }, /* fl */
479 { 1, 1, 1, 0, 0 }, /* ap */
480 { 1, 1, 2, 2, 4 }, /* mem0 */
481 { 1, 1, 2, 2, 4 }, /* mem1 */
482 { 1, 1, 2, 2, 4 }, /* mem2 */
483 { 1, 1, 2, 2, 4 }, /* mem3 */
484 { 1, 1, 2, 2, 4 }, /* mem4 */
485 { 1, 1, 2, 2, 0 }, /* mem5 */
486 { 1, 1, 2, 2, 0 }, /* mem6 */
487 { 1, 1, 0, 0, 0 }, /* mem7 */
490 /* Implements CONDITIONAL_REGISTER_USAGE. We adjust the number of
491 available memregs, and select which registers need to be preserved
492 across calls based on the chip family. */
495 m32c_conditional_register_usage (void)
499 if (0 <= target_memregs
&& target_memregs
<= 16)
501 /* The command line option is bytes, but our "registers" are
503 for (i
= target_memregs
/2; i
< 8; i
++)
505 fixed_regs
[MEM0_REGNO
+ i
] = 1;
506 CLEAR_HARD_REG_BIT (reg_class_contents
[MEM_REGS
], MEM0_REGNO
+ i
);
510 /* M32CM and M32C preserve more registers across function calls. */
513 call_used_regs
[R1_REGNO
] = 0;
514 call_used_regs
[R2_REGNO
] = 0;
515 call_used_regs
[R3_REGNO
] = 0;
516 call_used_regs
[A0_REGNO
] = 0;
517 call_used_regs
[A1_REGNO
] = 0;
521 /* How Values Fit in Registers */
523 /* Implements HARD_REGNO_NREGS. This is complicated by the fact that
524 different registers are different sizes from each other, *and* may
525 be different sizes in different chip families. */
527 m32c_hard_regno_nregs_1 (int regno
, enum machine_mode mode
)
529 if (regno
== FLG_REGNO
&& mode
== CCmode
)
531 if (regno
>= FIRST_PSEUDO_REGISTER
)
532 return ((GET_MODE_SIZE (mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
);
534 if (regno
>= MEM0_REGNO
&& regno
<= MEM7_REGNO
)
535 return (GET_MODE_SIZE (mode
) + 1) / 2;
537 if (GET_MODE_SIZE (mode
) <= 1)
538 return nregs_table
[regno
].qi_regs
;
539 if (GET_MODE_SIZE (mode
) <= 2)
540 return nregs_table
[regno
].hi_regs
;
541 if (regno
== A0_REGNO
&& mode
== PSImode
&& TARGET_A16
)
543 if ((GET_MODE_SIZE (mode
) <= 3 || mode
== PSImode
) && TARGET_A24
)
544 return nregs_table
[regno
].pi_regs
;
545 if (GET_MODE_SIZE (mode
) <= 4)
546 return nregs_table
[regno
].si_regs
;
547 if (GET_MODE_SIZE (mode
) <= 8)
548 return nregs_table
[regno
].di_regs
;
553 m32c_hard_regno_nregs (int regno
, enum machine_mode mode
)
555 int rv
= m32c_hard_regno_nregs_1 (regno
, mode
);
559 /* Implements HARD_REGNO_MODE_OK. The above function does the work
560 already; just test its return value. */
562 m32c_hard_regno_ok (int regno
, enum machine_mode mode
)
564 return m32c_hard_regno_nregs_1 (regno
, mode
) != 0;
567 /* Implements MODES_TIEABLE_P. In general, modes aren't tieable since
568 registers are all different sizes. However, since most modes are
569 bigger than our registers anyway, it's easier to implement this
570 function that way, leaving QImode as the only unique case. */
572 m32c_modes_tieable_p (enum machine_mode m1
, enum machine_mode m2
)
574 if (GET_MODE_SIZE (m1
) == GET_MODE_SIZE (m2
))
578 if (m1
== QImode
|| m2
== QImode
)
585 /* Register Classes */
587 /* Implements REGNO_REG_CLASS. */
589 m32c_regno_reg_class (int regno
)
613 if (IS_MEM_REGNO (regno
))
619 /* Implements REG_CLASS_FROM_CONSTRAINT. Note that some constraints only match
620 for certain chip families. */
622 m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED
, const char *s
)
624 if (memcmp (s
, "Rsp", 3) == 0)
626 if (memcmp (s
, "Rfb", 3) == 0)
628 if (memcmp (s
, "Rsb", 3) == 0)
630 if (memcmp (s
, "Rcr", 3) == 0)
631 return TARGET_A16
? CR_REGS
: NO_REGS
;
632 if (memcmp (s
, "Rcl", 3) == 0)
633 return TARGET_A24
? CR_REGS
: NO_REGS
;
634 if (memcmp (s
, "R0w", 3) == 0)
636 if (memcmp (s
, "R1w", 3) == 0)
638 if (memcmp (s
, "R2w", 3) == 0)
640 if (memcmp (s
, "R3w", 3) == 0)
642 if (memcmp (s
, "R02", 3) == 0)
644 if (memcmp (s
, "R03", 3) == 0)
646 if (memcmp (s
, "Rdi", 3) == 0)
648 if (memcmp (s
, "Rhl", 3) == 0)
650 if (memcmp (s
, "R23", 3) == 0)
652 if (memcmp (s
, "Ra0", 3) == 0)
654 if (memcmp (s
, "Ra1", 3) == 0)
656 if (memcmp (s
, "Raa", 3) == 0)
658 if (memcmp (s
, "Raw", 3) == 0)
659 return TARGET_A16
? A_REGS
: NO_REGS
;
660 if (memcmp (s
, "Ral", 3) == 0)
661 return TARGET_A24
? A_REGS
: NO_REGS
;
662 if (memcmp (s
, "Rqi", 3) == 0)
664 if (memcmp (s
, "Rad", 3) == 0)
666 if (memcmp (s
, "Rsi", 3) == 0)
668 if (memcmp (s
, "Rhi", 3) == 0)
670 if (memcmp (s
, "Rhc", 3) == 0)
672 if (memcmp (s
, "Rra", 3) == 0)
674 if (memcmp (s
, "Rfl", 3) == 0)
676 if (memcmp (s
, "Rmm", 3) == 0)
678 if (fixed_regs
[MEM0_REGNO
])
683 /* PSImode registers - i.e. whatever can hold a pointer. */
684 if (memcmp (s
, "Rpi", 3) == 0)
689 return RA_REGS
; /* r2r0 and r3r1 can hold pointers. */
692 /* We handle this one as an EXTRA_CONSTRAINT. */
693 if (memcmp (s
, "Rpa", 3) == 0)
698 fprintf(stderr
, "unrecognized R constraint: %.3s\n", s
);
705 /* Implements REGNO_OK_FOR_BASE_P. */
707 m32c_regno_ok_for_base_p (int regno
)
709 if (regno
== A0_REGNO
710 || regno
== A1_REGNO
|| regno
>= FIRST_PSEUDO_REGISTER
)
715 #define DEBUG_RELOAD 0
717 /* Implements PREFERRED_RELOAD_CLASS. In general, prefer general
718 registers of the appropriate size. */
720 m32c_preferred_reload_class (rtx x
, int rclass
)
722 int newclass
= rclass
;
725 fprintf (stderr
, "\npreferred_reload_class for %s is ",
726 class_names
[rclass
]);
728 if (rclass
== NO_REGS
)
729 rclass
= GET_MODE (x
) == QImode
? HL_REGS
: R03_REGS
;
731 if (classes_intersect (rclass
, CR_REGS
))
733 switch (GET_MODE (x
))
739 /* newclass = HI_REGS; */
744 else if (newclass
== QI_REGS
&& GET_MODE_SIZE (GET_MODE (x
)) > 2)
746 else if (GET_MODE_SIZE (GET_MODE (x
)) > 4
747 && ~class_contents
[rclass
][0] & 0x000f)
750 rclass
= reduce_class (rclass
, newclass
, rclass
);
752 if (GET_MODE (x
) == QImode
)
753 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
756 fprintf (stderr
, "%s\n", class_names
[rclass
]);
759 if (GET_CODE (x
) == MEM
760 && GET_CODE (XEXP (x
, 0)) == PLUS
761 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == PLUS
)
762 fprintf (stderr
, "Glorm!\n");
767 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS. */
769 m32c_preferred_output_reload_class (rtx x
, int rclass
)
771 return m32c_preferred_reload_class (x
, rclass
);
774 /* Implements LIMIT_RELOAD_CLASS. We basically want to avoid using
775 address registers for reloads since they're needed for address
778 m32c_limit_reload_class (enum machine_mode mode
, int rclass
)
781 fprintf (stderr
, "limit_reload_class for %s: %s ->",
782 mode_name
[mode
], class_names
[rclass
]);
786 rclass
= reduce_class (rclass
, HL_REGS
, rclass
);
787 else if (mode
== HImode
)
788 rclass
= reduce_class (rclass
, HI_REGS
, rclass
);
789 else if (mode
== SImode
)
790 rclass
= reduce_class (rclass
, SI_REGS
, rclass
);
792 if (rclass
!= A_REGS
)
793 rclass
= reduce_class (rclass
, DI_REGS
, rclass
);
796 fprintf (stderr
, " %s\n", class_names
[rclass
]);
801 /* Implements SECONDARY_RELOAD_CLASS. QImode have to be reloaded in
802 r0 or r1, as those are the only real QImode registers. CR regs get
803 reloaded through appropriately sized general or address
806 m32c_secondary_reload_class (int rclass
, enum machine_mode mode
, rtx x
)
808 int cc
= class_contents
[rclass
][0];
810 fprintf (stderr
, "\nsecondary reload class %s %s\n",
811 class_names
[rclass
], mode_name
[mode
]);
815 && GET_CODE (x
) == MEM
&& (cc
& ~class_contents
[R23_REGS
][0]) == 0)
817 if (classes_intersect (rclass
, CR_REGS
)
818 && GET_CODE (x
) == REG
819 && REGNO (x
) >= SB_REGNO
&& REGNO (x
) <= SP_REGNO
)
820 return TARGET_A16
? HI_REGS
: A_REGS
;
824 /* Implements CLASS_LIKELY_SPILLED_P. A_REGS is needed for address
827 m32c_class_likely_spilled_p (int regclass
)
829 if (regclass
== A_REGS
)
831 return reg_class_size
[regclass
] == 1;
834 /* Implements CLASS_MAX_NREGS. We calculate this according to its
835 documented meaning, to avoid potential inconsistencies with actual
836 class definitions. */
838 m32c_class_max_nregs (int regclass
, enum machine_mode mode
)
842 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
843 if (class_contents
[regclass
][0] & (1 << rn
))
845 int n
= m32c_hard_regno_nregs (rn
, mode
);
852 /* Implements CANNOT_CHANGE_MODE_CLASS. Only r0 and r1 can change to
853 QI (r0l, r1l) because the chip doesn't support QI ops on other
854 registers (well, it does on a0/a1 but if we let gcc do that, reload
855 suffers). Otherwise, we allow changes to larger modes. */
857 m32c_cannot_change_mode_class (enum machine_mode from
,
858 enum machine_mode to
, int rclass
)
862 fprintf (stderr
, "cannot change from %s to %s in %s\n",
863 mode_name
[from
], mode_name
[to
], class_names
[rclass
]);
866 /* If the larger mode isn't allowed in any of these registers, we
867 can't allow the change. */
868 for (rn
= 0; rn
< FIRST_PSEUDO_REGISTER
; rn
++)
869 if (class_contents
[rclass
][0] & (1 << rn
))
870 if (! m32c_hard_regno_ok (rn
, to
))
874 return (class_contents
[rclass
][0] & 0x1ffa);
876 if (class_contents
[rclass
][0] & 0x0005 /* r0, r1 */
877 && GET_MODE_SIZE (from
) > 1)
879 if (GET_MODE_SIZE (from
) > 2) /* all other regs */
885 /* Helpers for the rest of the file. */
886 /* TRUE if the rtx is a REG rtx for the given register. */
887 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
888 && REGNO (rtx) == regno)
889 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
890 base register in address calculations (hence the "strict"
892 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
893 && (REGNO (rtx) == AP_REGNO \
894 || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
896 /* Implements CONST_OK_FOR_CONSTRAINT_P. Currently, all constant
897 constraints start with 'I', with the next two characters indicating
898 the type and size of the range allowed. */
900 m32c_const_ok_for_constraint_p (HOST_WIDE_INT value
,
901 char c ATTRIBUTE_UNUSED
, const char *str
)
903 /* s=signed u=unsigned n=nonzero m=minus l=log2able,
904 [sun] bits [SUN] bytes, p=pointer size
905 I[-0-9][0-9] matches that number */
906 if (memcmp (str
, "Is3", 3) == 0)
908 return (-8 <= value
&& value
<= 7);
910 if (memcmp (str
, "IS1", 3) == 0)
912 return (-128 <= value
&& value
<= 127);
914 if (memcmp (str
, "IS2", 3) == 0)
916 return (-32768 <= value
&& value
<= 32767);
918 if (memcmp (str
, "IU2", 3) == 0)
920 return (0 <= value
&& value
<= 65535);
922 if (memcmp (str
, "IU3", 3) == 0)
924 return (0 <= value
&& value
<= 0x00ffffff);
926 if (memcmp (str
, "In4", 3) == 0)
928 return (-8 <= value
&& value
&& value
<= 8);
930 if (memcmp (str
, "In5", 3) == 0)
932 return (-16 <= value
&& value
&& value
<= 16);
934 if (memcmp (str
, "In6", 3) == 0)
936 return (-32 <= value
&& value
&& value
<= 32);
938 if (memcmp (str
, "IM2", 3) == 0)
940 return (-65536 <= value
&& value
&& value
<= -1);
942 if (memcmp (str
, "Ilb", 3) == 0)
944 int b
= exact_log2 (value
);
945 return (b
>= 0 && b
<= 7);
947 if (memcmp (str
, "Imb", 3) == 0)
949 int b
= exact_log2 ((value
^ 0xff) & 0xff);
950 return (b
>= 0 && b
<= 7);
952 if (memcmp (str
, "ImB", 3) == 0)
954 int b
= exact_log2 ((value
^ 0xffff) & 0xffff);
955 return (b
>= 0 && b
<= 7);
957 if (memcmp (str
, "Ilw", 3) == 0)
959 int b
= exact_log2 (value
);
960 return (b
>= 0 && b
<= 15);
962 if (memcmp (str
, "Imw", 3) == 0)
964 int b
= exact_log2 ((value
^ 0xffff) & 0xffff);
965 return (b
>= 0 && b
<= 15);
967 if (memcmp (str
, "I00", 3) == 0)
974 /* Implements EXTRA_CONSTRAINT_STR (see next function too). 'S' is
975 for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
976 call return values. */
978 m32c_extra_constraint_p2 (rtx value
, char c ATTRIBUTE_UNUSED
, const char *str
)
980 encode_pattern (value
);
981 if (memcmp (str
, "Sd", 2) == 0)
983 /* This is the common "src/dest" address */
985 if (GET_CODE (value
) == MEM
&& CONSTANT_P (XEXP (value
, 0)))
987 if (RTX_IS ("ms") || RTX_IS ("m+si"))
989 if (RTX_IS ("m++rii"))
991 if (REGNO (patternr
[3]) == FB_REGNO
992 && INTVAL (patternr
[4]) == 0)
997 else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
1001 if (REGNO (r
) == SP_REGNO
)
1003 return m32c_legitimate_address_p (GET_MODE (value
), XEXP (value
, 0), 1);
1005 else if (memcmp (str
, "Sa", 2) == 0)
1010 else if (RTX_IS ("m+ri"))
1014 return (IS_REG (r
, A0_REGNO
) || IS_REG (r
, A1_REGNO
));
1016 else if (memcmp (str
, "Si", 2) == 0)
1018 return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1020 else if (memcmp (str
, "Ss", 2) == 0)
1022 return ((RTX_IS ("mr")
1023 && (IS_REG (patternr
[1], SP_REGNO
)))
1024 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SP_REGNO
))));
1026 else if (memcmp (str
, "Sf", 2) == 0)
1028 return ((RTX_IS ("mr")
1029 && (IS_REG (patternr
[1], FB_REGNO
)))
1030 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], FB_REGNO
))));
1032 else if (memcmp (str
, "Sb", 2) == 0)
1034 return ((RTX_IS ("mr")
1035 && (IS_REG (patternr
[1], SB_REGNO
)))
1036 || (RTX_IS ("m+ri") && (IS_REG (patternr
[2], SB_REGNO
))));
1038 else if (memcmp (str
, "Sp", 2) == 0)
1040 /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1041 return (RTX_IS ("mi")
1042 && !(INTVAL (patternr
[1]) & ~0x1fff));
1044 else if (memcmp (str
, "S1", 2) == 0)
1046 return r1h_operand (value
, QImode
);
1049 gcc_assert (str
[0] != 'S');
1051 if (memcmp (str
, "Rpa", 2) == 0)
1052 return GET_CODE (value
) == PARALLEL
;
1057 /* This is for when we're debugging the above. */
1059 m32c_extra_constraint_p (rtx value
, char c
, const char *str
)
1061 int rv
= m32c_extra_constraint_p2 (value
, c
, str
);
1063 fprintf (stderr
, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c
, str
), str
,
1070 /* Implements EXTRA_MEMORY_CONSTRAINT. Currently, we only use strings
1071 starting with 'S'. */
1073 m32c_extra_memory_constraint (char c
, const char *str ATTRIBUTE_UNUSED
)
1078 /* Implements EXTRA_ADDRESS_CONSTRAINT. We reserve 'A' strings for these,
1079 but don't currently define any. */
1081 m32c_extra_address_constraint (char c
, const char *str ATTRIBUTE_UNUSED
)
1086 /* STACK AND CALLING */
1090 /* Implements RETURN_ADDR_RTX. Note that R8C and M16C push 24 bits
1091 (yes, THREE bytes) onto the stack for the return address, but we
1092 don't support pointers bigger than 16 bits on those chips. This
1093 will likely wreak havoc with exception unwinding. FIXME. */
1095 m32c_return_addr_rtx (int count
)
1097 enum machine_mode mode
;
1103 /* we want 2[$fb] */
1107 /* It's four bytes */
1113 /* FIXME: it's really 3 bytes */
1119 gen_rtx_MEM (mode
, plus_constant (gen_rtx_REG (Pmode
, FP_REGNO
), offset
));
1120 return copy_to_mode_reg (mode
, ra_mem
);
1123 /* Implements INCOMING_RETURN_ADDR_RTX. See comment above. */
1125 m32c_incoming_return_addr_rtx (void)
1128 return gen_rtx_MEM (PSImode
, gen_rtx_REG (PSImode
, SP_REGNO
));
1131 /* Exception Handling Support */
1133 /* Implements EH_RETURN_DATA_REGNO. Choose registers able to hold
1136 m32c_eh_return_data_regno (int n
)
1148 return INVALID_REGNUM
;
1152 /* Implements EH_RETURN_STACKADJ_RTX. Saved and used later in
1153 m32c_emit_eh_epilogue. */
1155 m32c_eh_return_stackadj_rtx (void)
1157 if (!cfun
->machine
->eh_stack_adjust
)
1161 sa
= gen_rtx_REG (Pmode
, R0_REGNO
);
1162 cfun
->machine
->eh_stack_adjust
= sa
;
1164 return cfun
->machine
->eh_stack_adjust
;
1167 /* Registers That Address the Stack Frame */
1169 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER. Note that
1170 the original spec called for dwarf numbers to vary with register
1171 width as well, for example, r0l, r0, and r2r0 would each have
1172 different dwarf numbers. GCC doesn't support this, and we don't do
1173 it, and gdb seems to like it this way anyway. */
1175 m32c_dwarf_frame_regnum (int n
)
1201 return DWARF_FRAME_REGISTERS
+ 1;
1205 /* The frame looks like this:
1207 ap -> +------------------------------
1208 | Return address (3 or 4 bytes)
1209 | Saved FB (2 or 4 bytes)
1210 fb -> +------------------------------
1213 | through r0 as needed
1214 sp -> +------------------------------
1217 /* We use this to wrap all emitted insns in the prologue. */
1221 RTX_FRAME_RELATED_P (x
) = 1;
1225 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1226 how much the stack pointer moves for each, for each cpu family. */
1235 /* These are in reverse push (nearest-to-sp) order. */
1236 { R0_REGNO
, 0x80, 2, 2 },
1237 { R1_REGNO
, 0x40, 2, 2 },
1238 { R2_REGNO
, 0x20, 2, 2 },
1239 { R3_REGNO
, 0x10, 2, 2 },
1240 { A0_REGNO
, 0x08, 2, 4 },
1241 { A1_REGNO
, 0x04, 2, 4 },
1242 { SB_REGNO
, 0x02, 2, 4 },
1243 { FB_REGNO
, 0x01, 2, 4 }
1246 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1248 /* Returns TRUE if we need to save/restore the given register. We
1249 save everything for exception handlers, so that any register can be
1250 unwound. For interrupt handlers, we save everything if the handler
1251 calls something else (because we don't know what *that* function
1252 might do), but try to be a bit smarter if the handler is a leaf
1253 function. We always save $a0, though, because we use that in the
1254 epilogue to copy $fb to $sp. */
1256 need_to_save (int regno
)
1258 if (fixed_regs
[regno
])
1260 if (crtl
->calls_eh_return
)
1262 if (regno
== FP_REGNO
)
1264 if (cfun
->machine
->is_interrupt
1265 && (!cfun
->machine
->is_leaf
|| regno
== A0_REGNO
))
1267 if (df_regs_ever_live_p (regno
)
1268 && (!call_used_regs
[regno
] || cfun
->machine
->is_interrupt
))
1273 /* This function contains all the intelligence about saving and
1274 restoring registers. It always figures out the register save set.
1275 When called with PP_justcount, it merely returns the size of the
1276 save set (for eliminating the frame pointer, for example). When
1277 called with PP_pushm or PP_popm, it emits the appropriate
1278 instructions for saving (pushm) or restoring (popm) the
1281 m32c_pushm_popm (Push_Pop_Type ppt
)
1284 int byte_count
= 0, bytes
;
1286 rtx dwarf_set
[PUSHM_N
];
1288 int nosave_mask
= 0;
1290 if (crtl
->return_rtx
1291 && GET_CODE (crtl
->return_rtx
) == PARALLEL
1292 && !(crtl
->calls_eh_return
|| cfun
->machine
->is_interrupt
))
1294 rtx exp
= XVECEXP (crtl
->return_rtx
, 0, 0);
1295 rtx rv
= XEXP (exp
, 0);
1296 int rv_bytes
= GET_MODE_SIZE (GET_MODE (rv
));
1299 nosave_mask
|= 0x20; /* PSI, SI */
1301 nosave_mask
|= 0xf0; /* DF */
1303 nosave_mask
|= 0x50; /* DI */
1306 for (i
= 0; i
< (int) PUSHM_N
; i
++)
1308 /* Skip if neither register needs saving. */
1309 if (!need_to_save (pushm_info
[i
].reg1
))
1312 if (pushm_info
[i
].bit
& nosave_mask
)
1315 reg_mask
|= pushm_info
[i
].bit
;
1316 bytes
= TARGET_A16
? pushm_info
[i
].a16_bytes
: pushm_info
[i
].a24_bytes
;
1318 if (ppt
== PP_pushm
)
1320 enum machine_mode mode
= (bytes
== 2) ? HImode
: SImode
;
1323 /* Always use stack_pointer_rtx instead of calling
1324 rtx_gen_REG ourselves. Code elsewhere in GCC assumes
1325 that there is a single rtx representing the stack pointer,
1326 namely stack_pointer_rtx, and uses == to recognize it. */
1327 addr
= stack_pointer_rtx
;
1329 if (byte_count
!= 0)
1330 addr
= gen_rtx_PLUS (GET_MODE (addr
), addr
, GEN_INT (byte_count
));
1332 dwarf_set
[n_dwarfs
++] =
1333 gen_rtx_SET (VOIDmode
,
1334 gen_rtx_MEM (mode
, addr
),
1335 gen_rtx_REG (mode
, pushm_info
[i
].reg1
));
1336 F (dwarf_set
[n_dwarfs
- 1]);
1339 byte_count
+= bytes
;
1342 if (cfun
->machine
->is_interrupt
)
1344 cfun
->machine
->intr_pushm
= reg_mask
& 0xfe;
1349 if (cfun
->machine
->is_interrupt
)
1350 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1351 if (need_to_save (i
))
1354 cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
] = 1;
1357 if (ppt
== PP_pushm
&& byte_count
)
1359 rtx note
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (n_dwarfs
+ 1));
1364 XVECEXP (note
, 0, 0)
1365 = gen_rtx_SET (VOIDmode
,
1367 gen_rtx_PLUS (GET_MODE (stack_pointer_rtx
),
1369 GEN_INT (-byte_count
)));
1370 F (XVECEXP (note
, 0, 0));
1372 for (i
= 0; i
< n_dwarfs
; i
++)
1373 XVECEXP (note
, 0, i
+ 1) = dwarf_set
[i
];
1375 pushm
= F (emit_insn (gen_pushm (GEN_INT (reg_mask
))));
1377 REG_NOTES (pushm
) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, note
,
1381 if (cfun
->machine
->is_interrupt
)
1382 for (i
= MEM0_REGNO
; i
<= MEM7_REGNO
; i
++)
1383 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1386 pushm
= emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode
, i
)));
1388 pushm
= emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode
, i
)));
1392 if (ppt
== PP_popm
&& byte_count
)
1394 if (cfun
->machine
->is_interrupt
)
1395 for (i
= MEM7_REGNO
; i
>= MEM0_REGNO
; i
--)
1396 if (cfun
->machine
->intr_pushmem
[i
- MEM0_REGNO
])
1399 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, i
)));
1401 emit_insn (gen_pophi_24 (gen_rtx_REG (HImode
, i
)));
1404 emit_insn (gen_popm (GEN_INT (reg_mask
)));
1410 /* Implements INITIAL_ELIMINATION_OFFSET. See the comment above that
1411 diagrams our call frame. */
1413 m32c_initial_elimination_offset (int from
, int to
)
1417 if (from
== AP_REGNO
)
1427 ofs
+= m32c_pushm_popm (PP_justcount
);
1428 ofs
+= get_frame_size ();
1431 /* Account for push rounding. */
1433 ofs
= (ofs
+ 1) & ~1;
1435 fprintf (stderr
, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from
,
1441 /* Passing Function Arguments on the Stack */
1443 #undef TARGET_PROMOTE_PROTOTYPES
1444 #define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1446 m32c_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED
)
1451 /* Implements PUSH_ROUNDING. The R8C and M16C have byte stacks, the
1452 M32C has word stacks. */
1454 m32c_push_rounding (int n
)
1456 if (TARGET_R8C
|| TARGET_M16C
)
1458 return (n
+ 1) & ~1;
1461 /* Passing Arguments in Registers */
1463 /* Implements FUNCTION_ARG. Arguments are passed partly in registers,
1464 partly on stack. If our function returns a struct, a pointer to a
1465 buffer for it is at the top of the stack (last thing pushed). The
1466 first few real arguments may be in registers as follows:
1468 R8C/M16C: arg1 in r1 if it's QI or HI (else it's pushed on stack)
1469 arg2 in r2 if it's HI (else pushed on stack)
1471 M32C: arg1 in r0 if it's QI or HI (else it's pushed on stack)
1474 Structs are not passed in registers, even if they fit. Only
1475 integer and pointer types are passed in registers.
1477 Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1480 m32c_function_arg (CUMULATIVE_ARGS
* ca
,
1481 enum machine_mode mode
, tree type
, int named
)
1483 /* Can return a reg, parallel, or 0 for stack */
1486 fprintf (stderr
, "func_arg %d (%s, %d)\n",
1487 ca
->parm_num
, mode_name
[mode
], named
);
1491 if (mode
== VOIDmode
)
1494 if (ca
->force_mem
|| !named
)
1497 fprintf (stderr
, "func arg: force %d named %d, mem\n", ca
->force_mem
,
1503 if (type
&& INTEGRAL_TYPE_P (type
) && POINTER_TYPE_P (type
))
1506 if (type
&& AGGREGATE_TYPE_P (type
))
1509 switch (ca
->parm_num
)
1512 if (GET_MODE_SIZE (mode
) == 1 || GET_MODE_SIZE (mode
) == 2)
1513 rv
= gen_rtx_REG (mode
, TARGET_A16
? R1_REGNO
: R0_REGNO
);
1517 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 2)
1518 rv
= gen_rtx_REG (mode
, R2_REGNO
);
1528 #undef TARGET_PASS_BY_REFERENCE
1529 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1531 m32c_pass_by_reference (CUMULATIVE_ARGS
* ca ATTRIBUTE_UNUSED
,
1532 enum machine_mode mode ATTRIBUTE_UNUSED
,
1533 const_tree type ATTRIBUTE_UNUSED
,
1534 bool named ATTRIBUTE_UNUSED
)
1539 /* Implements INIT_CUMULATIVE_ARGS. */
1541 m32c_init_cumulative_args (CUMULATIVE_ARGS
* ca
,
1543 rtx libname ATTRIBUTE_UNUSED
,
1545 int n_named_args ATTRIBUTE_UNUSED
)
1547 if (fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fndecl
))
1554 /* Implements FUNCTION_ARG_ADVANCE. force_mem is set for functions
1555 returning structures, so we always reset that. Otherwise, we only
1556 need to know the sequence number of the argument to know what to do
1559 m32c_function_arg_advance (CUMULATIVE_ARGS
* ca
,
1560 enum machine_mode mode ATTRIBUTE_UNUSED
,
1561 tree type ATTRIBUTE_UNUSED
,
1562 int named ATTRIBUTE_UNUSED
)
1570 /* Implements FUNCTION_ARG_REGNO_P. */
1572 m32c_function_arg_regno_p (int r
)
1575 return (r
== R0_REGNO
);
1576 return (r
== R1_REGNO
|| r
== R2_REGNO
);
1579 /* HImode and PSImode are the two "native" modes as far as GCC is
1580 concerned, but the chips also support a 32-bit mode which is used
1581 for some opcodes in R8C/M16C and for reset vectors and such. */
1582 #undef TARGET_VALID_POINTER_MODE
1583 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1585 m32c_valid_pointer_mode (enum machine_mode mode
)
1595 /* How Scalar Function Values Are Returned */
1597 /* Implements LIBCALL_VALUE. Most values are returned in $r0, or some
1598 combination of registers starting there (r2r0 for longs, r3r1r2r0
1599 for long long, r3r2r1r0 for doubles), except that that ABI
1600 currently doesn't work because it ends up using all available
1601 general registers and gcc often can't compile it. So, instead, we
1602 return anything bigger than 16 bits in "mem0" (effectively, a
1603 memory location). */
1605 m32c_libcall_value (enum machine_mode mode
)
1607 /* return reg or parallel */
1609 /* FIXME: GCC has difficulty returning large values in registers,
1610 because that ties up most of the general registers and gives the
1611 register allocator little to work with. Until we can resolve
1612 this, large values are returned in memory. */
1617 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (4));
1618 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1619 gen_rtx_REG (HImode
,
1622 XVECEXP (rv
, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode
,
1623 gen_rtx_REG (HImode
,
1626 XVECEXP (rv
, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode
,
1627 gen_rtx_REG (HImode
,
1630 XVECEXP (rv
, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode
,
1631 gen_rtx_REG (HImode
,
1637 if (TARGET_A24
&& GET_MODE_SIZE (mode
) > 2)
1641 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (1));
1642 XVECEXP (rv
, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode
,
1650 if (GET_MODE_SIZE (mode
) > 2)
1651 return gen_rtx_REG (mode
, MEM0_REGNO
);
1652 return gen_rtx_REG (mode
, R0_REGNO
);
1655 /* Implements FUNCTION_VALUE. Functions and libcalls have the same
1658 m32c_function_value (const_tree valtype
, const_tree func ATTRIBUTE_UNUSED
)
1660 /* return reg or parallel */
1661 const enum machine_mode mode
= TYPE_MODE (valtype
);
1662 return m32c_libcall_value (mode
);
1665 /* How Large Values Are Returned */
1667 /* We return structures by pushing the address on the stack, even if
1668 we use registers for the first few "real" arguments. */
1669 #undef TARGET_STRUCT_VALUE_RTX
1670 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1672 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED
,
1673 int incoming ATTRIBUTE_UNUSED
)
1678 /* Function Entry and Exit */
1680 /* Implements EPILOGUE_USES. Interrupts restore all registers. */
1682 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
1684 if (cfun
->machine
->is_interrupt
)
1689 /* Implementing the Varargs Macros */
1691 #undef TARGET_STRICT_ARGUMENT_NAMING
1692 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1694 m32c_strict_argument_naming (CUMULATIVE_ARGS
* ca ATTRIBUTE_UNUSED
)
1699 /* Trampolines for Nested Functions */
1703 1 0000 75C43412 mov.w #0x1234,a0
1704 2 0004 FC000000 jmp.a label
1707 1 0000 BC563412 mov.l:s #0x123456,a0
1708 2 0004 CC000000 jmp.a label
1711 /* Implements TRAMPOLINE_SIZE. */
1713 m32c_trampoline_size (void)
1715 /* Allocate extra space so we can avoid the messy shifts when we
1716 initialize the trampoline; we just write past the end of the
1718 return TARGET_A16
? 8 : 10;
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines are word-aligned.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1728 /* Implements INITIALIZE_TRAMPOLINE. */
1730 m32c_initialize_trampoline (rtx tramp
, rtx function
, rtx chainval
)
1732 #define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1735 /* Note: we subtract a "word" because the moves want signed
1736 constants, not unsigned constants. */
1737 emit_move_insn (A0 (HImode
, 0), GEN_INT (0xc475 - 0x10000));
1738 emit_move_insn (A0 (HImode
, 2), chainval
);
1739 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xfc - 0x100));
1740 /* We use 16-bit addresses here, but store the zero to turn it
1741 into a 24-bit offset. */
1742 emit_move_insn (A0 (HImode
, 5), function
);
1743 emit_move_insn (A0 (QImode
, 7), GEN_INT (0x00));
1747 /* Note that the PSI moves actually write 4 bytes. Make sure we
1748 write stuff out in the right order, and leave room for the
1749 extra byte at the end. */
1750 emit_move_insn (A0 (QImode
, 0), GEN_INT (0xbc - 0x100));
1751 emit_move_insn (A0 (PSImode
, 1), chainval
);
1752 emit_move_insn (A0 (QImode
, 4), GEN_INT (0xcc - 0x100));
1753 emit_move_insn (A0 (PSImode
, 5), function
);
1758 /* Implicit Calls to Library Routines */
1760 #undef TARGET_INIT_LIBFUNCS
1761 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1763 m32c_init_libfuncs (void)
1767 /* We do this because the M32C has an HImode operand, but the
1768 M16C has an 8-bit operand. Since gcc looks at the match data
1769 and not the expanded rtl, we have to reset the array so that
1770 the right modes are found. */
1771 setcc_gen_code
[EQ
] = CODE_FOR_seq_24
;
1772 setcc_gen_code
[NE
] = CODE_FOR_sne_24
;
1773 setcc_gen_code
[GT
] = CODE_FOR_sgt_24
;
1774 setcc_gen_code
[GE
] = CODE_FOR_sge_24
;
1775 setcc_gen_code
[LT
] = CODE_FOR_slt_24
;
1776 setcc_gen_code
[LE
] = CODE_FOR_sle_24
;
1777 setcc_gen_code
[GTU
] = CODE_FOR_sgtu_24
;
1778 setcc_gen_code
[GEU
] = CODE_FOR_sgeu_24
;
1779 setcc_gen_code
[LTU
] = CODE_FOR_sltu_24
;
1780 setcc_gen_code
[LEU
] = CODE_FOR_sleu_24
;
1784 /* Addressing Modes */
1786 /* Used by GO_IF_LEGITIMATE_ADDRESS. The r8c/m32c family supports a
1787 wide range of non-orthogonal addressing modes, including the
1788 ability to double-indirect on *some* of them. Not all insns
1789 support all modes, either, but we rely on predicates and
1790 constraints to deal with that. */
1792 m32c_legitimate_address_p (enum machine_mode mode
, rtx x
, int strict
)
1798 /* Wide references to memory will be split after reload, so we must
1799 ensure that all parts of such splits remain legitimate
1801 mode_adjust
= GET_MODE_SIZE (mode
) - 1;
1803 /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1804 if (GET_CODE (x
) == PRE_DEC
1805 || GET_CODE (x
) == POST_INC
|| GET_CODE (x
) == PRE_MODIFY
)
1807 return (GET_CODE (XEXP (x
, 0)) == REG
1808 && REGNO (XEXP (x
, 0)) == SP_REGNO
);
1812 /* This is the double indirection detection, but it currently
1813 doesn't work as cleanly as this code implies, so until we've had
1814 a chance to debug it, leave it disabled. */
1815 if (TARGET_A24
&& GET_CODE (x
) == MEM
&& GET_CODE (XEXP (x
, 0)) != PLUS
)
1818 fprintf (stderr
, "double indirect\n");
1827 /* Most indexable registers can be used without displacements,
1828 although some of them will be emitted with an explicit zero
1829 to please the assembler. */
1830 switch (REGNO (patternr
[0]))
1840 if (IS_PSEUDO (patternr
[0], strict
))
1847 /* This is more interesting, because different base registers
1848 allow for different displacements - both range and signedness
1849 - and it differs from chip series to chip series too. */
1850 int rn
= REGNO (patternr
[1]);
1851 HOST_WIDE_INT offs
= INTVAL (patternr
[2]);
1857 /* The syntax only allows positive offsets, but when the
1858 offsets span the entire memory range, we can simulate
1859 negative offsets by wrapping. */
1861 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1863 return (offs
>= 0 && offs
<= 65535 - mode_adjust
);
1865 return (offs
>= -16777216 && offs
<= 16777215);
1869 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1870 return (offs
>= -65536 && offs
<= 65535 - mode_adjust
);
1873 return (offs
>= -128 && offs
<= 127 - mode_adjust
);
1876 if (IS_PSEUDO (patternr
[1], strict
))
1881 if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1883 rtx reg
= patternr
[1];
1885 /* We don't know where the symbol is, so only allow base
1886 registers which support displacements spanning the whole
1888 switch (REGNO (reg
))
1892 /* $sb needs a secondary reload, but since it's involved in
1893 memory address reloads too, we don't deal with it very
1895 /* case SB_REGNO: */
1898 if (IS_PSEUDO (reg
, strict
))
1906 /* Implements REG_OK_FOR_BASE_P. */
1908 m32c_reg_ok_for_base_p (rtx x
, int strict
)
1910 if (GET_CODE (x
) != REG
)
1921 if (IS_PSEUDO (x
, strict
))
1927 /* We have three choices for choosing fb->aN offsets. If we choose -128,
1928 we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1930 EB 4B FF mova -128[$fb],$a0
1931 D8 0C FF FF mov.w:Q #0,-1[$a0]
1933 Alternately, we subtract the frame size, and hopefully use 8-bit aN
1936 77 54 00 01 sub #256,$a0
1937 D8 08 01 mov.w:Q #0,1[$a0]
1939 If we don't offset (i.e. offset by zero), we end up with:
1941 D8 0C 00 FF mov.w:Q #0,-256[$a0]
1943 We have to subtract *something* so that we have a PLUS rtx to mark
1944 that we've done this reload. The -128 offset will never result in
1945 an 8-bit aN offset, and the payoff for the second case is five
1946 loads *if* those loads are within 256 bytes of the other end of the
1947 frame, so the third case seems best. Note that we subtract the
1948 zero, but detect that in the addhi3 pattern. */
1950 #define BIG_FB_ADJ 0
1952 /* Implements LEGITIMIZE_ADDRESS. The only address we really have to
1953 worry about is frame base offsets, as $fb has a limited
1954 displacement range. We deal with this by attempting to reload $fb
1955 itself into an address register; that seems to result in the best
1958 m32c_legitimize_address (rtx
* x ATTRIBUTE_UNUSED
,
1959 rtx oldx ATTRIBUTE_UNUSED
,
1960 enum machine_mode mode ATTRIBUTE_UNUSED
)
1963 fprintf (stderr
, "m32c_legitimize_address for mode %s\n", mode_name
[mode
]);
1965 fprintf (stderr
, "\n");
1968 if (GET_CODE (*x
) == PLUS
1969 && GET_CODE (XEXP (*x
, 0)) == REG
1970 && REGNO (XEXP (*x
, 0)) == FB_REGNO
1971 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
1972 && (INTVAL (XEXP (*x
, 1)) < -128
1973 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
1975 /* reload FB to A_REGS */
1976 rtx temp
= gen_reg_rtx (Pmode
);
1978 emit_insn (gen_rtx_SET (VOIDmode
, temp
, XEXP (*x
, 0)));
1979 XEXP (*x
, 0) = temp
;
1986 /* Implements LEGITIMIZE_RELOAD_ADDRESS. See comment above. */
1988 m32c_legitimize_reload_address (rtx
* x
,
1989 enum machine_mode mode
,
1991 int type
, int ind_levels ATTRIBUTE_UNUSED
)
1994 fprintf (stderr
, "\nm32c_legitimize_reload_address for mode %s\n",
1999 /* At one point, this function tried to get $fb copied to an address
2000 register, which in theory would maximize sharing, but gcc was
2001 *also* still trying to reload the whole address, and we'd run out
2002 of address registers. So we let gcc do the naive (but safe)
2003 reload instead, when the above function doesn't handle it for
2006 The code below is a second attempt at the above. */
2008 if (GET_CODE (*x
) == PLUS
2009 && GET_CODE (XEXP (*x
, 0)) == REG
2010 && REGNO (XEXP (*x
, 0)) == FB_REGNO
2011 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
2012 && (INTVAL (XEXP (*x
, 1)) < -128
2013 || INTVAL (XEXP (*x
, 1)) > (128 - GET_MODE_SIZE (mode
))))
2016 int offset
= INTVAL (XEXP (*x
, 1));
2017 int adjustment
= -BIG_FB_ADJ
;
2019 sum
= gen_rtx_PLUS (Pmode
, XEXP (*x
, 0),
2020 GEN_INT (adjustment
));
2021 *x
= gen_rtx_PLUS (Pmode
, sum
, GEN_INT (offset
- adjustment
));
2022 if (type
== RELOAD_OTHER
)
2023 type
= RELOAD_FOR_OTHER_ADDRESS
;
2024 push_reload (sum
, NULL_RTX
, &XEXP (*x
, 0), NULL
,
2025 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
2030 if (GET_CODE (*x
) == PLUS
2031 && GET_CODE (XEXP (*x
, 0)) == PLUS
2032 && GET_CODE (XEXP (XEXP (*x
, 0), 0)) == REG
2033 && REGNO (XEXP (XEXP (*x
, 0), 0)) == FB_REGNO
2034 && GET_CODE (XEXP (XEXP (*x
, 0), 1)) == CONST_INT
2035 && GET_CODE (XEXP (*x
, 1)) == CONST_INT
2038 if (type
== RELOAD_OTHER
)
2039 type
= RELOAD_FOR_OTHER_ADDRESS
;
2040 push_reload (XEXP (*x
, 0), NULL_RTX
, &XEXP (*x
, 0), NULL
,
2041 A_REGS
, Pmode
, VOIDmode
, 0, 0, opnum
,
2049 /* Implements LEGITIMATE_CONSTANT_P. We split large constants anyway,
2050 so we can allow anything. */
2052 m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED
)
2058 /* Condition Code Status */
2060 #undef TARGET_FIXED_CONDITION_CODE_REGS
2061 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2063 m32c_fixed_condition_code_regs (unsigned int *p1
, unsigned int *p2
)
2066 *p2
= INVALID_REGNUM
;
2070 /* Describing Relative Costs of Operations */
2072 /* Implements REGISTER_MOVE_COST. We make impossible moves
2073 prohibitively expensive, like trying to put QIs in r2/r3 (there are
2074 no opcodes to do that). We also discourage use of mem* registers
2075 since they're really memory. */
2077 m32c_register_move_cost (enum machine_mode mode
, int from
, int to
)
2079 int cost
= COSTS_N_INSNS (3);
2080 int cc
= class_contents
[from
][0] | class_contents
[to
][0];
2081 /* FIXME: pick real values, but not 2 for now. */
2082 if (mode
== QImode
&& (cc
& class_contents
[R23_REGS
][0]))
2084 if (!(cc
& ~class_contents
[R23_REGS
][0]))
2085 cost
= COSTS_N_INSNS (1000);
2087 cost
= COSTS_N_INSNS (80);
2090 if (!class_can_hold_mode (from
, mode
) || !class_can_hold_mode (to
, mode
))
2091 cost
= COSTS_N_INSNS (1000);
2093 if (classes_intersect (from
, CR_REGS
))
2094 cost
+= COSTS_N_INSNS (5);
2096 if (classes_intersect (to
, CR_REGS
))
2097 cost
+= COSTS_N_INSNS (5);
2099 if (from
== MEM_REGS
|| to
== MEM_REGS
)
2100 cost
+= COSTS_N_INSNS (50);
2101 else if (classes_intersect (from
, MEM_REGS
)
2102 || classes_intersect (to
, MEM_REGS
))
2103 cost
+= COSTS_N_INSNS (10);
2106 fprintf (stderr
, "register_move_cost %s from %s to %s = %d\n",
2107 mode_name
[mode
], class_names
[from
], class_names
[to
], cost
);
2112 /* Implements MEMORY_MOVE_COST. */
2114 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED
,
2115 int reg_class ATTRIBUTE_UNUSED
,
2116 int in ATTRIBUTE_UNUSED
)
2118 /* FIXME: pick real values. */
2119 return COSTS_N_INSNS (10);
2122 /* Here we try to describe when we use multiple opcodes for one RTX so
2123 that gcc knows when to use them. */
2124 #undef TARGET_RTX_COSTS
2125 #define TARGET_RTX_COSTS m32c_rtx_costs
2127 m32c_rtx_costs (rtx x
, int code
, int outer_code
, int *total
,
2128 bool speed ATTRIBUTE_UNUSED
)
2133 if (REGNO (x
) >= MEM0_REGNO
&& REGNO (x
) <= MEM7_REGNO
)
2134 *total
+= COSTS_N_INSNS (500);
2136 *total
+= COSTS_N_INSNS (1);
2142 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
2144 /* mov.b r1l, r1h */
2145 *total
+= COSTS_N_INSNS (1);
2148 if (INTVAL (XEXP (x
, 1)) > 8
2149 || INTVAL (XEXP (x
, 1)) < -8)
2152 /* mov.b r1l, r1h */
2153 *total
+= COSTS_N_INSNS (2);
2168 if (outer_code
== SET
)
2170 *total
+= COSTS_N_INSNS (2);
2177 rtx dest
= XEXP (x
, 0);
2178 rtx addr
= XEXP (dest
, 0);
2179 switch (GET_CODE (addr
))
2182 *total
+= COSTS_N_INSNS (1);
2185 *total
+= COSTS_N_INSNS (3);
2188 *total
+= COSTS_N_INSNS (2);
2196 /* Reasonable default. */
2197 if (TARGET_A16
&& GET_MODE(x
) == SImode
)
2198 *total
+= COSTS_N_INSNS (2);
2204 #undef TARGET_ADDRESS_COST
2205 #define TARGET_ADDRESS_COST m32c_address_cost
2207 m32c_address_cost (rtx addr
, bool speed ATTRIBUTE_UNUSED
)
2210 /* fprintf(stderr, "\naddress_cost\n");
2212 switch (GET_CODE (addr
))
2217 return COSTS_N_INSNS(1);
2218 if (0 < i
&& i
<= 255)
2219 return COSTS_N_INSNS(2);
2220 if (0 < i
&& i
<= 65535)
2221 return COSTS_N_INSNS(3);
2222 return COSTS_N_INSNS(4);
2224 return COSTS_N_INSNS(4);
2226 return COSTS_N_INSNS(1);
2228 if (GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
2230 i
= INTVAL (XEXP (addr
, 1));
2232 return COSTS_N_INSNS(1);
2233 if (0 < i
&& i
<= 255)
2234 return COSTS_N_INSNS(2);
2235 if (0 < i
&& i
<= 65535)
2236 return COSTS_N_INSNS(3);
2238 return COSTS_N_INSNS(4);
2244 /* Defining the Output Assembler Language */
2246 /* The Overall Framework of an Assembler File */
2248 #undef TARGET_HAVE_NAMED_SECTIONS
2249 #define TARGET_HAVE_NAMED_SECTIONS true
2251 /* Output of Data */
2253 /* We may have 24 bit sizes, which is the native address size.
2254 Currently unused, but provided for completeness. */
2255 #undef TARGET_ASM_INTEGER
2256 #define TARGET_ASM_INTEGER m32c_asm_integer
2258 m32c_asm_integer (rtx x
, unsigned int size
, int aligned_p
)
2263 fprintf (asm_out_file
, "\t.3byte\t");
2264 output_addr_const (asm_out_file
, x
);
2265 fputc ('\n', asm_out_file
);
2268 if (GET_CODE (x
) == SYMBOL_REF
)
2270 fprintf (asm_out_file
, "\t.long\t");
2271 output_addr_const (asm_out_file
, x
);
2272 fputc ('\n', asm_out_file
);
2277 return default_assemble_integer (x
, size
, aligned_p
);
2280 /* Output of Assembler Instructions */
2282 /* We use a lookup table because the addressing modes are non-orthogonal. */
2287 char const *pattern
;
2290 const conversions
[] = {
2293 { 0, "mr", "z[1]" },
2294 { 0, "m+ri", "3[2]" },
2295 { 0, "m+rs", "3[2]" },
2296 { 0, "m+r+si", "4+5[2]" },
2299 { 0, "m+si", "2+3" },
2301 { 0, "mmr", "[z[2]]" },
2302 { 0, "mm+ri", "[4[3]]" },
2303 { 0, "mm+rs", "[4[3]]" },
2304 { 0, "mm+r+si", "[5+6[3]]" },
2305 { 0, "mms", "[[2]]" },
2306 { 0, "mmi", "[[2]]" },
2307 { 0, "mm+si", "[4[3]]" },
2311 { 0, "+si", "#1+2" },
2317 { 'd', "+si", "1+2" },
2320 { 'D', "+si", "1+2" },
/* This is in order according to the bitfield that pushm/popm use.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2336 /* Implements PRINT_OPERAND. */
2338 m32c_print_operand (FILE * file
, rtx x
, int code
)
2343 int unsigned_const
= 0;
2346 /* Multiplies; constants are converted to sign-extended format but
2347 we need unsigned, so 'u' and 'U' tell us what size unsigned we
2359 /* This one is only for debugging; you can put it in a pattern to
2360 force this error. */
2363 fprintf (stderr
, "dj: unreviewed pattern:");
2364 if (current_output_insn
)
2365 debug_rtx (current_output_insn
);
2368 /* PSImode operations are either .w or .l depending on the target. */
2372 fprintf (file
, "w");
2374 fprintf (file
, "l");
2377 /* Inverted conditionals. */
2380 switch (GET_CODE (x
))
2386 fputs ("gtu", file
);
2392 fputs ("geu", file
);
2398 fputs ("leu", file
);
2404 fputs ("ltu", file
);
2417 /* Regular conditionals. */
2420 switch (GET_CODE (x
))
2426 fputs ("leu", file
);
2432 fputs ("ltu", file
);
2438 fputs ("gtu", file
);
2444 fputs ("geu", file
);
2457 /* Used in negsi2 to do HImode ops on the two parts of an SImode
2459 if (code
== 'h' && GET_MODE (x
) == SImode
)
2461 x
= m32c_subreg (HImode
, x
, SImode
, 0);
2464 if (code
== 'H' && GET_MODE (x
) == SImode
)
2466 x
= m32c_subreg (HImode
, x
, SImode
, 2);
2469 if (code
== 'h' && GET_MODE (x
) == HImode
)
2471 x
= m32c_subreg (QImode
, x
, HImode
, 0);
2474 if (code
== 'H' && GET_MODE (x
) == HImode
)
2476 /* We can't actually represent this as an rtx. Do it here. */
2477 if (GET_CODE (x
) == REG
)
2482 fputs ("r0h", file
);
2485 fputs ("r1h", file
);
2491 /* This should be a MEM. */
2492 x
= m32c_subreg (QImode
, x
, HImode
, 1);
2495 /* This is for BMcond, which always wants word register names. */
2496 if (code
== 'h' && GET_MODE (x
) == QImode
)
2498 if (GET_CODE (x
) == REG
)
2499 x
= gen_rtx_REG (HImode
, REGNO (x
));
2502 /* 'x' and 'X' need to be ignored for non-immediates. */
2503 if ((code
== 'x' || code
== 'X') && GET_CODE (x
) != CONST_INT
)
2508 for (i
= 0; conversions
[i
].pattern
; i
++)
2509 if (conversions
[i
].code
== code
2510 && streq (conversions
[i
].pattern
, pattern
))
2512 for (j
= 0; conversions
[i
].format
[j
]; j
++)
2513 /* backslash quotes the next character in the output pattern. */
2514 if (conversions
[i
].format
[j
] == '\\')
2516 fputc (conversions
[i
].format
[j
+ 1], file
);
2519 /* Digits in the output pattern indicate that the
2520 corresponding RTX is to be output at that point. */
2521 else if (ISDIGIT (conversions
[i
].format
[j
]))
2523 rtx r
= patternr
[conversions
[i
].format
[j
] - '0'];
2524 switch (GET_CODE (r
))
2527 fprintf (file
, "%s",
2528 reg_name_with_mode (REGNO (r
), GET_MODE (r
)));
2537 int i
= (int) exact_log2 (v
);
2539 i
= (int) exact_log2 ((v
^ 0xffff) & 0xffff);
2541 i
= (int) exact_log2 ((v
^ 0xff) & 0xff);
2543 fprintf (file
, "%d", i
);
2547 /* Unsigned byte. */
2548 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2552 /* Unsigned word. */
2553 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
2554 INTVAL (r
) & 0xffff);
2557 /* pushm and popm encode a register set into a single byte. */
2559 for (b
= 7; b
>= 0; b
--)
2560 if (INTVAL (r
) & (1 << b
))
2562 fprintf (file
, "%s%s", comma
, pushm_regs
[b
]);
2567 /* "Minus". Output -X */
2568 ival
= (-INTVAL (r
) & 0xffff);
2570 ival
= ival
- 0x10000;
2571 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2575 if (conversions
[i
].format
[j
+ 1] == '[' && ival
< 0)
2577 /* We can simulate negative displacements by
2578 taking advantage of address space
2579 wrapping when the offset can span the
2580 entire address range. */
2582 patternr
[conversions
[i
].format
[j
+ 2] - '0'];
2583 if (GET_CODE (base
) == REG
)
2584 switch (REGNO (base
))
2589 ival
= 0x1000000 + ival
;
2591 ival
= 0x10000 + ival
;
2595 ival
= 0x10000 + ival
;
2599 else if (code
== 'd' && ival
< 0 && j
== 0)
2600 /* The "mova" opcode is used to do addition by
2601 computing displacements, but again, we need
2602 displacements to be unsigned *if* they're
2603 the only component of the displacement
2604 (i.e. no "symbol-4" type displacement). */
2605 ival
= (TARGET_A24
? 0x1000000 : 0x10000) + ival
;
2607 if (conversions
[i
].format
[j
] == '0')
2609 /* More conversions to unsigned. */
2610 if (unsigned_const
== 2)
2612 if (unsigned_const
== 1)
2615 if (streq (conversions
[i
].pattern
, "mi")
2616 || streq (conversions
[i
].pattern
, "mmi"))
2618 /* Integers used as addresses are unsigned. */
2619 ival
&= (TARGET_A24
? 0xffffff : 0xffff);
2621 if (force_sign
&& ival
>= 0)
2623 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ival
);
2628 /* We don't have const_double constants. If it
2629 happens, make it obvious. */
2630 fprintf (file
, "[const_double 0x%lx]",
2631 (unsigned long) CONST_DOUBLE_HIGH (r
));
2634 assemble_name (file
, XSTR (r
, 0));
2637 output_asm_label (r
);
2640 fprintf (stderr
, "don't know how to print this operand:");
2647 if (conversions
[i
].format
[j
] == 'z')
2649 /* Some addressing modes *must* have a displacement,
2650 so insert a zero here if needed. */
2652 for (k
= j
+ 1; conversions
[i
].format
[k
]; k
++)
2653 if (ISDIGIT (conversions
[i
].format
[k
]))
2655 rtx reg
= patternr
[conversions
[i
].format
[k
] - '0'];
2656 if (GET_CODE (reg
) == REG
2657 && (REGNO (reg
) == SB_REGNO
2658 || REGNO (reg
) == FB_REGNO
2659 || REGNO (reg
) == SP_REGNO
))
2664 /* Signed displacements off symbols need to have signs
2666 if (conversions
[i
].format
[j
] == '+'
2667 && (!code
|| code
== 'D' || code
== 'd')
2668 && ISDIGIT (conversions
[i
].format
[j
+ 1])
2669 && (GET_CODE (patternr
[conversions
[i
].format
[j
+ 1] - '0'])
2675 fputc (conversions
[i
].format
[j
], file
);
2679 if (!conversions
[i
].pattern
)
2681 fprintf (stderr
, "unconvertible operand %c `%s'", code
? code
: '-',
2684 fprintf (file
, "[%c.%s]", code
? code
: '-', pattern
);
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
   above for descriptions of what these do.  */
int
m32c_print_operand_punct_valid_p (int c)
{
  if (c == '&' || c == '!')
    return 1;

  return 0;
}
2700 /* Implements PRINT_OPERAND_ADDRESS. Nothing unusual here. */
2702 m32c_print_operand_address (FILE * stream
, rtx address
)
2704 gcc_assert (GET_CODE (address
) == MEM
);
2705 m32c_print_operand (stream
, XEXP (address
, 0), 0);
2708 /* Implements ASM_OUTPUT_REG_PUSH. Control registers are pushed
2709 differently than general registers. */
2711 m32c_output_reg_push (FILE * s
, int regno
)
2713 if (regno
== FLG_REGNO
)
2714 fprintf (s
, "\tpushc\tflg\n");
2716 fprintf (s
, "\tpush.%c\t%s\n",
2717 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2720 /* Likewise for ASM_OUTPUT_REG_POP. */
2722 m32c_output_reg_pop (FILE * s
, int regno
)
2724 if (regno
== FLG_REGNO
)
2725 fprintf (s
, "\tpopc\tflg\n");
2727 fprintf (s
, "\tpop.%c\t%s\n",
2728 " bwll"[reg_push_size (regno
)], reg_names
[regno
]);
2731 /* Defining target-specific uses of `__attribute__' */
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		  : DECL_ATTRIBUTES (decl) \
		    ? (DECL_ATTRIBUTES (decl)) \
		    : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2741 /* Returns TRUE if the given tree has the "interrupt" attribute. */
2743 interrupt_p (tree node ATTRIBUTE_UNUSED
)
2745 tree list
= M32C_ATTRIBUTES (node
);
2748 if (is_attribute_p ("interrupt", TREE_PURPOSE (list
)))
2750 list
= TREE_CHAIN (list
);
2756 interrupt_handler (tree
* node ATTRIBUTE_UNUSED
,
2757 tree name ATTRIBUTE_UNUSED
,
2758 tree args ATTRIBUTE_UNUSED
,
2759 int flags ATTRIBUTE_UNUSED
,
2760 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2765 /* Returns TRUE if given tree has the "function_vector" attribute. */
2767 m32c_special_page_vector_p (tree func
)
2769 if (TREE_CODE (func
) != FUNCTION_DECL
)
2772 tree list
= M32C_ATTRIBUTES (func
);
2775 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2777 list
= TREE_CHAIN (list
);
2783 function_vector_handler (tree
* node ATTRIBUTE_UNUSED
,
2784 tree name ATTRIBUTE_UNUSED
,
2785 tree args ATTRIBUTE_UNUSED
,
2786 int flags ATTRIBUTE_UNUSED
,
2787 bool * no_add_attrs ATTRIBUTE_UNUSED
)
2791 /* The attribute is not supported for R8C target. */
2792 warning (OPT_Wattributes
,
2793 "`%s' attribute is not supported for R8C target",
2794 IDENTIFIER_POINTER (name
));
2795 *no_add_attrs
= true;
2797 else if (TREE_CODE (*node
) != FUNCTION_DECL
)
2799 /* The attribute must be applied to functions only. */
2800 warning (OPT_Wattributes
,
2801 "`%s' attribute applies only to functions",
2802 IDENTIFIER_POINTER (name
));
2803 *no_add_attrs
= true;
2805 else if (TREE_CODE (TREE_VALUE (args
)) != INTEGER_CST
)
2807 /* The argument must be a constant integer. */
2808 warning (OPT_Wattributes
,
2809 "`%s' attribute argument not an integer constant",
2810 IDENTIFIER_POINTER (name
));
2811 *no_add_attrs
= true;
2813 else if (TREE_INT_CST_LOW (TREE_VALUE (args
)) < 18
2814 || TREE_INT_CST_LOW (TREE_VALUE (args
)) > 255)
2816 /* The argument value must be between 18 to 255. */
2817 warning (OPT_Wattributes
,
2818 "`%s' attribute argument should be between 18 to 255",
2819 IDENTIFIER_POINTER (name
));
2820 *no_add_attrs
= true;
2825 /* If the function is assigned the attribute 'function_vector', it
2826 returns the function vector number, otherwise returns zero. */
2828 current_function_special_page_vector (rtx x
)
2832 if ((GET_CODE(x
) == SYMBOL_REF
)
2833 && (SYMBOL_REF_FLAGS (x
) & SYMBOL_FLAG_FUNCVEC_FUNCTION
))
2835 tree t
= SYMBOL_REF_DECL (x
);
2837 if (TREE_CODE (t
) != FUNCTION_DECL
)
2840 tree list
= M32C_ATTRIBUTES (t
);
2843 if (is_attribute_p ("function_vector", TREE_PURPOSE (list
)))
2845 num
= TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list
)));
2849 list
= TREE_CHAIN (list
);
2858 #undef TARGET_ATTRIBUTE_TABLE
2859 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2860 static const struct attribute_spec m32c_attribute_table
[] = {
2861 {"interrupt", 0, 0, false, false, false, interrupt_handler
},
2862 {"function_vector", 1, 1, true, false, false, function_vector_handler
},
2863 {0, 0, 0, 0, 0, 0, 0}
2866 #undef TARGET_COMP_TYPE_ATTRIBUTES
2867 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2869 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED
,
2870 const_tree type2 ATTRIBUTE_UNUSED
)
2872 /* 0=incompatible 1=compatible 2=warning */
2876 #undef TARGET_INSERT_ATTRIBUTES
2877 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2879 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED
,
2880 tree
* attr_ptr ATTRIBUTE_UNUSED
)
2882 /* Nothing to do here. */
2887 /* This is a list of legal subregs of hard regs. */
2888 static const struct {
2889 unsigned char outer_mode_size
;
2890 unsigned char inner_mode_size
;
2891 unsigned char byte_mask
;
2892 unsigned char legal_when
;
2894 } legal_subregs
[] = {
2895 {1, 2, 0x03, 1, R0_REGNO
}, /* r0h r0l */
2896 {1, 2, 0x03, 1, R1_REGNO
}, /* r1h r1l */
2897 {1, 2, 0x01, 1, A0_REGNO
},
2898 {1, 2, 0x01, 1, A1_REGNO
},
2900 {1, 4, 0x01, 1, A0_REGNO
},
2901 {1, 4, 0x01, 1, A1_REGNO
},
2903 {2, 4, 0x05, 1, R0_REGNO
}, /* r2 r0 */
2904 {2, 4, 0x05, 1, R1_REGNO
}, /* r3 r1 */
2905 {2, 4, 0x05, 16, A0_REGNO
}, /* a1 a0 */
2906 {2, 4, 0x01, 24, A0_REGNO
}, /* a1 a0 */
2907 {2, 4, 0x01, 24, A1_REGNO
}, /* a1 a0 */
2909 {4, 8, 0x55, 1, R0_REGNO
}, /* r3 r1 r2 r0 */
2912 /* Returns TRUE if OP is a subreg of a hard reg which we don't
2915 m32c_illegal_subreg_p (rtx op
)
2919 int src_mode
, dest_mode
;
2921 if (GET_CODE (op
) != SUBREG
)
2924 dest_mode
= GET_MODE (op
);
2925 offset
= SUBREG_BYTE (op
);
2926 op
= SUBREG_REG (op
);
2927 src_mode
= GET_MODE (op
);
2929 if (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (src_mode
))
2931 if (GET_CODE (op
) != REG
)
2933 if (REGNO (op
) >= MEM0_REGNO
)
2936 offset
= (1 << offset
);
2938 for (i
= 0; i
< ARRAY_SIZE (legal_subregs
); i
++)
2939 if (legal_subregs
[i
].outer_mode_size
== GET_MODE_SIZE (dest_mode
)
2940 && legal_subregs
[i
].regno
== REGNO (op
)
2941 && legal_subregs
[i
].inner_mode_size
== GET_MODE_SIZE (src_mode
)
2942 && legal_subregs
[i
].byte_mask
& offset
)
2944 switch (legal_subregs
[i
].legal_when
)
2961 /* Returns TRUE if we support a move between the first two operands.
2962 At the moment, we just want to discourage mem to mem moves until
2963 after reload, because reload has a hard time with our limited
2964 number of address registers, and we can get into a situation where
2965 we need three of them when we only have two. */
2967 m32c_mov_ok (rtx
* operands
, enum machine_mode mode ATTRIBUTE_UNUSED
)
2969 rtx op0
= operands
[0];
2970 rtx op1
= operands
[1];
2975 #define DEBUG_MOV_OK 0
2977 fprintf (stderr
, "m32c_mov_ok %s\n", mode_name
[mode
]);
2982 if (GET_CODE (op0
) == SUBREG
)
2983 op0
= XEXP (op0
, 0);
2984 if (GET_CODE (op1
) == SUBREG
)
2985 op1
= XEXP (op1
, 0);
2987 if (GET_CODE (op0
) == MEM
2988 && GET_CODE (op1
) == MEM
2989 && ! reload_completed
)
2992 fprintf (stderr
, " - no, mem to mem\n");
2998 fprintf (stderr
, " - ok\n");
3003 /* Returns TRUE if two consecutive HImode mov instructions, generated
3004 for moving an immediate double data to a double data type variable
3005 location, can be combined into single SImode mov instruction. */
3007 m32c_immd_dbl_mov (rtx
* operands
,
3008 enum machine_mode mode ATTRIBUTE_UNUSED
)
3010 int flag
= 0, okflag
= 0, offset1
= 0, offset2
= 0, offsetsign
= 0;
3014 if (GET_CODE (XEXP (operands
[0], 0)) == SYMBOL_REF
3015 && MEM_SCALAR_P (operands
[0])
3016 && !MEM_IN_STRUCT_P (operands
[0])
3017 && GET_CODE (XEXP (operands
[2], 0)) == CONST
3018 && GET_CODE (XEXP (XEXP (operands
[2], 0), 0)) == PLUS
3019 && GET_CODE (XEXP (XEXP (XEXP (operands
[2], 0), 0), 0)) == SYMBOL_REF
3020 && GET_CODE (XEXP (XEXP (XEXP (operands
[2], 0), 0), 1)) == CONST_INT
3021 && MEM_SCALAR_P (operands
[2])
3022 && !MEM_IN_STRUCT_P (operands
[2]))
3025 else if (GET_CODE (XEXP (operands
[0], 0)) == CONST
3026 && GET_CODE (XEXP (XEXP (operands
[0], 0), 0)) == PLUS
3027 && GET_CODE (XEXP (XEXP (XEXP (operands
[0], 0), 0), 0)) == SYMBOL_REF
3028 && MEM_SCALAR_P (operands
[0])
3029 && !MEM_IN_STRUCT_P (operands
[0])
3030 && !(INTVAL (XEXP (XEXP (XEXP (operands
[0], 0), 0), 1)) %4)
3031 && GET_CODE (XEXP (operands
[2], 0)) == CONST
3032 && GET_CODE (XEXP (XEXP (operands
[2], 0), 0)) == PLUS
3033 && GET_CODE (XEXP (XEXP (XEXP (operands
[2], 0), 0), 0)) == SYMBOL_REF
3034 && MEM_SCALAR_P (operands
[2])
3035 && !MEM_IN_STRUCT_P (operands
[2]))
3038 else if (GET_CODE (XEXP (operands
[0], 0)) == PLUS
3039 && GET_CODE (XEXP (XEXP (operands
[0], 0), 0)) == REG
3040 && REGNO (XEXP (XEXP (operands
[0], 0), 0)) == FB_REGNO
3041 && GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
3042 && MEM_SCALAR_P (operands
[0])
3043 && !MEM_IN_STRUCT_P (operands
[0])
3044 && !(INTVAL (XEXP (XEXP (operands
[0], 0), 1)) %4)
3045 && REGNO (XEXP (XEXP (operands
[2], 0), 0)) == FB_REGNO
3046 && GET_CODE (XEXP (XEXP (operands
[2], 0), 1)) == CONST_INT
3047 && MEM_SCALAR_P (operands
[2])
3048 && !MEM_IN_STRUCT_P (operands
[2]))
3057 str1
= XSTR (XEXP (operands
[0], 0), 0);
3058 str2
= XSTR (XEXP (XEXP (XEXP (operands
[2], 0), 0), 0), 0);
3059 if (strcmp (str1
, str2
) == 0)
3065 str1
= XSTR (XEXP (XEXP (XEXP (operands
[0], 0), 0), 0), 0);
3066 str2
= XSTR (XEXP (XEXP (XEXP (operands
[2], 0), 0), 0), 0);
3067 if (strcmp(str1
,str2
) == 0)
3073 offset1
= INTVAL (XEXP (XEXP (operands
[0], 0), 1));
3074 offset2
= INTVAL (XEXP (XEXP (operands
[2], 0), 1));
3075 offsetsign
= offset1
>> ((sizeof (offset1
) * 8) -1);
3076 if (((offset2
-offset1
) == 2) && offsetsign
!= 0)
3088 operands
[4] = gen_rtx_MEM (SImode
, XEXP (operands
[0], 0));
3090 val
= (INTVAL (operands
[3]) << 16) + (INTVAL (operands
[1]) & 0xFFFF);
3091 operands
[5] = gen_rtx_CONST_INT (VOIDmode
, val
);
3101 /* Subregs are non-orthogonal for us, because our registers are all
3104 m32c_subreg (enum machine_mode outer
,
3105 rtx x
, enum machine_mode inner
, int byte
)
3109 /* Converting MEMs to different types that are the same size, we
3110 just rewrite them. */
3111 if (GET_CODE (x
) == SUBREG
3112 && SUBREG_BYTE (x
) == 0
3113 && GET_CODE (SUBREG_REG (x
)) == MEM
3114 && (GET_MODE_SIZE (GET_MODE (x
))
3115 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
)))))
3118 x
= gen_rtx_MEM (GET_MODE (x
), XEXP (SUBREG_REG (x
), 0));
3119 MEM_COPY_ATTRIBUTES (x
, SUBREG_REG (oldx
));
3122 /* Push/pop get done as smaller push/pops. */
3123 if (GET_CODE (x
) == MEM
3124 && (GET_CODE (XEXP (x
, 0)) == PRE_DEC
3125 || GET_CODE (XEXP (x
, 0)) == POST_INC
))
3126 return gen_rtx_MEM (outer
, XEXP (x
, 0));
3127 if (GET_CODE (x
) == SUBREG
3128 && GET_CODE (XEXP (x
, 0)) == MEM
3129 && (GET_CODE (XEXP (XEXP (x
, 0), 0)) == PRE_DEC
3130 || GET_CODE (XEXP (XEXP (x
, 0), 0)) == POST_INC
))
3131 return gen_rtx_MEM (outer
, XEXP (XEXP (x
, 0), 0));
3133 if (GET_CODE (x
) != REG
)
3134 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3137 if (r
>= FIRST_PSEUDO_REGISTER
|| r
== AP_REGNO
)
3138 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3140 if (IS_MEM_REGNO (r
))
3141 return simplify_gen_subreg (outer
, x
, inner
, byte
);
3143 /* This is where the complexities of our register layout are
3147 else if (outer
== HImode
)
3149 if (r
== R0_REGNO
&& byte
== 2)
3151 else if (r
== R0_REGNO
&& byte
== 4)
3153 else if (r
== R0_REGNO
&& byte
== 6)
3155 else if (r
== R1_REGNO
&& byte
== 2)
3157 else if (r
== A0_REGNO
&& byte
== 2)
3160 else if (outer
== SImode
)
3162 if (r
== R0_REGNO
&& byte
== 0)
3164 else if (r
== R0_REGNO
&& byte
== 4)
3169 fprintf (stderr
, "m32c_subreg %s %s %d\n",
3170 mode_name
[outer
], mode_name
[inner
], byte
);
3174 return gen_rtx_REG (outer
, nr
);
3177 /* Used to emit move instructions. We split some moves,
3178 and avoid mem-mem moves. */
3180 m32c_prepare_move (rtx
* operands
, enum machine_mode mode
)
3182 if (TARGET_A16
&& mode
== PSImode
)
3183 return m32c_split_move (operands
, mode
, 1);
3184 if ((GET_CODE (operands
[0]) == MEM
)
3185 && (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
))
3187 rtx pmv
= XEXP (operands
[0], 0);
3188 rtx dest_reg
= XEXP (pmv
, 0);
3189 rtx dest_mod
= XEXP (pmv
, 1);
3191 emit_insn (gen_rtx_SET (Pmode
, dest_reg
, dest_mod
));
3192 operands
[0] = gen_rtx_MEM (mode
, dest_reg
);
3194 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3195 operands
[1] = copy_to_mode_reg (mode
, operands
[1]);
3199 #define DEBUG_SPLIT 0
3201 /* Returns TRUE if the given PSImode move should be split. We split
3202 for all r8c/m16c moves, since it doesn't support them, and for
3203 POP.L as we can only *push* SImode. */
3205 m32c_split_psi_p (rtx
* operands
)
3208 fprintf (stderr
, "\nm32c_split_psi_p\n");
3209 debug_rtx (operands
[0]);
3210 debug_rtx (operands
[1]);
3215 fprintf (stderr
, "yes, A16\n");
3219 if (GET_CODE (operands
[1]) == MEM
3220 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3223 fprintf (stderr
, "yes, pop.l\n");
3228 fprintf (stderr
, "no, default\n");
3233 /* Split the given move. SPLIT_ALL is 0 if splitting is optional
3234 (define_expand), 1 if it is not optional (define_insn_and_split),
3235 and 3 for define_split (alternate api). */
3237 m32c_split_move (rtx
* operands
, enum machine_mode mode
, int split_all
)
3240 int parts
, si
, di
, rev
= 0;
3241 int rv
= 0, opi
= 2;
3242 enum machine_mode submode
= HImode
;
3243 rtx
*ops
, local_ops
[10];
3245 /* define_split modifies the existing operands, but the other two
3246 emit new insns. OPS is where we store the operand pairs, which
3257 /* Before splitting mem-mem moves, force one operand into a
3259 if (can_create_pseudo_p () && MEM_P (operands
[0]) && MEM_P (operands
[1]))
3262 fprintf (stderr
, "force_reg...\n");
3263 debug_rtx (operands
[1]);
3265 operands
[1] = force_reg (mode
, operands
[1]);
3267 debug_rtx (operands
[1]);
3274 fprintf (stderr
, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3276 debug_rtx (operands
[0]);
3277 debug_rtx (operands
[1]);
3280 /* Note that split_all is not used to select the api after this
3281 point, so it's safe to set it to 3 even with define_insn. */
3282 /* None of the chips can move SI operands to sp-relative addresses,
3283 so we always split those. */
3284 if (m32c_extra_constraint_p (operands
[0], 'S', "Ss"))
3287 /* We don't need to split these. */
3290 && (mode
== SImode
|| mode
== PSImode
)
3291 && !(GET_CODE (operands
[1]) == MEM
3292 && GET_CODE (XEXP (operands
[1], 0)) == POST_INC
))
3295 /* First, enumerate the subregs we'll be dealing with. */
3296 for (si
= 0; si
< parts
; si
++)
3299 m32c_subreg (submode
, operands
[0], mode
,
3300 si
* GET_MODE_SIZE (submode
));
3302 m32c_subreg (submode
, operands
[1], mode
,
3303 si
* GET_MODE_SIZE (submode
));
3306 /* Split pushes by emitting a sequence of smaller pushes. */
3307 if (GET_CODE (d
[0]) == MEM
&& GET_CODE (XEXP (d
[0], 0)) == PRE_DEC
)
3309 for (si
= parts
- 1; si
>= 0; si
--)
3311 ops
[opi
++] = gen_rtx_MEM (submode
,
3312 gen_rtx_PRE_DEC (Pmode
,
3320 /* Likewise for pops. */
3321 else if (GET_CODE (s
[0]) == MEM
&& GET_CODE (XEXP (s
[0], 0)) == POST_INC
)
3323 for (di
= 0; di
< parts
; di
++)
3326 ops
[opi
++] = gen_rtx_MEM (submode
,
3327 gen_rtx_POST_INC (Pmode
,
3335 /* if d[di] == s[si] for any di < si, we'll early clobber. */
3336 for (di
= 0; di
< parts
- 1; di
++)
3337 for (si
= di
+ 1; si
< parts
; si
++)
3338 if (reg_mentioned_p (d
[di
], s
[si
]))
3342 for (si
= 0; si
< parts
; si
++)
3348 for (si
= parts
- 1; si
>= 0; si
--)
3355 /* Now emit any moves we may have accumulated. */
3356 if (rv
&& split_all
!= 3)
3359 for (i
= 2; i
< opi
; i
+= 2)
3360 emit_move_insn (ops
[i
], ops
[i
+ 1]);
3365 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3366 the like. For the R8C they expect one of the addresses to be in
3367 R1L:An so we need to arrange for that. Otherwise, it's just a
3368 matter of picking out the operands we want and emitting the right
3369 pattern for them. All these expanders, which correspond to
3370 patterns in blkmov.md, must return nonzero if they expand the insn,
3371 or zero if they should FAIL. */
3373 /* This is a memset() opcode. All operands are implied, so we need to
3374 arrange for them to be in the right registers. The opcode wants
3375 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3376 the count (HI), and $2 the value (QI). */
3378 m32c_expand_setmemhi(rtx
*operands
)
3380 rtx desta
, count
, val
;
3383 desta
= XEXP (operands
[0], 0);
3384 count
= operands
[1];
3387 desto
= gen_reg_rtx (Pmode
);
3388 counto
= gen_reg_rtx (HImode
);
3390 if (GET_CODE (desta
) != REG
3391 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3392 desta
= copy_to_mode_reg (Pmode
, desta
);
3394 /* This looks like an arbitrary restriction, but this is by far the
3395 most common case. For counts 8..14 this actually results in
3396 smaller code with no speed penalty because the half-sized
3397 constant can be loaded with a shorter opcode. */
3398 if (GET_CODE (count
) == CONST_INT
3399 && GET_CODE (val
) == CONST_INT
3400 && ! (INTVAL (count
) & 1)
3401 && (INTVAL (count
) > 1)
3402 && (INTVAL (val
) <= 7 && INTVAL (val
) >= -8))
3404 unsigned v
= INTVAL (val
) & 0xff;
3406 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3407 val
= copy_to_mode_reg (HImode
, GEN_INT (v
));
3409 emit_insn (gen_setmemhi_whi_op (desto
, counto
, val
, desta
, count
));
3411 emit_insn (gen_setmemhi_wpsi_op (desto
, counto
, val
, desta
, count
));
3415 /* This is the generalized memset() case. */
3416 if (GET_CODE (val
) != REG
3417 || REGNO (val
) < FIRST_PSEUDO_REGISTER
)
3418 val
= copy_to_mode_reg (QImode
, val
);
3420 if (GET_CODE (count
) != REG
3421 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3422 count
= copy_to_mode_reg (HImode
, count
);
3425 emit_insn (gen_setmemhi_bhi_op (desto
, counto
, val
, desta
, count
));
3427 emit_insn (gen_setmemhi_bpsi_op (desto
, counto
, val
, desta
, count
));
3432 /* This is a memcpy() opcode. All operands are implied, so we need to
3433 arrange for them to be in the right registers. The opcode wants
3434 addresses, not [mem] syntax. $0 is the destination (MEM:BLK), $1
3435 is the source (MEM:BLK), and $2 the count (HI). */
3437 m32c_expand_movmemhi(rtx
*operands
)
3439 rtx desta
, srca
, count
;
3440 rtx desto
, srco
, counto
;
3442 desta
= XEXP (operands
[0], 0);
3443 srca
= XEXP (operands
[1], 0);
3444 count
= operands
[2];
3446 desto
= gen_reg_rtx (Pmode
);
3447 srco
= gen_reg_rtx (Pmode
);
3448 counto
= gen_reg_rtx (HImode
);
3450 if (GET_CODE (desta
) != REG
3451 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3452 desta
= copy_to_mode_reg (Pmode
, desta
);
3454 if (GET_CODE (srca
) != REG
3455 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3456 srca
= copy_to_mode_reg (Pmode
, srca
);
3458 /* Similar to setmem, but we don't need to check the value. */
3459 if (GET_CODE (count
) == CONST_INT
3460 && ! (INTVAL (count
) & 1)
3461 && (INTVAL (count
) > 1))
3463 count
= copy_to_mode_reg (HImode
, GEN_INT (INTVAL (count
) / 2));
3465 emit_insn (gen_movmemhi_whi_op (desto
, srco
, counto
, desta
, srca
, count
));
3467 emit_insn (gen_movmemhi_wpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3471 /* This is the generalized memset() case. */
3472 if (GET_CODE (count
) != REG
3473 || REGNO (count
) < FIRST_PSEUDO_REGISTER
)
3474 count
= copy_to_mode_reg (HImode
, count
);
3477 emit_insn (gen_movmemhi_bhi_op (desto
, srco
, counto
, desta
, srca
, count
));
3479 emit_insn (gen_movmemhi_bpsi_op (desto
, srco
, counto
, desta
, srca
, count
));
3484 /* This is a stpcpy() opcode. $0 is the destination (MEM:BLK) after
3485 the copy, which should point to the NUL at the end of the string,
3486 $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3487 Since our opcode leaves the destination pointing *after* the NUL,
3488 we must emit an adjustment. */
3490 m32c_expand_movstr(rtx
*operands
)
3495 desta
= XEXP (operands
[1], 0);
3496 srca
= XEXP (operands
[2], 0);
3498 desto
= gen_reg_rtx (Pmode
);
3499 srco
= gen_reg_rtx (Pmode
);
3501 if (GET_CODE (desta
) != REG
3502 || REGNO (desta
) < FIRST_PSEUDO_REGISTER
)
3503 desta
= copy_to_mode_reg (Pmode
, desta
);
3505 if (GET_CODE (srca
) != REG
3506 || REGNO (srca
) < FIRST_PSEUDO_REGISTER
)
3507 srca
= copy_to_mode_reg (Pmode
, srca
);
3509 emit_insn (gen_movstr_op (desto
, srco
, desta
, srca
));
3510 /* desto ends up being a1, which allows this type of add through MOVA. */
3511 emit_insn (gen_addpsi3 (operands
[0], desto
, GEN_INT (-1)));
3516 /* This is a strcmp() opcode. $0 is the destination (HI) which holds
3517 <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3518 $2 is the other (MEM:BLK). We must do the comparison, and then
3519 convert the flags to a signed integer result. */
3521 m32c_expand_cmpstr(rtx
*operands
)
3525 src1a
= XEXP (operands
[1], 0);
3526 src2a
= XEXP (operands
[2], 0);
3528 if (GET_CODE (src1a
) != REG
3529 || REGNO (src1a
) < FIRST_PSEUDO_REGISTER
)
3530 src1a
= copy_to_mode_reg (Pmode
, src1a
);
3532 if (GET_CODE (src2a
) != REG
3533 || REGNO (src2a
) < FIRST_PSEUDO_REGISTER
)
3534 src2a
= copy_to_mode_reg (Pmode
, src2a
);
3536 emit_insn (gen_cmpstrhi_op (src1a
, src2a
, src1a
, src2a
));
3537 emit_insn (gen_cond_to_int (operands
[0]));
3543 typedef rtx (*shift_gen_func
)(rtx
, rtx
, rtx
);
3545 static shift_gen_func
3546 shift_gen_func_for (int mode
, int code
)
3548 #define GFF(m,c,f) if (mode == m && code == c) return f
3549 GFF(QImode
, ASHIFT
, gen_ashlqi3_i
);
3550 GFF(QImode
, ASHIFTRT
, gen_ashrqi3_i
);
3551 GFF(QImode
, LSHIFTRT
, gen_lshrqi3_i
);
3552 GFF(HImode
, ASHIFT
, gen_ashlhi3_i
);
3553 GFF(HImode
, ASHIFTRT
, gen_ashrhi3_i
);
3554 GFF(HImode
, LSHIFTRT
, gen_lshrhi3_i
);
3555 GFF(PSImode
, ASHIFT
, gen_ashlpsi3_i
);
3556 GFF(PSImode
, ASHIFTRT
, gen_ashrpsi3_i
);
3557 GFF(PSImode
, LSHIFTRT
, gen_lshrpsi3_i
);
3558 GFF(SImode
, ASHIFT
, TARGET_A16
? gen_ashlsi3_16
: gen_ashlsi3_24
);
3559 GFF(SImode
, ASHIFTRT
, TARGET_A16
? gen_ashrsi3_16
: gen_ashrsi3_24
);
3560 GFF(SImode
, LSHIFTRT
, TARGET_A16
? gen_lshrsi3_16
: gen_lshrsi3_24
);
3565 /* The m32c only has one shift, but it takes a signed count. GCC
3566 doesn't want this, so we fake it by negating any shift count when
3567 we're pretending to shift the other way. Also, the shift count is
3568 limited to -8..8. It's slightly better to use two shifts for 9..15
3569 than to load the count into r1h, so we do that too. */
3571 m32c_prepare_shift (rtx
* operands
, int scale
, int shift_code
)
3573 enum machine_mode mode
= GET_MODE (operands
[0]);
3574 shift_gen_func func
= shift_gen_func_for (mode
, shift_code
);
3577 if (GET_CODE (operands
[2]) == CONST_INT
)
3579 int maxc
= TARGET_A24
&& (mode
== PSImode
|| mode
== SImode
) ? 32 : 8;
3580 int count
= INTVAL (operands
[2]) * scale
;
3582 while (count
> maxc
)
3584 temp
= gen_reg_rtx (mode
);
3585 emit_insn (func (temp
, operands
[1], GEN_INT (maxc
)));
3589 while (count
< -maxc
)
3591 temp
= gen_reg_rtx (mode
);
3592 emit_insn (func (temp
, operands
[1], GEN_INT (-maxc
)));
3596 emit_insn (func (operands
[0], operands
[1], GEN_INT (count
)));
3600 temp
= gen_reg_rtx (QImode
);
3602 /* The pattern has a NEG that corresponds to this. */
3603 emit_move_insn (temp
, gen_rtx_NEG (QImode
, operands
[2]));
3604 else if (TARGET_A16
&& mode
== SImode
)
3605 /* We do this because the code below may modify this, we don't
3606 want to modify the origin of this value. */
3607 emit_move_insn (temp
, operands
[2]);
3609 /* We'll only use it for the shift, no point emitting a move. */
3612 if (TARGET_A16
&& GET_MODE_SIZE (mode
) == 4)
3614 /* The m16c has a limit of -16..16 for SI shifts, even when the
3615 shift count is in a register. Since there are so many targets
3616 of these shifts, it's better to expand the RTL here than to
3617 call a helper function.
3619 The resulting code looks something like this:
3631 We take advantage of the fact that "negative" shifts are
3632 undefined to skip one of the comparisons. */
3635 rtx label
, lref
, insn
, tempvar
;
3637 emit_move_insn (operands
[0], operands
[1]);
3640 label
= gen_label_rtx ();
3641 lref
= gen_rtx_LABEL_REF (VOIDmode
, label
);
3642 LABEL_NUSES (label
) ++;
3644 tempvar
= gen_reg_rtx (mode
);
3646 if (shift_code
== ASHIFT
)
3648 /* This is a left shift. We only need check positive counts. */
3649 emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode
, 0, 0),
3650 count
, GEN_INT (16), label
));
3651 emit_insn (func (tempvar
, operands
[0], GEN_INT (8)));
3652 emit_insn (func (operands
[0], tempvar
, GEN_INT (8)));
3653 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (-16)));
3654 emit_label_after (label
, insn
);
3658 /* This is a right shift. We only need check negative counts. */
3659 emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode
, 0, 0),
3660 count
, GEN_INT (-16), label
));
3661 emit_insn (func (tempvar
, operands
[0], GEN_INT (-8)));
3662 emit_insn (func (operands
[0], tempvar
, GEN_INT (-8)));
3663 insn
= emit_insn (gen_addqi3 (count
, count
, GEN_INT (16)));
3664 emit_label_after (label
, insn
);
3666 operands
[1] = operands
[0];
3667 emit_insn (func (operands
[0], operands
[0], count
));
3675 /* The m32c has a limited range of operations that work on PSImode
3676 values; we have to expand to SI, do the math, and truncate back to
3677 PSI. Yes, this is expensive, but hopefully gcc will learn to avoid
3680 m32c_expand_neg_mulpsi3 (rtx
* operands
)
3682 /* operands: a = b * i */
3683 rtx temp1
; /* b as SI */
3684 rtx scale
/* i as SI */;
3685 rtx temp2
; /* a*b as SI */
3687 temp1
= gen_reg_rtx (SImode
);
3688 temp2
= gen_reg_rtx (SImode
);
3689 if (GET_CODE (operands
[2]) != CONST_INT
)
3691 scale
= gen_reg_rtx (SImode
);
3692 emit_insn (gen_zero_extendpsisi2 (scale
, operands
[2]));
3695 scale
= copy_to_mode_reg (SImode
, operands
[2]);
3697 emit_insn (gen_zero_extendpsisi2 (temp1
, operands
[1]));
3698 temp2
= expand_simple_binop (SImode
, MULT
, temp1
, scale
, temp2
, 1, OPTAB_LIB
);
3699 emit_insn (gen_truncsipsi2 (operands
[0], temp2
));
3702 static rtx compare_op0
, compare_op1
;
3705 m32c_pend_compare (rtx
*operands
)
3707 compare_op0
= operands
[0];
3708 compare_op1
= operands
[1];
3712 m32c_unpend_compare (void)
3714 switch (GET_MODE (compare_op0
))
3717 emit_insn (gen_cmpqi_op (compare_op0
, compare_op1
));
3719 emit_insn (gen_cmphi_op (compare_op0
, compare_op1
));
3721 emit_insn (gen_cmppsi_op (compare_op0
, compare_op1
));
3723 /* Just to silence the "missing case" warnings. */ ;
3728 m32c_expand_scc (int code
, rtx
*operands
)
3730 enum machine_mode mode
= TARGET_A16
? QImode
: HImode
;
3732 emit_insn (gen_rtx_SET (mode
,
3734 gen_rtx_fmt_ee (code
,
3740 /* Pattern Output Functions */
3742 /* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
3743 match_operand rtx's OP. */
3745 m32c_cmp_flg_0 (rtx cmp
)
3747 return gen_rtx_fmt_ee (GET_CODE (cmp
),
3749 gen_rtx_REG (CCmode
, FLG_REGNO
),
3754 m32c_expand_movcc (rtx
*operands
)
3756 rtx rel
= operands
[1];
3759 if (GET_CODE (rel
) != EQ
&& GET_CODE (rel
) != NE
)
3761 if (GET_CODE (operands
[2]) != CONST_INT
3762 || GET_CODE (operands
[3]) != CONST_INT
)
3764 emit_insn (gen_cmpqi(XEXP (rel
, 0), XEXP (rel
, 1)));
3765 if (GET_CODE (rel
) == NE
)
3767 rtx tmp
= operands
[2];
3768 operands
[2] = operands
[3];
3772 cmp
= gen_rtx_fmt_ee (GET_CODE (rel
),
3777 emit_move_insn (operands
[0],
3778 gen_rtx_IF_THEN_ELSE (GET_MODE (operands
[0]),
3785 /* Used for the "insv" pattern. Return nonzero to fail, else done. */
3787 m32c_expand_insv (rtx
*operands
)
3792 if (INTVAL (operands
[1]) != 1)
3795 /* Our insv opcode (bset, bclr) can only insert a one-bit constant. */
3796 if (GET_CODE (operands
[3]) != CONST_INT
)
3798 if (INTVAL (operands
[3]) != 0
3799 && INTVAL (operands
[3]) != 1
3800 && INTVAL (operands
[3]) != -1)
3803 mask
= 1 << INTVAL (operands
[2]);
3806 if (GET_CODE (op0
) == SUBREG
3807 && SUBREG_BYTE (op0
) == 0)
3809 rtx sub
= SUBREG_REG (op0
);
3810 if (GET_MODE (sub
) == HImode
|| GET_MODE (sub
) == QImode
)
3814 if (!can_create_pseudo_p ()
3815 || (GET_CODE (op0
) == MEM
&& MEM_VOLATILE_P (op0
)))
3819 src0
= gen_reg_rtx (GET_MODE (op0
));
3820 emit_move_insn (src0
, op0
);
3823 if (GET_MODE (op0
) == HImode
3824 && INTVAL (operands
[2]) >= 8
3825 && GET_MODE (op0
) == MEM
)
3827 /* We are little endian. */
3828 rtx new_mem
= gen_rtx_MEM (QImode
, plus_constant (XEXP (op0
, 0), 1));
3829 MEM_COPY_ATTRIBUTES (new_mem
, op0
);
3833 /* First, we generate a mask with the correct polarity. If we are
3834 storing a zero, we want an AND mask, so invert it. */
3835 if (INTVAL (operands
[3]) == 0)
3837 /* Storing a zero, use an AND mask */
3838 if (GET_MODE (op0
) == HImode
)
3843 /* Now we need to properly sign-extend the mask in case we need to
3844 fall back to an AND or OR opcode. */
3845 if (GET_MODE (op0
) == HImode
)
3856 switch ( (INTVAL (operands
[3]) ? 4 : 0)
3857 + ((GET_MODE (op0
) == HImode
) ? 2 : 0)
3858 + (TARGET_A24
? 1 : 0))
3860 case 0: p
= gen_andqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3861 case 1: p
= gen_andqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3862 case 2: p
= gen_andhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3863 case 3: p
= gen_andhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3864 case 4: p
= gen_iorqi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3865 case 5: p
= gen_iorqi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3866 case 6: p
= gen_iorhi3_16 (op0
, src0
, GEN_INT (mask
)); break;
3867 case 7: p
= gen_iorhi3_24 (op0
, src0
, GEN_INT (mask
)); break;
3875 m32c_scc_pattern(rtx
*operands
, RTX_CODE code
)
3877 static char buf
[30];
3878 if (GET_CODE (operands
[0]) == REG
3879 && REGNO (operands
[0]) == R0_REGNO
)
3882 return "stzx\t#1,#0,r0l";
3884 return "stzx\t#0,#1,r0l";
3886 sprintf(buf
, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code
));
3890 /* Encode symbol attributes of a SYMBOL_REF into its
3891 SYMBOL_REF_FLAGS. */
3893 m32c_encode_section_info (tree decl
, rtx rtl
, int first
)
3895 int extra_flags
= 0;
3897 default_encode_section_info (decl
, rtl
, first
);
3898 if (TREE_CODE (decl
) == FUNCTION_DECL
3899 && m32c_special_page_vector_p (decl
))
3901 extra_flags
= SYMBOL_FLAG_FUNCVEC_FUNCTION
;
3904 SYMBOL_REF_FLAGS (XEXP (rtl
, 0)) |= extra_flags
;
3907 /* Returns TRUE if the current function is a leaf, and thus we can
3908 determine which registers an interrupt function really needs to
3909 save. The logic below is mostly about finding the insn sequence
3910 that's the function, versus any sequence that might be open for the
3913 m32c_leaf_function_p (void)
3915 rtx saved_first
, saved_last
;
3916 struct sequence_stack
*seq
;
3919 saved_first
= crtl
->emit
.x_first_insn
;
3920 saved_last
= crtl
->emit
.x_last_insn
;
3921 for (seq
= crtl
->emit
.sequence_stack
; seq
&& seq
->next
; seq
= seq
->next
)
3925 crtl
->emit
.x_first_insn
= seq
->first
;
3926 crtl
->emit
.x_last_insn
= seq
->last
;
3929 rv
= leaf_function_p ();
3931 crtl
->emit
.x_first_insn
= saved_first
;
3932 crtl
->emit
.x_last_insn
= saved_last
;
3936 /* Returns TRUE if the current function needs to use the ENTER/EXIT
3937 opcodes. If the function doesn't need the frame base or stack
3938 pointer, it can use the simpler RTS opcode. */
3940 m32c_function_needs_enter (void)
3943 struct sequence_stack
*seq
;
3944 rtx sp
= gen_rtx_REG (Pmode
, SP_REGNO
);
3945 rtx fb
= gen_rtx_REG (Pmode
, FB_REGNO
);
3947 insn
= get_insns ();
3948 for (seq
= crtl
->emit
.sequence_stack
;
3950 insn
= seq
->first
, seq
= seq
->next
);
3954 if (reg_mentioned_p (sp
, insn
))
3956 if (reg_mentioned_p (fb
, insn
))
3958 insn
= NEXT_INSN (insn
);
3963 /* Mark all the subexpressions of the PARALLEL rtx PAR as
3964 frame-related. Return PAR.
3966 dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3967 PARALLEL rtx other than the first if they do not have the
3968 FRAME_RELATED flag set on them. So this function is handy for
3969 marking up 'enter' instructions. */
3971 m32c_all_frame_related (rtx par
)
3973 int len
= XVECLEN (par
, 0);
3976 for (i
= 0; i
< len
; i
++)
3977 F (XVECEXP (par
, 0, i
));
3982 /* Emits the prologue. See the frame layout comment earlier in this
3983 file. We can reserve up to 256 bytes with the ENTER opcode, beyond
3984 that we manually update sp. */
3986 m32c_emit_prologue (void)
3988 int frame_size
, extra_frame_size
= 0, reg_save_size
;
3989 int complex_prologue
= 0;
3991 cfun
->machine
->is_leaf
= m32c_leaf_function_p ();
3992 if (interrupt_p (cfun
->decl
))
3994 cfun
->machine
->is_interrupt
= 1;
3995 complex_prologue
= 1;
3998 reg_save_size
= m32c_pushm_popm (PP_justcount
);
4000 if (interrupt_p (cfun
->decl
))
4001 emit_insn (gen_pushm (GEN_INT (cfun
->machine
->intr_pushm
)));
4004 m32c_initial_elimination_offset (FB_REGNO
, SP_REGNO
) - reg_save_size
;
4006 && !cfun
->machine
->is_interrupt
4007 && !m32c_function_needs_enter ())
4008 cfun
->machine
->use_rts
= 1;
4010 if (frame_size
> 254)
4012 extra_frame_size
= frame_size
- 254;
4015 if (cfun
->machine
->use_rts
== 0)
4016 F (emit_insn (m32c_all_frame_related
4018 ? gen_prologue_enter_16 (GEN_INT (frame_size
+ 2))
4019 : gen_prologue_enter_24 (GEN_INT (frame_size
+ 4)))));
4021 if (extra_frame_size
)
4023 complex_prologue
= 1;
4025 F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode
, SP_REGNO
),
4026 gen_rtx_REG (HImode
, SP_REGNO
),
4027 GEN_INT (-extra_frame_size
))));
4029 F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode
, SP_REGNO
),
4030 gen_rtx_REG (PSImode
, SP_REGNO
),
4031 GEN_INT (-extra_frame_size
))));
4034 complex_prologue
+= m32c_pushm_popm (PP_pushm
);
4036 /* This just emits a comment into the .s file for debugging. */
4037 if (complex_prologue
)
4038 emit_insn (gen_prologue_end ());
4041 /* Likewise, for the epilogue. The only exception is that, for
4042 interrupts, we must manually unwind the frame as the REIT opcode
4045 m32c_emit_epilogue (void)
4047 /* This just emits a comment into the .s file for debugging. */
4048 if (m32c_pushm_popm (PP_justcount
) > 0 || cfun
->machine
->is_interrupt
)
4049 emit_insn (gen_epilogue_start ());
4051 m32c_pushm_popm (PP_popm
);
4053 if (cfun
->machine
->is_interrupt
)
4055 enum machine_mode spmode
= TARGET_A16
? HImode
: PSImode
;
4057 emit_move_insn (gen_rtx_REG (spmode
, A0_REGNO
),
4058 gen_rtx_REG (spmode
, FP_REGNO
));
4059 emit_move_insn (gen_rtx_REG (spmode
, SP_REGNO
),
4060 gen_rtx_REG (spmode
, A0_REGNO
));
4062 emit_insn (gen_pophi_16 (gen_rtx_REG (HImode
, FP_REGNO
)));
4064 emit_insn (gen_poppsi (gen_rtx_REG (PSImode
, FP_REGNO
)));
4065 emit_insn (gen_popm (GEN_INT (cfun
->machine
->intr_pushm
)));
4067 emit_jump_insn (gen_epilogue_reit_16 ());
4069 emit_jump_insn (gen_epilogue_reit_24 ());
4071 else if (cfun
->machine
->use_rts
)
4072 emit_jump_insn (gen_epilogue_rts ());
4073 else if (TARGET_A16
)
4074 emit_jump_insn (gen_epilogue_exitd_16 ());
4076 emit_jump_insn (gen_epilogue_exitd_24 ());
4081 m32c_emit_eh_epilogue (rtx ret_addr
)
4083 /* R0[R2] has the stack adjustment. R1[R3] has the address to
4084 return to. We have to fudge the stack, pop everything, pop SP
4085 (fudged), and return (fudged). This is actually easier to do in
4086 assembler, so punt to libgcc. */
4087 emit_jump_insn (gen_eh_epilogue (ret_addr
, cfun
->machine
->eh_stack_adjust
));
4088 /* emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4092 /* Indicate which flags must be properly set for a given conditional. */
4094 flags_needed_for_conditional (rtx cond
)
4096 switch (GET_CODE (cond
))
4120 /* Returns true if a compare insn is redundant because it would only
4121 set flags that are already set correctly. */
4123 m32c_compare_redundant (rtx cmp
, rtx
*operands
)
4138 fprintf(stderr
, "\n\033[32mm32c_compare_redundant\033[0m\n");
4142 fprintf(stderr
, "operands[%d] = ", i
);
4143 debug_rtx(operands
[i
]);
4147 next
= next_nonnote_insn (cmp
);
4148 if (!next
|| !INSN_P (next
))
4151 fprintf(stderr
, "compare not followed by insn\n");
4156 if (GET_CODE (PATTERN (next
)) == SET
4157 && GET_CODE (XEXP ( PATTERN (next
), 1)) == IF_THEN_ELSE
)
4159 next
= XEXP (XEXP (PATTERN (next
), 1), 0);
4161 else if (GET_CODE (PATTERN (next
)) == SET
)
4163 /* If this is a conditional, flags_needed will be something
4164 other than FLAGS_N, which we test below. */
4165 next
= XEXP (PATTERN (next
), 1);
4170 fprintf(stderr
, "compare not followed by conditional\n");
4176 fprintf(stderr
, "conditional is: ");
4180 flags_needed
= flags_needed_for_conditional (next
);
4181 if (flags_needed
== FLAGS_N
)
4184 fprintf(stderr
, "compare not followed by conditional\n");
4190 /* Compare doesn't set overflow and carry the same way that
4191 arithmetic instructions do, so we can't replace those. */
4192 if (flags_needed
& FLAGS_OC
)
4197 prev
= prev_nonnote_insn (prev
);
4201 fprintf(stderr
, "No previous insn.\n");
4208 fprintf(stderr
, "Previous insn is a non-insn.\n");
4212 pp
= PATTERN (prev
);
4213 if (GET_CODE (pp
) != SET
)
4216 fprintf(stderr
, "Previous insn is not a SET.\n");
4220 pflags
= get_attr_flags (prev
);
4222 /* Looking up attributes of previous insns corrupted the recog
4224 INSN_UID (cmp
) = -1;
4225 recog (PATTERN (cmp
), cmp
, 0);
4227 if (pflags
== FLAGS_N
4228 && reg_mentioned_p (op0
, pp
))
4231 fprintf(stderr
, "intermediate non-flags insn uses op:\n");
4236 } while (pflags
== FLAGS_N
);
4238 fprintf(stderr
, "previous flag-setting insn:\n");
4243 if (GET_CODE (pp
) == SET
4244 && GET_CODE (XEXP (pp
, 0)) == REG
4245 && REGNO (XEXP (pp
, 0)) == FLG_REGNO
4246 && GET_CODE (XEXP (pp
, 1)) == COMPARE
)
4248 /* Adjacent cbranches must have the same operands to be
4250 rtx pop0
= XEXP (XEXP (pp
, 1), 0);
4251 rtx pop1
= XEXP (XEXP (pp
, 1), 1);
4253 fprintf(stderr
, "adjacent cbranches\n");
4257 if (rtx_equal_p (op0
, pop0
)
4258 && rtx_equal_p (op1
, pop1
))
4261 fprintf(stderr
, "prev cmp not same\n");
4266 /* Else the previous insn must be a SET, with either the source or
4267 dest equal to operands[0], and operands[1] must be zero. */
4269 if (!rtx_equal_p (op1
, const0_rtx
))
4272 fprintf(stderr
, "operands[1] not const0_rtx\n");
4276 if (GET_CODE (pp
) != SET
)
4279 fprintf (stderr
, "pp not set\n");
4283 if (!rtx_equal_p (op0
, SET_SRC (pp
))
4284 && !rtx_equal_p (op0
, SET_DEST (pp
)))
4287 fprintf(stderr
, "operands[0] not found in set\n");
4293 fprintf(stderr
, "cmp flags %x prev flags %x\n", flags_needed
, pflags
);
4295 if ((pflags
& flags_needed
) == flags_needed
)
4301 /* Return the pattern for a compare. This will be commented out if
4302 the compare is redundant, else a normal pattern is returned. Thus,
4303 the assembler output says where the compare would have been. */
4305 m32c_output_compare (rtx insn
, rtx
*operands
)
4307 static char templ
[] = ";cmp.b\t%1,%0";
4310 templ
[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands
[0]))];
4311 if (m32c_compare_redundant (insn
, operands
))
4314 fprintf(stderr
, "cbranch: cmp not needed\n");
4320 fprintf(stderr
, "cbranch: cmp needed: `%s'\n", templ
);
4325 #undef TARGET_ENCODE_SECTION_INFO
4326 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4328 /* The Global `targetm' Variable. */
4330 struct gcc_target targetm
= TARGET_INITIALIZER
;
4332 #include "gt-m32c.h"