/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
   2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "obstack.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
#include "opts.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
/* The size of the callee register save area.  Right now we save everything
   on entry since it costs us nothing in code size.  It does cost us from a
   speed standpoint, so we want to optimize this sooner or later.  */
#define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2)	  \
			+ 4 * df_regs_ever_live_p (3)	  \
			+ 4 * df_regs_ever_live_p (6)	  \
			+ 4 * df_regs_ever_live_p (7)	  \
			+ 16 * (df_regs_ever_live_p (14)  \
				|| df_regs_ever_live_p (15) \
				|| df_regs_ever_live_p (16) \
				|| df_regs_ever_live_p (17)))
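
/* As a worked example (register numbers assumed from this port's usual
   layout, where 2/3 are d2/d3, 6/7 are a2/a3 and 14-17 are the
   callee-saved extended registers): a function that touches only d2 and
   a3 gets REG_SAVE_BYTES == 8, while touching any one of the extended
   registers adds the full 16 bytes, since they can only be saved as a
   group.  */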
/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
static const struct default_options mn10300_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };

#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_HANDLE_OPTION.  */

static bool
mn10300_handle_option (struct gcc_options *opts,
		       struct gcc_options *opts_set ATTRIBUTE_UNUSED,
		       const struct cl_decoded_option *decoded,
		       location_t loc ATTRIBUTE_UNUSED)
{
  size_t code = decoded->opt_index;
  int value = decoded->value;

  switch (code)
    {
    case OPT_mam33:
      opts->x_mn10300_processor = value ? PROCESSOR_AM33 : PROCESSOR_MN10300;
      return true;

    case OPT_mam33_2:
      opts->x_mn10300_processor = (value
				   ? PROCESSOR_AM33_2
				   : MIN (PROCESSOR_AM33, PROCESSOR_DEFAULT));
      return true;

    case OPT_mam34:
      opts->x_mn10300_processor = (value ? PROCESSOR_AM34 : PROCESSOR_DEFAULT);
      return true;

    default:
      return true;
    }
}
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}

/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	enum machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    /* This is the least significant word of a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;
	      case SFmode:
		gcc_unreachable ();
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long)INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

    /* For shift counts.  The hardware ignores the upper bits of
       any immediate, but the assembler will flag an out of range
       shift count as an error.  So we mask off the high bits
       of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	/* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;
	  }
	gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}
/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names [i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
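
/* For illustration (mask value assumed, not taken from real compiler
   output): a MASK with bits 2, 3, 6, 7 and 14-17 set would be printed
   as

       [d2,d3,a2,a3,exreg1]

   which is the operand syntax expected by "movm" and "ret".  */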
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.  */

int
mn10300_get_live_callee_saved_regs (void)
{
  int mask;
  int i;

  mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      mask |= (1 << i);
  if ((mask & 0x3c000) != 0)
    mask |= 0x3c000;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -N*4)))
	      (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F(x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F(x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
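
      /* A quick sanity check of the size macros above, with assumed
	 operand values: SIZE_ADD_SP (-4) == 3 (imm8 form),
	 SIZE_ADD_SP (-200) == 4 (imm16 form) and
	 SIZE_ADD_SP (-40000) == 6 (imm32 form); SIZE_ADD_AX is one
	 byte shorter in the imm8 case.  SIZE_FMOV_SP (0, 2) == 7,
	 i.e. one 3-byte fmov at offset zero plus one 4-byte fmov with
	 a d8 offset.  */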
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  int reg_save_bytes = REG_SAVE_BYTES;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;

	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			    + reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 couple of bytes.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }

	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (gen_rtx_RETURN (VOIDmode));
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
}
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

int
mn10300_store_multiple_operation (rtx op,
				  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B))  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4))  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem [xregno];
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
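
/* As a concrete (assumed) example of the rules above on plain MN103:
   a QImode load whose destination ends up in an address register
   cannot be done directly, so reload is told to use DATA_REGS as an
   intermediate; likewise there is no direct move between SP and a data
   register, so such moves bounce through ADDRESS_REGS.  */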
int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      diff += REG_SAVE_BYTES;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
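
/* Worked example (frame layout assumed): with a 16-byte local frame,
   d2 and d3 live, and no FP saves, eliminating ARG_POINTER_REGNUM in
   favor of STACK_POINTER_REGNUM yields 16 + 8 = 24, while eliminating
   it in favor of FRAME_POINTER_REGNUM yields just the 8-byte register
   save area.  */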
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			   enum machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
/* Return an RTX to represent where an argument of mode MODE will be
   passed to a function.  If the result is NULL_RTX, the argument is
   passed on the stack.  */

static rtx
mn10300_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
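
/* To illustrate the convention implemented above (argument types
   assumed): for a call f (int x, int y, int z), x is passed in the
   first argument register, y in the second and z on the stack; a
   leading long long occupies both registers as a pair.  A long long
   that would only half-fit in the registers goes entirely on the
   stack when no TYPE information is available (the libcall case).  */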
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  enum machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}

/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (enum machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}
/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
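
/* A sketch of the resulting assembly for the three-register case
   (register assignments assumed): for d0 = d1 + a0 on plain MN103,
   neither source matches the destination and no extended register is
   involved, so the code above emits "mov d1,d0" followed by
   "add a0,d0", copying the source that shares the destination's class
   to avoid the extra byte a cross-class move would cost.  */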
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
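
/* For instance (symbol and offset assumed): given the address
   (plus (reg a0) (const (plus (symbol_ref x) 100000))), the code above
   computes a0 + 100000 into a fresh register first and only then adds
   the symbol, so the out-of-range constant never becomes part of an
   indexed base address.  */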
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
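
/* The sequences produced above look roughly like this (pseudo assembly,
   register names assumed, with "pic" standing for whatever register
   holds the GOT pointer):

       local symbol:   mov sym@GOTOFF,reg
		       add pic,reg
       global symbol:  mov (sym@GOT,pic),reg

   with a REG_EQUAL note pointing back at the original symbol so the
   optimizers can still see through it.  */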
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      if (!reg_renumber)
	return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
	return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}

rtx
mn10300_legitimize_reload_address (rtx x,
				   enum machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
/* Used by LEGITIMATE_CONSTANT_P().  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

bool
mn10300_legitimate_constant_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      break;
    }

  return true;
}
2101 /* Undo pic address legitimization for the benefit of debug info. */
2103 static rtx
2104 mn10300_delegitimize_address (rtx orig_x)
2106 rtx x = orig_x, ret, addend = NULL;
2107 bool need_mem;
2109 if (MEM_P (x))
2110 x = XEXP (x, 0);
2111 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2112 return orig_x;
2114 if (XEXP (x, 0) == pic_offset_table_rtx)
2116 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2117 some odd-looking "addresses" that were never valid in the first place.
2118 We need to look harder to avoid warnings being emitted. */
2119 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2121 rtx x0 = XEXP (x, 0);
2122 rtx x00 = XEXP (x0, 0);
2123 rtx x01 = XEXP (x0, 1);
2125 if (x00 == pic_offset_table_rtx)
2126 addend = x01;
2127 else if (x01 == pic_offset_table_rtx)
2128 addend = x00;
2129 else
2130 return orig_x;
2133 else
2134 return orig_x;
2135 x = XEXP (x, 1);
2137 if (GET_CODE (x) != CONST)
2138 return orig_x;
2139 x = XEXP (x, 0);
2140 if (GET_CODE (x) != UNSPEC)
2141 return orig_x;
2143 ret = XVECEXP (x, 0, 0);
2144 if (XINT (x, 1) == UNSPEC_GOTOFF)
2145 need_mem = false;
2146 else if (XINT (x, 1) == UNSPEC_GOT)
2147 need_mem = true;
2148 else
2149 return orig_x;
2151 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2152 if (need_mem != MEM_P (orig_x))
2153 return orig_x;
2154 if (need_mem && addend)
2155 return orig_x;
2156 if (addend)
2157 ret = gen_rtx_PLUS (Pmode, addend, ret);
2158 return ret;
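/* For example (hand-worked): given the legitimized PIC address
     (plus:SI (reg:SI pic) (const (unspec [(symbol_ref "x")] GOTOFF)))
   where "pic" is the PIC offset table register, this routine returns
   (symbol_ref "x"), so that debug info refers to the symbol itself
   rather than to the GOT-relative expression.  */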
2161 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2162 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2163 with an address register. */
2165 static int
2166 mn10300_address_cost (rtx x, bool speed)
2168 HOST_WIDE_INT i;
2169 rtx base, index;
2171 switch (GET_CODE (x))
2173 case CONST:
2174 case SYMBOL_REF:
2175 case LABEL_REF:
2176 /* We assume all of these require a 32-bit constant, even though
2177 some symbol and label references can be relaxed. */
2178 return speed ? 1 : 4;
2180 case REG:
2181 case SUBREG:
2182 case POST_INC:
2183 return 0;
2185 case POST_MODIFY:
2186 /* Assume any symbolic offset is a 32-bit constant. */
2187 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2188 if (IN_RANGE (i, -128, 127))
2189 return speed ? 0 : 1;
2190 if (speed)
2191 return 1;
2192 if (IN_RANGE (i, -0x800000, 0x7fffff))
2193 return 3;
2194 return 4;
2196 case PLUS:
2197 base = XEXP (x, 0);
2198 index = XEXP (x, 1);
2199 if (register_operand (index, SImode))
2201 /* Attempt to minimize the number of registers in the address.
2202 This is similar to what other ports do. */
2203 if (register_operand (base, SImode))
2204 return 1;
2206 base = XEXP (x, 1);
2207 index = XEXP (x, 0);
2210 /* Assume any symbolic offset is a 32-bit constant. */
2211 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2212 if (IN_RANGE (i, -128, 127))
2213 return speed ? 0 : 1;
2214 if (IN_RANGE (i, -32768, 32767))
2215 return speed ? 0 : 2;
2216 return speed ? 2 : 6;
2218 default:
2219 return rtx_cost (x, MEM, speed);
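/* Hand-worked size costs for mn10300_address_cost (speed == false):

     (reg:SI a0)                                -> 0
     (plus:SI (reg:SI a0) (const_int 4))        -> 1  (8-bit displacement)
     (plus:SI (reg:SI a0) (const_int 0x10000))  -> 6  (32-bit displacement)
     (symbol_ref "x")                           -> 4  (32-bit constant)  */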
2223 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2225 Recall that the base value of 2 is required by assumptions elsewhere
2226 in the body of the compiler, and that cost 2 is special-cased as an
2227 early exit from reload meaning no work is required. */
2229 static int
2230 mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2231 reg_class_t ifrom, reg_class_t ito)
2233 enum reg_class from = (enum reg_class) ifrom;
2234 enum reg_class to = (enum reg_class) ito;
2235 enum reg_class scratch, test;
2237 /* Simplify the following code by unifying the fp register classes. */
2238 if (to == FP_ACC_REGS)
2239 to = FP_REGS;
2240 if (from == FP_ACC_REGS)
2241 from = FP_REGS;
2243 /* Diagnose invalid moves by costing them as two moves. */
2245 scratch = NO_REGS;
2246 test = from;
2247 if (to == SP_REGS)
2248 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2249 else if (to == MDR_REGS)
2250 scratch = DATA_REGS;
2251 else if (to == FP_REGS && to != from)
2252 scratch = GENERAL_REGS;
2253 else
2255 test = to;
2256 if (from == SP_REGS)
2257 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2258 else if (from == MDR_REGS)
2259 scratch = DATA_REGS;
2260 else if (from == FP_REGS && to != from)
2261 scratch = GENERAL_REGS;
2263 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2264 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2265 + mn10300_register_move_cost (VOIDmode, scratch, to));
2267 /* From here on, all we need consider are legal combinations. */
2269 if (optimize_size)
2271 /* The scale here is bytes * 2. */
2273 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2274 return 2;
2276 if (from == SP_REGS)
2277 return (to == ADDRESS_REGS ? 2 : 6);
2279 /* For MN103, all remaining legal moves are two bytes. */
2280 if (!TARGET_AM33)
2281 return 4;
2283 if (to == SP_REGS)
2284 return (from == ADDRESS_REGS ? 4 : 6);
2286 if ((from == ADDRESS_REGS || from == DATA_REGS)
2287 && (to == ADDRESS_REGS || to == DATA_REGS))
2288 return 4;
2290 if (to == EXTENDED_REGS)
2291 return (to == from ? 6 : 4);
2293 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2294 return 6;
2296 else
2298 /* The scale here is cycles * 2. */
2300 if (to == FP_REGS)
2301 return 8;
2302 if (from == FP_REGS)
2303 return 4;
2305 /* All legal moves between integral registers are single cycle. */
2306 return 2;
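/* For instance, MDR_REGS to ADDRESS_REGS is not a legal direct move:
   the code above costs it as two moves through the DATA_REGS scratch
   class, i.e. mdr->data plus data->address, which on plain MN103 at
   -Os works out to 4 + 4 = 8 (two two-byte instructions).  */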
2310 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2312 Since we are not given the form of the address, this cost must be
2313 speed-relative, though it should never be less expensive than the
2314 size-relative register move costs above. This is not a problem. */
2316 static int
2317 mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2318 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2320 enum reg_class rclass = (enum reg_class) iclass;
2322 if (rclass == FP_REGS)
2323 return 8;
2324 return 6;
2327 /* Implement the TARGET_RTX_COSTS hook.
2329 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2330 to represent cycles. Size-relative costs are in bytes. */
2332 static bool
2333 mn10300_rtx_costs (rtx x, int code, int outer_code, int *ptotal, bool speed)
2335 /* This value is used for SYMBOL_REF etc where we want to pretend
2336 we have a full 32-bit constant. */
2337 HOST_WIDE_INT i = 0x12345678;
2338 int total;
2340 switch (code)
2342 case CONST_INT:
2343 i = INTVAL (x);
2344 do_int_costs:
2345 if (speed)
2347 if (outer_code == SET)
2349 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2350 if (IN_RANGE (i, -32768, 32767))
2351 total = COSTS_N_INSNS (1);
2352 else
2353 total = COSTS_N_INSNS (2);
2355 else
2357 /* 16-bit integer operands don't affect latency;
2358 24-bit and 32-bit operands add a cycle. */
2359 if (IN_RANGE (i, -32768, 32767))
2360 total = 0;
2361 else
2362 total = COSTS_N_INSNS (1);
2365 else
2367 if (outer_code == SET)
2369 if (i == 0)
2370 total = 1;
2371 else if (IN_RANGE (i, -128, 127))
2372 total = 2;
2373 else if (IN_RANGE (i, -32768, 32767))
2374 total = 3;
2375 else
2376 total = 6;
2378 else
2380 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2381 if (IN_RANGE (i, -128, 127))
2382 total = 0;
2383 else if (IN_RANGE (i, -32768, 32767))
2384 total = 2;
2385 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2386 total = 3;
2387 else
2388 total = 4;
2391 goto alldone;
2393 case CONST:
2394 case LABEL_REF:
2395 case SYMBOL_REF:
2396 case CONST_DOUBLE:
2397 /* We assume all of these require a 32-bit constant, even though
2398 some symbol and label references can be relaxed. */
2399 goto do_int_costs;
2401 case UNSPEC:
2402 switch (XINT (x, 1))
2404 case UNSPEC_PIC:
2405 case UNSPEC_GOT:
2406 case UNSPEC_GOTOFF:
2407 case UNSPEC_PLT:
2408 case UNSPEC_GOTSYM_OFF:
2409 /* The PIC unspecs also resolve to a 32-bit constant. */
2410 goto do_int_costs;
2412 default:
2413 /* Assume any non-listed unspec is some sort of arithmetic. */
2414 goto do_arith_costs;
2417 case PLUS:
2418 /* Notice the size difference of INC and INC4. */
2419 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2421 i = INTVAL (XEXP (x, 1));
2422 if (i == 1 || i == 4)
2424 total = 1 + rtx_cost (XEXP (x, 0), PLUS, speed);
2425 goto alldone;
2428 goto do_arith_costs;
2430 case MINUS:
2431 case AND:
2432 case IOR:
2433 case XOR:
2434 case NOT:
2435 case NEG:
2436 case ZERO_EXTEND:
2437 case SIGN_EXTEND:
2438 case COMPARE:
2439 case BSWAP:
2440 case CLZ:
2441 do_arith_costs:
2442 total = (speed ? COSTS_N_INSNS (1) : 2);
2443 break;
2445 case ASHIFT:
2446 /* Notice the size difference of ASL2 and variants. */
2447 if (!speed && CONST_INT_P (XEXP (x, 1)))
2448 switch (INTVAL (XEXP (x, 1)))
2450 case 1:
2451 case 2:
2452 total = 1;
2453 goto alldone;
2454 case 3:
2455 case 4:
2456 total = 2;
2457 goto alldone;
2459 /* FALLTHRU */
2461 case ASHIFTRT:
2462 case LSHIFTRT:
2463 total = (speed ? COSTS_N_INSNS (1) : 3);
2464 goto alldone;
2466 case MULT:
2467 total = (speed ? COSTS_N_INSNS (3) : 2);
2468 break;
2470 case DIV:
2471 case UDIV:
2472 case MOD:
2473 case UMOD:
2474 total = (speed ? COSTS_N_INSNS (39)
2475 /* Include space to load+retrieve MDR. */
2476 : code == MOD || code == UMOD ? 6 : 4);
2477 break;
2479 case MEM:
2480 total = mn10300_address_cost (XEXP (x, 0), speed);
2481 if (speed)
2482 total = COSTS_N_INSNS (2 + total);
2483 goto alldone;
2485 default:
2486 /* Probably not implemented. Assume external call. */
2487 total = (speed ? COSTS_N_INSNS (10) : 7);
2488 break;
2491 *ptotal = total;
2492 return false;
2494 alldone:
2495 *ptotal = total;
2496 return true;
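/* A hand-worked size example (speed == false): setting a register to
   the constant 100000 falls outside the 16-bit range, so the SET is
   costed at 6 bytes, whereas adding the constant 1 inside a SET matches
   the INC special case above and is costed at 1 byte plus the cost of
   the first operand.  */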
2499 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2500 may access it using GOTOFF instead of GOT. */
2502 static void
2503 mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
2505 rtx symbol;
2507 if (! MEM_P (rtl))
2508 return;
2509 symbol = XEXP (rtl, 0);
2510 if (GET_CODE (symbol) != SYMBOL_REF)
2511 return;
2513 if (flag_pic)
2514 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2517 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2518 and readonly data size. So we crank up the case threshold value to
2519 encourage a series of if/else comparisons to implement many small switch
2520 statements. In theory, this value could be increased much more if we
2521 were solely optimizing for space, but we keep it "reasonable" to avoid
2522 serious code efficiency lossage. */
2524 static unsigned int
2525 mn10300_case_values_threshold (void)
2527 return 6;
2530 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2532 static void
2533 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2535 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2537 /* This is a strict alignment target, which means that we play
2538 some games to make sure that the locations at which we need
2539 to store <chain> and <disp> wind up at aligned addresses.
2541 0x28 0x00 add 0,d0
2542 0xfc 0xdd mov chain,a1
2543 <chain>
2544 0xf8 0xed 0x00 btst 0,d1
2545 0xdc jmp fnaddr
2546 <disp>
2548 Note that the two extra insns are effectively nops; they
2549 clobber the flags but do not affect the contents of D0 or D1. */
2551 disp = expand_binop (SImode, sub_optab, fnaddr,
2552 plus_constant (XEXP (m_tramp, 0), 11),
2553 NULL_RTX, 1, OPTAB_DIRECT);
2555 mem = adjust_address (m_tramp, SImode, 0);
2556 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2557 mem = adjust_address (m_tramp, SImode, 4);
2558 emit_move_insn (mem, chain_value);
2559 mem = adjust_address (m_tramp, SImode, 8);
2560 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2561 mem = adjust_address (m_tramp, SImode, 12);
2562 emit_move_insn (mem, disp);
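/* The resulting 16-byte trampoline image (the MN10300 is little-endian,
   so the bytes of each 32-bit word above appear reversed in memory):

     +0:  28 00 fc dd    add 0,d0 ; mov imm32,a1
     +4:  <chain_value>  (the imm32 of the mov)
     +8:  f8 ed 00 dc    btst 0,d1 ; jmp (32-bit pc-relative)
     +12: <disp = fnaddr - (tramp + 11)>  */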
2565 /* Output the assembler code for a C++ thunk function.
2566 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2567 is the decl for the target function. DELTA is an immediate constant
2568 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2569 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2570 additionally added to THIS. Finally jump to the entry point of
2571 FUNCTION. */
2573 static void
2574 mn10300_asm_output_mi_thunk (FILE * file,
2575 tree thunk_fndecl ATTRIBUTE_UNUSED,
2576 HOST_WIDE_INT delta,
2577 HOST_WIDE_INT vcall_offset,
2578 tree function)
2580 const char * _this;
2582 /* Get the register holding the THIS parameter. Handle the case
2583 where there is a hidden first argument for a returned structure. */
2584 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2585 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2586 else
2587 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2589 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2591 if (delta)
2592 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2594 if (vcall_offset)
2596 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2598 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2599 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2600 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2601 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2602 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2605 fputs ("\tjmp ", file);
2606 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2607 putc ('\n', file);
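/* For example, with DELTA == 4 and VCALL_OFFSET == 0, and with "this"
   arriving in d0, the thunk emitted for a target function f is just:

        add 4, d0
        jmp f                                                        */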
2610 /* Return true if mn10300_asm_output_mi_thunk would be able to output the
2611 assembler code for the thunk function specified by the arguments
2612 it is passed, and false otherwise. */
2614 static bool
2615 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2616 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2617 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2618 const_tree function ATTRIBUTE_UNUSED)
2620 return true;
2623 bool
2624 mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2626 if (REGNO_REG_CLASS (regno) == FP_REGS
2627 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2628 /* Do not store integer values in FP registers. */
2629 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2631 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2632 return true;
2634 if (REGNO_REG_CLASS (regno) == DATA_REGS
2635 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2636 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2637 return GET_MODE_SIZE (mode) <= 4;
2639 return false;
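/* For instance, DImode (8 bytes) may start in d0 but not in d1: an
   odd-numbered register only qualifies above when the mode fits in
   4 bytes.  Integer modes never qualify for the FP registers; an
   SFmode value may live in an even-numbered FP register.  */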
2642 bool
2643 mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2645 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2646 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2647 return false;
2649 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2650 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2651 return false;
2653 if (TARGET_AM33
2654 || mode1 == mode2
2655 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2656 return true;
2658 return false;
2661 static int
2662 cc_flags_for_mode (enum machine_mode mode)
2664 switch (mode)
2666 case CCmode:
2667 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2668 case CCZNCmode:
2669 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2670 case CCZNmode:
2671 return CC_FLAG_Z | CC_FLAG_N;
2672 case CC_FLOATmode:
2673 return -1;
2674 default:
2675 gcc_unreachable ();
2679 static int
2680 cc_flags_for_code (enum rtx_code code)
2682 switch (code)
2684 case EQ: /* Z */
2685 case NE: /* ~Z */
2686 return CC_FLAG_Z;
2688 case LT: /* N */
2689 case GE: /* ~N */
2690 return CC_FLAG_N;
2693 case GT: /* ~(Z|(N^V)) */
2694 case LE: /* Z|(N^V) */
2695 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2697 case GEU: /* ~C */
2698 case LTU: /* C */
2699 return CC_FLAG_C;
2701 case GTU: /* ~(C | Z) */
2702 case LEU: /* C | Z */
2703 return CC_FLAG_Z | CC_FLAG_C;
2705 case ORDERED:
2706 case UNORDERED:
2707 case LTGT:
2708 case UNEQ:
2709 case UNGE:
2710 case UNGT:
2711 case UNLE:
2712 case UNLT:
2713 return -1;
2715 default:
2716 gcc_unreachable ();
2720 enum machine_mode
2721 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2723 int req;
2725 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2726 return CC_FLOATmode;
2728 req = cc_flags_for_code (code);
2730 if (req & CC_FLAG_V)
2731 return CCmode;
2732 if (req & CC_FLAG_C)
2733 return CCZNCmode;
2734 return CCZNmode;
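/* For example: EQ and NE only need the Z flag, so they select CCZNmode;
   an unsigned GTU needs C (and Z) and so gets CCZNCmode; a signed GT
   needs the V flag as well and therefore must use full CCmode.  */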
2737 static inline bool
2738 is_load_insn (rtx insn)
2740 if (GET_CODE (PATTERN (insn)) != SET)
2741 return false;
2743 return MEM_P (SET_SRC (PATTERN (insn)));
2746 static inline bool
2747 is_store_insn (rtx insn)
2749 if (GET_CODE (PATTERN (insn)) != SET)
2750 return false;
2752 return MEM_P (SET_DEST (PATTERN (insn)));
2755 /* Update scheduling costs for situations that cannot be
2756 described using the attributes and DFA machinery.
2757 DEP is the insn being scheduled.
2758 INSN is the previous insn.
2759 COST is the current cycle cost for DEP. */
2761 static int
2762 mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
2764 int timings = get_attr_timings (insn);
2766 if (!TARGET_AM33)
2767 return 1;
2769 if (GET_CODE (insn) == PARALLEL)
2770 insn = XVECEXP (insn, 0, 0);
2772 if (GET_CODE (dep) == PARALLEL)
2773 dep = XVECEXP (dep, 0, 0);
2775 /* For the AM34 a load instruction that follows a
2776 store instruction incurs an extra cycle of delay. */
2777 if (mn10300_tune_cpu == PROCESSOR_AM34
2778 && is_load_insn (dep)
2779 && is_store_insn (insn))
2780 cost += 1;
2782 /* For the AM34 a non-store, non-branch FPU insn that follows
2783 another FPU insn incurs a one cycle throughput increase. */
2784 else if (mn10300_tune_cpu == PROCESSOR_AM34
2785 && ! is_store_insn (insn)
2786 && ! JUMP_P (insn)
2787 && GET_CODE (PATTERN (dep)) == SET
2788 && GET_CODE (PATTERN (insn)) == SET
2789 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
2790 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
2791 cost += 1;
2793 /* Resolve the conflict described in section 1-7-4 of
2794 Chapter 3 of the MN103E Series Instruction Manual
2795 where it says:
2797 "When the preceeding instruction is a CPU load or
2798 store instruction, a following FPU instruction
2799 cannot be executed until the CPU completes the
2800 latency period even though there are no register
2801 or flag dependencies between them." */
2803 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2804 if (! TARGET_AM33_2)
2805 return cost;
2807 /* If a data dependence already exists then the cost is correct. */
2808 if (REG_NOTE_KIND (link) == 0)
2809 return cost;
2811 /* Check that the instruction about to be scheduled is an FPU instruction. */
2812 if (GET_CODE (PATTERN (dep)) != SET)
2813 return cost;
2815 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
2816 return cost;
2818 /* Now check to see if the previous instruction is a load or store. */
2819 if (! is_load_insn (insn) && ! is_store_insn (insn))
2820 return cost;
2822 /* XXX: Verify: The text of 1-7-4 implies that the restriction
2823 only applies when an INTEGER load/store precedes an FPU
2824 instruction, but is this true?  For now we assume that it is. */
2825 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
2826 return cost;
2828 /* Extract the latency value from the timings attribute. */
2829 return timings < 100 ? (timings % 10) : (timings % 100);
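/* A worked reading of the value returned above: the TIMINGS attribute
   appears to encode the insn's latency in its low decimal digit(s) --
   one digit for values below 100, two digits otherwise -- so a timings
   value of 22 yields a latency of 2 cycles, and 1208 would yield 8.  */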
2832 static void
2833 mn10300_conditional_register_usage (void)
2835 unsigned int i;
2837 if (!TARGET_AM33)
2839 for (i = FIRST_EXTENDED_REGNUM;
2840 i <= LAST_EXTENDED_REGNUM; i++)
2841 fixed_regs[i] = call_used_regs[i] = 1;
2843 if (!TARGET_AM33_2)
2845 for (i = FIRST_FP_REGNUM;
2846 i <= LAST_FP_REGNUM; i++)
2847 fixed_regs[i] = call_used_regs[i] = 1;
2849 if (flag_pic)
2850 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2851 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2854 /* Worker function for TARGET_MD_ASM_CLOBBERS.
2855 We do this in the mn10300 backend to maintain source compatibility
2856 with the old cc0-based compiler. */
2858 static tree
2859 mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2860 tree inputs ATTRIBUTE_UNUSED,
2861 tree clobbers)
2863 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2864 clobbers);
2865 return clobbers;
2868 /* A helper function for splitting cbranch patterns after reload. */
2870 void
2871 mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2873 rtx flags, x;
2875 flags = gen_rtx_REG (cmp_mode, CC_REG);
2876 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2877 x = gen_rtx_SET (VOIDmode, flags, x);
2878 emit_insn (x);
2880 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2881 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2882 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2883 emit_jump_insn (x);
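/* An illustrative split (hand-written RTL): the combined branch

     (set (pc) (if_then_else (lt (reg:SI d0) (reg:SI d1))
                             (label_ref L) (pc)))

   becomes a flag-setting compare followed by a jump on the flags:

     (set (reg cc) (compare (reg:SI d0) (reg:SI d1)))
     (set (pc) (if_then_else (lt (reg cc) (const_int 0))
                             (label_ref L) (pc)))                   */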
2886 /* A helper function for matching parallels that set the flags. */
2888 bool
2889 mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
2891 rtx op1, flags;
2892 enum machine_mode flags_mode;
2894 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2896 op1 = XVECEXP (PATTERN (insn), 0, 1);
2897 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2899 flags = SET_DEST (op1);
2900 flags_mode = GET_MODE (flags);
2902 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2903 return false;
2904 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2905 return false;
2907 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2908 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2909 return false;
2911 return true;
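/* For instance, mn10300_match_ccmode accepts a parallel whose compare
   is written in CCZNmode when CC_MODE is CCZNCmode (Z and N being a
   subset of Z, N and C), but rejects a full CCmode compare against
   CCZNmode, since CCmode tracks flags (C and V) that CCZNmode does
   not.  */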
2914 int
2915 mn10300_split_and_operand_count (rtx op)
2917 HOST_WIDE_INT val = INTVAL (op);
2918 int count;
2920 if (val < 0)
2922 /* High bit is set, look for bits clear at the bottom. */
2923 count = exact_log2 (-val);
2924 if (count < 0)
2925 return 0;
2926 /* This is only a size win if we can use the asl2 insn. Otherwise we
2927 would be replacing 1 6-byte insn with 2 3-byte insns. */
2928 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2929 return 0;
2930 return count;
2932 else
2934 /* High bit is clear, look for bits set at the bottom. */
2935 count = exact_log2 (val + 1);
2936 count = 32 - count;
2937 /* Again, this is only a size win with asl2. */
2938 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2939 return 0;
2940 return -count;
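/* Hand-worked examples: for val == 0xfffffffc (-4) the low two bits
   are clear, so the function returns 2 and the caller in mn10300.md
   can split the AND into a shift-right/shift-left pair by 2.  For
   val == 0x3fffffff it returns -2, directing the caller to clear the
   top two bits with the opposite shift pair.  Counts above 2 (when
   optimizing for speed) or 4 (for size) are rejected as
   unprofitable.  */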
2944 struct liw_data
2946 enum attr_liw slot;
2947 enum attr_liw_op op;
2948 rtx dest;
2949 rtx src;
2952 /* Decide if the given insn is a candidate for LIW bundling. If it is then
2953 extract the operands and LIW attributes from the insn and use them to fill
2954 in the liw_data structure. Return true upon success or false if the insn
2955 cannot be bundled. */
2957 static bool
2958 extract_bundle (rtx insn, struct liw_data * pdata)
2960 bool allow_consts = true;
2961 rtx p;
2963 gcc_assert (pdata != NULL);
2965 if (insn == NULL_RTX)
2966 return false;
2967 /* Make sure that we are dealing with a simple SET insn. */
2968 p = single_set (insn);
2969 if (p == NULL_RTX)
2970 return false;
2972 /* Make sure that it could go into one of the LIW pipelines. */
2973 pdata->slot = get_attr_liw (insn);
2974 if (pdata->slot == LIW_BOTH)
2975 return false;
2977 pdata->op = get_attr_liw_op (insn);
2981 switch (pdata->op)
2983 case LIW_OP_MOV:
2984 pdata->dest = SET_DEST (p);
2985 pdata->src = SET_SRC (p);
2986 break;
2987 case LIW_OP_CMP:
2988 pdata->dest = XEXP (SET_SRC (p), 0);
2989 pdata->src = XEXP (SET_SRC (p), 1);
2990 break;
2991 case LIW_OP_NONE:
2992 return false;
2993 case LIW_OP_AND:
2994 case LIW_OP_OR:
2995 case LIW_OP_XOR:
2996 /* The AND, OR and XOR long instruction words only accept register arguments. */
2997 allow_consts = false;
2998 /* Fall through. */
2999 default:
3000 pdata->dest = SET_DEST (p);
3001 pdata->src = XEXP (SET_SRC (p), 1);
3002 break;
3005 if (! REG_P (pdata->dest))
3006 return false;
3008 if (REG_P (pdata->src))
3009 return true;
3011 return allow_consts && satisfies_constraint_O (pdata->src);
3014 /* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3015 the instructions with the assumption that LIW1 would be executed before LIW2,
3016 so we must check for overlaps between their sources and destinations. */
3018 static bool
3019 check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3021 /* Check for slot conflicts. */
3022 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
3023 return false;
3025 /* If either operation is a compare, then "dest" is really an input; the real
3026 destination is CC_REG. So these instructions need different checks. */
3028 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3029 check its values prior to any changes made by OP. */
3030 if (pliw1->op == LIW_OP_CMP)
3032 /* Two sequential comparisons means dead code, which ought to
3033 have been eliminated given that bundling only happens with
3034 optimization. We cannot bundle them in any case. */
3035 gcc_assert (pliw1->op != pliw2->op);
3036 return true;
3039 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3040 is the destination of OP, as the CMP will look at the old value, not the new
3041 one. */
3042 if (pliw2->op == LIW_OP_CMP)
3044 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3045 return false;
3047 if (REG_P (pliw2->src))
3048 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3050 return true;
3053 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3054 same destination register. */
3055 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3056 return false;
3058 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3059 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction when
3060 we can replace the source in OP2 with the source of OP1. */
3061 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3063 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
3065 if (! REG_P (pliw1->src)
3066 && (pliw2->op == LIW_OP_AND
3067 || pliw2->op == LIW_OP_OR
3068 || pliw2->op == LIW_OP_XOR))
3069 return false;
3071 pliw2->src = pliw1->src;
3072 return true;
3074 return false;
3077 /* Everything else is OK. */
3078 return true;
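/* Illustrative pairs (hand-worked): "mov d0,d1 ; add d2,d3" may be
   bundled, slot attributes permitting, as the two operations touch
   disjoint registers.  By contrast "add d1,d0 ; cmp 0,d0" may not:
   executed in parallel, the CMP would observe the old value of d0,
   not the sum.  */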
3081 /* Combine pairs of insns into LIW bundles. */
3083 static void
3084 mn10300_bundle_liw (void)
3086 rtx r;
3088 for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
3090 rtx insn1, insn2;
3091 struct liw_data liw1, liw2;
3093 insn1 = r;
3094 if (! extract_bundle (insn1, & liw1))
3095 continue;
3097 insn2 = next_nonnote_nondebug_insn (insn1);
3098 if (! extract_bundle (insn2, & liw2))
3099 continue;
3101 /* Check for source/destination overlap. */
3102 if (! check_liw_constraints (& liw1, & liw2))
3103 continue;
3105 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
3107 struct liw_data temp;
3109 temp = liw1;
3110 liw1 = liw2;
3111 liw2 = temp;
3114 delete_insn (insn2);
3116 if (liw1.op == LIW_OP_CMP)
3117 insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3118 GEN_INT (liw2.op));
3119 else if (liw2.op == LIW_OP_CMP)
3120 insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3121 GEN_INT (liw1.op));
3122 else
3123 insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3124 GEN_INT (liw1.op), GEN_INT (liw2.op));
3126 insn2 = emit_insn_after (insn2, insn1);
3127 delete_insn (insn1);
3128 r = insn2;
3132 static void
3133 mn10300_reorg (void)
3135 if (TARGET_AM33)
3137 if (TARGET_ALLOW_LIW)
3138 mn10300_bundle_liw ();
3142 /* Initialize the GCC target structure. */
3144 #undef TARGET_MACHINE_DEPENDENT_REORG
3145 #define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3147 #undef TARGET_EXCEPT_UNWIND_INFO
3148 #define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
3150 #undef TARGET_ASM_ALIGNED_HI_OP
3151 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3153 #undef TARGET_LEGITIMIZE_ADDRESS
3154 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3156 #undef TARGET_ADDRESS_COST
3157 #define TARGET_ADDRESS_COST mn10300_address_cost
3158 #undef TARGET_REGISTER_MOVE_COST
3159 #define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3160 #undef TARGET_MEMORY_MOVE_COST
3161 #define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3162 #undef TARGET_RTX_COSTS
3163 #define TARGET_RTX_COSTS mn10300_rtx_costs
3165 #undef TARGET_ASM_FILE_START
3166 #define TARGET_ASM_FILE_START mn10300_file_start
3167 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3168 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3170 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3171 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3173 #undef TARGET_DEFAULT_TARGET_FLAGS
3174 #define TARGET_DEFAULT_TARGET_FLAGS MASK_MULT_BUG | MASK_PTR_A0D0 | MASK_ALLOW_LIW
3175 #undef TARGET_HANDLE_OPTION
3176 #define TARGET_HANDLE_OPTION mn10300_handle_option
3177 #undef TARGET_OPTION_OVERRIDE
3178 #define TARGET_OPTION_OVERRIDE mn10300_option_override
3179 #undef TARGET_OPTION_OPTIMIZATION_TABLE
3180 #define TARGET_OPTION_OPTIMIZATION_TABLE mn10300_option_optimization_table
3182 #undef TARGET_ENCODE_SECTION_INFO
3183 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3185 #undef TARGET_PROMOTE_PROTOTYPES
3186 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3187 #undef TARGET_RETURN_IN_MEMORY
3188 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3189 #undef TARGET_PASS_BY_REFERENCE
3190 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3191 #undef TARGET_CALLEE_COPIES
3192 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3193 #undef TARGET_ARG_PARTIAL_BYTES
3194 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
3195 #undef TARGET_FUNCTION_ARG
3196 #define TARGET_FUNCTION_ARG mn10300_function_arg
3197 #undef TARGET_FUNCTION_ARG_ADVANCE
3198 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3200 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
3201 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3202 #undef TARGET_EXPAND_BUILTIN_VA_START
3203 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3205 #undef TARGET_CASE_VALUES_THRESHOLD
3206 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3208 #undef TARGET_LEGITIMATE_ADDRESS_P
3209 #define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
3210 #undef TARGET_DELEGITIMIZE_ADDRESS
3211 #define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
3213 #undef TARGET_PREFERRED_RELOAD_CLASS
3214 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3215 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
3216 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3217 mn10300_preferred_output_reload_class
3218 #undef TARGET_SECONDARY_RELOAD
3219 #define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
3221 #undef TARGET_TRAMPOLINE_INIT
3222 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3224 #undef TARGET_FUNCTION_VALUE
3225 #define TARGET_FUNCTION_VALUE mn10300_function_value
3226 #undef TARGET_LIBCALL_VALUE
3227 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
3229 #undef TARGET_ASM_OUTPUT_MI_THUNK
3230 #define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3231 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3232 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3234 #undef TARGET_SCHED_ADJUST_COST
3235 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3237 #undef TARGET_CONDITIONAL_REGISTER_USAGE
3238 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3240 #undef TARGET_MD_ASM_CLOBBERS
3241 #define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3243 #undef TARGET_FLAGS_REGNUM
3244 #define TARGET_FLAGS_REGNUM CC_REG
3246 struct gcc_target targetm = TARGET_INITIALIZER;