1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 93, 94, 95, 96, 1997 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "insn-flags.h"
36 #include "insn-attr.h"
/* The maximum number of insns skipped which will be conditionalised if
   possible.  */
#define MAX_INSNS_SKIPPED  5
/* Some function declarations.  */
extern FILE *asm_out_file;
extern char *output_multi_immediate ();

HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT));

/* Forward declarations for static helpers defined later in this file.  */
static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode,
				    HOST_WIDE_INT, rtx, rtx, int, int));
static int arm_naked_function_p PROTO ((tree func));
/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* What type of cpu are we compiling for?  */
enum processor_type arm_cpu;

/* What type of floating point are we compiling for?  */
enum floating_point_type arm_fpu;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Name of the CPU selected on the command line (or the configured
   default, ARM_CPU_NAME).  */
char *target_cpu_name = ARM_CPU_NAME;

/* Argument of a -mfpe-... option, if one was given (see
   arm_override_options); NULL otherwise.  */
char *target_fpe_name = NULL;

/* Nonzero if this is an "M" variant of the processor.  */
int arm_fast_multiply = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions */

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* Nonzero if the prologue must setup `fp'.  */
int current_function_anonymous_args;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = 9;

/* Location counter of .text segment.  */
int arm_text_location = 0;

/* Set to one if we think that lr is only saved because of subroutine calls,
   but all of these can be `put after' return insns.  */
int lr_save_eliminated;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
static int return_used_this_function;

/* If synthesizing a constant in-line would take more insns than this
   (see arm_split_constant), emit a plain move of the constant instead.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
enum arm_cond_code arm_current_cc;

int arm_target_label;
110 /* The condition codes of the ARM, and the inverse function. */
111 char *arm_condition_codes
[] =
113 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
114 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
/* Forward declaration; defined later in this file.  */
static enum arm_cond_code get_arm_condition_code ();
120 /* Initialization code */
122 struct arm_cpu_select arm_select
[3] =
124 /* switch name, tune arch */
125 { (char *)0, "--with-cpu=", 1, 1 },
126 { (char *)0, "-mcpu=", 1, 1 },
127 { (char *)0, "-mtune=", 1, 0 },
/* Bit values used in the `flags' field of the processor tables to
   describe the capabilities of a CPU variant.  */
#define FL_CO_PROC    0x01	/* Has external co-processor bus */
#define FL_FAST_MULT  0x02	/* Fast multiply */
#define FL_MODE26     0x04	/* 26-bit mode support */
#define FL_MODE32     0x08	/* 32-bit mode support */
#define FL_ARCH4      0x10	/* Architecture rel 4 */
#define FL_THUMB      0x20	/* Thumb aware */
140 enum processor_type type
;
144 /* Not all of these give usefully different compilation alternatives,
145 but there is no simple way of generalizing them. */
146 static struct processors all_procs
[] =
148 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
149 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
150 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
151 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
152 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
153 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
154 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
/* arm7m doesn't exist on its own, only in conjunction with D, (and I), but
   those don't alter the code, so it is sometimes known as the arm7m */
157 {"arm7m", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
159 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
161 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
163 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
164 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
165 {"arm7100", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
166 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
167 /* Doesn't really have an external co-proc, but does have embedded fpu */
168 {"arm7500fe", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
169 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
170 | FL_ARCH4
| FL_THUMB
)},
171 {"arm8", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
173 {"arm810", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
175 {"strongarm", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
177 {"strongarm110", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
182 /* Fix up any incompatible options that the user has specified.
183 This has now turned into a maze. */
185 arm_override_options ()
187 int arm_thumb_aware
= 0;
190 struct arm_cpu_select
*ptr
;
191 static struct cpu_default
{
195 { TARGET_CPU_arm2
, "arm2" },
196 { TARGET_CPU_arm6
, "arm6" },
197 { TARGET_CPU_arm610
, "arm610" },
198 { TARGET_CPU_arm7dm
, "arm7dm" },
199 { TARGET_CPU_arm7500fe
, "arm7500fe" },
200 { TARGET_CPU_arm7tdmi
, "arm7tdmi" },
201 { TARGET_CPU_arm8
, "arm8" },
202 { TARGET_CPU_arm810
, "arm810" },
203 { TARGET_CPU_strongarm
, "strongarm" },
206 struct cpu_default
*def
;
208 /* Set the default. */
209 for (def
= &cpu_defaults
[0]; def
->name
; ++def
)
210 if (def
->cpu
== TARGET_CPU_DEFAULT
)
215 arm_select
[0].string
= def
->name
;
217 for (i
= 0; i
< sizeof (arm_select
) / sizeof (arm_select
[0]); i
++)
219 ptr
= &arm_select
[i
];
220 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
222 struct processors
*sel
;
224 for (sel
= all_procs
; sel
->name
!= NULL
; sel
++)
225 if (! strcmp (ptr
->string
, sel
->name
))
235 if (sel
->name
== NULL
)
236 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
240 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
241 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
243 if (TARGET_POKE_FUNCTION_NAME
)
244 target_flags
|= ARM_FLAG_APCS_FRAME
;
247 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>");
250 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>");
252 if (TARGET_APCS_REENT
&& flag_pic
)
253 fatal ("-fpic and -mapcs-reent are incompatible");
255 if (TARGET_APCS_REENT
)
256 warning ("APCS reentrant code not supported.");
258 /* If stack checking is disabled, we can use r10 as the PIC register,
259 which keeps r9 available. */
260 if (flag_pic
&& ! TARGET_APCS_STACK
)
261 arm_pic_register
= 10;
263 /* Well, I'm about to have a go, but pic is NOT going to be compatible
264 with APCS reentrancy, since that requires too much support in the
265 assembler and linker, and the ARMASM assembler seems to lack some
266 required directives. */
268 warning ("Position independent code not supported. Ignored");
270 if (TARGET_APCS_FLOAT
)
271 warning ("Passing floating point arguments in fp regs not yet supported");
273 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
275 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
276 target_flags
|= ARM_FLAG_APCS_FRAME
;
/* Default value for floating point code... if no co-processor
   bus, then schedule for emulated floating point.  Otherwise,
   assume the user has an FPA, unless overridden with -mfpe-...
   NOTE(review): the condition below, `flags & FL_CO_PROC == 0', parses as
   `flags & (FL_CO_PROC == 0)' because `==' binds tighter than `&'; that
   expression is always zero, so the then-branch can never be taken.  It
   almost certainly should read `(flags & FL_CO_PROC) == 0' -- confirm
   and fix.  */
284 if (flags
& FL_CO_PROC
== 0)
288 arm_fast_multiply
= (flags
& FL_FAST_MULT
) != 0;
289 arm_arch4
= (flags
& FL_ARCH4
) != 0;
290 arm_thumb_aware
= (flags
& FL_THUMB
) != 0;
294 if (strcmp (target_fpe_name
, "2") == 0)
296 else if (strcmp (target_fpe_name
, "3") == 0)
299 fatal ("Invalid floating point emulation option: -mfpe-%s",
303 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
305 warning ("This processor variant does not support Thumb interworking");
306 target_flags
&= ~ARM_FLAG_THUMB
;
309 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
312 /* For arm2/3 there is no need to do any scheduling if there is only
313 a floating point emulator, or we are doing software floating-point. */
314 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
315 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
317 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
321 /* Return 1 if it is possible to return using a single instruction */
328 if (!reload_completed
||current_function_pretend_args_size
329 || current_function_anonymous_args
330 || (get_frame_size () && !(TARGET_APCS
|| frame_pointer_needed
)))
333 /* Can't be done if any of the FPU regs are pushed, since this also
335 for (regno
= 20; regno
< 24; regno
++)
336 if (regs_ever_live
[regno
])
339 /* If a function is naked, don't use the "return" insn. */
340 if (arm_naked_function_p (current_function_decl
))
346 /* Return TRUE if int I is a valid immediate ARM constant. */
352 unsigned HOST_WIDE_INT mask
= ~0xFF;
354 /* Fast return for 0 and powers of 2 */
355 if ((i
& (i
- 1)) == 0)
360 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
363 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
364 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
365 } while (mask
!= ~0xFF);
370 /* Return true if I is a valid constant for the operation CODE. */
372 const_ok_for_op (i
, code
, mode
)
375 enum machine_mode mode
;
377 if (const_ok_for_arm (i
))
383 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
385 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
391 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
398 /* Emit a sequence of insns to handle a large constant.
399 CODE is the code of the operation required, it can be any of SET, PLUS,
400 IOR, AND, XOR, MINUS;
401 MODE is the mode in which the operation is being performed;
402 VAL is the integer to operate on;
403 SOURCE is the other operand (a register, or a null-pointer for SET);
404 SUBTARGETS means it is safe to create scratch registers if that will
405 either produce a simpler sequence, or we will want to cse the values.
406 Return value is the number of insns emitted. */
409 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
411 enum machine_mode mode
;
417 if (subtargets
|| code
== SET
418 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
419 && REGNO (target
) != REGNO (source
)))
423 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
424 > arm_constant_limit
+ (code
!= SET
))
428 /* Currently SET is the only monadic value for CODE, all
429 the rest are diadic. */
430 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
435 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
437 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
438 /* For MINUS, the value is subtracted from, since we never
439 have subtraction of a constant. */
441 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
442 gen_rtx (code
, mode
, temp
, source
)));
444 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
445 gen_rtx (code
, mode
, source
, temp
)));
451 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
454 /* As above, but extra parameter GENERATE which, if clear, suppresses
457 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
459 enum machine_mode mode
;
469 int can_negate_initial
= 0;
472 int num_bits_set
= 0;
473 int set_sign_bit_copies
= 0;
474 int clear_sign_bit_copies
= 0;
475 int clear_zero_bit_copies
= 0;
476 int set_zero_bit_copies
= 0;
479 unsigned HOST_WIDE_INT temp1
, temp2
;
480 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
482 /* find out which operations are safe for a given CODE. Also do a quick
483 check for degenerate cases; these can occur when DImode operations
495 can_negate_initial
= 1;
499 if (remainder
== 0xffffffff)
502 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
503 GEN_INT (ARM_SIGN_EXTEND (val
))));
508 if (reload_completed
&& rtx_equal_p (target
, source
))
511 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
520 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
523 if (remainder
== 0xffffffff)
525 if (reload_completed
&& rtx_equal_p (target
, source
))
528 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
537 if (reload_completed
&& rtx_equal_p (target
, source
))
540 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
543 if (remainder
== 0xffffffff)
546 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
547 gen_rtx (NOT
, mode
, source
)));
551 /* We don't know how to handle this yet below. */
555 /* We treat MINUS as (val - source), since (source - val) is always
556 passed as (source + (-val)). */
560 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
561 gen_rtx (NEG
, mode
, source
)));
564 if (const_ok_for_arm (val
))
567 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
568 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
579 /* If we can do it in one insn get out quickly */
580 if (const_ok_for_arm (val
)
581 || (can_negate_initial
&& const_ok_for_arm (-val
))
582 || (can_invert
&& const_ok_for_arm (~val
)))
585 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
586 (source
? gen_rtx (code
, mode
, source
,
593 /* Calculate a few attributes that may be useful for specific
596 for (i
= 31; i
>= 0; i
--)
598 if ((remainder
& (1 << i
)) == 0)
599 clear_sign_bit_copies
++;
604 for (i
= 31; i
>= 0; i
--)
606 if ((remainder
& (1 << i
)) != 0)
607 set_sign_bit_copies
++;
612 for (i
= 0; i
<= 31; i
++)
614 if ((remainder
& (1 << i
)) == 0)
615 clear_zero_bit_copies
++;
620 for (i
= 0; i
<= 31; i
++)
622 if ((remainder
& (1 << i
)) != 0)
623 set_zero_bit_copies
++;
/* See if we can do this by sign_extending a constant that is known
   to be negative.  This is a good way of doing it, since the shift
   may well merge into a subsequent insn.  */
634 if (set_sign_bit_copies
> 1)
637 (temp1
= ARM_SIGN_EXTEND (remainder
638 << (set_sign_bit_copies
- 1))))
642 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
643 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
645 emit_insn (gen_ashrsi3 (target
, new_src
,
646 GEN_INT (set_sign_bit_copies
- 1)));
650 /* For an inverted constant, we will need to set the low bits,
651 these will be shifted out of harm's way. */
652 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
653 if (const_ok_for_arm (~temp1
))
657 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
658 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
660 emit_insn (gen_ashrsi3 (target
, new_src
,
661 GEN_INT (set_sign_bit_copies
- 1)));
667 /* See if we can generate this by setting the bottom (or the top)
668 16 bits, and then shifting these into the other half of the
669 word. We only look for the simplest cases, to do more would cost
670 too much. Be careful, however, not to generate this when the
671 alternative would take fewer insns. */
672 if (val
& 0xffff0000)
674 temp1
= remainder
& 0xffff0000;
675 temp2
= remainder
& 0x0000ffff;
677 /* Overlaps outside this range are best done using other methods. */
678 for (i
= 9; i
< 24; i
++)
680 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
681 && ! const_ok_for_arm (temp2
))
683 insns
= arm_gen_constant (code
, mode
, temp2
,
684 new_src
= (subtargets
687 source
, subtargets
, generate
);
690 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
692 gen_rtx (ASHIFT
, mode
, source
,
699 /* Don't duplicate cases already considered. */
700 for (i
= 17; i
< 24; i
++)
702 if (((temp1
| (temp1
>> i
)) == remainder
)
703 && ! const_ok_for_arm (temp1
))
705 insns
= arm_gen_constant (code
, mode
, temp1
,
706 new_src
= (subtargets
709 source
, subtargets
, generate
);
712 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
714 gen_rtx (LSHIFTRT
, mode
,
715 source
, GEN_INT (i
)),
725 /* If we have IOR or XOR, and the constant can be loaded in a
726 single instruction, and we can find a temporary to put it in,
727 then this can be done in two instructions instead of 3-4. */
729 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
731 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
735 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
737 emit_insn (gen_rtx (SET
, VOIDmode
, sub
, GEN_INT (val
)));
738 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
739 gen_rtx (code
, mode
, source
, sub
)));
748 if (set_sign_bit_copies
> 8
749 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
753 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
754 rtx shift
= GEN_INT (set_sign_bit_copies
);
756 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
758 gen_rtx (ASHIFT
, mode
, source
,
760 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
762 gen_rtx (LSHIFTRT
, mode
, sub
,
768 if (set_zero_bit_copies
> 8
769 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
773 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
774 rtx shift
= GEN_INT (set_zero_bit_copies
);
776 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
778 gen_rtx (LSHIFTRT
, mode
, source
,
780 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
782 gen_rtx (ASHIFT
, mode
, sub
,
788 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
792 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
793 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
794 gen_rtx (NOT
, mode
, source
)));
797 sub
= gen_reg_rtx (mode
);
798 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
799 gen_rtx (AND
, mode
, source
,
801 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
802 gen_rtx (NOT
, mode
, sub
)));
809 /* See if two shifts will do 2 or more insn's worth of work. */
810 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
812 HOST_WIDE_INT shift_mask
= ((0xffffffff
813 << (32 - clear_sign_bit_copies
))
818 if ((remainder
| shift_mask
) != 0xffffffff)
822 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
823 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
824 new_source
, source
, subtargets
, 1);
828 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
829 new_source
, source
, subtargets
, 0);
834 shift
= GEN_INT (clear_sign_bit_copies
);
835 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
836 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
837 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
843 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
845 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
849 if ((remainder
| shift_mask
) != 0xffffffff)
853 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
854 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
855 new_source
, source
, subtargets
, 1);
859 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
860 new_source
, source
, subtargets
, 0);
865 shift
= GEN_INT (clear_zero_bit_copies
);
866 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
867 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
868 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
880 for (i
= 0; i
< 32; i
++)
881 if (remainder
& (1 << i
))
884 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
885 remainder
= (~remainder
) & 0xffffffff;
886 else if (code
== PLUS
&& num_bits_set
> 16)
887 remainder
= (-remainder
) & 0xffffffff;
894 /* Now try and find a way of doing the job in either two or three
896 We start by looking for the largest block of zeros that are aligned on
897 a 2-bit boundary, we then fill up the temps, wrapping around to the
898 top of the word when we drop off the bottom.
899 In the worst case this code should produce no more than four insns. */
902 int best_consecutive_zeros
= 0;
904 for (i
= 0; i
< 32; i
+= 2)
906 int consecutive_zeros
= 0;
908 if (! (remainder
& (3 << i
)))
910 while ((i
< 32) && ! (remainder
& (3 << i
)))
912 consecutive_zeros
+= 2;
915 if (consecutive_zeros
> best_consecutive_zeros
)
917 best_consecutive_zeros
= consecutive_zeros
;
918 best_start
= i
- consecutive_zeros
;
924 /* Now start emitting the insns, starting with the one with the highest
925 bit set: we do this so that the smallest number will be emitted last;
926 this is more likely to be combinable with addressing insns. */
934 if (remainder
& (3 << (i
- 2)))
939 temp1
= remainder
& ((0x0ff << end
)
940 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
946 emit_insn (gen_rtx (SET
, VOIDmode
,
947 new_src
= (subtargets
950 GEN_INT (can_invert
? ~temp1
: temp1
)));
954 else if (code
== MINUS
)
957 emit_insn (gen_rtx (SET
, VOIDmode
,
958 new_src
= (subtargets
961 gen_rtx (code
, mode
, GEN_INT (temp1
),
968 emit_insn (gen_rtx (SET
, VOIDmode
,
974 gen_rtx (code
, mode
, source
,
975 GEN_INT (can_invert
? ~temp1
991 /* Canonicalize a comparison so that we are more likely to recognize it.
992 This can be done for a few constant compares, where we can make the
993 immediate value easier to load. */
995 arm_canonicalize_comparison (code
, op1
)
999 HOST_WIDE_INT i
= INTVAL (*op1
);
1009 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1) - 1)
1010 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1012 *op1
= GEN_INT (i
+1);
1013 return code
== GT
? GE
: LT
;
1019 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1))
1020 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1022 *op1
= GEN_INT (i
-1);
1023 return code
== GE
? GT
: LE
;
1030 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1032 *op1
= GEN_INT (i
+ 1);
1033 return code
== GTU
? GEU
: LTU
;
1040 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1042 *op1
= GEN_INT (i
- 1);
1043 return code
== GEU
? GTU
: LEU
;
1055 /* Handle aggregates that are not laid out in a BLKmode element.
1056 This is a sub-element of RETURN_IN_MEMORY. */
1058 arm_return_in_memory (type
)
1061 if (TREE_CODE (type
) == RECORD_TYPE
)
1065 /* For a struct, we can return in a register if every element was a
1067 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1068 if (TREE_CODE (field
) != FIELD_DECL
1069 || ! DECL_BIT_FIELD_TYPE (field
))
1074 else if (TREE_CODE (type
) == UNION_TYPE
)
1078 /* Unions can be returned in registers if every element is
1079 integral, or can be returned in an integer register. */
1080 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1082 if (TREE_CODE (field
) != FIELD_DECL
1083 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
1084 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
1085 || FLOAT_TYPE_P (TREE_TYPE (field
)))
1090 /* XXX Not sure what should be done for other aggregates, so put them in
1096 legitimate_pic_operand_p (x
)
1099 if (CONSTANT_P (x
) && flag_pic
1100 && (GET_CODE (x
) == SYMBOL_REF
1101 || (GET_CODE (x
) == CONST
1102 && GET_CODE (XEXP (x
, 0)) == PLUS
1103 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
1110 legitimize_pic_address (orig
, mode
, reg
)
1112 enum machine_mode mode
;
1115 if (GET_CODE (orig
) == SYMBOL_REF
)
1117 rtx pic_ref
, address
;
1123 if (reload_in_progress
|| reload_completed
)
1126 reg
= gen_reg_rtx (Pmode
);
1131 #ifdef AOF_ASSEMBLER
1132 /* The AOF assembler can generate relocations for these directly, and
1133 understands that the PIC register has to be added into the offset.
1135 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
1138 address
= gen_reg_rtx (Pmode
);
1142 emit_insn (gen_pic_load_addr (address
, orig
));
1144 pic_ref
= gen_rtx (MEM
, Pmode
,
1145 gen_rtx (PLUS
, Pmode
, pic_offset_table_rtx
, address
));
1146 RTX_UNCHANGING_P (pic_ref
) = 1;
1147 insn
= emit_move_insn (reg
, pic_ref
);
1149 current_function_uses_pic_offset_table
= 1;
1150 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1152 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
, orig
,
1156 else if (GET_CODE (orig
) == CONST
)
1160 if (GET_CODE (XEXP (orig
, 0)) == PLUS
1161 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
1166 if (reload_in_progress
|| reload_completed
)
1169 reg
= gen_reg_rtx (Pmode
);
1172 if (GET_CODE (XEXP (orig
, 0)) == PLUS
)
1174 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
1175 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
1176 base
== reg
? 0 : reg
);
1181 if (GET_CODE (offset
) == CONST_INT
)
1183 /* The base register doesn't really matter, we only want to
1184 test the index for the appropriate mode. */
1185 GO_IF_LEGITIMATE_INDEX (mode
, 0, offset
, win
);
1187 if (! reload_in_progress
&& ! reload_completed
)
1188 offset
= force_reg (Pmode
, offset
);
1193 if (GET_CODE (offset
) == CONST_INT
)
1194 return plus_constant_for_output (base
, INTVAL (offset
));
1197 if (GET_MODE_SIZE (mode
) > 4
1198 && (GET_MODE_CLASS (mode
) == MODE_INT
1199 || TARGET_SOFT_FLOAT
))
1201 emit_insn (gen_addsi3 (reg
, base
, offset
));
1205 return gen_rtx (PLUS
, Pmode
, base
, offset
);
1207 else if (GET_CODE (orig
) == LABEL_REF
)
1208 current_function_uses_pic_offset_table
= 1;
1227 #ifndef AOF_ASSEMBLER
1228 rtx l1
, pic_tmp
, pic_tmp2
, seq
;
1229 rtx global_offset_table
;
1231 if (current_function_uses_pic_offset_table
== 0)
1238 l1
= gen_label_rtx ();
1240 global_offset_table
= gen_rtx (SYMBOL_REF
, Pmode
, "_GLOBAL_OFFSET_TABLE_");
1241 pic_tmp
= gen_rtx (CONST
, VOIDmode
,
1242 gen_rtx (PLUS
, Pmode
,
1243 gen_rtx (LABEL_REF
, VOIDmode
, l1
),
1245 pic_tmp2
= gen_rtx (CONST
, VOIDmode
,
1246 gen_rtx (PLUS
, Pmode
,
1247 global_offset_table
,
1250 pic_rtx
= gen_rtx (CONST
, Pmode
,
1251 gen_rtx (MINUS
, Pmode
, pic_tmp2
, pic_tmp
));
1253 emit_insn (gen_pic_load_addr (pic_offset_table_rtx
, pic_rtx
));
1254 emit_jump_insn (gen_pic_add_dot_plus_eight(l1
, pic_offset_table_rtx
));
1257 seq
= gen_sequence ();
1259 emit_insn_after (seq
, get_insns ());
1261 /* Need to emit this whether or not we obey regdecls,
1262 since setjmp/longjmp can cause life info to screw up. */
1263 emit_insn (gen_rtx (USE
, VOIDmode
, pic_offset_table_rtx
));
1264 #endif /* AOF_ASSEMBLER */
/* Nonzero if X is a REG, or a SUBREG wrapping a REG.  */
#define REG_OR_SUBREG_REG(X) \
  (GET_CODE (X) == REG \
   || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))

/* The underlying REG of X, which must satisfy REG_OR_SUBREG_REG: either
   X itself, or the REG inside its SUBREG.  */
#define REG_OR_SUBREG_RTX(X) \
  (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))

/* Nonzero if X is one of the registers used to address the frame.  */
#define ARM_FRAME_RTX(X) \
  ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
   || (X) == arg_pointer_rtx)
1279 arm_rtx_costs (x
, code
, outer_code
)
1281 enum rtx_code code
, outer_code
;
1283 enum machine_mode mode
= GET_MODE (x
);
1284 enum rtx_code subcode
;
1290 /* Memory costs quite a lot for the first word, but subsequent words
1291 load at the equivalent of a single insn each. */
1292 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1293 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1300 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1307 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1309 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1310 + ((GET_CODE (XEXP (x
, 0)) == REG
1311 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1312 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1314 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1315 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1316 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1318 + ((GET_CODE (XEXP (x
, 1)) == REG
1319 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1320 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1321 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1326 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1327 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1328 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1329 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1332 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1333 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1334 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1335 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1337 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1338 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1339 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1342 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1343 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1344 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1345 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1346 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1347 || subcode
== ROTATE
|| subcode
== ROTATERT
1349 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1350 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1351 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1352 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1353 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1354 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1355 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1360 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1361 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1362 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1363 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1364 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1368 case AND
: case XOR
: case IOR
:
1371 /* Normally the frame registers will be spilt into reg+const during
1372 reload, so it is a bad idea to combine them with other instructions,
1373 since then they might not be moved outside of loops. As a compromise
1374 we allow integration with ops that have a constant as their second
1376 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1377 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1378 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1379 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1380 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1384 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1385 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1386 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1387 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1390 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1391 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1392 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1393 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1394 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1397 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1398 return (1 + extra_cost
1399 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1400 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1401 || subcode
== ROTATE
|| subcode
== ROTATERT
1403 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1404 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1405 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1406 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1407 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1408 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1414 if (arm_fast_multiply
&& mode
== DImode
1415 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1416 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1417 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1420 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1424 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1426 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1427 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1428 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1430 int booth_unit_size
= (arm_fast_multiply
? 8 : 2);
1432 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1434 i
>>= booth_unit_size
;
1441 return ((arm_fast_multiply
? 8 : 30)
1442 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1443 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1446 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1447 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1451 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1453 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1456 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1464 return 4 + (mode
== DImode
? 4 : 0);
1467 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1468 return (4 + (mode
== DImode
? 4 : 0)
1469 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1472 switch (GET_MODE (XEXP (x
, 0)))
1475 return (1 + (mode
== DImode
? 4 : 0)
1476 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1479 return (4 + (mode
== DImode
? 4 : 0)
1480 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1483 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1493 arm_adjust_cost (insn
, link
, dep
, cost
)
1501 if ((i_pat
= single_set (insn
)) != NULL
1502 && GET_CODE (SET_SRC (i_pat
)) == MEM
1503 && (d_pat
= single_set (dep
)) != NULL
1504 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
1506 /* This is a load after a store, there is no conflict if the load reads
1507 from a cached area. Assume that loads from the stack, and from the
1508 constant pool are cached, and that others will miss. This is a
1511 /* debug_rtx (insn);
1514 fprintf (stderr, "costs %d\n", cost); */
1516 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat
), 0))
1517 || reg_mentioned_p (stack_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1518 || reg_mentioned_p (frame_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1519 || reg_mentioned_p (hard_frame_pointer_rtx
,
1520 XEXP (SET_SRC (i_pat
), 0)))
1522 /* fprintf (stderr, "***** Now 1\n"); */
1530 /* This code has been fixed for cross compilation. */
1532 static int fpa_consts_inited
= 0;
1534 char *strings_fpa
[8] = {
1536 "4", "5", "0.5", "10"
1539 static REAL_VALUE_TYPE values_fpa
[8];
1547 for (i
= 0; i
< 8; i
++)
1549 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1553 fpa_consts_inited
= 1;
1556 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1559 const_double_rtx_ok_for_fpu (x
)
1565 if (!fpa_consts_inited
)
1568 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1569 if (REAL_VALUE_MINUS_ZERO (r
))
1572 for (i
= 0; i
< 8; i
++)
1573 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1579 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1582 neg_const_double_rtx_ok_for_fpu (x
)
1588 if (!fpa_consts_inited
)
1591 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1592 r
= REAL_VALUE_NEGATE (r
);
1593 if (REAL_VALUE_MINUS_ZERO (r
))
1596 for (i
= 0; i
< 8; i
++)
1597 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1603 /* Predicates for `match_operand' and `match_operator'. */
1605 /* s_register_operand is the same as register_operand, but it doesn't accept
1608 This function exists because at the time it was put in it led to better
1609 code. SUBREG(MEM) always needs a reload in the places where
1610 s_register_operand is used, and this seemed to lead to excessive
1614 s_register_operand (op
, mode
)
1616 enum machine_mode mode
;
1618 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1621 if (GET_CODE (op
) == SUBREG
)
1622 op
= SUBREG_REG (op
);
1624 /* We don't consider registers whose class is NO_REGS
1625 to be a register operand. */
1626 return (GET_CODE (op
) == REG
1627 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1628 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1631 /* Only accept reg, subreg(reg), const_int. */
1634 reg_or_int_operand (op
, mode
)
1636 enum machine_mode mode
;
1638 if (GET_CODE (op
) == CONST_INT
)
1641 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1644 if (GET_CODE (op
) == SUBREG
)
1645 op
= SUBREG_REG (op
);
1647 /* We don't consider registers whose class is NO_REGS
1648 to be a register operand. */
1649 return (GET_CODE (op
) == REG
1650 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1651 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1654 /* Return 1 if OP is an item in memory, given that we are in reload. */
1657 reload_memory_operand (op
, mode
)
1659 enum machine_mode mode
;
1661 int regno
= true_regnum (op
);
1663 return (! CONSTANT_P (op
)
1665 || (GET_CODE (op
) == REG
1666 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1669 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1672 arm_rhs_operand (op
, mode
)
1674 enum machine_mode mode
;
1676 return (s_register_operand (op
, mode
)
1677 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1680 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1684 arm_rhsm_operand (op
, mode
)
1686 enum machine_mode mode
;
1688 return (s_register_operand (op
, mode
)
1689 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1690 || memory_operand (op
, mode
));
1693 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1694 constant that is valid when negated. */
1697 arm_add_operand (op
, mode
)
1699 enum machine_mode mode
;
1701 return (s_register_operand (op
, mode
)
1702 || (GET_CODE (op
) == CONST_INT
1703 && (const_ok_for_arm (INTVAL (op
))
1704 || const_ok_for_arm (-INTVAL (op
)))));
1708 arm_not_operand (op
, mode
)
1710 enum machine_mode mode
;
1712 return (s_register_operand (op
, mode
)
1713 || (GET_CODE (op
) == CONST_INT
1714 && (const_ok_for_arm (INTVAL (op
))
1715 || const_ok_for_arm (~INTVAL (op
)))));
1718 /* Return TRUE if the operand is a memory reference which contains an
1719 offsettable address. */
1721 offsettable_memory_operand (op
, mode
)
1723 enum machine_mode mode
;
1725 if (mode
== VOIDmode
)
1726 mode
= GET_MODE (op
);
1728 return (mode
== GET_MODE (op
)
1729 && GET_CODE (op
) == MEM
1730 && offsettable_address_p (reload_completed
| reload_in_progress
,
1731 mode
, XEXP (op
, 0)));
1734 /* Return TRUE if the operand is a memory reference which is, or can be
1735 made word aligned by adjusting the offset. */
1737 alignable_memory_operand (op
, mode
)
1739 enum machine_mode mode
;
1743 if (mode
== VOIDmode
)
1744 mode
= GET_MODE (op
);
1746 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
1751 return ((GET_CODE (reg
= op
) == REG
1752 || (GET_CODE (op
) == SUBREG
1753 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
1754 || (GET_CODE (op
) == PLUS
1755 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1756 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
1757 || (GET_CODE (XEXP (op
, 0)) == SUBREG
1758 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
1759 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
1762 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1765 fpu_rhs_operand (op
, mode
)
1767 enum machine_mode mode
;
1769 if (s_register_operand (op
, mode
))
1771 else if (GET_CODE (op
) == CONST_DOUBLE
)
1772 return (const_double_rtx_ok_for_fpu (op
));
1778 fpu_add_operand (op
, mode
)
1780 enum machine_mode mode
;
1782 if (s_register_operand (op
, mode
))
1784 else if (GET_CODE (op
) == CONST_DOUBLE
)
1785 return (const_double_rtx_ok_for_fpu (op
)
1786 || neg_const_double_rtx_ok_for_fpu (op
));
1791 /* Return nonzero if OP is a constant power of two. */
1794 power_of_two_operand (op
, mode
)
1796 enum machine_mode mode
;
1798 if (GET_CODE (op
) == CONST_INT
)
1800 HOST_WIDE_INT value
= INTVAL(op
);
1801 return value
!= 0 && (value
& (value
- 1)) == 0;
1806 /* Return TRUE for a valid operand of a DImode operation.
1807 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1808 Note that this disallows MEM(REG+REG), but allows
1809 MEM(PRE/POST_INC/DEC(REG)). */
1812 di_operand (op
, mode
)
1814 enum machine_mode mode
;
1816 if (s_register_operand (op
, mode
))
1819 switch (GET_CODE (op
))
1826 return memory_address_p (DImode
, XEXP (op
, 0));
1833 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1834 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1835 Note that this disallows MEM(REG+REG), but allows
1836 MEM(PRE/POST_INC/DEC(REG)). */
1839 soft_df_operand (op
, mode
)
1841 enum machine_mode mode
;
1843 if (s_register_operand (op
, mode
))
1846 switch (GET_CODE (op
))
1852 return memory_address_p (DFmode
, XEXP (op
, 0));
1859 /* Return TRUE for valid index operands. */
1862 index_operand (op
, mode
)
1864 enum machine_mode mode
;
1866 return (s_register_operand(op
, mode
)
1867 || (immediate_operand (op
, mode
)
1868 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1871 /* Return TRUE for valid shifts by a constant. This also accepts any
1872 power of two on the (somewhat overly relaxed) assumption that the
1873 shift operator in this case was a mult. */
1876 const_shift_operand (op
, mode
)
1878 enum machine_mode mode
;
1880 return (power_of_two_operand (op
, mode
)
1881 || (immediate_operand (op
, mode
)
1882 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1885 /* Return TRUE for arithmetic operators which can be combined with a multiply
1889 shiftable_operator (x
, mode
)
1891 enum machine_mode mode
;
1893 if (GET_MODE (x
) != mode
)
1897 enum rtx_code code
= GET_CODE (x
);
1899 return (code
== PLUS
|| code
== MINUS
1900 || code
== IOR
|| code
== XOR
|| code
== AND
);
1904 /* Return TRUE for shift operators. */
1907 shift_operator (x
, mode
)
1909 enum machine_mode mode
;
1911 if (GET_MODE (x
) != mode
)
1915 enum rtx_code code
= GET_CODE (x
);
1918 return power_of_two_operand (XEXP (x
, 1));
1920 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
1921 || code
== ROTATERT
);
1925 int equality_operator (x
, mode
)
1927 enum machine_mode mode
;
1929 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
1932 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
1935 minmax_operator (x
, mode
)
1937 enum machine_mode mode
;
1939 enum rtx_code code
= GET_CODE (x
);
1941 if (GET_MODE (x
) != mode
)
1944 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
1947 /* return TRUE if x is EQ or NE */
1949 /* Return TRUE if this is the condition code register, if we aren't given
1950 a mode, accept any class CCmode register */
1953 cc_register (x
, mode
)
1955 enum machine_mode mode
;
1957 if (mode
== VOIDmode
)
1959 mode
= GET_MODE (x
);
1960 if (GET_MODE_CLASS (mode
) != MODE_CC
)
1964 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1970 /* Return TRUE if this is the condition code register, if we aren't given
1971 a mode, accept any class CCmode register which indicates a dominance
1975 dominant_cc_register (x
, mode
)
1977 enum machine_mode mode
;
1979 if (mode
== VOIDmode
)
1981 mode
= GET_MODE (x
);
1982 if (GET_MODE_CLASS (mode
) != MODE_CC
)
1986 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
1987 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
1988 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
1989 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
1990 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
1993 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1999 /* Return TRUE if X references a SYMBOL_REF. */
2001 symbol_mentioned_p (x
)
2007 if (GET_CODE (x
) == SYMBOL_REF
)
2010 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2011 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2017 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2018 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
2021 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
2028 /* Return TRUE if X references a LABEL_REF. */
2030 label_mentioned_p (x
)
2036 if (GET_CODE (x
) == LABEL_REF
)
2039 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2040 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2046 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2047 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
2050 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
2061 enum rtx_code code
= GET_CODE (x
);
2065 else if (code
== SMIN
)
2067 else if (code
== UMIN
)
2069 else if (code
== UMAX
)
2075 /* Return 1 if memory locations are adjacent */
2078 adjacent_mem_locations (a
, b
)
2081 int val0
= 0, val1
= 0;
2084 if ((GET_CODE (XEXP (a
, 0)) == REG
2085 || (GET_CODE (XEXP (a
, 0)) == PLUS
2086 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
2087 && (GET_CODE (XEXP (b
, 0)) == REG
2088 || (GET_CODE (XEXP (b
, 0)) == PLUS
2089 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
2091 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
2093 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
2094 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
2097 reg0
= REGNO (XEXP (a
, 0));
2098 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
2100 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
2101 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
2104 reg1
= REGNO (XEXP (b
, 0));
2105 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
2110 /* Return 1 if OP is a load multiple operation. It is known to be
2111 parallel and the first section will be tested. */
2114 load_multiple_operation (op
, mode
)
2116 enum machine_mode mode
;
2118 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2121 HOST_WIDE_INT i
= 1, base
= 0;
2125 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2128 /* Check to see if this might be a write-back */
2129 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2134 /* Now check it more carefully */
2135 if (GET_CODE (SET_DEST (elt
)) != REG
2136 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2137 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2138 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2139 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2140 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2141 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2142 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2143 != REGNO (SET_DEST (elt
)))
2149 /* Perform a quick check so we don't blow up below. */
2151 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2152 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
2153 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
2156 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
2157 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
2159 for (; i
< count
; i
++)
2161 rtx elt
= XVECEXP (op
, 0, i
);
2163 if (GET_CODE (elt
) != SET
2164 || GET_CODE (SET_DEST (elt
)) != REG
2165 || GET_MODE (SET_DEST (elt
)) != SImode
2166 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
2167 || GET_CODE (SET_SRC (elt
)) != MEM
2168 || GET_MODE (SET_SRC (elt
)) != SImode
2169 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
2170 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
2171 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
2172 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
2179 /* Return 1 if OP is a store multiple operation. It is known to be
2180 parallel and the first section will be tested. */
2183 store_multiple_operation (op
, mode
)
2185 enum machine_mode mode
;
2187 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2190 HOST_WIDE_INT i
= 1, base
= 0;
2194 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2197 /* Check to see if this might be a write-back */
2198 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2203 /* Now check it more carefully */
2204 if (GET_CODE (SET_DEST (elt
)) != REG
2205 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2206 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2207 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2208 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2209 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2210 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2211 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2212 != REGNO (SET_DEST (elt
)))
2218 /* Perform a quick check so we don't blow up below. */
2220 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2221 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
2222 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
2225 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
2226 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
2228 for (; i
< count
; i
++)
2230 elt
= XVECEXP (op
, 0, i
);
2232 if (GET_CODE (elt
) != SET
2233 || GET_CODE (SET_SRC (elt
)) != REG
2234 || GET_MODE (SET_SRC (elt
)) != SImode
2235 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2236 || GET_CODE (SET_DEST (elt
)) != MEM
2237 || GET_MODE (SET_DEST (elt
)) != SImode
2238 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2239 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2240 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2241 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2249 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2254 HOST_WIDE_INT
*load_offset
;
2256 int unsorted_regs
[4];
2257 HOST_WIDE_INT unsorted_offsets
[4];
2262 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2263 extended if required. */
2264 if (nops
< 2 || nops
> 4)
2267 /* Loop over the operands and check that the memory references are
2268 suitable (ie immediate offsets from the same base register). At
2269 the same time, extract the target register, and the memory
2271 for (i
= 0; i
< nops
; i
++)
2276 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2279 /* Don't reorder volatile memory references; it doesn't seem worth
2280 looking for the case where the order is ok anyway. */
2281 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2284 offset
= const0_rtx
;
2286 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2287 || (GET_CODE (reg
) == SUBREG
2288 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2289 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2290 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2292 || (GET_CODE (reg
) == SUBREG
2293 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2294 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2299 base_reg
= REGNO(reg
);
2300 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2301 ? REGNO (operands
[i
])
2302 : REGNO (SUBREG_REG (operands
[i
])));
2307 if (base_reg
!= REGNO (reg
))
2308 /* Not addressed from the same base register. */
2311 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2312 ? REGNO (operands
[i
])
2313 : REGNO (SUBREG_REG (operands
[i
])));
2314 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2318 /* If it isn't an integer register, or if it overwrites the
2319 base register but isn't the last insn in the list, then
2320 we can't do this. */
2321 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2322 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2325 unsorted_offsets
[i
] = INTVAL (offset
);
2328 /* Not a suitable memory address. */
2332 /* All the useful information has now been extracted from the
2333 operands into unsorted_regs and unsorted_offsets; additionally,
2334 order[0] has been set to the lowest numbered register in the
2335 list. Sort the registers into order, and check that the memory
2336 offsets are ascending and adjacent. */
2338 for (i
= 1; i
< nops
; i
++)
2342 order
[i
] = order
[i
- 1];
2343 for (j
= 0; j
< nops
; j
++)
2344 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2345 && (order
[i
] == order
[i
- 1]
2346 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2349 /* Have we found a suitable register? if not, one must be used more
2351 if (order
[i
] == order
[i
- 1])
2354 /* Is the memory address adjacent and ascending? */
2355 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2363 for (i
= 0; i
< nops
; i
++)
2364 regs
[i
] = unsorted_regs
[order
[i
]];
2366 *load_offset
= unsorted_offsets
[order
[0]];
2369 if (unsorted_offsets
[order
[0]] == 0)
2370 return 1; /* ldmia */
2372 if (unsorted_offsets
[order
[0]] == 4)
2373 return 2; /* ldmib */
2375 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2376 return 3; /* ldmda */
2378 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2379 return 4; /* ldmdb */
2381 /* Can't do it without setting up the offset, only do this if it takes
2382 no more than one insn. */
2383 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2384 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2388 emit_ldm_seq (operands
, nops
)
2394 HOST_WIDE_INT offset
;
2398 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2401 strcpy (buf
, "ldm%?ia\t");
2405 strcpy (buf
, "ldm%?ib\t");
2409 strcpy (buf
, "ldm%?da\t");
2413 strcpy (buf
, "ldm%?db\t");
2418 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2419 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2422 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2423 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2425 output_asm_insn (buf
, operands
);
2427 strcpy (buf
, "ldm%?ia\t");
2434 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2435 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2437 for (i
= 1; i
< nops
; i
++)
2438 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2439 reg_names
[regs
[i
]]);
2441 strcat (buf
, "}\t%@ phole ldm");
2443 output_asm_insn (buf
, operands
);
2448 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2453 HOST_WIDE_INT
*load_offset
;
2455 int unsorted_regs
[4];
2456 HOST_WIDE_INT unsorted_offsets
[4];
2461 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2462 extended if required. */
2463 if (nops
< 2 || nops
> 4)
2466 /* Loop over the operands and check that the memory references are
2467 suitable (ie immediate offsets from the same base register). At
2468 the same time, extract the target register, and the memory
2470 for (i
= 0; i
< nops
; i
++)
2475 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2478 /* Don't reorder volatile memory references; it doesn't seem worth
2479 looking for the case where the order is ok anyway. */
2480 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2483 offset
= const0_rtx
;
2485 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2486 || (GET_CODE (reg
) == SUBREG
2487 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2488 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2489 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2491 || (GET_CODE (reg
) == SUBREG
2492 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2493 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2498 base_reg
= REGNO(reg
);
2499 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2500 ? REGNO (operands
[i
])
2501 : REGNO (SUBREG_REG (operands
[i
])));
2506 if (base_reg
!= REGNO (reg
))
2507 /* Not addressed from the same base register. */
2510 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2511 ? REGNO (operands
[i
])
2512 : REGNO (SUBREG_REG (operands
[i
])));
2513 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2517 /* If it isn't an integer register, then we can't do this. */
2518 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2521 unsorted_offsets
[i
] = INTVAL (offset
);
2524 /* Not a suitable memory address. */
2528 /* All the useful information has now been extracted from the
2529 operands into unsorted_regs and unsorted_offsets; additionally,
2530 order[0] has been set to the lowest numbered register in the
2531 list. Sort the registers into order, and check that the memory
2532 offsets are ascending and adjacent. */
2534 for (i
= 1; i
< nops
; i
++)
2538 order
[i
] = order
[i
- 1];
2539 for (j
= 0; j
< nops
; j
++)
2540 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2541 && (order
[i
] == order
[i
- 1]
2542 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2545 /* Have we found a suitable register? if not, one must be used more
2547 if (order
[i
] == order
[i
- 1])
2550 /* Is the memory address adjacent and ascending? */
2551 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2559 for (i
= 0; i
< nops
; i
++)
2560 regs
[i
] = unsorted_regs
[order
[i
]];
2562 *load_offset
= unsorted_offsets
[order
[0]];
2565 if (unsorted_offsets
[order
[0]] == 0)
2566 return 1; /* stmia */
2568 if (unsorted_offsets
[order
[0]] == 4)
2569 return 2; /* stmib */
2571 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2572 return 3; /* stmda */
2574 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2575 return 4; /* stmdb */
2581 emit_stm_seq (operands
, nops
)
2587 HOST_WIDE_INT offset
;
2591 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2594 strcpy (buf
, "stm%?ia\t");
2598 strcpy (buf
, "stm%?ib\t");
2602 strcpy (buf
, "stm%?da\t");
2606 strcpy (buf
, "stm%?db\t");
2613 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2614 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2616 for (i
= 1; i
< nops
; i
++)
2617 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2618 reg_names
[regs
[i
]]);
2620 strcat (buf
, "}\t%@ phole stm");
2622 output_asm_insn (buf
, operands
);
2627 multi_register_push (op
, mode
)
2629 enum machine_mode mode
;
2631 if (GET_CODE (op
) != PARALLEL
2632 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2633 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
2634 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
2641 /* Routines for use with attributes */
2643 /* Return nonzero if ATTR is a valid attribute for DECL.
2644 ATTRIBUTES are any existing attributes and ARGS are the arguments
2647 Supported attributes:
2649 naked: don't output any prologue or epilogue code, the user is assumed
2650 to do the right thing. */
2653 arm_valid_machine_decl_attribute (decl
, attributes
, attr
, args
)
2659 if (args
!= NULL_TREE
)
2662 if (is_attribute_p ("naked", attr
))
2663 return TREE_CODE (decl
) == FUNCTION_DECL
;
2667 /* Return non-zero if FUNC is a naked function. */
2670 arm_naked_function_p (func
)
2675 if (TREE_CODE (func
) != FUNCTION_DECL
)
2678 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
2679 return a
!= NULL_TREE
;
2682 /* Routines for use in generating RTL */
2685 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
)
2694 int sign
= up
? 1 : -1;
2696 result
= gen_rtx (PARALLEL
, VOIDmode
,
2697 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2700 XVECEXP (result
, 0, 0)
2701 = gen_rtx (SET
, GET_MODE (from
), from
,
2702 plus_constant (from
, count
* 4 * sign
));
2707 for (j
= 0; i
< count
; i
++, j
++)
2709 XVECEXP (result
, 0, i
)
2710 = gen_rtx (SET
, VOIDmode
, gen_rtx (REG
, SImode
, base_regno
+ j
),
2711 gen_rtx (MEM
, SImode
,
2712 plus_constant (from
, j
* 4 * sign
)));
2716 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
2722 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
)
2731 int sign
= up
? 1 : -1;
2733 result
= gen_rtx (PARALLEL
, VOIDmode
,
2734 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2737 XVECEXP (result
, 0, 0)
2738 = gen_rtx (SET
, GET_MODE (to
), to
,
2739 plus_constant (to
, count
* 4 * sign
));
2744 for (j
= 0; i
< count
; i
++, j
++)
2746 XVECEXP (result
, 0, i
)
2747 = gen_rtx (SET
, VOIDmode
,
2748 gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
)),
2749 gen_rtx (REG
, SImode
, base_regno
+ j
));
2753 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
2759 arm_gen_movstrqi (operands
)
2762 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
2765 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
2766 rtx part_bytes_reg
= NULL
;
2767 extern int optimize
;
2769 if (GET_CODE (operands
[2]) != CONST_INT
2770 || GET_CODE (operands
[3]) != CONST_INT
2771 || INTVAL (operands
[2]) > 64
2772 || INTVAL (operands
[3]) & 3)
2775 st_dst
= XEXP (operands
[0], 0);
2776 st_src
= XEXP (operands
[1], 0);
2777 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2778 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2780 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2781 out_words_to_go
= INTVAL (operands
[2]) / 4;
2782 last_bytes
= INTVAL (operands
[2]) & 3;
2784 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2785 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2787 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2789 if (in_words_to_go
> 4)
2790 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
));
2792 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
2795 if (out_words_to_go
)
2797 if (out_words_to_go
> 4)
2798 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
));
2799 else if (out_words_to_go
!= 1)
2800 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
2806 emit_move_insn (gen_rtx (MEM
, SImode
, dst
),
2807 gen_rtx (REG
, SImode
, 0));
2808 if (last_bytes
!= 0)
2809 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2813 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2814 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2817 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2818 if (out_words_to_go
)
2822 emit_move_insn (sreg
= gen_reg_rtx (SImode
), gen_rtx (MEM
, SImode
, src
));
2823 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2824 emit_move_insn (gen_rtx (MEM
, SImode
, dst
), sreg
);
2825 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2828 if (in_words_to_go
) /* Sanity check */
2834 if (in_words_to_go
< 0)
2837 part_bytes_reg
= copy_to_mode_reg (SImode
, gen_rtx (MEM
, SImode
, src
));
2840 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2842 rtx tmp
= gen_reg_rtx (SImode
);
2844 if (part_bytes_reg
== NULL
)
2847 /* The bytes we want are in the top end of the word */
2848 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2849 GEN_INT (8 * (4 - last_bytes
))));
2850 part_bytes_reg
= tmp
;
2854 emit_move_insn (gen_rtx (MEM
, QImode
,
2855 plus_constant (dst
, last_bytes
- 1)),
2856 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2859 tmp
= gen_reg_rtx (SImode
);
2860 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2861 part_bytes_reg
= tmp
;
2870 if (part_bytes_reg
== NULL
)
2873 emit_move_insn (gen_rtx (MEM
, QImode
, dst
),
2874 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2877 rtx tmp
= gen_reg_rtx (SImode
);
2879 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
2880 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2881 part_bytes_reg
= tmp
;
2889 /* Generate a memory reference for a half word, such that it will be loaded
2890 into the top 16 bits of the word. We can assume that the address is
2891 known to be alignable and of the form reg, or plus (reg, const). */
2893 gen_rotated_half_load (memref
)
2896 HOST_WIDE_INT offset
= 0;
2897 rtx base
= XEXP (memref
, 0);
2899 if (GET_CODE (base
) == PLUS
)
2901 offset
= INTVAL (XEXP (base
, 1));
2902 base
= XEXP (base
, 0);
2905 /* If we aren't allowed to generate unalligned addresses, then fail. */
2906 if (TARGET_SHORT_BY_BYTES
2907 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
2910 base
= gen_rtx (MEM
, SImode
, plus_constant (base
, offset
& ~2));
2912 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
2915 return gen_rtx (ROTATE
, SImode
, base
, GEN_INT (16));
2918 static enum machine_mode
2919 select_dominance_cc_mode (op
, x
, y
, cond_or
)
2923 HOST_WIDE_INT cond_or
;
2925 enum rtx_code cond1
, cond2
;
2928 /* Currently we will probably get the wrong result if the individual
2929 comparisons are not simple. This also ensures that it is safe to
2930 reverse a comparions if necessary. */
2931 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
2933 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
2938 cond1
= reverse_condition (cond1
);
2940 /* If the comparisons are not equal, and one doesn't dominate the other,
2941 then we can't do this. */
2943 && ! comparison_dominates_p (cond1
, cond2
)
2944 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
2949 enum rtx_code temp
= cond1
;
2957 if (cond2
== EQ
|| ! cond_or
)
2962 case LE
: return CC_DLEmode
;
2963 case LEU
: return CC_DLEUmode
;
2964 case GE
: return CC_DGEmode
;
2965 case GEU
: return CC_DGEUmode
;
2971 if (cond2
== LT
|| ! cond_or
)
2980 if (cond2
== GT
|| ! cond_or
)
2989 if (cond2
== LTU
|| ! cond_or
)
2998 if (cond2
== GTU
|| ! cond_or
)
3006 /* The remaining cases only occur when both comparisons are the
3028 arm_select_cc_mode (op
, x
, y
)
3033 /* All floating point compares return CCFP if it is an equality
3034 comparison, and CCFPE otherwise. */
3035 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3036 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
3038 /* A compare with a shifted operand. Because of canonicalization, the
3039 comparison will have to be swapped when we emit the assembler. */
3040 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
3041 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3042 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
3043 || GET_CODE (x
) == ROTATERT
))
3046 /* This is a special case, that is used by combine to allow a
3047 comparison of a shifted byte load to be split into a zero-extend
3048 followed by a comparison of the shifted integer (only valid for
3049 equalities and unsigned inequalities). */
3050 if (GET_MODE (x
) == SImode
3051 && GET_CODE (x
) == ASHIFT
3052 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
3053 && GET_CODE (XEXP (x
, 0)) == SUBREG
3054 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
3055 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
3056 && (op
== EQ
|| op
== NE
3057 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
3058 && GET_CODE (y
) == CONST_INT
)
3061 /* An operation that sets the condition codes as a side-effect, the
3062 V flag is not set correctly, so we can only use comparisons where
3063 this doesn't matter. (For LT and GE we can use "mi" and "pl"
3065 if (GET_MODE (x
) == SImode
3067 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
3068 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
3069 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
3070 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
3071 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
3072 || GET_CODE (x
) == LSHIFTRT
3073 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3074 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
3077 /* A construct for a conditional compare, if the false arm contains
3078 0, then both conditions must be true, otherwise either condition
3079 must be true. Not all conditions are possible, so CCmode is
3080 returned if it can't be done. */
3081 if (GET_CODE (x
) == IF_THEN_ELSE
3082 && (XEXP (x
, 2) == const0_rtx
3083 || XEXP (x
, 2) == const1_rtx
)
3084 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
3085 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
3086 return select_dominance_cc_mode (op
, XEXP (x
, 0), XEXP (x
, 1),
3087 INTVAL (XEXP (x
, 2)));
3089 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
3092 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
3093 && GET_CODE (x
) == PLUS
3094 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
3100 /* X and Y are two things to compare using CODE. Emit the compare insn and
3101 return the rtx for register 0 in the proper mode. FP means this is a
3102 floating point compare: I don't think that it is needed on the arm. */
3105 gen_compare_reg (code
, x
, y
, fp
)
3109 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
3110 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
3112 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
3113 gen_rtx (COMPARE
, mode
, x
, y
)));
3119 arm_reload_in_hi (operands
)
3122 rtx base
= find_replacement (&XEXP (operands
[1], 0));
3124 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
3125 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
3126 gen_rtx (MEM
, QImode
,
3127 plus_constant (base
, 1))));
3128 if (BYTES_BIG_ENDIAN
)
3129 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3131 gen_rtx (IOR
, SImode
,
3132 gen_rtx (ASHIFT
, SImode
,
3133 gen_rtx (SUBREG
, SImode
,
3138 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3140 gen_rtx (IOR
, SImode
,
3141 gen_rtx (ASHIFT
, SImode
,
3144 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
3148 arm_reload_out_hi (operands
)
3151 rtx base
= find_replacement (&XEXP (operands
[0], 0));
3153 if (BYTES_BIG_ENDIAN
)
3155 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3156 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3157 emit_insn (gen_lshrsi3 (operands
[2],
3158 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3160 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3161 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3165 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3166 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3167 emit_insn (gen_lshrsi3 (operands
[2],
3168 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3170 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3171 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3175 /* Routines for manipulation of the constant pool. */
3176 /* This is unashamedly hacked from the version in sh.c, since the problem is
3177 extremely similar. */
3179 /* Arm instructions cannot load a large constant into a register,
3180 constants have to come from a pc relative load. The reference of a pc
3181 relative load instruction must be less than 1k in front of the instruction.
3182 This means that we often have to dump a constant inside a function, and
3183 generate code to branch around it.
3185 It is important to minimize this, since the branches will slow things
3186 down and make things bigger.
3188 Worst case code looks like:
3204 We fix this by performing a scan before scheduling, which notices which
3205 instructions need to have their operands fetched from the constant table
3206 and builds the table.
3211 scan, find an instruction which needs a pcrel move. Look forward, find the
3212 last barrier which is within MAX_COUNT bytes of the requirement.
3213 If there isn't one, make one. Process all the instructions between
3214 the find and the barrier.
3216 In the above example, we can tell that L3 is within 1k of L1, so
3217 the first move can be shrunk from the 2 insn+constant sequence into
3218 just 1 insn, and the constant moved to L3 to make:
3229 Then the second move becomes the target for the shortening process.
3235 rtx value
; /* Value in table */
3236 HOST_WIDE_INT next_offset
;
3237 enum machine_mode mode
; /* Mode of value */
3240 /* The maximum number of constants that can fit into one pool, since
3241 the pc relative range is 0...1020 bytes and constants are at least 4
3244 #define MAX_POOL_SIZE (1020/4)
3245 static pool_node pool_vector
[MAX_POOL_SIZE
];
3246 static int pool_size
;
3247 static rtx pool_vector_label
;
3249 /* Add a constant to the pool and return its label. */
3250 static HOST_WIDE_INT
3251 add_constant (x
, mode
)
3253 enum machine_mode mode
;
3257 HOST_WIDE_INT offset
;
3259 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3260 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3261 x
= get_pool_constant (XEXP (x
, 0));
3262 #ifndef AOF_ASSEMBLER
3263 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
3264 x
= XVECEXP (x
, 0, 0);
3267 #ifdef AOF_ASSEMBLER
3268 /* PIC Symbol references need to be converted into offsets into the
3270 if (flag_pic
&& GET_CODE (x
) == SYMBOL_REF
)
3271 x
= aof_pic_entry (x
);
3272 #endif /* AOF_ASSEMBLER */
3274 /* First see if we've already got it */
3275 for (i
= 0; i
< pool_size
; i
++)
3277 if (GET_CODE (x
) == pool_vector
[i
].value
->code
3278 && mode
== pool_vector
[i
].mode
)
3280 if (GET_CODE (x
) == CODE_LABEL
)
3282 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
3285 if (rtx_equal_p (x
, pool_vector
[i
].value
))
3286 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
3290 /* Need a new one */
3291 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
3294 pool_vector_label
= gen_label_rtx ();
3296 pool_vector
[pool_size
].next_offset
3297 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
3299 pool_vector
[pool_size
].value
= x
;
3300 pool_vector
[pool_size
].mode
= mode
;
3305 /* Output the literal table */
3312 scan
= emit_label_after (gen_label_rtx (), scan
);
3313 scan
= emit_insn_after (gen_align_4 (), scan
);
3314 scan
= emit_label_after (pool_vector_label
, scan
);
3316 for (i
= 0; i
< pool_size
; i
++)
3318 pool_node
*p
= pool_vector
+ i
;
3320 switch (GET_MODE_SIZE (p
->mode
))
3323 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
3327 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
3336 scan
= emit_insn_after (gen_consttable_end (), scan
);
3337 scan
= emit_barrier_after (scan
);
3341 /* Non zero if the src operand needs to be fixed up */
3343 fixit (src
, mode
, destreg
)
3345 enum machine_mode mode
;
3348 if (CONSTANT_P (src
))
3350 if (GET_CODE (src
) == CONST_INT
)
3351 return (! const_ok_for_arm (INTVAL (src
))
3352 && ! const_ok_for_arm (~INTVAL (src
)));
3353 if (GET_CODE (src
) == CONST_DOUBLE
)
3354 return (GET_MODE (src
) == VOIDmode
3356 || (! const_double_rtx_ok_for_fpu (src
)
3357 && ! neg_const_double_rtx_ok_for_fpu (src
)));
3358 return symbol_mentioned_p (src
);
3360 #ifndef AOF_ASSEMBLER
3361 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
3365 return (mode
== SImode
&& GET_CODE (src
) == MEM
3366 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
3367 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
3370 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
3372 find_barrier (from
, max_count
)
3377 rtx found_barrier
= 0;
3379 while (from
&& count
< max_count
)
3381 if (GET_CODE (from
) == BARRIER
)
3382 found_barrier
= from
;
3384 /* Count the length of this insn */
3385 if (GET_CODE (from
) == INSN
3386 && GET_CODE (PATTERN (from
)) == SET
3387 && CONSTANT_P (SET_SRC (PATTERN (from
)))
3388 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
3390 rtx src
= SET_SRC (PATTERN (from
));
3394 count
+= get_attr_length (from
);
3396 from
= NEXT_INSN (from
);
3401 /* We didn't find a barrier in time to
3402 dump our stuff, so we'll make one */
3403 rtx label
= gen_label_rtx ();
3406 from
= PREV_INSN (from
);
3408 from
= get_last_insn ();
3410 /* Walk back to be just before any jump */
3411 while (GET_CODE (from
) == JUMP_INSN
3412 || GET_CODE (from
) == NOTE
3413 || GET_CODE (from
) == CODE_LABEL
)
3414 from
= PREV_INSN (from
);
3416 from
= emit_jump_insn_after (gen_jump (label
), from
);
3417 JUMP_LABEL (from
) = label
;
3418 found_barrier
= emit_barrier_after (from
);
3419 emit_label_after (label
, found_barrier
);
3420 return found_barrier
;
3423 return found_barrier
;
3426 /* Non zero if the insn is a move instruction which needs to be fixed. */
3431 if (!INSN_DELETED_P (insn
)
3432 && GET_CODE (insn
) == INSN
3433 && GET_CODE (PATTERN (insn
)) == SET
)
3435 rtx pat
= PATTERN (insn
);
3436 rtx src
= SET_SRC (pat
);
3437 rtx dst
= SET_DEST (pat
);
3439 enum machine_mode mode
= GET_MODE (dst
);
3443 if (GET_CODE (dst
) == REG
)
3444 destreg
= REGNO (dst
);
3445 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
3446 destreg
= REGNO (SUBREG_REG (dst
));
3448 return fixit (src
, mode
, destreg
);
3462 /* The ldr instruction can work with up to a 4k offset, and most constants
3463 will be loaded with one of these instructions; however, the adr
3464 instruction and the ldf instructions only work with a 1k offset. This
3465 code needs to be rewritten to use the 4k offset when possible, and to
3466 adjust when a 1k offset is needed. For now we just use a 1k offset
3470 /* Floating point operands can't work further than 1024 bytes from the
3471 PC, so to make things simple we restrict all loads for such functions.
3473 if (TARGET_HARD_FLOAT
)
3474 for (regno
= 16; regno
< 24; regno
++)
3475 if (regs_ever_live
[regno
])
3484 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
3486 if (broken_move (insn
))
3488 /* This is a broken move instruction, scan ahead looking for
3489 a barrier to stick the constant table behind */
3491 rtx barrier
= find_barrier (insn
, count_size
);
3493 /* Now find all the moves between the points and modify them */
3494 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
3496 if (broken_move (scan
))
3498 /* This is a broken move instruction, add it to the pool */
3499 rtx pat
= PATTERN (scan
);
3500 rtx src
= SET_SRC (pat
);
3501 rtx dst
= SET_DEST (pat
);
3502 enum machine_mode mode
= GET_MODE (dst
);
3503 HOST_WIDE_INT offset
;
3509 /* If this is an HImode constant load, convert it into
3510 an SImode constant load. Since the register is always
3511 32 bits this is safe. We have to do this, since the
3512 load pc-relative instruction only does a 32-bit load. */
3516 if (GET_CODE (dst
) != REG
)
3518 PUT_MODE (dst
, SImode
);
3521 offset
= add_constant (src
, mode
);
3522 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
3526 /* For wide moves to integer regs we need to split the
3527 address calculation off into a separate insn, so that
3528 the load can then be done with a load-multiple. This is
3529 safe, since we have already noted the length of such
3530 insns to be 8, and we are immediately over-writing the
3531 scratch we have grabbed with the final result. */
3532 if (GET_MODE_SIZE (mode
) > 4
3533 && (scratch
= REGNO (dst
)) < 16)
3535 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
3536 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
3541 newsrc
= gen_rtx (MEM
, mode
, addr
);
3543 /* Build a jump insn wrapper around the move instead
3544 of an ordinary insn, because we want to have room for
3545 the target label rtx in fld[7], which an ordinary
3546 insn doesn't have. */
3547 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
3550 JUMP_LABEL (newinsn
) = pool_vector_label
;
3552 /* But it's still an ordinary insn */
3553 PUT_CODE (newinsn
, INSN
);
3560 dump_table (barrier
);
3567 /* Routines to output assembly language. */
3569 /* If the rtx is the correct value then return the string of the number.
3570 In this way we can ensure that valid double constants are generated even
3571 when cross compiling. */
3573 fp_immediate_constant (x
)
3579 if (!fpa_consts_inited
)
3582 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3583 for (i
= 0; i
< 8; i
++)
3584 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
3585 return strings_fpa
[i
];
3590 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
3592 fp_const_from_val (r
)
3597 if (! fpa_consts_inited
)
3600 for (i
= 0; i
< 8; i
++)
3601 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
3602 return strings_fpa
[i
];
3607 /* Output the operands of a LDM/STM instruction to STREAM.
3608 MASK is the ARM register set mask of which only bits 0-15 are important.
3609 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
3610 must follow the register list. */
3613 print_multi_reg (stream
, instr
, mask
, hat
)
3619 int not_first
= FALSE
;
3621 fputc ('\t', stream
);
3622 fprintf (stream
, instr
, REGISTER_PREFIX
);
3623 fputs (", {", stream
);
3624 for (i
= 0; i
< 16; i
++)
3625 if (mask
& (1 << i
))
3628 fprintf (stream
, ", ");
3629 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
3633 fprintf (stream
, "}%s\n", hat
? "^" : "");
3636 /* Output a 'call' insn. */
3639 output_call (operands
)
3642 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
3644 if (REGNO (operands
[0]) == 14)
3646 operands
[0] = gen_rtx (REG
, SImode
, 12);
3647 output_asm_insn ("mov%?\t%0, %|lr", operands
);
3649 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3650 output_asm_insn ("mov%?\t%|pc, %0", operands
);
3658 int something_changed
= 0;
3660 int code
= GET_CODE (x0
);
3667 if (REGNO (x0
) == 14)
3669 *x
= gen_rtx (REG
, SImode
, 12);
3674 /* Scan through the sub-elements and change any references there */
3675 fmt
= GET_RTX_FORMAT (code
);
3676 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3678 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
3679 else if (fmt
[i
] == 'E')
3680 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
3681 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
3682 return something_changed
;
3686 /* Output a 'call' insn that is a reference in memory. */
3689 output_call_mem (operands
)
3692 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
3693 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
3695 if (eliminate_lr2ip (&operands
[0]))
3696 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
3698 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3699 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
3704 /* Output a move from arm registers to an fpu registers.
3705 OPERANDS[0] is an fpu register.
3706 OPERANDS[1] is the first registers of an arm register pair. */
3709 output_mov_long_double_fpu_from_arm (operands
)
3712 int arm_reg0
= REGNO (operands
[1]);
3718 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3719 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3720 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3722 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3723 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
3727 /* Output a move from an fpu register to arm registers.
3728 OPERANDS[0] is the first registers of an arm register pair.
3729 OPERANDS[1] is an fpu register. */
3732 output_mov_long_double_arm_from_fpu (operands
)
3735 int arm_reg0
= REGNO (operands
[0]);
3741 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3742 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3743 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3745 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
3746 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3750 /* Output a move from arm registers to arm registers of a long double
3751 OPERANDS[0] is the destination.
3752 OPERANDS[1] is the source. */
3754 output_mov_long_double_arm_from_arm (operands
)
3757 /* We have to be careful here because the two might overlap */
3758 int dest_start
= REGNO (operands
[0]);
3759 int src_start
= REGNO (operands
[1]);
3763 if (dest_start
< src_start
)
3765 for (i
= 0; i
< 3; i
++)
3767 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3768 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3769 output_asm_insn ("mov%?\t%0, %1", ops
);
3774 for (i
= 2; i
>= 0; i
--)
3776 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3777 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3778 output_asm_insn ("mov%?\t%0, %1", ops
);
3786 /* Output a move from arm registers to an fpu registers.
3787 OPERANDS[0] is an fpu register.
3788 OPERANDS[1] is the first registers of an arm register pair. */
3791 output_mov_double_fpu_from_arm (operands
)
3794 int arm_reg0
= REGNO (operands
[1]);
3799 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3800 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3801 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
3802 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
3806 /* Output a move from an fpu register to arm registers.
3807 OPERANDS[0] is the first registers of an arm register pair.
3808 OPERANDS[1] is an fpu register. */
3811 output_mov_double_arm_from_fpu (operands
)
3814 int arm_reg0
= REGNO (operands
[0]);
3820 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3821 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3822 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
3823 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
3827 /* Output a move between double words.
3828 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
3829 or MEM<-REG and all MEMs must be offsettable addresses. */
3832 output_move_double (operands
)
3835 enum rtx_code code0
= GET_CODE (operands
[0]);
3836 enum rtx_code code1
= GET_CODE (operands
[1]);
3841 int reg0
= REGNO (operands
[0]);
3843 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
3846 int reg1
= REGNO (operands
[1]);
3850 /* Ensure the second source is not overwritten */
3851 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
3852 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
3854 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
3856 else if (code1
== CONST_DOUBLE
)
3858 if (GET_MODE (operands
[1]) == DFmode
)
3861 union real_extract u
;
3863 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
3865 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
3866 otherops
[1] = GEN_INT(l
[1]);
3867 operands
[1] = GEN_INT(l
[0]);
3869 else if (GET_MODE (operands
[1]) != VOIDmode
)
3871 else if (WORDS_BIG_ENDIAN
)
3874 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
3875 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
3880 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
3881 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
3883 output_mov_immediate (operands
);
3884 output_mov_immediate (otherops
);
3886 else if (code1
== CONST_INT
)
3888 /* sign extend the intval into the high-order word */
3889 if (WORDS_BIG_ENDIAN
)
3891 otherops
[1] = operands
[1];
3892 operands
[1] = (INTVAL (operands
[1]) < 0
3893 ? constm1_rtx
: const0_rtx
);
3896 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
3897 output_mov_immediate (otherops
);
3898 output_mov_immediate (operands
);
3900 else if (code1
== MEM
)
3902 switch (GET_CODE (XEXP (operands
[1], 0)))
3905 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
3909 abort (); /* Should never happen now */
3913 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
3917 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
3921 abort (); /* Should never happen now */
3926 output_asm_insn ("adr%?\t%0, %1", operands
);
3927 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
3931 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
3933 otherops
[0] = operands
[0];
3934 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
3935 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
3936 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
3938 if (GET_CODE (otherops
[2]) == CONST_INT
)
3940 switch (INTVAL (otherops
[2]))
3943 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
3946 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
3949 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
3952 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
3953 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
3955 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
3958 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
3961 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
3962 return "ldm%?ia\t%0, %M0";
3966 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
3967 /* Take care of overlapping base/data reg. */
3968 if (reg_mentioned_p (operands
[0], operands
[1]))
3970 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3971 output_asm_insn ("ldr%?\t%0, %1", operands
);
3975 output_asm_insn ("ldr%?\t%0, %1", operands
);
3976 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3982 abort(); /* Constraints should prevent this */
3984 else if (code0
== MEM
&& code1
== REG
)
3986 if (REGNO (operands
[1]) == 12)
3989 switch (GET_CODE (XEXP (operands
[0], 0)))
3992 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
3996 abort (); /* Should never happen now */
4000 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
4004 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
4008 abort (); /* Should never happen now */
4012 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
4014 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
4017 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
4021 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
4025 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
4032 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
4033 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
4034 output_asm_insn ("str%?\t%1, %0", operands
);
4035 output_asm_insn ("str%?\t%1, %0", otherops
);
4039 abort(); /* Constraints should prevent this */
4045 /* Output an arbitrary MOV reg, #n.
4046 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4049 output_mov_immediate (operands
)
4052 HOST_WIDE_INT n
= INTVAL (operands
[1]);
4056 /* Try to use one MOV */
4057 if (const_ok_for_arm (n
))
4059 output_asm_insn ("mov%?\t%0, %1", operands
);
4063 /* Try to use one MVN */
4064 if (const_ok_for_arm (~n
))
4066 operands
[1] = GEN_INT (~n
);
4067 output_asm_insn ("mvn%?\t%0, %1", operands
);
4071 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4073 for (i
=0; i
< 32; i
++)
4077 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
4078 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4081 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4088 /* Output an ADD r, s, #n where n may be too big for one instruction. If
4089 adding zero to one register, output nothing. */
4092 output_add_immediate (operands
)
4095 HOST_WIDE_INT n
= INTVAL (operands
[2]);
4097 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
4100 output_multi_immediate (operands
,
4101 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4104 output_multi_immediate (operands
,
4105 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4112 /* Output a multiple immediate operation.
4113 OPERANDS is the vector of operands referred to in the output patterns.
4114 INSTR1 is the output pattern to use for the first constant.
4115 INSTR2 is the output pattern to use for subsequent constants.
4116 IMMED_OP is the index of the constant slot in OPERANDS.
4117 N is the constant value. */
4120 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
4122 char *instr1
, *instr2
;
4126 #if HOST_BITS_PER_WIDE_INT > 32
4132 operands
[immed_op
] = const0_rtx
;
4133 output_asm_insn (instr1
, operands
); /* Quick and easy output */
4138 char *instr
= instr1
;
4140 /* Note that n is never zero here (which would give no output) */
4141 for (i
= 0; i
< 32; i
+= 2)
4145 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
4146 output_asm_insn (instr
, operands
);
4156 /* Return the appropriate ARM instruction for the operation code.
4157 The returned result should not be overwritten. OP is the rtx of the
4158 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
4162 arithmetic_instr (op
, shift_first_arg
)
4164 int shift_first_arg
;
4166 switch (GET_CODE (op
))
4172 return shift_first_arg
? "rsb" : "sub";
4189 /* Ensure valid constant shifts and return the appropriate shift mnemonic
4190 for the operation code. The returned result should not be overwritten.
4191 OP is the rtx code of the shift.
4192 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
4196 shift_op (op
, amountp
)
4198 HOST_WIDE_INT
*amountp
;
4201 enum rtx_code code
= GET_CODE (op
);
4203 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
4205 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
4206 *amountp
= INTVAL (XEXP (op
, 1));
4229 /* We never have to worry about the amount being other than a
4230 power of 2, since this case can never be reloaded from a reg. */
4232 *amountp
= int_log2 (*amountp
);
4243 /* This is not 100% correct, but follows from the desire to merge
4244 multiplication by a power of 2 with the recognizer for a
4245 shift. >=32 is not a valid shift for "asl", so we must try and
4246 output a shift that produces the correct arithmetical result.
4247 Using lsr #32 is identical except for the fact that the carry bit
4248 is not set correctly if we set the flags; but we never use the
4249 carry bit from such an operation, so we can ignore that. */
4250 if (code
== ROTATERT
)
4251 *amountp
&= 31; /* Rotate is just modulo 32 */
4252 else if (*amountp
!= (*amountp
& 31))
4259 /* Shifts of 0 are no-ops. */
4268 /* Obtain the shift from the POWER of two. */
4272 HOST_WIDE_INT power
;
4274 HOST_WIDE_INT shift
= 0;
4276 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
/* Output a .ascii pseudo-op, keeping track of lengths.  This is because
   /bin/as is horribly restrictive.  */
void
output_ascii_pseudo_op (stream, p, len)
     FILE *stream;
     unsigned char *p;
     int len;
{
  int i;
  /* Start above the threshold so the first character opens a directive.  */
  int len_so_far = 1000;
  int chars_so_far = 0;

  for (i = 0; i < len; i++)
    {
      register int c = p[i];

      /* Keep each .ascii line short for the restrictive assembler.  */
      if (len_so_far > 50)
	{
	  if (chars_so_far)
	    fputs ("\"\n", stream);
	  fputs ("\t.ascii\t\"", stream);
	  len_so_far = 0;
	  chars_so_far = 0;
	}

      /* Escape characters that are special inside a string literal.  */
      if (c == '\"' || c == '\\')
	{
	  putc ('\\', stream);
	  len_so_far++;
	}

      if (c >= ' ' && c < 0177)
	{
	  putc (c, stream);
	  len_so_far++;
	}
      else
	{
	  /* Non-printing characters are emitted as octal escapes.  */
	  fprintf (stream, "\\%03o", c);
	  len_so_far += 4;
	}

      chars_so_far++;
    }

  fputs ("\"\n", stream);
}
4336 /* Try to determine whether a pattern really clobbers the link register.
4337 This information is useful when peepholing, so that lr need not be pushed
4338 if we combine a call followed by a return.
4339 NOTE: This code does not check for side-effect expressions in a SET_SRC:
4340 such a check should not be needed because these only update an existing
4341 value within a register; the register must still be set elsewhere within
4345 pattern_really_clobbers_lr (x
)
4350 switch (GET_CODE (x
))
4353 switch (GET_CODE (SET_DEST (x
)))
4356 return REGNO (SET_DEST (x
)) == 14;
4359 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
4360 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
4362 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
4371 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
4372 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
4377 switch (GET_CODE (XEXP (x
, 0)))
4380 return REGNO (XEXP (x
, 0)) == 14;
4383 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
4384 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
4400 function_really_clobbers_lr (first
)
4405 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
4407 switch (GET_CODE (insn
))
4412 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
4417 if (pattern_really_clobbers_lr (PATTERN (insn
)))
4422 /* Don't yet know how to handle those calls that are not to a
4424 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
4427 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
4430 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
4436 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
4442 default: /* Don't recognize it, be safe */
4446 /* A call can be made (by peepholing) not to clobber lr iff it is
4447 followed by a return. There may, however, be a use insn iff
4448 we are returning the result of the call.
4449 If we run off the end of the insn chain, then that means the
4450 call was at the end of the function. Unfortunately we don't
4451 have a return insn for the peephole to recognize, so we
4452 must reject this. (Can this be fixed by adding our own insn?) */
4453 if ((next
= next_nonnote_insn (insn
)) == NULL
)
4456 /* No need to worry about lr if the call never returns */
4457 if (GET_CODE (next
) == BARRIER
)
4460 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
4461 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
4462 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
4463 == REGNO (XEXP (PATTERN (next
), 0))))
4464 if ((next
= next_nonnote_insn (next
)) == NULL
)
4467 if (GET_CODE (next
) == JUMP_INSN
4468 && GET_CODE (PATTERN (next
)) == RETURN
)
4477 /* We have reached the end of the chain so lr was _not_ clobbered */
4482 output_return_instruction (operand
, really_return
, reverse
)
4488 int reg
, live_regs
= 0;
4489 int volatile_func
= (optimize
> 0
4490 && TREE_THIS_VOLATILE (current_function_decl
));
4492 return_used_this_function
= 1;
4497 /* If this function was declared non-returning, and we have found a tail
4498 call, then we have to trust that the called function won't return. */
4499 if (! really_return
)
4502 /* Otherwise, trap an attempted return by aborting. */
4504 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4505 assemble_external_libcall (ops
[1]);
4506 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
4510 if (current_function_calls_alloca
&& ! really_return
)
4513 for (reg
= 0; reg
<= 10; reg
++)
4514 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4517 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
4520 if (frame_pointer_needed
)
4525 if (lr_save_eliminated
|| ! regs_ever_live
[14])
4528 if (frame_pointer_needed
)
4530 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
4533 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
4535 for (reg
= 0; reg
<= 10; reg
++)
4536 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4538 strcat (instr
, "%|");
4539 strcat (instr
, reg_names
[reg
]);
4541 strcat (instr
, ", ");
4544 if (frame_pointer_needed
)
4546 strcat (instr
, "%|");
4547 strcat (instr
, reg_names
[11]);
4548 strcat (instr
, ", ");
4549 strcat (instr
, "%|");
4550 strcat (instr
, reg_names
[13]);
4551 strcat (instr
, ", ");
4552 strcat (instr
, "%|");
4553 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4557 strcat (instr
, "%|");
4558 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4560 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
4561 output_asm_insn (instr
, &operand
);
4563 else if (really_return
)
4565 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
4566 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
4567 output_asm_insn (instr
, &operand
);
/* NOTE (review): partial extraction -- the return-type line and braces of
   this function were lost; comments only added, code text unchanged.  */
4573 /* Return nonzero if optimizing and the current function is volatile.
4574 Such functions never return, and many memory cycles can be saved
4575 by not storing register values that will never be needed again.
4576 This optimization was added to speed up context switching in a
4577 kernel application. */
4580 arm_volatile_func ()
4582 return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl));
/* NOTE (review): partial extraction -- interior lines and braces missing;
   comments only added, code text unchanged.  */
4585 /* The amount of stack adjustment that happens here, in output_return and in
4586 output_epilogue must be exactly the same as was calculated during reload,
4587 or things will point to the wrong place. The only time we can safely
4588 ignore this constraint is when a function has no arguments on the stack,
4589 no stack frame requirement and no live registers except for `lr'. If we
4590 can guarantee that by making all function calls into tail calls and that
4591 lr is not clobbered in any other way, then there is no need to push lr
4595 output_func_prologue (f, frame_size)
/* Emits the human-readable prologue commentary into the asm file F and
   computes which registers must be saved (live_regs_mask); decides
   whether the save of lr can be omitted (lr_save_eliminated).  */
4599 int reg, live_regs_mask = 0;
4601 int volatile_func = (optimize > 0
4602 && TREE_THIS_VOLATILE (current_function_decl));
4604 /* Nonzero if we must stuff some register arguments onto the stack as if
4605 they were passed there. */
4606 int store_arg_regs = 0;
4608 if (arm_ccfsm_state || arm_target_insn)
4609 abort (); /* Sanity check */
/* Naked functions get no compiler-generated prologue at all.  */
4611 if (arm_naked_function_p (current_function_decl))
4614 return_used_this_function = 0;
4615 lr_save_eliminated = 0;
/* Emit assembler comments describing the frame layout.  */
4617 fprintf (f, "\t%s args = %d, pretend = %d, frame = %d\n",
4618 ASM_COMMENT_START, current_function_args_size,
4619 current_function_pretend_args_size, frame_size);
4620 fprintf (f, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
4621 ASM_COMMENT_START, frame_pointer_needed,
4622 current_function_anonymous_args);
4625 fprintf (f, "\t%s Volatile function.\n", ASM_COMMENT_START);
4627 if (current_function_anonymous_args && current_function_pretend_args_size)
/* Collect live callee-saved integer registers r0..r10.  */
4630 for (reg = 0; reg <= 10; reg++)
4631 if (regs_ever_live[reg] && ! call_used_regs[reg])
4632 live_regs_mask |= (1 << reg);
/* 0xD800 = fp(r11), ip(r12)?, lr(r14), pc(r15) -- bits 11,12,14,15;
   0x4000 = lr alone.  TODO confirm bit meanings against register map.  */
4634 if (frame_pointer_needed)
4635 live_regs_mask |= 0xD800;
4636 else if (regs_ever_live[14])
4638 if (! current_function_args_size
4639 && ! function_really_clobbers_lr (get_insns ()))
4640 lr_save_eliminated = 1;
4642 live_regs_mask |= 0x4000;
4647 /* if a di mode load/store multiple is used, and the base register
4648 is r3, then r4 can become an ever live register without lr
4649 doing so, in this case we need to push lr as well, or we
4650 will fail to get a proper return. */
4652 live_regs_mask |= 0x4000;
4653 lr_save_eliminated = 0;
4657 if (lr_save_eliminated)
4658 fprintf (f,"\t%s I don't think this function clobbers lr\n",
4661 #ifdef AOF_ASSEMBLER
/* AOF PIC: copy the PIC base register into ip.  */
4663 fprintf (f, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX, REGISTER_PREFIX,
4664 reg_names[PIC_OFFSET_TABLE_REGNUM]);
/* NOTE (review): partial extraction -- interior lines and braces missing;
   comments only added, code text unchanged.

   output_func_epilogue -- write the textual function epilogue to F:
   reload FPA float registers (ldfe), pop the saved integer registers
   (ldmea via fp, or ldmfd via sp), unwind any pretend-args area and
   return.  A volatile function gets a call to abort instead.  */
4670 output_func_epilogue (f, frame_size)
4674 int reg, live_regs_mask = 0, code_size = 0;
4675 /* If we need this then it will always be at least this much */
4676 int floats_offset = 24;
4678 int volatile_func = (optimize > 0
4679 && TREE_THIS_VOLATILE (current_function_decl));
/* When the return peephole already emitted the return, sanity-check the
   frame size and skip most of the work (elided lines).  */
4681 if (use_return_insn() && return_used_this_function)
4683 if (frame_size && !(frame_pointer_needed || TARGET_APCS))
4690 /* Naked functions don't have epilogues. */
4691 if (arm_naked_function_p (current_function_decl))
4694 /* A volatile function should never return. Call abort. */
4697 rtx op = gen_rtx (SYMBOL_REF, Pmode, "abort");
4698 assemble_external_libcall (op);
4699 output_asm_insn ("bl\t%a0", &op);
/* Collect live callee-saved integer registers r0..r10.  */
4704 for (reg = 0; reg <= 10; reg++)
4705 if (regs_ever_live[reg] && ! call_used_regs[reg])
4707 live_regs_mask |= (1 << reg);
4711 if (frame_pointer_needed)
/* Frame-pointer case: reload float regs f4..f7 (regs 16..23) at fixed
   negative offsets from fp, 12 bytes apart.  */
4713 for (reg = 23; reg > 15; reg--)
4714 if (regs_ever_live[reg] && ! call_used_regs[reg])
4716 fprintf (f, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX,
4717 reg_names[reg], REGISTER_PREFIX, floats_offset);
4718 floats_offset += 12;
/* 0xA800: add fp, sp(?), pc to the pop mask -- bits 11,13,15.
   TODO confirm against the bit layout used in the prologue.  */
4722 live_regs_mask |= 0xA800;
4723 print_multi_reg (f, "ldmea\t%sfp", live_regs_mask,
4724 TARGET_APCS_32 ? FALSE : TRUE);
4729 /* Restore stack pointer if necessary. */
4732 operands[0] = operands[1] = stack_pointer_rtx;
4733 operands[2] = gen_rtx (CONST_INT, VOIDmode, frame_size);
4734 output_add_immediate (operands);
/* No-frame-pointer case: pop float regs off the stack post-increment.  */
4737 for (reg = 16; reg < 24; reg++)
4738 if (regs_ever_live[reg] && ! call_used_regs[reg])
4740 fprintf (f, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX,
4741 reg_names[reg], REGISTER_PREFIX);
4744 if (current_function_pretend_args_size == 0 && regs_ever_live[14])
4746 if (lr_save_eliminated)
/* NOTE (review): the trailing `f' argument looks spurious -- the format
   string consumes only the two REGISTER_PREFIX arguments.  Harmless to
   varargs fprintf, but verify against the master source.  */
4747 fprintf (f, (TARGET_APCS_32 ? "\tmov\t%spc, %slr\n"
4748 : "\tmovs\t%spc, %slr\n"),
4749 REGISTER_PREFIX, REGISTER_PREFIX, f);
/* 0x8000 adds pc to the mask so the ldmfd itself performs the return.  */
4751 print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask | 0x8000,
4752 TARGET_APCS_32 ? FALSE : TRUE);
4757 if (live_regs_mask || regs_ever_live[14])
4759 /* Restore the integer regs, and the return address into lr */
4760 if (! lr_save_eliminated)
4761 live_regs_mask |= 0x4000;
4763 if (live_regs_mask != 0)
4765 print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
4769 if (current_function_pretend_args_size)
4771 /* Unwind the pre-pushed regs */
4772 operands[0] = operands[1] = stack_pointer_rtx;
4773 operands[2] = gen_rtx (CONST_INT, VOIDmode,
4774 current_function_pretend_args_size);
4775 output_add_immediate (operands);
4777 /* And finally, go home */
/* NOTE (review): same apparently-spurious trailing `f' argument here.  */
4778 fprintf (f, (TARGET_APCS_32 ? "\tmov\t%spc, %slr\n"
4779 : "\tmovs\t%spc, %slr\n"),
4780 REGISTER_PREFIX, REGISTER_PREFIX, f);
4787 current_function_anonymous_args = 0;
/* NOTE (review): partial extraction -- interior lines and braces missing;
   comments only added, code text unchanged.

   emit_multi_reg_push -- emit RTL for a store-multiple push of the
   registers selected by bit mask MASK (bit i => r<i>).  Builds a
   PARALLEL whose first element is the pre-decrement store wrapped in an
   UNSPEC and whose remaining elements are USEs of the other pushed
   registers.  The count/validity lines were lost in extraction.  */
4791 emit_multi_reg_push (mask)
/* Count the set bits of MASK (accumulator line elided).  */
4798 for (i = 0; i < 16; i++)
4799 if (mask & (1 << i))
4802 if (num_regs == 0 || num_regs > 16)
4805 par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num_regs));
/* First selected register becomes the SET element of the PARALLEL.  */
4807 for (i = 0; i < 16; i++)
4809 if (mask & (1 << i))
4812 = gen_rtx (SET, VOIDmode, gen_rtx (MEM, BLKmode,
4813 gen_rtx (PRE_DEC, BLKmode,
4814 stack_pointer_rtx)),
4815 gen_rtx (UNSPEC, BLKmode,
4816 gen_rtvec (1, gen_rtx (REG, SImode, i)),
/* Remaining selected registers become USE elements.  */
4822 for (j = 1, i++; j < num_regs; i++)
4824 if (mask & (1 << i))
4827 = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, i));
/* NOTE (review): partial extraction -- interior lines and braces missing;
   comments only added, code text unchanged.

   arm_expand_prologue -- emit the function prologue as RTL: push the
   pretend-arg registers, push the live callee-saved registers (plus lr),
   store the live FPA float registers, establish the frame pointer and
   drop the stack pointer by the frame size.  */
4835 arm_expand_prologue ()
4838 rtx amount = GEN_INT (- get_frame_size ());
4841 int live_regs_mask = 0;
4842 int store_arg_regs = 0;
4843 int volatile_func = (optimize > 0
4844 && TREE_THIS_VOLATILE (current_function_decl));
4846 /* Naked functions don't have prologues. */
4847 if (arm_naked_function_p (current_function_decl))
4850 if (current_function_anonymous_args && current_function_pretend_args_size)
/* A volatile (non-returning) function need not save anything.  */
4853 if (! volatile_func)
4854 for (reg = 0; reg <= 10; reg++)
4855 if (regs_ever_live[reg] && ! call_used_regs[reg])
4856 live_regs_mask |= 1 << reg;
4858 if (! volatile_func && regs_ever_live[14])
4859 live_regs_mask |= 0x4000;
4861 if (frame_pointer_needed)
/* Save sp in ip (r12) so fp can be computed after the pushes.  */
4863 live_regs_mask |= 0xD800;
4864 emit_insn (gen_movsi (gen_rtx (REG, SImode, 12),
4865 stack_pointer_rtx));
4868 if (current_function_pretend_args_size)
/* Either push the tail of r0-r3 as if caller-pushed, or just drop sp.  */
4871 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4))
4874 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
4875 GEN_INT (-current_function_pretend_args_size)));
4880 /* If we have to push any regs, then we must push lr as well, or
4881 we won't get a proper return. */
4882 live_regs_mask |= 0x4000;
4883 emit_multi_reg_push (live_regs_mask);
4886 /* For now the integer regs are still pushed in output_func_epilogue (). */
/* Store live FPA registers f4..f7 (regs 16..23) with pre-decrement.  */
4888 if (! volatile_func)
4889 for (reg = 23; reg > 15; reg--)
4890 if (regs_ever_live[reg] && ! call_used_regs[reg])
4891 emit_insn (gen_rtx (SET, VOIDmode,
4892 gen_rtx (MEM, XFmode,
4893 gen_rtx (PRE_DEC, XFmode,
4894 stack_pointer_rtx)),
4895 gen_rtx (REG, XFmode, reg)));
/* fp = saved-sp (in ip) - (4 + pretend size).  */
4897 if (frame_pointer_needed)
4898 emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx (REG, SImode, 12),
4900 (-(4 + current_function_pretend_args_size)))));
4902 if (amount != const0_rtx)
4904 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount));
/* Clobber memory below sp so the scheduler won't move stores past it.  */
4905 emit_insn (gen_rtx (CLOBBER, VOIDmode,
4906 gen_rtx (MEM, BLKmode, stack_pointer_rtx)));
4909 /* If we are profiling, make sure no instructions are scheduled before
4910 the call to mcount. */
4911 if (profile_flag || profile_block_flag)
4912 emit_insn (gen_blockage ());
/* NOTE (review): partial extraction -- the switch skeleton (case labels,
   braces) is largely missing; comments only added, code text unchanged.  */
4916 /* If CODE is 'd', then the X is a condition operand and the instruction
4917 should only be executed if the condition is true.
4918 if CODE is 'D', then the X is a condition operand and the instruction
4919 should only be executed if the condition is false: however, if the mode
4920 of the comparison is CCFPEmode, then always execute the instruction -- we
4921 do this because in these circumstances !GE does not necessarily imply LT;
4922 in these cases the instruction pattern will take care to make sure that
4923 an instruction containing %d will follow, thereby undoing the effects of
4924 doing this instruction unconditionally.
4925 If CODE is 'N' then X is a floating point operand that must be negated
4927 If CODE is 'B' then output a bitwise inverted value of X (a const int).
4928 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
4931 arm_print_operand (stream, x, code)
/* Each fragment below is the body of one `case' of the (elided) switch
   on CODE; the case labels themselves were lost in extraction.  */
4939 fputs (ASM_COMMENT_START, stream);
4943 fputs (REGISTER_PREFIX, stream);
/* '?': current condition code while conditionalising (fsm states 3/4).  */
4947 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
4948 fputs (arm_condition_codes[arm_current_cc], stream);
/* 'N': print the negated FP constant.  */
4954 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4955 r = REAL_VALUE_NEGATE (r);
4956 fprintf (stream, "%s", fp_const_from_val (&r));
/* 'B': bitwise-inverted constant, else fall back to the address const.  */
4961 if (GET_CODE (x) == CONST_INT)
4963 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
4968 ARM_SIGN_EXTEND (~ INTVAL (x)));
4972 output_addr_const (stream, x);
4977 fprintf (stream, "%s", arithmetic_instr (x, 1));
4981 fprintf (stream, "%s", arithmetic_instr (x, 0));
/* Shift operand: mnemonic plus optional amount.  */
4987 char *shift = shift_op (x, &val);
4991 fprintf (stream, ", %s ", shift_op (x, &val));
4993 arm_print_operand (stream, XEXP (x, 1), 0);
4996 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
/* 'Q'/'R' (presumably): low/high word of a DImode register pair,
   swapped for WORDS_BIG_ENDIAN -- TODO confirm letter assignment.  */
5009 fputs (REGISTER_PREFIX, stream);
5010 fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], stream);
5016 fputs (REGISTER_PREFIX, stream);
5017 fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], stream);
/* Base register of a MEM (possibly via auto-inc/dec).  */
5021 fputs (REGISTER_PREFIX, stream);
5022 if (GET_CODE (XEXP (x, 0)) == REG)
5023 fputs (reg_names[REGNO (XEXP (x, 0))], stream);
5025 fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream);
/* 'M': ldm/stm style register range covering the value's mode size.  */
5029 fprintf (stream, "{%s%s-%s%s}", REGISTER_PREFIX, reg_names[REGNO (x)],
5030 REGISTER_PREFIX, reg_names[REGNO (x) - 1
5031 + ((GET_MODE_SIZE (GET_MODE (x))
5032 + GET_MODE_SIZE (SImode) - 1)
5033 / GET_MODE_SIZE (SImode))]);
/* 'd' / 'D': condition code, direct and inverted.  */
5038 fputs (arm_condition_codes[get_arm_condition_code (x)],
5044 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
5045 (get_arm_condition_code (x))],
/* Default: registers, memory references, FP constants, immediates.  */
5053 if (GET_CODE (x) == REG)
5055 fputs (REGISTER_PREFIX, stream);
5056 fputs (reg_names[REGNO (x)], stream);
5058 else if (GET_CODE (x) == MEM)
5060 output_memory_reference_mode = GET_MODE (x);
5061 output_address (XEXP (x, 0));
5063 else if (GET_CODE (x) == CONST_DOUBLE)
5064 fprintf (stream, "#%s", fp_immediate_constant (x));
5065 else if (GET_CODE (x) == NEG)
5066 abort (); /* This should never happen now. */
5069 fputc ('#', stream);
5070 output_addr_const (stream, x);
/* NOTE (review): partial extraction -- return type, parameter
   declarations and braces missing; code text unchanged.  */
5075 /* Output a label definition. */
5078 arm_asm_output_label (stream, name)
5082 ARM_OUTPUT_LABEL (stream, name);
/* NOTE (review): partial extraction -- parameter declarations and braces
   missing; comments only added, code text unchanged.  */
5085 /* Output code resembling an .lcomm directive. /bin/as doesn't have this
5086 directive hence this hack, which works by reserving some `.space' in the
5087 bss segment directly.
5089 XXX This is a severe hack, which is guaranteed NOT to work since it doesn't
5090 define STATIC COMMON space but merely STATIC BSS space. */
5093 output_lcomm_directive (stream, name, size, align)
/* Align, emit the label, then reserve SIZE bytes of zeroed space.  */
5099 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5100 ARM_OUTPUT_LABEL (stream, name);
5101 fprintf (stream, "\t.space\t%d\n", size);
/* NOTE (review): partial extraction -- the `switch (mode)' / `switch
   (comp_code)' headers, braces and several case groups were lost; the
   surviving case lines are grouped per (elided) CC mode.  Comments only
   added, code text unchanged.  */
5104 /* A finite state machine takes care of noticing whether or not instructions
5105 can be conditionally executed, and thus decrease execution time and code
5106 size by deleting branch instructions. The fsm is controlled by
5107 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
5109 /* The state of the fsm controlling condition codes are:
5110 0: normal, do nothing special
5111 1: make ASM_OUTPUT_OPCODE not output this instruction
5112 2: make ASM_OUTPUT_OPCODE not output this instruction
5113 3: make instructions conditional
5114 4: make instructions conditional
5116 State transitions (state->state by whom under condition):
5117 0 -> 1 final_prescan_insn if the `target' is a label
5118 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
5119 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
5120 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
5121 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
5122 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
5123 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
5124 (the target insn is arm_target_insn).
5126 If the jump clobbers the conditions then we use states 2 and 4.
5128 A similar thing can be done with conditional return insns.
5130 XXX In case the `target' is an unconditional branch, this conditionalising
5131 of the instructions always reduces code size, but not always execution
5132 time. But then, I want to reduce the code size to somewhere near what
5133 /bin/cc produces. */
5135 /* Returns the index of the ARM condition code string in
5136 `arm_condition_codes'. COMPARISON should be an rtx like
5137 `(eq (...) (...))'. */
5139 static enum arm_cond_code
5140 get_arm_condition_code (comparison)
5143 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
5145 register enum rtx_code comp_code = GET_CODE (comparison);
5147 if (GET_MODE_CLASS (mode) != MODE_CC)
5148 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
5149 XEXP (comparison, 1));
/* Dominance CC modes: the base condition is recorded in `code' and the
   (elided) `dominance' label combines it with EQ/NE.  */
5153 case CC_DNEmode: code = ARM_NE; goto dominance;
5154 case CC_DEQmode: code = ARM_EQ; goto dominance;
5155 case CC_DGEmode: code = ARM_GE; goto dominance;
5156 case CC_DGTmode: code = ARM_GT; goto dominance;
5157 case CC_DLEmode: code = ARM_LE; goto dominance;
5158 case CC_DLTmode: code = ARM_LT; goto dominance;
5159 case CC_DGEUmode: code = ARM_CS; goto dominance;
5160 case CC_DGTUmode: code = ARM_HI; goto dominance;
5161 case CC_DLEUmode: code = ARM_LS; goto dominance;
5162 case CC_DLTUmode: code = ARM_CC;
5165 if (comp_code != EQ && comp_code != NE)
5168 if (comp_code == EQ)
5169 return ARM_INVERSE_CONDITION_CODE (code);
/* Next groups map rtx comparison codes to ARM conditions for specific
   CC modes (mode case labels elided in extraction).  */
5175 case NE: return ARM_NE;
5176 case EQ: return ARM_EQ;
5177 case GE: return ARM_PL;
5178 case LT: return ARM_MI;
5186 case NE: return ARM_NE;
5187 case EQ: return ARM_EQ;
5194 case GE: return ARM_GE;
5195 case GT: return ARM_GT;
5196 case LE: return ARM_LS;
5197 case LT: return ARM_MI;
/* A swapped-operand CC mode: every condition is mirrored.  */
5204 case NE: return ARM_NE;
5205 case EQ: return ARM_EQ;
5206 case GE: return ARM_LE;
5207 case GT: return ARM_LT;
5208 case LE: return ARM_GE;
5209 case LT: return ARM_GT;
5210 case GEU: return ARM_LS;
5211 case GTU: return ARM_CC;
5212 case LEU: return ARM_CS;
5213 case LTU: return ARM_HI;
/* Carry-flag mode: only unsigned orderings make sense.  */
5220 case LTU: return ARM_CS;
5221 case GEU: return ARM_CC;
/* CCmode (presumably the generic case): the natural mapping.  */
5228 case NE: return ARM_NE;
5229 case EQ: return ARM_EQ;
5230 case GE: return ARM_GE;
5231 case GT: return ARM_GT;
5232 case LE: return ARM_LE;
5233 case LT: return ARM_LT;
5234 case GEU: return ARM_CS;
5235 case GTU: return ARM_HI;
5236 case LEU: return ARM_LS;
5237 case LTU: return ARM_CC;
/* NOTE (review): partial extraction -- many lines (braces, declarations,
   case labels, fail/succeed assignments) are missing; comments only
   added, code text unchanged.

   final_prescan_insn -- drives the conditional-execution finite state
   machine described above: when INSN is a conditional branch over a
   short run of ordinary insns, arrange (via arm_ccfsm_state,
   arm_target_label/arm_target_insn and arm_current_cc) for the branch
   to be suppressed and the skipped insns to be conditionalised.  */
5249 final_prescan_insn (insn, opvec, noperands)
5254 /* BODY will hold the body of INSN. */
5255 register rtx body = PATTERN (insn);
5257 /* This will be 1 if trying to repeat the trick, and things need to be
5258 reversed if it appears to fail. */
5261 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
5262 taken are clobbered, even if the rtl suggests otherwise. It also
5263 means that we have to grub around within the jump expression to find
5264 out what the conditions are when the jump isn't taken. */
5265 int jump_clobbers = 0;
5267 /* If we start with a return insn, we only succeed if we find another one. */
5268 int seeking_return = 0;
5270 /* START_INSN will hold the insn from where we start looking. This is the
5271 first insn after the following code_label if REVERSE is true. */
5272 rtx start_insn = insn;
5274 /* If in state 4, check if the target branch is reached, in order to
5275 change back to state 0. */
5276 if (arm_ccfsm_state == 4)
5278 if (insn == arm_target_insn)
5280 arm_target_insn = NULL;
5281 arm_ccfsm_state = 0;
5286 /* If in state 3, it is possible to repeat the trick, if this insn is an
5287 unconditional branch to a label, and immediately following this branch
5288 is the previous target label which is only used once, and the label this
5289 branch jumps to is not too far off. */
5290 if (arm_ccfsm_state == 3)
5292 if (simplejump_p (insn))
5294 start_insn = next_nonnote_insn (start_insn);
5295 if (GET_CODE (start_insn) == BARRIER)
5297 /* XXX Isn't this always a barrier? */
5298 start_insn = next_nonnote_insn (start_insn);
5300 if (GET_CODE (start_insn) == CODE_LABEL
5301 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
5302 && LABEL_NUSES (start_insn) == 1)
5307 else if (GET_CODE (body) == RETURN)
5309 start_insn = next_nonnote_insn (start_insn);
5310 if (GET_CODE (start_insn) == BARRIER)
5311 start_insn = next_nonnote_insn (start_insn);
5312 if (GET_CODE (start_insn) == CODE_LABEL
5313 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
5314 && LABEL_NUSES (start_insn) == 1)
5326 if (arm_ccfsm_state != 0 && !reverse)
5328 if (GET_CODE (insn) != JUMP_INSN)
5331 /* This jump might be paralleled with a clobber of the condition codes
5332 the jump should always come first */
5333 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5334 body = XVECEXP (body, 0, 0);
5337 /* If this is a conditional return then we don't want to know */
5338 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
5339 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
5340 && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
5341 || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
5346 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
5347 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
5350 int fail = FALSE, succeed = FALSE;
5351 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
5352 int then_not_else = TRUE;
5353 rtx this_insn = start_insn, label = 0;
5355 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
5357 /* The code below is wrong for these, and I haven't time to
5358 fix it now. So we just do the safe thing and return. This
5359 whole function needs re-writing anyway. */
5364 /* Register the insn jumped to. */
5367 if (!seeking_return)
5368 label = XEXP (SET_SRC (body), 0);
5370 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
5371 label = XEXP (XEXP (SET_SRC (body), 1), 0);
5372 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
5374 label = XEXP (XEXP (SET_SRC (body), 2), 0);
5375 then_not_else = FALSE;
5377 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
5379 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
5382 then_not_else = FALSE;
5387 /* See how many insns this branch skips, and what kind of insns. If all
5388 insns are okay, and the label or unconditional branch to the same
5389 label is not too far away, succeed. */
5390 for (insns_skipped = 0;
5391 !fail && !succeed && insns_skipped++ < MAX_INSNS_SKIPPED;)
5395 this_insn = next_nonnote_insn (this_insn);
5399 scanbody = PATTERN (this_insn);
/* Dispatch on the kind of insn encountered while scanning forward
   (case labels CODE_LABEL / BARRIER / CALL_INSN / JUMP_INSN / INSN
   were elided in extraction).  */
5401 switch (GET_CODE (this_insn))
5404 /* Succeed if it is the target label, otherwise fail since
5405 control falls in from somewhere else. */
5406 if (this_insn == label)
5410 arm_ccfsm_state = 2;
5411 this_insn = next_nonnote_insn (this_insn);
5414 arm_ccfsm_state = 1;
5422 /* Succeed if the following insn is the target label.
5424 If return insns are used then the last insn in a function
5425 will be a barrier. */
5426 this_insn = next_nonnote_insn (this_insn);
5427 if (this_insn && this_insn == label)
5431 arm_ccfsm_state = 2;
5432 this_insn = next_nonnote_insn (this_insn);
5435 arm_ccfsm_state = 1;
5443 /* If using 32-bit addresses the cc is not preserved over
5447 /* Succeed if the following insn is the target label,
5448 or if the following two insns are a barrier and
5449 the target label. */
5450 this_insn = next_nonnote_insn (this_insn);
5451 if (this_insn && GET_CODE (this_insn) == BARRIER)
5452 this_insn = next_nonnote_insn (this_insn);
5454 if (this_insn && this_insn == label
5455 && insns_skipped < MAX_INSNS_SKIPPED)
5459 arm_ccfsm_state = 2;
5460 this_insn = next_nonnote_insn (this_insn);
5463 arm_ccfsm_state = 1;
5472 /* If this is an unconditional branch to the same label, succeed.
5473 If it is to another label, do nothing. If it is conditional,
5475 /* XXX Probably, the test for the SET and the PC are unnecessary. */
5477 if (GET_CODE (scanbody) == SET
5478 && GET_CODE (SET_DEST (scanbody)) == PC)
5480 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
5481 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
5483 arm_ccfsm_state = 2;
5486 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
5489 else if (GET_CODE (scanbody) == RETURN
5492 arm_ccfsm_state = 2;
5495 else if (GET_CODE (scanbody) == PARALLEL)
5497 switch (get_attr_conds (this_insn))
5509 /* Instructions using or affecting the condition codes make it
5511 if ((GET_CODE (scanbody) == SET
5512 || GET_CODE (scanbody) == PARALLEL)
5513 && get_attr_conds (this_insn) != CONDS_NOCOND)
/* The trick worked: record either the target label (state 1) or the
   target insn (state 2), skipping trailing USE insns.  */
5523 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
5524 arm_target_label = CODE_LABEL_NUMBER (label);
5525 else if (seeking_return || arm_ccfsm_state == 2)
5527 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
5529 this_insn = next_nonnote_insn (this_insn);
5530 if (this_insn && (GET_CODE (this_insn) == BARRIER
5531 || GET_CODE (this_insn) == CODE_LABEL))
5536 /* Oh, dear! we ran off the end.. give up */
5537 recog (PATTERN (insn), insn, NULL_PTR);
5538 arm_ccfsm_state = 0;
5539 arm_target_insn = NULL;
5542 arm_target_insn = this_insn;
/* Work out the condition under which the skipped insns execute; the
   jump-clobbers path digs inside the jump expression.  */
5551 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
5553 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
5554 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
5555 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
5556 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
5560 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
5563 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
5567 if (reverse || then_not_else)
5568 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
5570 /* restore recog_operand (getting the attributes of other insns can
5571 destroy this array, but final.c assumes that it remains intact
5572 across this call; since the insn has been recognized already we
5573 call recog direct). */
5574 recog (PATTERN (insn), insn, NULL_PTR);
/* NOTE (review): partial extraction -- the struct's name field, the
   enclosing function's header (presumably aof_pic_entry) and braces are
   missing; comments only added, code text unchanged.

   AOF PIC support: a chain of symbols that need entries in the
   "x$adcons" constant pool; the lookup below returns aof_pic_label plus
   the symbol's byte offset within that pool, appending a new chain
   entry on a miss.  */
5578 #ifdef AOF_ASSEMBLER
5579 /* Special functions only needed when producing AOF syntax assembler. */
5581 rtx aof_pic_label = NULL_RTX;
5584 struct pic_chain *next;
5588 static struct pic_chain *aof_pic_chain = NULL;
5594 struct pic_chain **chainp;
5597 if (aof_pic_label == NULL_RTX)
5599 /* This needs to persist throughout the compilation. */
5600 end_temporary_allocation ();
5601 aof_pic_label = gen_rtx (SYMBOL_REF, Pmode, "x$adcons");
5602 resume_temporary_allocation ();
/* Walk the chain looking for this symbol; each entry is 4 bytes.  */
5605 for (offset = 0, chainp = &aof_pic_chain; *chainp;
5606 offset += 4, chainp = &(*chainp)->next)
5607 if ((*chainp)->symname == XSTR (x, 0))
5608 return plus_constant (aof_pic_label, offset);
/* Not found: append a new entry and return its offset.  */
5610 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
5611 (*chainp)->next = NULL;
5612 (*chainp)->symname = XSTR (x, 0);
5613 return plus_constant (aof_pic_label, offset);
/* NOTE (review): partial extraction -- braces and some lines missing;
   comments only added, code text unchanged.

   aof_dump_pic_table -- write the accumulated PIC constant pool to F as
   an AOF AREA of DCD entries, one per chained symbol.  */
5617 aof_dump_pic_table (f)
5620 struct pic_chain *chain;
5622 if (aof_pic_chain == NULL)
5625 fprintf (f, "\tAREA |%s$$adcons|, BASED %s%s\n",
5626 reg_names[PIC_OFFSET_TABLE_REGNUM], REGISTER_PREFIX,
5627 reg_names[PIC_OFFSET_TABLE_REGNUM]);
5628 fputs ("|x$adcons|\n", f);
5630 for (chain = aof_pic_chain; chain; chain = chain->next)
5632 fputs ("\tDCD\t", f);
5633 assemble_name (f, chain->symname);
/* NOTE (review): partial extraction -- the enclosing function's header
   (apparently the AOF text-section helper) was lost.  Formats a fresh,
   uniquely numbered AOF CODE AREA directive into a static buffer,
   marking it PIC/REENTRANT under some (elided) condition.  */
5638 int arm_text_section_count = 1;
5643 static char buf[100];
5644 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
5645 arm_text_section_count++);
5647 strcat (buf, ", PIC, REENTRANT");
/* NOTE (review): partial extraction -- the enclosing function's header
   (apparently the AOF data-section helper) was lost.  Formats a fresh,
   uniquely numbered AOF DATA AREA directive into a static buffer.  */
5651 static int arm_data_section_count = 1;
5656 static char buf[100];
5657 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
/* NOTE (review): partial extraction -- the struct's name field, function
   headers and braces are missing; comments only added, code unchanged.  */
5661 /* The AOF assembler is religiously strict about declarations of
5662 imported and exported symbols, so that it is impossible to declare
5663 a function as imported near the beginning of the file, and then to
5664 export it later on. It is, however, possible to delay the decision
5665 until all the functions in the file have been compiled. To get
5666 around this, we maintain a list of the imports and exports, and
5667 delete from it any that are subsequently defined. At the end of
5668 compilation we spit the remainder of the list out before the END
5673 struct import *next;
5677 static struct import *imports_list = NULL;
/* aof_add_import -- record NAME in the pending-imports list unless it is
   already present (pointer comparison on the interned name string).  */
5680 aof_add_import (name)
5685 for (new = imports_list; new; new = new->next)
5686 if (new->name == name)
5689 new = (struct import *) xmalloc (sizeof (struct import));
5690 new->next = imports_list;
/* NOTE (review): partial extraction -- braces and some lines missing;
   comments only added, code text unchanged.

   aof_delete_import -- unlink NAME from the pending-imports list (the
   symbol turned out to be defined locally, so it must not be IMPORTed).
   NOTE (review): the unlinked node does not appear to be freed here --
   possibly deliberate given obstack-era allocation; verify.  */
5696 aof_delete_import (name)
5699 struct import **old;
5701 for (old = &imports_list; *old; old = & (*old)->next)
5703 if ((*old)->name == name)
5705 *old = (*old)->next;
/* NOTE (review): partial extraction -- braces and some lines missing;
   comments only added, code text unchanged.

   aof_dump_imports -- at end of compilation, emit an IMPORT directive to
   F for every symbol still on the pending list; if main was compiled,
   also pull in the startup code via __main.  */
5711 int arm_main_function = 0;
5714 aof_dump_imports (f)
5717 /* The AOF assembler needs this to cause the startup code to be extracted
5718 from the library. Bringing in __main causes the whole thing to work
5720 if (arm_main_function)
5723 fputs ("\tIMPORT __main\n", f);
5724 fputs ("\tDCD __main\n", f);
5727 /* Now dump the remaining imports. */
5728 while (imports_list)
5730 fprintf (f, "\tIMPORT\t");
5731 assemble_name (f, imports_list->name);
5733 imports_list = imports_list->next;
5736 #endif /* AOF_ASSEMBLER */