1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 1993, 1994, 1995 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "insn-flags.h"
36 #include "insn-attr.h"
42 /* The maximum number of insns skipped which will be conditionalised if
/* Maximum number of insns that a conditional branch may skip and still
   have the branch replaced by conditional execution of those insns
   (see the truncated comment above).  */
44 #define MAX_INSNS_SKIPPED 5
46 /* Some function declarations. */
47 extern FILE *asm_out_file
;
48 extern char *output_multi_immediate ();
49 extern void arm_increase_location ();
51 HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
52 static int get_prologue_size
PROTO ((void));
53 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
54 HOST_WIDE_INT
, rtx
, rtx
, int, int));
56 /* Define the information needed to generate branch insns. This is
57 stored from the compare operation. */
59 rtx arm_compare_op0
, arm_compare_op1
;
62 /* What type of cpu are we compiling for? */
63 enum processor_type arm_cpu
;
65 /* What type of floating point are we compiling for? */
66 enum floating_point_type arm_fpu
;
68 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
69 enum prog_mode_type arm_prgmode
;
71 char *target_cpu_name
= ARM_CPU_NAME
;
72 char *target_fpe_name
= NULL
;
74 /* Nonzero if this is an "M" variant of the processor. */
75 int arm_fast_multiply
= 0;
77 /* Nonzero if this chip support the ARM Architecture 4 extensions */
80 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
81 must report the mode of the memory reference from PRINT_OPERAND to
82 PRINT_OPERAND_ADDRESS. */
83 enum machine_mode output_memory_reference_mode
;
85 /* Nonzero if the prologue must setup `fp'. */
86 int current_function_anonymous_args
;
88 /* Location counter of .text segment. */
89 int arm_text_location
= 0;
91 /* Set to one if we think that lr is only saved because of subroutine calls,
92 but all of these can be `put after' return insns */
93 int lr_save_eliminated
;
95 /* A hash table is used to store text segment labels and their associated
96 offset from the start of the text segment. */
101 struct label_offset
*cdr
;
/* Number of buckets in the text-segment label-offset hash table
   declared below (257 is prime).  */
104 #define LABEL_HASH_SIZE 257
106 static struct label_offset
*offset_table
[LABEL_HASH_SIZE
];
108 /* Set to 1 when a return insn is output, this means that the epilogue
111 static int return_used_this_function
;
113 static int arm_constant_limit
= 3;
115 /* For an explanation of these variables, see final_prescan_insn below. */
119 int arm_target_label
;
121 /* The condition codes of the ARM, and the inverse function. */
122 char *arm_condition_codes
[] =
124 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
125 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
/* The condition codes in the table above are arranged in pairs whose
   encodings differ only in bit 0 and whose meanings are logical
   inverses (eq/ne, cs/cc, mi/pl, ...), so the inverse of a condition
   is obtained by flipping bit 0 of its index.  */
128 #define ARM_INVERSE_CONDITION_CODE(X) ((X) ^ 1)
131 /* Initialization code */
/* Processor capability flag bits, OR-ed together in the `flags' field
   of the all_procs table entries below.  */
133 #define FL_CO_PROC 0x01 /* Has external co-processor bus */
134 #define FL_FAST_MULT 0x02 /* Fast multiply */
135 #define FL_MODE26 0x04 /* 26-bit mode support */
136 #define FL_MODE32 0x08 /* 32-bit mode support */
137 #define FL_ARCH4 0x10 /* Architecture rel 4 */
138 #define FL_THUMB 0x20 /* Thumb aware */
142 enum processor_type type
;
146 /* Not all of these give usefully different compilation alternatives,
147 but there is no simple way of generalizing them. */
148 static struct processors all_procs
[] =
150 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
151 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
152 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
153 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
154 {"arm60", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
155 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
156 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
157 {"arm620", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
158 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
159 {"arm70", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
160 {"arm7d", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
161 {"arm7di", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
162 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
164 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
166 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
167 {"arm700i", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
168 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
169 {"arm710c", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
170 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
171 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
172 | FL_ARCH4
| FL_THUMB
)},
176 /* Fix up any incompatible options that the user has specified.
177 This has now turned into a maze. */
/* Validate and reconcile the ARM command-line options: warn about or
   reject incompatible combinations, map the deprecated -m3/-m6
   switches onto -mapcs-26/-mapcs-32 plus a default cpu, resolve
   -mcpu-<name> against the all_procs table, and derive the
   arm_fast_multiply, arm_arch4 and Thumb-awareness settings, the
   -mfpe- emulator choice and the 26/32-bit program mode.
   NOTE(review): this extract is missing a number of original source
   lines (guarding `if's, braces, and at least one comment
   terminator), so the notes below describe only what is visible.  */
179 arm_override_options ()
181 int arm_thumb_aware
= 0;
/* Frame-pointer elimination makes -g debug info unreliable.  */
183 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
184 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
/* Poking the function name into the text requires the APCS frame.  */
186 if (TARGET_POKE_FUNCTION_NAME
)
187 target_flags
|= ARM_FLAG_APCS_FRAME
;
/* Deprecated -m6: treated as -mapcs-32 on an ARM6 (its guarding test
   is among the lines missing from this extract).  */
191 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu-<proc>");
192 target_flags
|= ARM_FLAG_APCS_32
;
193 arm_cpu
= PROCESSOR_ARM6
;
/* Deprecated -m3: treated as -mapcs-26 on an ARM2.  */
198 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu-<proc>");
199 target_flags
&= ~ARM_FLAG_APCS_32
;
200 arm_cpu
= PROCESSOR_ARM2
;
/* Mixing the old-style -m3/-m6 switches with -mcpu- is ambiguous.  */
203 if ((TARGET_3
|| TARGET_6
) && target_cpu_name
!= NULL
)
204 fatal ("Incompatible mix of old and new options. -m%d and -mcpu-%s",
205 TARGET_3
? 3 : 6, target_cpu_name
);
207 if (TARGET_APCS_REENT
&& flag_pic
)
208 fatal ("-fpic and -mapcs-reent are incompatible");
210 if (TARGET_APCS_REENT
)
211 warning ("APCS reentrant code not supported. Ignored");
214 warning ("Position independent code not supported. Ignored");
216 if (TARGET_APCS_FLOAT
)
217 warning ("Passing floating point arguments in fp regs not yet supported");
/* -mapcs-stack-check needs the APCS frame: warn, then force it on.  */
219 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
221 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
222 target_flags
|= ARM_FLAG_APCS_FRAME
;
/* Default cpu derived from the old-style switches.  */
225 arm_cpu
= TARGET_6
? PROCESSOR_ARM6
: PROCESSOR_ARM2
;
/* Resolve -mcpu-<name> against the table of supported processors.  */
228 if (target_cpu_name
!= NULL
)
230 char *c
= target_cpu_name
;
231 struct processors
*proc
;
233 /* Match against the supported types. */
234 for (proc
= all_procs
; proc
->name
!= NULL
; proc
++)
236 if (strcmp (proc
->name
, c
) == 0)
242 arm_cpu
= proc
->type
;
244 /* Default value for floating point code... if no co-processor
245 bus, then schedule for emulated floating point. Otherwise,
246 assume the user has an FPA, unless overridden with -mfpe-... */
/* NOTE(review): as it appears here, `==' binds tighter than `&', so
   this tests proc->flags & (FL_CO_PROC == 0), which is always zero
   and the branch is never taken.  The intent is clearly
   (proc->flags & FL_CO_PROC) == 0 -- compare the correctly
   parenthesised flag tests just below.  Parentheses may also have
   been lost in extraction; verify against the original file.  */
247 if (proc
->flags
& FL_CO_PROC
== 0)
251 arm_fast_multiply
= (proc
->flags
& FL_FAST_MULT
) != 0;
252 arm_arch4
= (proc
->flags
& FL_ARCH4
) != 0;
253 arm_thumb_aware
= (proc
->flags
& FL_THUMB
) != 0;
254 /* Processors with a load delay slot can load constants faster,
255 from the pool than it takes to construct them, so reduce the
256 complexity of the constant that we will try to generate
260 fatal ("Unrecognized cpu type: %s", target_cpu_name
);
265 if (strcmp (target_fpe_name
, "2") == 0)
267 else if (strcmp (target_fpe_name
, "3") == 0)
270 fatal ("Invalid floating point emulation option: -mfpe-%s",
274 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
276 warning ("This processor variant does not support Thumb interworking");
277 target_flags
&= ~ARM_FLAG_THUMB
;
280 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
283 /* For arm2/3 there is no need to do any scheduling if there is only
284 a floating point emulator, or we are doing software floating-point. */
285 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
286 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
/* Finally record whether we generate 26-bit or 32-bit (APCS-32) code.  */
288 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
292 /* Return 1 if it is possible to return using a single instruction */
299 if (!reload_completed
||current_function_pretend_args_size
300 || current_function_anonymous_args
301 || (get_frame_size () && !(TARGET_APCS
|| frame_pointer_needed
)))
304 /* Can't be done if any of the FPU regs are pushed, since this also
306 for (regno
= 20; regno
< 24; regno
++)
307 if (regs_ever_live
[regno
])
313 /* Return TRUE if int I is a valid immediate ARM constant. */
319 unsigned HOST_WIDE_INT mask
= ~0xFF;
321 /* Fast return for 0 and powers of 2 */
322 if ((i
& (i
- 1)) == 0)
327 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
330 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
331 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
332 } while (mask
!= ~0xFF);
337 /* Return true if I is a valid constant for the operation CODE. */
339 const_ok_for_op (i
, code
, mode
)
342 enum machine_mode mode
;
344 if (const_ok_for_arm (i
))
350 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
352 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
358 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
365 /* Emit a sequence of insns to handle a large constant.
366 CODE is the code of the operation required, it can be any of SET, PLUS,
367 IOR, AND, XOR, MINUS;
368 MODE is the mode in which the operation is being performed;
369 VAL is the integer to operate on;
370 SOURCE is the other operand (a register, or a null-pointer for SET);
371 SUBTARGETS means it is safe to create scratch registers if that will
372 either produce a simpler sequence, or we will want to cse the values.
373 Return value is the number of insns emitted. */
376 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
378 enum machine_mode mode
;
384 if (subtargets
|| code
== SET
385 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
386 && REGNO (target
) != REGNO (source
)))
390 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
391 > arm_constant_limit
+ (code
!= SET
))
395 /* Currently SET is the only monadic value for CODE, all
396 the rest are diadic. */
397 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
402 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
404 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
405 /* For MINUS, the value is subtracted from, since we never
406 have subtraction of a constant. */
408 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
409 gen_rtx (code
, mode
, temp
, source
)));
411 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
412 gen_rtx (code
, mode
, source
, temp
)));
418 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
421 /* As above, but extra parameter GENERATE which, if clear, suppresses
424 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
426 enum machine_mode mode
;
436 int can_negate_initial
= 0;
439 int num_bits_set
= 0;
440 int set_sign_bit_copies
= 0;
441 int clear_sign_bit_copies
= 0;
442 int clear_zero_bit_copies
= 0;
443 int set_zero_bit_copies
= 0;
446 unsigned HOST_WIDE_INT temp1
, temp2
;
447 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
449 /* find out which operations are safe for a given CODE. Also do a quick
450 check for degenerate cases; these can occur when DImode operations
462 can_negate_initial
= 1;
466 if (remainder
== 0xffffffff)
469 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
470 GEN_INT (ARM_SIGN_EXTEND (val
))));
475 if (reload_completed
&& rtx_equal_p (target
, source
))
478 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
487 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
490 if (remainder
== 0xffffffff)
492 if (reload_completed
&& rtx_equal_p (target
, source
))
495 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
504 if (reload_completed
&& rtx_equal_p (target
, source
))
507 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
510 if (remainder
== 0xffffffff)
513 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
514 gen_rtx (NOT
, mode
, source
)));
518 /* We don't know how to handle this yet below. */
522 /* We treat MINUS as (val - source), since (source - val) is always
523 passed as (source + (-val)). */
527 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
528 gen_rtx (NEG
, mode
, source
)));
531 if (const_ok_for_arm (val
))
534 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
535 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
546 /* If we can do it in one insn get out quickly */
547 if (const_ok_for_arm (val
)
548 || (can_negate_initial
&& const_ok_for_arm (-val
))
549 || (can_invert
&& const_ok_for_arm (~val
)))
552 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
553 (source
? gen_rtx (code
, mode
, source
,
560 /* Calculate a few attributes that may be useful for specific
563 for (i
= 31; i
>= 0; i
--)
565 if ((remainder
& (1 << i
)) == 0)
566 clear_sign_bit_copies
++;
571 for (i
= 31; i
>= 0; i
--)
573 if ((remainder
& (1 << i
)) != 0)
574 set_sign_bit_copies
++;
579 for (i
= 0; i
<= 31; i
++)
581 if ((remainder
& (1 << i
)) == 0)
582 clear_zero_bit_copies
++;
587 for (i
= 0; i
<= 31; i
++)
589 if ((remainder
& (1 << i
)) != 0)
590 set_zero_bit_copies
++;
598 /* See if we can do this by sign_extending a constant that is known
599 to be negative. This is a good, way of doing it, since the shift
600 may well merge into a subsequent insn. */
601 if (set_sign_bit_copies
> 1)
604 (temp1
= ARM_SIGN_EXTEND (remainder
605 << (set_sign_bit_copies
- 1))))
609 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
610 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
612 emit_insn (gen_ashrsi3 (target
, new_src
,
613 GEN_INT (set_sign_bit_copies
- 1)));
617 /* For an inverted constant, we will need to set the low bits,
618 these will be shifted out of harm's way. */
619 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
620 if (const_ok_for_arm (~temp1
))
624 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
625 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
627 emit_insn (gen_ashrsi3 (target
, new_src
,
628 GEN_INT (set_sign_bit_copies
- 1)));
634 /* See if we can generate this by setting the bottom (or the top)
635 16 bits, and then shifting these into the other half of the
636 word. We only look for the simplest cases, to do more would cost
637 too much. Be careful, however, not to generate this when the
638 alternative would take fewer insns. */
639 if (val
& 0xffff0000)
641 temp1
= remainder
& 0xffff0000;
642 temp2
= remainder
& 0x0000ffff;
644 /* Overlaps outside this range are best done using other methods. */
645 for (i
= 9; i
< 24; i
++)
647 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
648 && ! const_ok_for_arm (temp2
))
650 insns
= arm_gen_constant (code
, mode
, temp2
,
651 new_src
= (subtargets
654 source
, subtargets
, generate
);
657 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
659 gen_rtx (ASHIFT
, mode
, source
,
666 /* Don't duplicate cases already considered. */
667 for (i
= 17; i
< 24; i
++)
669 if (((temp1
| (temp1
>> i
)) == remainder
)
670 && ! const_ok_for_arm (temp1
))
672 insns
= arm_gen_constant (code
, mode
, temp1
,
673 new_src
= (subtargets
676 source
, subtargets
, generate
);
679 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
681 gen_rtx (LSHIFTRT
, mode
,
682 source
, GEN_INT (i
)),
692 /* If we have IOR or XOR, and the inverse of the constant can be loaded
693 in a single instruction, and we can find a temporary to put it in,
694 then this can be done in two instructions instead of 3-4. */
696 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
698 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
702 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
704 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
705 GEN_INT (ARM_SIGN_EXTEND (~ val
))));
706 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
707 gen_rtx (code
, mode
, source
, sub
)));
716 if (set_sign_bit_copies
> 8
717 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
721 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
722 rtx shift
= GEN_INT (set_sign_bit_copies
);
724 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
726 gen_rtx (ASHIFT
, mode
, source
,
728 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
730 gen_rtx (LSHIFTRT
, mode
, sub
,
736 if (set_zero_bit_copies
> 8
737 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
741 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
742 rtx shift
= GEN_INT (set_zero_bit_copies
);
744 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
746 gen_rtx (LSHIFTRT
, mode
, source
,
748 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
750 gen_rtx (ASHIFT
, mode
, sub
,
756 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
760 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
761 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
762 gen_rtx (NOT
, mode
, source
)));
765 sub
= gen_reg_rtx (mode
);
766 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
767 gen_rtx (AND
, mode
, source
,
769 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
770 gen_rtx (NOT
, mode
, sub
)));
777 /* See if two shifts will do 2 or more insn's worth of work. */
778 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
780 HOST_WIDE_INT shift_mask
= ((0xffffffff
781 << (32 - clear_sign_bit_copies
))
786 if ((remainder
| shift_mask
) != 0xffffffff)
790 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
791 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
792 new_source
, source
, subtargets
, 1);
796 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
797 new_source
, source
, subtargets
, 0);
802 shift
= GEN_INT (clear_sign_bit_copies
);
803 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
804 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
805 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
811 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
813 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
817 if ((remainder
| shift_mask
) != 0xffffffff)
821 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
822 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
823 new_source
, source
, subtargets
, 1);
827 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
828 new_source
, source
, subtargets
, 0);
833 shift
= GEN_INT (clear_zero_bit_copies
);
834 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
835 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
836 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
848 for (i
= 0; i
< 32; i
++)
849 if (remainder
& (1 << i
))
852 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
853 remainder
= (~remainder
) & 0xffffffff;
854 else if (code
== PLUS
&& num_bits_set
> 16)
855 remainder
= (-remainder
) & 0xffffffff;
862 /* Now try and find a way of doing the job in either two or three
864 We start by looking for the largest block of zeros that are aligned on
865 a 2-bit boundary, we then fill up the temps, wrapping around to the
866 top of the word when we drop off the bottom.
867 In the worst case this code should produce no more than four insns. */
870 int best_consecutive_zeros
= 0;
872 for (i
= 0; i
< 32; i
+= 2)
874 int consecutive_zeros
= 0;
876 if (! (remainder
& (3 << i
)))
878 while ((i
< 32) && ! (remainder
& (3 << i
)))
880 consecutive_zeros
+= 2;
883 if (consecutive_zeros
> best_consecutive_zeros
)
885 best_consecutive_zeros
= consecutive_zeros
;
886 best_start
= i
- consecutive_zeros
;
892 /* Now start emitting the insns, starting with the one with the highest
893 bit set: we do this so that the smallest number will be emitted last;
894 this is more likely to be combinable with addressing insns. */
902 if (remainder
& (3 << (i
- 2)))
907 temp1
= remainder
& ((0x0ff << end
)
908 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
914 emit_insn (gen_rtx (SET
, VOIDmode
,
915 new_src
= (subtargets
918 GEN_INT (can_invert
? ~temp1
: temp1
)));
922 else if (code
== MINUS
)
925 emit_insn (gen_rtx (SET
, VOIDmode
,
926 new_src
= (subtargets
929 gen_rtx (code
, mode
, GEN_INT (temp1
),
936 emit_insn (gen_rtx (SET
, VOIDmode
,
942 gen_rtx (code
, mode
, source
,
943 GEN_INT (can_invert
? ~temp1
959 /* Handle aggregates that are not laid out in a BLKmode element.
960 This is a sub-element of RETURN_IN_MEMORY. */
962 arm_return_in_memory (type
)
965 if (TREE_CODE (type
) == RECORD_TYPE
)
969 /* For a struct, we can return in a register if every element was a
971 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
972 if (TREE_CODE (field
) != FIELD_DECL
973 || ! DECL_BIT_FIELD_TYPE (field
))
978 else if (TREE_CODE (type
) == UNION_TYPE
)
982 /* Unions can be returned in registers if every element is
983 integral, or can be returned in an integer register. */
984 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
986 if (TREE_CODE (field
) != FIELD_DECL
987 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
988 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
989 || FLOAT_TYPE_P (TREE_TYPE (field
)))
994 /* XXX Not sure what should be done for other aggregates, so put them in
/* True if X is a REG, or a SUBREG wrapping a REG.  */
999 #define REG_OR_SUBREG_REG(X) \
1000 (GET_CODE (X) == REG \
1001 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
/* Strip a SUBREG wrapper: yield X itself if it is a REG, otherwise the
   register underneath the SUBREG.  */
1003 #define REG_OR_SUBREG_RTX(X) \
1004 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
/* True if X is one of the frame-related registers: frame pointer,
   stack pointer or argument pointer.  */
1006 #define ARM_FRAME_RTX(X) \
1007 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1008 || (X) == arg_pointer_rtx)
1011 arm_rtx_costs (x
, code
, outer_code
)
1013 enum rtx_code code
, outer_code
;
1015 enum machine_mode mode
= GET_MODE (x
);
1016 enum rtx_code subcode
;
1022 /* Memory costs quite a lot for the first word, but subsequent words
1023 load at the equivalent of a single insn each. */
1024 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1025 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1032 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1039 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1041 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1042 + ((GET_CODE (XEXP (x
, 0)) == REG
1043 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1044 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1046 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1047 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1048 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1050 + ((GET_CODE (XEXP (x
, 1)) == REG
1051 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1052 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1053 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1058 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1059 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1060 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1061 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1064 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1065 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1066 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1067 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1069 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1070 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1071 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1074 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1075 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1076 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1077 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1078 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1079 || subcode
== ROTATE
|| subcode
== ROTATERT
1081 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1082 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1083 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1084 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1085 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1086 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1087 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1092 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1093 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1094 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1095 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1096 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1100 case AND
: case XOR
: case IOR
:
1103 /* Normally the frame registers will be spilt into reg+const during
1104 reload, so it is a bad idea to combine them with other instructions,
1105 since then they might not be moved outside of loops. As a compromise
1106 we allow integration with ops that have a constant as their second
1108 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1109 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1110 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1111 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1112 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1116 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1117 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1118 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1119 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1122 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1123 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1124 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1125 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1126 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1129 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1130 return (1 + extra_cost
1131 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1132 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1133 || subcode
== ROTATE
|| subcode
== ROTATERT
1135 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1136 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1137 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1138 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1139 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1140 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1146 if (arm_fast_multiply
&& mode
== DImode
1147 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1148 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1149 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1152 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1156 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1158 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1159 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1160 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1162 int booth_unit_size
= (arm_fast_multiply
? 8 : 2);
1164 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1166 i
>>= booth_unit_size
;
1173 return ((arm_fast_multiply
? 8 : 30)
1174 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1175 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1178 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1179 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1183 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1185 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1188 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1196 return 4 + (mode
== DImode
? 4 : 0);
1199 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1200 return (4 + (mode
== DImode
? 4 : 0)
1201 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1204 switch (GET_MODE (XEXP (x
, 0)))
1207 return (1 + (mode
== DImode
? 4 : 0)
1208 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1211 return (4 + (mode
== DImode
? 4 : 0)
1212 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1215 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1224 /* This code has been fixed for cross compilation. */
1226 static int fpa_consts_inited
= 0;
1228 char *strings_fpa
[8] = {
1230 "4", "5", "0.5", "10"
1233 static REAL_VALUE_TYPE values_fpa
[8];
1241 for (i
= 0; i
< 8; i
++)
1243 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1247 fpa_consts_inited
= 1;
1250 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1253 const_double_rtx_ok_for_fpu (x
)
1259 if (!fpa_consts_inited
)
1262 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1263 if (REAL_VALUE_MINUS_ZERO (r
))
1266 for (i
= 0; i
< 8; i
++)
1267 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1273 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1276 neg_const_double_rtx_ok_for_fpu (x
)
1282 if (!fpa_consts_inited
)
1285 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1286 r
= REAL_VALUE_NEGATE (r
);
1287 if (REAL_VALUE_MINUS_ZERO (r
))
1290 for (i
= 0; i
< 8; i
++)
1291 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1297 /* Predicates for `match_operand' and `match_operator'. */
1299 /* s_register_operand is the same as register_operand, but it doesn't accept
1300 (SUBREG (MEM)...). */
1303 s_register_operand (op
, mode
)
1305 enum machine_mode mode
;
1307 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1310 if (GET_CODE (op
) == SUBREG
)
1311 op
= SUBREG_REG (op
);
1313 /* We don't consider registers whose class is NO_REGS
1314 to be a register operand. */
1315 return (GET_CODE (op
) == REG
1316 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1317 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1320 /* Only accept reg, subreg(reg), const_int. */
1323 reg_or_int_operand (op
, mode
)
1325 enum machine_mode mode
;
1327 if (GET_CODE (op
) == CONST_INT
)
1330 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1333 if (GET_CODE (op
) == SUBREG
)
1334 op
= SUBREG_REG (op
);
1336 /* We don't consider registers whose class is NO_REGS
1337 to be a register operand. */
1338 return (GET_CODE (op
) == REG
1339 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1340 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1343 /* Return 1 if OP is an item in memory, given that we are in reload. */
1346 reload_memory_operand (op
, mode
)
1348 enum machine_mode mode
;
1350 int regno
= true_regnum (op
);
1352 return (! CONSTANT_P (op
)
1354 || (GET_CODE (op
) == REG
1355 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1358 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1361 arm_rhs_operand (op
, mode
)
1363 enum machine_mode mode
;
1365 return (s_register_operand (op
, mode
)
1366 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1369 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1373 arm_rhsm_operand (op
, mode
)
1375 enum machine_mode mode
;
1377 return (s_register_operand (op
, mode
)
1378 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1379 || memory_operand (op
, mode
));
1382 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1383 constant that is valid when negated. */
1386 arm_add_operand (op
, mode
)
1388 enum machine_mode mode
;
1390 return (s_register_operand (op
, mode
)
1391 || (GET_CODE (op
) == CONST_INT
1392 && (const_ok_for_arm (INTVAL (op
))
1393 || const_ok_for_arm (-INTVAL (op
)))));
1397 arm_not_operand (op
, mode
)
1399 enum machine_mode mode
;
1401 return (s_register_operand (op
, mode
)
1402 || (GET_CODE (op
) == CONST_INT
1403 && (const_ok_for_arm (INTVAL (op
))
1404 || const_ok_for_arm (~INTVAL (op
)))));
1407 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1410 fpu_rhs_operand (op
, mode
)
1412 enum machine_mode mode
;
1414 if (s_register_operand (op
, mode
))
1416 else if (GET_CODE (op
) == CONST_DOUBLE
)
1417 return (const_double_rtx_ok_for_fpu (op
));
1423 fpu_add_operand (op
, mode
)
1425 enum machine_mode mode
;
1427 if (s_register_operand (op
, mode
))
1429 else if (GET_CODE (op
) == CONST_DOUBLE
)
1430 return (const_double_rtx_ok_for_fpu (op
)
1431 || neg_const_double_rtx_ok_for_fpu (op
));
1436 /* Return nonzero if OP is a constant power of two. */
1439 power_of_two_operand (op
, mode
)
1441 enum machine_mode mode
;
1443 if (GET_CODE (op
) == CONST_INT
)
1445 HOST_WIDE_INT value
= INTVAL(op
);
1446 return value
!= 0 && (value
& (value
- 1)) == 0;
1451 /* Return TRUE for a valid operand of a DImode operation.
1452 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1453 Note that this disallows MEM(REG+REG), but allows
1454 MEM(PRE/POST_INC/DEC(REG)). */
1457 di_operand (op
, mode
)
1459 enum machine_mode mode
;
1461 if (s_register_operand (op
, mode
))
1464 switch (GET_CODE (op
))
1471 return memory_address_p (DImode
, XEXP (op
, 0));
1478 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1479 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1480 Note that this disallows MEM(REG+REG), but allows
1481 MEM(PRE/POST_INC/DEC(REG)). */
1484 soft_df_operand (op
, mode
)
1486 enum machine_mode mode
;
1488 if (s_register_operand (op
, mode
))
1491 switch (GET_CODE (op
))
1497 return memory_address_p (DFmode
, XEXP (op
, 0));
1504 /* Return TRUE for valid index operands. */
1507 index_operand (op
, mode
)
1509 enum machine_mode mode
;
1511 return (s_register_operand(op
, mode
)
1512 || (immediate_operand (op
, mode
)
1513 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1516 /* Return TRUE for valid shifts by a constant. This also accepts any
1517 power of two on the (somewhat overly relaxed) assumption that the
1518 shift operator in this case was a mult. */
1521 const_shift_operand (op
, mode
)
1523 enum machine_mode mode
;
1525 return (power_of_two_operand (op
, mode
)
1526 || (immediate_operand (op
, mode
)
1527 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1530 /* Return TRUE for arithmetic operators which can be combined with a multiply
1534 shiftable_operator (x
, mode
)
1536 enum machine_mode mode
;
1538 if (GET_MODE (x
) != mode
)
1542 enum rtx_code code
= GET_CODE (x
);
1544 return (code
== PLUS
|| code
== MINUS
1545 || code
== IOR
|| code
== XOR
|| code
== AND
);
1549 /* Return TRUE for shift operators. */
1552 shift_operator (x
, mode
)
1554 enum machine_mode mode
;
1556 if (GET_MODE (x
) != mode
)
1560 enum rtx_code code
= GET_CODE (x
);
1563 return power_of_two_operand (XEXP (x
, 1));
1565 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
1566 || code
== ROTATERT
);
1570 int equality_operator (x
, mode
)
1572 enum machine_mode mode
;
1574 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
1577 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
1580 minmax_operator (x
, mode
)
1582 enum machine_mode mode
;
1584 enum rtx_code code
= GET_CODE (x
);
1586 if (GET_MODE (x
) != mode
)
1589 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
1592 /* return TRUE if x is EQ or NE */
1594 /* Return TRUE if this is the condition code register, if we aren't given
1595 a mode, accept any class CCmode register */
1598 cc_register (x
, mode
)
1600 enum machine_mode mode
;
1602 if (mode
== VOIDmode
)
1604 mode
= GET_MODE (x
);
1605 if (GET_MODE_CLASS (mode
) != MODE_CC
)
1609 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1615 /* Return TRUE if this is the condition code register, if we aren't given
1616 a mode, accept any mode in class CC_MODE that is reversible */
1619 reversible_cc_register (x
, mode
)
1621 enum machine_mode mode
;
1623 if (mode
== VOIDmode
)
1625 mode
= GET_MODE (x
);
1626 if (GET_MODE_CLASS (mode
) != MODE_CC
1627 && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1629 if (GET_MODE_CLASS (mode
) != MODE_CC
1630 || (! flag_fast_math
&& ! REVERSIBLE_CC_MODE (mode
)))
1634 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1640 /* Return TRUE if X references a SYMBOL_REF. */
1642 symbol_mentioned_p (x
)
1648 if (GET_CODE (x
) == SYMBOL_REF
)
1651 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1652 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1658 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1659 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
1662 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
1669 /* Return TRUE if X references a LABEL_REF. */
1671 label_mentioned_p (x
)
1677 if (GET_CODE (x
) == LABEL_REF
)
1680 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1681 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1687 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1688 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
1691 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
1702 enum rtx_code code
= GET_CODE (x
);
1706 else if (code
== SMIN
)
1708 else if (code
== UMIN
)
1710 else if (code
== UMAX
)
1716 /* Return 1 if memory locations are adjacent */
1719 adjacent_mem_locations (a
, b
)
1722 int val0
= 0, val1
= 0;
1725 if ((GET_CODE (XEXP (a
, 0)) == REG
1726 || (GET_CODE (XEXP (a
, 0)) == PLUS
1727 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
1728 && (GET_CODE (XEXP (b
, 0)) == REG
1729 || (GET_CODE (XEXP (b
, 0)) == PLUS
1730 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
1732 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
1734 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
1735 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
1738 reg0
= REGNO (XEXP (a
, 0));
1739 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
1741 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
1742 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
1745 reg1
= REGNO (XEXP (b
, 0));
1746 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
1751 /* Return 1 if OP is a load multiple operation. It is known to be
1752 parallel and the first section will be tested. */
1755 load_multiple_operation (op
, mode
)
1757 enum machine_mode mode
;
1759 HOST_WIDE_INT count
= XVECLEN (op
, 0);
1762 HOST_WIDE_INT i
= 1, base
= 0;
1766 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1769 /* Check to see if this might be a write-back */
1770 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
1775 /* Now check it more carefully */
1776 if (GET_CODE (SET_DEST (elt
)) != REG
1777 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
1778 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
1779 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
1780 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
1781 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
1782 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
1783 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
1784 != REGNO (SET_DEST (elt
)))
1790 /* Perform a quick check so we don't blow up below. */
1792 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
1793 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
1794 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
1797 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
1798 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
1800 for (; i
< count
; i
++)
1802 rtx elt
= XVECEXP (op
, 0, i
);
1804 if (GET_CODE (elt
) != SET
1805 || GET_CODE (SET_DEST (elt
)) != REG
1806 || GET_MODE (SET_DEST (elt
)) != SImode
1807 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
1808 || GET_CODE (SET_SRC (elt
)) != MEM
1809 || GET_MODE (SET_SRC (elt
)) != SImode
1810 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
1811 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
1812 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
1813 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
1820 /* Return 1 if OP is a store multiple operation. It is known to be
1821 parallel and the first section will be tested. */
1824 store_multiple_operation (op
, mode
)
1826 enum machine_mode mode
;
1828 HOST_WIDE_INT count
= XVECLEN (op
, 0);
1831 HOST_WIDE_INT i
= 1, base
= 0;
1835 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1838 /* Check to see if this might be a write-back */
1839 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
1844 /* Now check it more carefully */
1845 if (GET_CODE (SET_DEST (elt
)) != REG
1846 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
1847 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
1848 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
1849 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
1850 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
1851 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
1852 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
1853 != REGNO (SET_DEST (elt
)))
1859 /* Perform a quick check so we don't blow up below. */
1861 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
1862 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
1863 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
1866 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
1867 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
1869 for (; i
< count
; i
++)
1871 elt
= XVECEXP (op
, 0, i
);
1873 if (GET_CODE (elt
) != SET
1874 || GET_CODE (SET_SRC (elt
)) != REG
1875 || GET_MODE (SET_SRC (elt
)) != SImode
1876 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
1877 || GET_CODE (SET_DEST (elt
)) != MEM
1878 || GET_MODE (SET_DEST (elt
)) != SImode
1879 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
1880 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
1881 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
1882 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
1890 multi_register_push (op
, mode
)
1892 enum machine_mode mode
;
1894 if (GET_CODE (op
) != PARALLEL
1895 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1896 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
1897 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
1904 /* Routines for use with attributes */
1907 const_pool_offset (symbol
)
1910 return get_pool_offset (symbol
) - get_pool_size () - get_prologue_size ();
1913 /* Routines for use in generating RTL */
1916 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
)
1925 int sign
= up
? 1 : -1;
1927 result
= gen_rtx (PARALLEL
, VOIDmode
,
1928 rtvec_alloc (count
+ (write_back
? 2 : 0)));
1931 XVECEXP (result
, 0, 0)
1932 = gen_rtx (SET
, GET_MODE (from
), from
,
1933 plus_constant (from
, count
* 4 * sign
));
1938 for (j
= 0; i
< count
; i
++, j
++)
1940 XVECEXP (result
, 0, i
)
1941 = gen_rtx (SET
, VOIDmode
, gen_rtx (REG
, SImode
, base_regno
+ j
),
1942 gen_rtx (MEM
, SImode
,
1943 plus_constant (from
, j
* 4 * sign
)));
1947 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
1953 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
)
1962 int sign
= up
? 1 : -1;
1964 result
= gen_rtx (PARALLEL
, VOIDmode
,
1965 rtvec_alloc (count
+ (write_back
? 2 : 0)));
1968 XVECEXP (result
, 0, 0)
1969 = gen_rtx (SET
, GET_MODE (to
), to
,
1970 plus_constant (to
, count
* 4 * sign
));
1975 for (j
= 0; i
< count
; i
++, j
++)
1977 XVECEXP (result
, 0, i
)
1978 = gen_rtx (SET
, VOIDmode
,
1979 gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
)),
1980 gen_rtx (REG
, SImode
, base_regno
+ j
));
1984 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
1990 arm_gen_movstrqi (operands
)
1993 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
1996 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
1997 rtx part_bytes_reg
= NULL
;
1998 extern int optimize
;
2000 if (GET_CODE (operands
[2]) != CONST_INT
2001 || GET_CODE (operands
[3]) != CONST_INT
2002 || INTVAL (operands
[2]) > 64
2003 || INTVAL (operands
[3]) & 3)
2006 st_dst
= XEXP (operands
[0], 0);
2007 st_src
= XEXP (operands
[1], 0);
2008 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2009 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2011 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2012 out_words_to_go
= INTVAL (operands
[2]) / 4;
2013 last_bytes
= INTVAL (operands
[2]) & 3;
2015 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2016 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2018 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2020 emit_insn (arm_gen_load_multiple (0, (in_words_to_go
> 4
2021 ? 4 : in_words_to_go
),
2023 if (out_words_to_go
)
2025 if (out_words_to_go
!= 1)
2026 emit_insn (arm_gen_store_multiple (0, (out_words_to_go
> 4
2027 ? 4 : out_words_to_go
),
2031 emit_move_insn (gen_rtx (MEM
, SImode
, dst
),
2032 gen_rtx (REG
, SImode
, 0));
2033 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2037 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2038 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2041 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2042 if (out_words_to_go
)
2046 emit_move_insn (sreg
= gen_reg_rtx (SImode
), gen_rtx (MEM
, SImode
, src
));
2047 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2048 emit_move_insn (gen_rtx (MEM
, SImode
, dst
), sreg
);
2049 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2052 if (in_words_to_go
) /* Sanity check */
2058 if (in_words_to_go
< 0)
2061 part_bytes_reg
= copy_to_mode_reg (SImode
, gen_rtx (MEM
, SImode
, src
));
2062 emit_insn (gen_addsi3 (src
, src
, GEN_INT (4)));
2065 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2067 rtx tmp
= gen_reg_rtx (SImode
);
2069 if (part_bytes_reg
== NULL
)
2072 /* The bytes we want are in the top end of the word */
2073 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2074 GEN_INT (8 * (4 - last_bytes
))));
2075 part_bytes_reg
= tmp
;
2079 emit_move_insn (gen_rtx (MEM
, QImode
,
2080 plus_constant (dst
, last_bytes
- 1)),
2081 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2084 tmp
= gen_reg_rtx (SImode
);
2085 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2086 part_bytes_reg
= tmp
;
2095 if (part_bytes_reg
== NULL
)
2098 emit_move_insn (gen_rtx (MEM
, QImode
, dst
),
2099 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2100 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
2103 rtx tmp
= gen_reg_rtx (SImode
);
2104 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2105 part_bytes_reg
= tmp
;
2113 /* X and Y are two things to compare using CODE. Emit the compare insn and
2114 return the rtx for register 0 in the proper mode. FP means this is a
2115 floating point compare: I don't think that it is needed on the arm. */
2118 gen_compare_reg (code
, x
, y
, fp
)
2122 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
2123 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
2125 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
2126 gen_rtx (COMPARE
, mode
, x
, y
)));
2132 arm_reload_in_hi (operands
)
2135 rtx base
= find_replacement (&XEXP (operands
[1], 0));
2137 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
2138 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
2139 gen_rtx (MEM
, QImode
,
2140 plus_constant (base
, 1))));
2141 if (BYTES_BIG_ENDIAN
)
2142 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
2144 gen_rtx (IOR
, SImode
,
2145 gen_rtx (ASHIFT
, SImode
,
2146 gen_rtx (SUBREG
, SImode
,
2151 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
2153 gen_rtx (IOR
, SImode
,
2154 gen_rtx (ASHIFT
, SImode
,
2157 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
2161 arm_reload_out_hi (operands
)
2164 rtx base
= find_replacement (&XEXP (operands
[0], 0));
2166 if (BYTES_BIG_ENDIAN
)
2168 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
2169 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
2170 emit_insn (gen_lshrsi3 (operands
[2],
2171 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
2173 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
2174 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
2178 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
2179 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
2180 emit_insn (gen_lshrsi3 (operands
[2],
2181 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
2183 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
2184 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
2188 /* Check to see if a branch is forwards or backwards. Return TRUE if it
2192 arm_backwards_branch (from
, to
)
2195 return insn_addresses
[to
] <= insn_addresses
[from
];
2198 /* Check to see if a branch is within the distance that can be done using
2199 an arithmetic expression. */
2201 short_branch (from
, to
)
2204 int delta
= insn_addresses
[from
] + 8 - insn_addresses
[to
];
2206 return abs (delta
) < 980; /* A small margin for safety */
2209 /* Check to see that the insn isn't the target of the conditionalizing
2212 arm_insn_not_targeted (insn
)
2215 return insn
!= arm_target_insn
;
2219 /* Routines for manipulation of the constant pool. */
2220 /* This is unashamedly hacked from the version in sh.c, since the problem is
2221 extremely similar. */
2223 /* Arm instructions cannot load a large constant into a register,
2224 constants have to come from a pc relative load. The reference of a pc
2225 relative load instruction must be less than 1k infront of the instruction.
2226 This means that we often have to dump a constant inside a function, and
2227 generate code to branch around it.
2229 It is important to minimize this, since the branches will slow things
2230 down and make things bigger.
2232 Worst case code looks like:
2248 We fix this by performing a scan before scheduling, which notices which
2249 instructions need to have their operands fetched from the constant table
2250 and builds the table.
2255 scan, find an instruction which needs a pcrel move. Look forward, find th
2256 last barrier which is within MAX_COUNT bytes of the requirement.
2257 If there isn't one, make one. Process all the instructions between
2258 the find and the barrier.
2260 In the above example, we can tell that L3 is within 1k of L1, so
2261 the first move can be shrunk from the 2 insn+constant sequence into
2262 just 1 insn, and the constant moved to L3 to make:
2273 Then the second move becomes the target for the shortening process.
2279 rtx value
; /* Value in table */
2280 HOST_WIDE_INT next_offset
;
2281 enum machine_mode mode
; /* Mode of value */
2284 /* The maximum number of constants that can fit into one pool, since
2285 the pc relative range is 0...1020 bytes and constants are at least 4
2288 #define MAX_POOL_SIZE (1020/4)
2289 static pool_node pool_vector
[MAX_POOL_SIZE
];
2290 static int pool_size
;
2291 static rtx pool_vector_label
;
2293 /* Add a constant to the pool and return its label. */
2294 static HOST_WIDE_INT
2295 add_constant (x
, mode
)
2297 enum machine_mode mode
;
2301 HOST_WIDE_INT offset
;
2303 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
2304 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
2305 x
= get_pool_constant (XEXP (x
, 0));
2306 #ifndef AOF_ASSEMBLER
2307 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
2308 x
= XVECEXP (x
, 0, 0);
2311 /* First see if we've already got it */
2312 for (i
= 0; i
< pool_size
; i
++)
2314 if (GET_CODE (x
) == pool_vector
[i
].value
->code
2315 && mode
== pool_vector
[i
].mode
)
2317 if (GET_CODE (x
) == CODE_LABEL
)
2319 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
2322 if (rtx_equal_p (x
, pool_vector
[i
].value
))
2323 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
2327 /* Need a new one */
2328 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
2331 pool_vector_label
= gen_label_rtx ();
2333 pool_vector
[pool_size
].next_offset
2334 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
2336 pool_vector
[pool_size
].value
= x
;
2337 pool_vector
[pool_size
].mode
= mode
;
2342 /* Output the literal table */
2349 scan
= emit_label_after (gen_label_rtx (), scan
);
2350 scan
= emit_insn_after (gen_align_4 (), scan
);
2351 scan
= emit_label_after (pool_vector_label
, scan
);
2353 for (i
= 0; i
< pool_size
; i
++)
2355 pool_node
*p
= pool_vector
+ i
;
2357 switch (GET_MODE_SIZE (p
->mode
))
2360 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
2364 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
2373 scan
= emit_insn_after (gen_consttable_end (), scan
);
2374 scan
= emit_barrier_after (scan
);
2378 /* Non zero if the src operand needs to be fixed up */
2380 fixit (src
, mode
, destreg
)
2382 enum machine_mode mode
;
2385 if (CONSTANT_P (src
))
2387 if (GET_CODE (src
) == CONST_INT
)
2388 return (! const_ok_for_arm (INTVAL (src
))
2389 && ! const_ok_for_arm (~INTVAL (src
)));
2390 if (GET_CODE (src
) == CONST_DOUBLE
)
2391 return (GET_MODE (src
) == VOIDmode
2393 || (! const_double_rtx_ok_for_fpu (src
)
2394 && ! neg_const_double_rtx_ok_for_fpu (src
)));
2395 return symbol_mentioned_p (src
);
2397 #ifndef AOF_ASSEMBLER
2398 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
2402 return (mode
== SImode
&& GET_CODE (src
) == MEM
2403 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
2404 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
2407 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
2409 find_barrier (from
, max_count
)
2414 rtx found_barrier
= 0;
2416 while (from
&& count
< max_count
)
2418 if (GET_CODE (from
) == BARRIER
)
2419 found_barrier
= from
;
2421 /* Count the length of this insn */
2422 if (GET_CODE (from
) == INSN
2423 && GET_CODE (PATTERN (from
)) == SET
2424 && CONSTANT_P (SET_SRC (PATTERN (from
)))
2425 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
2427 rtx src
= SET_SRC (PATTERN (from
));
2431 count
+= get_attr_length (from
);
2433 from
= NEXT_INSN (from
);
2438 /* We didn't find a barrier in time to
2439 dump our stuff, so we'll make one */
2440 rtx label
= gen_label_rtx ();
2443 from
= PREV_INSN (from
);
2445 from
= get_last_insn ();
2447 /* Walk back to be just before any jump */
2448 while (GET_CODE (from
) == JUMP_INSN
2449 || GET_CODE (from
) == NOTE
2450 || GET_CODE (from
) == CODE_LABEL
)
2451 from
= PREV_INSN (from
);
2453 from
= emit_jump_insn_after (gen_jump (label
), from
);
2454 JUMP_LABEL (from
) = label
;
2455 found_barrier
= emit_barrier_after (from
);
2456 emit_label_after (label
, found_barrier
);
2457 return found_barrier
;
2460 return found_barrier
;
2463 /* Non zero if the insn is a move instruction which needs to be fixed. */
2468 if (!INSN_DELETED_P (insn
)
2469 && GET_CODE (insn
) == INSN
2470 && GET_CODE (PATTERN (insn
)) == SET
)
2472 rtx pat
= PATTERN (insn
);
2473 rtx src
= SET_SRC (pat
);
2474 rtx dst
= SET_DEST (pat
);
2476 enum machine_mode mode
= GET_MODE (dst
);
2480 if (GET_CODE (dst
) == REG
)
2481 destreg
= REGNO (dst
);
2482 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
2483 destreg
= REGNO (SUBREG_REG (dst
));
2485 return fixit (src
, mode
, destreg
);
2499 /* The ldr instruction can work with up to a 4k offset, and most constants
2500 will be loaded with one of these instructions; however, the adr
2501 instruction and the ldf instructions only work with a 1k offset. This
2502 code needs to be rewritten to use the 4k offset when possible, and to
2503 adjust when a 1k offset is needed. For now we just use a 1k offset
2507 /* Floating point operands can't work further than 1024 bytes from the
2508 PC, so to make things simple we restrict all loads for such functions.
2510 if (TARGET_HARD_FLOAT
)
2511 for (regno
= 16; regno
< 24; regno
++)
2512 if (regs_ever_live
[regno
])
2521 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
2523 if (broken_move (insn
))
2525 /* This is a broken move instruction, scan ahead looking for
2526 a barrier to stick the constant table behind */
2528 rtx barrier
= find_barrier (insn
, count_size
);
2530 /* Now find all the moves between the points and modify them */
2531 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
2533 if (broken_move (scan
))
2535 /* This is a broken move instruction, add it to the pool */
2536 rtx pat
= PATTERN (scan
);
2537 rtx src
= SET_SRC (pat
);
2538 rtx dst
= SET_DEST (pat
);
2539 enum machine_mode mode
= GET_MODE (dst
);
2540 HOST_WIDE_INT offset
;
2546 /* If this is an HImode constant load, convert it into
2547 an SImode constant load. Since the register is always
2548 32 bits this is safe. We have to do this, since the
2549 load pc-relative instruction only does a 32-bit load. */
2553 if (GET_CODE (dst
) != REG
)
2555 PUT_MODE (dst
, SImode
);
2558 offset
= add_constant (src
, mode
);
2559 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
2563 /* For wide moves to integer regs we need to split the
2564 address calculation off into a separate insn, so that
2565 the load can then be done with a load-multiple. This is
2566 safe, since we have already noted the length of such
2567 insns to be 8, and we are immediately over-writing the
2568 scratch we have grabbed with the final result. */
2569 if (GET_MODE_SIZE (mode
) > 4
2570 && (scratch
= REGNO (dst
)) < 16)
2572 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
2573 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
2578 newsrc
= gen_rtx (MEM
, mode
, addr
);
2580 /* Build a jump insn wrapper around the move instead
2581 of an ordinary insn, because we want to have room for
2582 the target label rtx in fld[7], which an ordinary
2583 insn doesn't have. */
2584 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
2587 JUMP_LABEL (newinsn
) = pool_vector_label
;
2589 /* But it's still an ordinary insn */
2590 PUT_CODE (newinsn
, INSN
);
2597 dump_table (barrier
);
2604 /* Routines to output assembly language. */
2606 /* If the rtx is the correct value then return the string of the number.
2607 In this way we can ensure that valid double constants are generated even
2608 when cross compiling. */
2610 fp_immediate_constant (x
)
2616 if (!fpa_consts_inited
)
2619 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
2620 for (i
= 0; i
< 8; i
++)
2621 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
2622 return strings_fpa
[i
];
2627 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
2629 fp_const_from_val (r
)
2634 if (! fpa_consts_inited
)
2637 for (i
= 0; i
< 8; i
++)
2638 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
2639 return strings_fpa
[i
];
2644 /* Output the operands of a LDM/STM instruction to STREAM.
2645 MASK is the ARM register set mask of which only bits 0-15 are important.
2646 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
2647 must follow the register list. */
2650 print_multi_reg (stream
, instr
, mask
, hat
)
2656 int not_first
= FALSE
;
2658 fputc ('\t', stream
);
2659 fprintf (stream
, instr
, REGISTER_PREFIX
);
2660 fputs (", {", stream
);
2661 for (i
= 0; i
< 16; i
++)
2662 if (mask
& (1 << i
))
2665 fprintf (stream
, ", ");
2666 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
2670 fprintf (stream
, "}%s\n", hat
? "^" : "");
2673 /* Output a 'call' insn. */
2676 output_call (operands
)
2679 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
2681 if (REGNO (operands
[0]) == 14)
2683 operands
[0] = gen_rtx (REG
, SImode
, 12);
2684 output_asm_insn ("mov%?\t%0, %|lr", operands
);
2686 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
2687 output_asm_insn ("mov%?\t%|pc, %0", operands
);
2695 int something_changed
= 0;
2697 int code
= GET_CODE (x0
);
2704 if (REGNO (x0
) == 14)
2706 *x
= gen_rtx (REG
, SImode
, 12);
2711 /* Scan through the sub-elements and change any references there */
2712 fmt
= GET_RTX_FORMAT (code
);
2713 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2715 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
2716 else if (fmt
[i
] == 'E')
2717 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
2718 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
2719 return something_changed
;
2723 /* Output a 'call' insn that is a reference in memory. */
2726 output_call_mem (operands
)
2729 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
2730 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
2732 if (eliminate_lr2ip (&operands
[0]))
2733 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
2735 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
2736 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
2741 /* Output a move from arm registers to an fpu registers.
2742 OPERANDS[0] is an fpu register.
2743 OPERANDS[1] is the first registers of an arm register pair. */
2746 output_mov_long_double_fpu_from_arm (operands
)
2749 int arm_reg0
= REGNO (operands
[1]);
2755 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2756 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2757 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
2759 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
2760 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
2764 /* Output a move from an fpu register to arm registers.
2765 OPERANDS[0] is the first registers of an arm register pair.
2766 OPERANDS[1] is an fpu register. */
2769 output_mov_long_double_arm_from_fpu (operands
)
2772 int arm_reg0
= REGNO (operands
[0]);
2778 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2779 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2780 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
2782 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
2783 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
2787 /* Output a move from arm registers to arm registers of a long double
2788 OPERANDS[0] is the destination.
2789 OPERANDS[1] is the source. */
2791 output_mov_long_double_arm_from_arm (operands
)
2794 /* We have to be careful here because the two might overlap */
2795 int dest_start
= REGNO (operands
[0]);
2796 int src_start
= REGNO (operands
[1]);
2800 if (dest_start
< src_start
)
2802 for (i
= 0; i
< 3; i
++)
2804 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
2805 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
2806 output_asm_insn ("mov%?\t%0, %1", ops
);
2811 for (i
= 2; i
>= 0; i
--)
2813 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
2814 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
2815 output_asm_insn ("mov%?\t%0, %1", ops
);
2823 /* Output a move from arm registers to an fpu registers.
2824 OPERANDS[0] is an fpu register.
2825 OPERANDS[1] is the first registers of an arm register pair. */
2828 output_mov_double_fpu_from_arm (operands
)
2831 int arm_reg0
= REGNO (operands
[1]);
2836 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2837 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2838 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
2839 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
2843 /* Output a move from an fpu register to arm registers.
2844 OPERANDS[0] is the first registers of an arm register pair.
2845 OPERANDS[1] is an fpu register. */
2848 output_mov_double_arm_from_fpu (operands
)
2851 int arm_reg0
= REGNO (operands
[0]);
2857 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2858 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2859 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
2860 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
2864 /* Output a move between double words.
2865 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
2866 or MEM<-REG and all MEMs must be offsettable addresses. */
2869 output_move_double (operands
)
2872 enum rtx_code code0
= GET_CODE (operands
[0]);
2873 enum rtx_code code1
= GET_CODE (operands
[1]);
2878 int reg0
= REGNO (operands
[0]);
2880 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
2883 int reg1
= REGNO (operands
[1]);
2887 otherops
[1] = gen_rtx (REG
, SImode
, 1 + reg1
);
2889 /* Ensure the second source is not overwritten */
2890 if (reg0
== 1 + reg1
)
2892 output_asm_insn("mov%?\t%0, %1", otherops
);
2893 output_asm_insn("mov%?\t%0, %1", operands
);
2897 output_asm_insn("mov%?\t%0, %1", operands
);
2898 output_asm_insn("mov%?\t%0, %1", otherops
);
2901 else if (code1
== CONST_DOUBLE
)
2903 otherops
[1] = gen_rtx (CONST_INT
, VOIDmode
,
2904 CONST_DOUBLE_HIGH (operands
[1]));
2905 operands
[1] = gen_rtx (CONST_INT
, VOIDmode
,
2906 CONST_DOUBLE_LOW (operands
[1]));
2907 output_mov_immediate (operands
, FALSE
, "");
2908 output_mov_immediate (otherops
, FALSE
, "");
2910 else if (code1
== CONST_INT
)
2912 otherops
[1] = const0_rtx
;
2913 /* sign extend the intval into the high-order word */
2914 /* Note: output_mov_immediate may clobber operands[1], so we
2915 put this out first */
2916 if (INTVAL (operands
[1]) < 0)
2917 output_asm_insn ("mvn%?\t%0, %1", otherops
);
2919 output_asm_insn ("mov%?\t%0, %1", otherops
);
2920 output_mov_immediate (operands
, FALSE
, "");
2922 else if (code1
== MEM
)
2924 switch (GET_CODE (XEXP (operands
[1], 0)))
2927 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
2931 abort (); /* Should never happen now */
2935 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
2939 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
2943 abort (); /* Should never happen now */
2948 output_asm_insn ("adr%?\t%0, %1", operands
);
2949 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
2953 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
2955 otherops
[0] = operands
[0];
2956 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
2957 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
2958 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
2960 if (GET_CODE (otherops
[2]) == CONST_INT
)
2962 switch (INTVAL (otherops
[2]))
2965 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
2968 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
2971 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
2974 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
2975 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
2977 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
2980 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
2983 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
2984 return "ldm%?ia\t%0, %M0";
2988 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
2989 /* Take care of overlapping base/data reg. */
2990 if (reg_mentioned_p (operands
[0], operands
[1]))
2992 output_asm_insn ("ldr%?\t%0, %1", otherops
);
2993 output_asm_insn ("ldr%?\t%0, %1", operands
);
2997 output_asm_insn ("ldr%?\t%0, %1", operands
);
2998 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3004 abort(); /* Constraints should prevent this */
3006 else if (code0
== MEM
&& code1
== REG
)
3008 if (REGNO (operands
[1]) == 12)
3011 switch (GET_CODE (XEXP (operands
[0], 0)))
3014 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
3018 abort (); /* Should never happen now */
3022 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
3026 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
3030 abort (); /* Should never happen now */
3034 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
3036 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
3039 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
3043 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
3047 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
3054 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
3055 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
3056 output_asm_insn ("str%?\t%1, %0", operands
);
3057 output_asm_insn ("str%?\t%1, %0", otherops
);
3061 abort(); /* Constraints should prevent this */
3067 /* Output an arbitrary MOV reg, #n.
3068 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
3071 output_mov_immediate (operands
)
3074 HOST_WIDE_INT n
= INTVAL (operands
[1]);
3078 /* Try to use one MOV */
3079 if (const_ok_for_arm (n
))
3081 output_asm_insn ("mov%?\t%0, %1", operands
);
3085 /* Try to use one MVN */
3086 if (const_ok_for_arm (~n
))
3088 operands
[1] = GEN_INT (~n
);
3089 output_asm_insn ("mvn%?\t%0, %1", operands
);
3093 /* If all else fails, make it out of ORRs or BICs as appropriate. */
3095 for (i
=0; i
< 32; i
++)
3099 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
3100 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
3103 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
3110 /* Output an ADD r, s, #n where n may be too big for one instruction. If
3111 adding zero to one register, output nothing. */
3114 output_add_immediate (operands
)
3117 HOST_WIDE_INT n
= INTVAL (operands
[2]);
3119 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
3122 output_multi_immediate (operands
,
3123 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
3126 output_multi_immediate (operands
,
3127 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
3134 /* Output a multiple immediate operation.
3135 OPERANDS is the vector of operands referred to in the output patterns.
3136 INSTR1 is the output pattern to use for the first constant.
3137 INSTR2 is the output pattern to use for subsequent constants.
3138 IMMED_OP is the index of the constant slot in OPERANDS.
3139 N is the constant value. */
3142 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
3144 char *instr1
, *instr2
;
3148 #if HOST_BITS_PER_WIDE_INT > 32
3154 operands
[immed_op
] = const0_rtx
;
3155 output_asm_insn (instr1
, operands
); /* Quick and easy output */
3160 char *instr
= instr1
;
3162 /* Note that n is never zero here (which would give no output) */
3163 for (i
= 0; i
< 32; i
+= 2)
3167 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
3168 output_asm_insn (instr
, operands
);
3178 /* Return the appropriate ARM instruction for the operation code.
3179 The returned result should not be overwritten. OP is the rtx of the
3180 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
3184 arithmetic_instr (op
, shift_first_arg
)
3186 int shift_first_arg
;
3188 switch (GET_CODE (op
))
3194 return shift_first_arg
? "rsb" : "sub";
3211 /* Ensure valid constant shifts and return the appropriate shift mnemonic
3212 for the operation code. The returned result should not be overwritten.
3213 OP is the rtx code of the shift.
3214 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
3218 shift_op (op
, amountp
)
3220 HOST_WIDE_INT
*amountp
;
3223 enum rtx_code code
= GET_CODE (op
);
3225 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
3227 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
3228 *amountp
= INTVAL (XEXP (op
, 1));
3251 /* We never have to worry about the amount being other than a
3252 power of 2, since this case can never be reloaded from a reg. */
3254 *amountp
= int_log2 (*amountp
);
3265 /* This is not 100% correct, but follows from the desire to merge
3266 multiplication by a power of 2 with the recognizer for a
3267 shift. >=32 is not a valid shift for "asl", so we must try and
3268 output a shift that produces the correct arithmetical result.
3269 Using lsr #32 is identical except for the fact that the carry bit
3270 is not set correctly if we set the flags; but we never use the
3271 carry bit from such an operation, so we can ignore that. */
3272 if (code
== ROTATERT
)
3273 *amountp
&= 31; /* Rotate is just modulo 32 */
3274 else if (*amountp
!= (*amountp
& 31))
3281 /* Shifts of 0 are no-ops. */
3290 /* Obtain the shift from the POWER of two. */
3294 HOST_WIDE_INT power
;
3296 HOST_WIDE_INT shift
= 0;
3298 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
3308 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
3309 /bin/as is horribly restrictive. */
3312 output_ascii_pseudo_op (stream
, p
, len
)
3318 int len_so_far
= 1000;
3319 int chars_so_far
= 0;
3321 for (i
= 0; i
< len
; i
++)
3323 register int c
= p
[i
];
3325 if (len_so_far
> 50)
3328 fputs ("\"\n", stream
);
3329 fputs ("\t.ascii\t\"", stream
);
3331 arm_increase_location (chars_so_far
);
3335 if (c
== '\"' || c
== '\\')
3341 if (c
>= ' ' && c
< 0177)
3348 fprintf (stream
, "\\%03o", c
);
3355 fputs ("\"\n", stream
);
3356 arm_increase_location (chars_so_far
);
3360 /* Try to determine whether a pattern really clobbers the link register.
3361 This information is useful when peepholing, so that lr need not be pushed
3362 if we combine a call followed by a return.
3363 NOTE: This code does not check for side-effect expressions in a SET_SRC:
3364 such a check should not be needed because these only update an existing
3365 value within a register; the register must still be set elsewhere within
3369 pattern_really_clobbers_lr (x
)
3374 switch (GET_CODE (x
))
3377 switch (GET_CODE (SET_DEST (x
)))
3380 return REGNO (SET_DEST (x
)) == 14;
3383 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
3384 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
3386 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
3395 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
3396 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
3401 switch (GET_CODE (XEXP (x
, 0)))
3404 return REGNO (XEXP (x
, 0)) == 14;
3407 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
3408 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
3424 function_really_clobbers_lr (first
)
3429 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
3431 switch (GET_CODE (insn
))
3436 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
3441 if (pattern_really_clobbers_lr (PATTERN (insn
)))
3446 /* Don't yet know how to handle those calls that are not to a
3448 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
3451 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
3454 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
3460 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
3466 default: /* Don't recognize it, be safe */
3470 /* A call can be made (by peepholing) not to clobber lr iff it is
3471 followed by a return. There may, however, be a use insn iff
3472 we are returning the result of the call.
3473 If we run off the end of the insn chain, then that means the
3474 call was at the end of the function. Unfortunately we don't
3475 have a return insn for the peephole to recognize, so we
3476 must reject this. (Can this be fixed by adding our own insn?) */
3477 if ((next
= next_nonnote_insn (insn
)) == NULL
)
3480 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
3481 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
3482 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
3483 == REGNO (XEXP (PATTERN (next
), 0))))
3484 if ((next
= next_nonnote_insn (next
)) == NULL
)
3487 if (GET_CODE (next
) == JUMP_INSN
3488 && GET_CODE (PATTERN (next
)) == RETURN
)
3497 /* We have reached the end of the chain so lr was _not_ clobbered */
3502 output_return_instruction (operand
, really_return
)
3507 int reg
, live_regs
= 0;
3508 int volatile_func
= (optimize
> 0
3509 && TREE_THIS_VOLATILE (current_function_decl
));
3511 return_used_this_function
= 1;
3516 /* If this function was declared non-returning, and we have found a tail
3517 call, then we have to trust that the called function won't return. */
3518 if (! really_return
)
3521 /* Otherwise, trap an attempted return by aborting. */
3523 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
3524 assemble_external_libcall (ops
[1]);
3525 output_asm_insn ("bl%d0\t%a1", ops
);
3529 if (current_function_calls_alloca
&& ! really_return
)
3532 for (reg
= 0; reg
<= 10; reg
++)
3533 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3536 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
3539 if (frame_pointer_needed
)
3544 if (lr_save_eliminated
|| ! regs_ever_live
[14])
3547 if (frame_pointer_needed
)
3548 strcpy (instr
, "ldm%?%d0ea\t%|fp, {");
3550 strcpy (instr
, "ldm%?%d0fd\t%|sp!, {");
3552 for (reg
= 0; reg
<= 10; reg
++)
3553 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3555 strcat (instr
, "%|");
3556 strcat (instr
, reg_names
[reg
]);
3558 strcat (instr
, ", ");
3561 if (frame_pointer_needed
)
3563 strcat (instr
, "%|");
3564 strcat (instr
, reg_names
[11]);
3565 strcat (instr
, ", ");
3566 strcat (instr
, "%|");
3567 strcat (instr
, reg_names
[13]);
3568 strcat (instr
, ", ");
3569 strcat (instr
, "%|");
3570 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
3574 strcat (instr
, "%|");
3575 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
3577 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
3578 output_asm_insn (instr
, &operand
);
3580 else if (really_return
)
3582 strcpy (instr
, (TARGET_APCS_32
3583 ? "mov%?%d0\t%|pc, %|lr" : "mov%?%d0s\t%|pc, %|lr"));
3584 output_asm_insn (instr
, &operand
);
3590 /* Return nonzero if optimizing and the current function is volatile.
3591 Such functions never return, and many memory cycles can be saved
3592 by not storing register values that will never be needed again.
3593 This optimization was added to speed up context switching in a
3594 kernel application. */
3597 arm_volatile_func ()
3599 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
3602 /* Return the size of the prologue. It's not too bad if we slightly
3606 get_prologue_size ()
3608 return profile_flag
? 12 : 0;
3611 /* The amount of stack adjustment that happens here, in output_return and in
3612 output_epilogue must be exactly the same as was calculated during reload,
3613 or things will point to the wrong place. The only time we can safely
3614 ignore this constraint is when a function has no arguments on the stack,
3615 no stack frame requirement and no live registers execpt for `lr'. If we
3616 can guarantee that by making all function calls into tail calls and that
3617 lr is not clobbered in any other way, then there is no need to push lr
3621 output_func_prologue (f
, frame_size
)
3625 int reg
, live_regs_mask
= 0;
3627 int volatile_func
= (optimize
> 0
3628 && TREE_THIS_VOLATILE (current_function_decl
));
3630 /* Nonzero if we must stuff some register arguments onto the stack as if
3631 they were passed there. */
3632 int store_arg_regs
= 0;
3634 if (arm_ccfsm_state
|| arm_target_insn
)
3635 abort (); /* Sanity check */
3637 return_used_this_function
= 0;
3638 lr_save_eliminated
= 0;
3640 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
3641 ASM_COMMENT_START
, current_function_args_size
,
3642 current_function_pretend_args_size
, frame_size
);
3643 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
3644 ASM_COMMENT_START
, frame_pointer_needed
,
3645 current_function_anonymous_args
);
3648 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
3650 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
3653 for (reg
= 0; reg
<= 10; reg
++)
3654 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3655 live_regs_mask
|= (1 << reg
);
3657 if (frame_pointer_needed
)
3658 live_regs_mask
|= 0xD800;
3659 else if (regs_ever_live
[14])
3661 if (! current_function_args_size
3662 && ! function_really_clobbers_lr (get_insns ()))
3663 lr_save_eliminated
= 1;
3665 live_regs_mask
|= 0x4000;
3670 /* if a di mode load/store multiple is used, and the base register
3671 is r3, then r4 can become an ever live register without lr
3672 doing so, in this case we need to push lr as well, or we
3673 will fail to get a proper return. */
3675 live_regs_mask
|= 0x4000;
3676 lr_save_eliminated
= 0;
3680 if (lr_save_eliminated
)
3681 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
3687 output_func_epilogue (f
, frame_size
)
3691 int reg
, live_regs_mask
= 0, code_size
= 0;
3692 /* If we need this then it will always be at lesat this much */
3693 int floats_offset
= 24;
3695 int volatile_func
= (optimize
> 0
3696 && TREE_THIS_VOLATILE (current_function_decl
));
3698 if (use_return_insn() && return_used_this_function
)
3700 if (frame_size
&& !(frame_pointer_needed
|| TARGET_APCS
))
3707 /* A volatile function should never return. Call abort. */
3710 rtx op
= gen_rtx (SYMBOL_REF
, Pmode
, "abort");
3711 assemble_external_libcall (op
);
3712 output_asm_insn ("bl\t%a0", &op
);
3717 for (reg
= 0; reg
<= 10; reg
++)
3718 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3720 live_regs_mask
|= (1 << reg
);
3724 if (frame_pointer_needed
)
3726 for (reg
= 23; reg
> 15; reg
--)
3727 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3729 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
3730 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
3731 floats_offset
+= 12;
3735 live_regs_mask
|= 0xA800;
3736 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
3737 TARGET_APCS_32
? FALSE
: TRUE
);
3742 /* Restore stack pointer if necessary. */
3745 operands
[0] = operands
[1] = stack_pointer_rtx
;
3746 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
, frame_size
);
3747 output_add_immediate (operands
);
3750 for (reg
= 16; reg
< 24; reg
++)
3751 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3753 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
3754 reg_names
[reg
], REGISTER_PREFIX
);
3757 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
3759 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
3760 TARGET_APCS_32
? FALSE
: TRUE
);
3765 if (live_regs_mask
|| regs_ever_live
[14])
3767 live_regs_mask
|= 0x4000;
3768 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
3771 if (current_function_pretend_args_size
)
3773 operands
[0] = operands
[1] = stack_pointer_rtx
;
3774 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
,
3775 current_function_pretend_args_size
);
3776 output_add_immediate (operands
);
3778 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
3779 : "\tmovs\t%spc, %slr\n"),
3780 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
3787 /* insn_addresses isn't allocated when not optimizing */
3790 arm_increase_location (code_size
3791 + insn_addresses
[INSN_UID (get_last_insn ())]
3792 + get_prologue_size ());
3794 current_function_anonymous_args
= 0;
3798 emit_multi_reg_push (mask
)
3805 for (i
= 0; i
< 16; i
++)
3806 if (mask
& (1 << i
))
3809 if (num_regs
== 0 || num_regs
> 16)
3812 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (num_regs
));
3814 for (i
= 0; i
< 16; i
++)
3816 if (mask
& (1 << i
))
3819 = gen_rtx (SET
, VOIDmode
, gen_rtx (MEM
, BLKmode
,
3820 gen_rtx (PRE_DEC
, BLKmode
,
3821 stack_pointer_rtx
)),
3822 gen_rtx (UNSPEC
, BLKmode
,
3823 gen_rtvec (1, gen_rtx (REG
, SImode
, i
)),
3829 for (j
= 1, i
++; j
< num_regs
; i
++)
3831 if (mask
& (1 << i
))
3834 = gen_rtx (USE
, VOIDmode
, gen_rtx (REG
, SImode
, i
));
3842 arm_expand_prologue ()
3845 rtx amount
= GEN_INT (- get_frame_size ());
3848 int live_regs_mask
= 0;
3849 int store_arg_regs
= 0;
3850 int volatile_func
= (optimize
> 0
3851 && TREE_THIS_VOLATILE (current_function_decl
));
3853 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
3856 if (! volatile_func
)
3857 for (reg
= 0; reg
<= 10; reg
++)
3858 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3859 live_regs_mask
|= 1 << reg
;
3861 if (! volatile_func
&& regs_ever_live
[14])
3862 live_regs_mask
|= 0x4000;
3864 if (frame_pointer_needed
)
3866 live_regs_mask
|= 0xD800;
3867 emit_insn (gen_movsi (gen_rtx (REG
, SImode
, 12),
3868 stack_pointer_rtx
));
3871 if (current_function_pretend_args_size
)
3874 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
3877 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
3878 GEN_INT (-current_function_pretend_args_size
)));
3883 /* If we have to push any regs, then we must push lr as well, or
3884 we won't get a proper return. */
3885 live_regs_mask
|= 0x4000;
3886 emit_multi_reg_push (live_regs_mask
);
3889 /* For now the integer regs are still pushed in output_func_epilogue (). */
3891 if (! volatile_func
)
3892 for (reg
= 23; reg
> 15; reg
--)
3893 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3894 emit_insn (gen_rtx (SET
, VOIDmode
,
3895 gen_rtx (MEM
, XFmode
,
3896 gen_rtx (PRE_DEC
, XFmode
,
3897 stack_pointer_rtx
)),
3898 gen_rtx (REG
, XFmode
, reg
)));
3900 if (frame_pointer_needed
)
3901 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx (REG
, SImode
, 12),
3903 (-(4 + current_function_pretend_args_size
)))));
3905 if (amount
!= const0_rtx
)
3907 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
3908 emit_insn (gen_rtx (CLOBBER
, VOIDmode
,
3909 gen_rtx (MEM
, BLKmode
, stack_pointer_rtx
)));
3912 /* If we are profiling, make sure no instructions are scheduled before
3913 the call to mcount. */
3914 if (profile_flag
|| profile_block_flag
)
3915 emit_insn (gen_blockage ());
3919 /* If CODE is 'd', then the X is a condition operand and the instruction
3920 should only be executed if the condition is true.
3921 if CODE is 'D', then the X is a condition operand and the instruction
3922 should only be executed if the condition is false: however, if the mode
3923 of the comparison is CCFPEmode, then always execute the instruction -- we
3924 do this because in these circumstances !GE does not necessarily imply LT;
3925 in these cases the instruction pattern will take care to make sure that
3926 an instruction containing %d will follow, thereby undoing the effects of
3927 doing this instruction unconditionally.
3928 If CODE is 'N' then X is a floating point operand that must be negated
3930 If CODE is 'B' then output a bitwise inverted value of X (a const int).
3931 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
3934 arm_print_operand (stream
, x
, code
)
3942 fputs (ASM_COMMENT_START
, stream
);
3946 fputs (REGISTER_PREFIX
, stream
);
3950 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
3951 fputs (arm_condition_codes
[arm_current_cc
], stream
);
3957 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3958 r
= REAL_VALUE_NEGATE (r
);
3959 fprintf (stream
, "%s", fp_const_from_val (&r
));
3964 if (GET_CODE (x
) == CONST_INT
)
3966 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
3971 ARM_SIGN_EXTEND (~ INTVAL (x
)));
3975 output_addr_const (stream
, x
);
3980 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
3984 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
3990 char *shift
= shift_op (x
, &val
);
3994 fprintf (stream
, ", %s ", shift_op (x
, &val
));
3996 arm_print_operand (stream
, XEXP (x
, 1), 0);
3999 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
4012 fputs (REGISTER_PREFIX
, stream
);
4013 fputs (reg_names
[REGNO (x
) + 1], stream
);
4017 fputs (REGISTER_PREFIX
, stream
);
4018 if (GET_CODE (XEXP (x
, 0)) == REG
)
4019 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
4021 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
4025 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
4026 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
4027 + ((GET_MODE_SIZE (GET_MODE (x
))
4028 + GET_MODE_SIZE (SImode
) - 1)
4029 / GET_MODE_SIZE (SImode
))]);
4034 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
4039 if (x
&& (flag_fast_math
4040 || GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
4041 || (GET_MODE (XEXP (x
, 0)) != CCFPEmode
4042 && (GET_MODE_CLASS (GET_MODE (XEXP (x
, 0)))
4044 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
4045 (get_arm_condition_code (x
))],
4053 if (GET_CODE (x
) == REG
)
4055 fputs (REGISTER_PREFIX
, stream
);
4056 fputs (reg_names
[REGNO (x
)], stream
);
4058 else if (GET_CODE (x
) == MEM
)
4060 output_memory_reference_mode
= GET_MODE (x
);
4061 output_address (XEXP (x
, 0));
4063 else if (GET_CODE (x
) == CONST_DOUBLE
)
4064 fprintf (stream
, "#%s", fp_immediate_constant (x
));
4065 else if (GET_CODE (x
) == NEG
)
4066 abort (); /* This should never happen now. */
4069 fputc ('#', stream
);
4070 output_addr_const (stream
, x
);
4075 /* Increase the `arm_text_location' by AMOUNT if we're in the text
4079 arm_increase_location (amount
)
4082 if (in_text_section ())
4083 arm_text_location
+= amount
;
4087 /* Output a label definition. If this label is within the .text segment, it
4088 is stored in OFFSET_TABLE, to be used when building `llc' instructions.
4089 Maybe GCC remembers names not starting with a `*' for a long time, but this
4090 is a minority anyway, so we just make a copy. Do not store the leading `*'
4091 if the name starts with one. */
4094 arm_asm_output_label (stream
, name
)
4098 char *real_name
, *s
;
4099 struct label_offset
*cur
;
4102 ARM_OUTPUT_LABEL (stream
, name
);
4103 if (! in_text_section ())
4108 real_name
= xmalloc (1 + strlen (&name
[1]));
4109 strcpy (real_name
, &name
[1]);
4113 real_name
= xmalloc (2 + strlen (name
));
4114 strcpy (real_name
, USER_LABEL_PREFIX
);
4115 strcat (real_name
, name
);
4117 for (s
= real_name
; *s
; s
++)
4120 hash
= hash
% LABEL_HASH_SIZE
;
4121 cur
= (struct label_offset
*) xmalloc (sizeof (struct label_offset
));
4122 cur
->name
= real_name
;
4123 cur
->offset
= arm_text_location
;
4124 cur
->cdr
= offset_table
[hash
];
4125 offset_table
[hash
] = cur
;
4128 /* Output code resembling an .lcomm directive. /bin/as doesn't have this
4129 directive hence this hack, which works by reserving some `.space' in the
4130 bss segment directly.
4132 XXX This is a severe hack, which is guaranteed NOT to work since it doesn't
4133 define STATIC COMMON space but merely STATIC BSS space. */
4136 output_lcomm_directive (stream
, name
, size
, rounded
)
4141 fprintf (stream
, "\n\t.bss\t%s .lcomm\n", ASM_COMMENT_START
);
4142 assemble_name (stream
, name
);
4143 fprintf (stream
, ":\t.space\t%d\n", rounded
);
4144 if (in_text_section ())
4145 fputs ("\n\t.text\n", stream
);
4147 fputs ("\n\t.data\n", stream
);
4150 /* A finite state machine takes care of noticing whether or not instructions
4151 can be conditionally executed, and thus decrease execution time and code
4152 size by deleting branch instructions. The fsm is controlled by
4153 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
4155 /* The state of the fsm controlling condition codes are:
4156 0: normal, do nothing special
4157 1: make ASM_OUTPUT_OPCODE not output this instruction
4158 2: make ASM_OUTPUT_OPCODE not output this instruction
4159 3: make instructions conditional
4160 4: make instructions conditional
4162 State transitions (state->state by whom under condition):
4163 0 -> 1 final_prescan_insn if the `target' is a label
4164 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
4165 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
4166 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
4167 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
4168 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
4169 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
4170 (the target insn is arm_target_insn).
4172 If the jump clobbers the conditions then we use states 2 and 4.
4174 A similar thing can be done with conditional return insns.
4176 XXX In case the `target' is an unconditional branch, this conditionalising
4177 of the instructions always reduces code size, but not always execution
4178 time. But then, I want to reduce the code size to somewhere near what
4179 /bin/cc produces. */
4181 /* Returns the index of the ARM condition code string in
4182 `arm_condition_codes'. COMPARISON should be an rtx like
4183 `(eq (...) (...))'. */
4186 get_arm_condition_code (comparison
)
4189 switch (GET_CODE (comparison
))
4191 case NE
: return (1);
4192 case EQ
: return (0);
4193 case GE
: return (10);
4194 case GT
: return (12);
4195 case LE
: return (13);
4196 case LT
: return (11);
4197 case GEU
: return (2);
4198 case GTU
: return (8);
4199 case LEU
: return (9);
4200 case LTU
: return (3);
4209 final_prescan_insn (insn
, opvec
, noperands
)
4214 /* BODY will hold the body of INSN. */
4215 register rtx body
= PATTERN (insn
);
4217 /* This will be 1 if trying to repeat the trick, and things need to be
4218 reversed if it appears to fail. */
4221 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
4222 taken are clobbered, even if the rtl suggests otherwise. It also
4223 means that we have to grub around within the jump expression to find
4224 out what the conditions are when the jump isn't taken. */
4225 int jump_clobbers
= 0;
4227 /* If we start with a return insn, we only succeed if we find another one. */
4228 int seeking_return
= 0;
4230 /* START_INSN will hold the insn from where we start looking. This is the
4231 first insn after the following code_label if REVERSE is true. */
4232 rtx start_insn
= insn
;
4234 /* If in state 4, check if the target branch is reached, in order to
4235 change back to state 0. */
4236 if (arm_ccfsm_state
== 4)
4238 if (insn
== arm_target_insn
)
4240 arm_target_insn
= NULL
;
4241 arm_ccfsm_state
= 0;
4246 /* If in state 3, it is possible to repeat the trick, if this insn is an
4247 unconditional branch to a label, and immediately following this branch
4248 is the previous target label which is only used once, and the label this
4249 branch jumps to is not too far off. */
4250 if (arm_ccfsm_state
== 3)
4252 if (simplejump_p (insn
))
4254 start_insn
= next_nonnote_insn (start_insn
);
4255 if (GET_CODE (start_insn
) == BARRIER
)
4257 /* XXX Isn't this always a barrier? */
4258 start_insn
= next_nonnote_insn (start_insn
);
4260 if (GET_CODE (start_insn
) == CODE_LABEL
4261 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
4262 && LABEL_NUSES (start_insn
) == 1)
4267 else if (GET_CODE (body
) == RETURN
)
4269 start_insn
= next_nonnote_insn (start_insn
);
4270 if (GET_CODE (start_insn
) == BARRIER
)
4271 start_insn
= next_nonnote_insn (start_insn
);
4272 if (GET_CODE (start_insn
) == CODE_LABEL
4273 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
4274 && LABEL_NUSES (start_insn
) == 1)
4286 if (arm_ccfsm_state
!= 0 && !reverse
)
4288 if (GET_CODE (insn
) != JUMP_INSN
)
4291 /* This jump might be paralleled with a clobber of the condition codes
4292 the jump should always come first */
4293 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
4294 body
= XVECEXP (body
, 0, 0);
4297 /* If this is a conditional return then we don't want to know */
4298 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
4299 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
4300 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
4301 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
4306 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
4307 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
4309 int insns_skipped
= 0, fail
= FALSE
, succeed
= FALSE
;
4310 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
4311 int then_not_else
= TRUE
;
4312 rtx this_insn
= start_insn
, label
= 0;
4314 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
4316 /* The code below is wrong for these, and I haven't time to
4317 fix it now. So we just do the safe thing and return. This
4318 whole function needs re-writing anyway. */
4323 /* Register the insn jumped to. */
4326 if (!seeking_return
)
4327 label
= XEXP (SET_SRC (body
), 0);
4329 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
4330 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
4331 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
4333 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
4334 then_not_else
= FALSE
;
4336 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
4338 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
4341 then_not_else
= FALSE
;
4346 /* See how many insns this branch skips, and what kind of insns. If all
4347 insns are okay, and the label or unconditional branch to the same
4348 label is not too far away, succeed. */
4349 for (insns_skipped
= 0;
4350 !fail
&& !succeed
&& insns_skipped
< MAX_INSNS_SKIPPED
;
4355 this_insn
= next_nonnote_insn (this_insn
);
4359 scanbody
= PATTERN (this_insn
);
4361 switch (GET_CODE (this_insn
))
4364 /* Succeed if it is the target label, otherwise fail since
4365 control falls in from somewhere else. */
4366 if (this_insn
== label
)
4370 arm_ccfsm_state
= 2;
4371 this_insn
= next_nonnote_insn (this_insn
);
4374 arm_ccfsm_state
= 1;
4382 /* Succeed if the following insn is the target label.
4384 If return insns are used then the last insn in a function
4385 will be a barrier. */
4386 this_insn
= next_nonnote_insn (this_insn
);
4387 if (this_insn
&& this_insn
== label
)
4391 arm_ccfsm_state
= 2;
4392 this_insn
= next_nonnote_insn (this_insn
);
4395 arm_ccfsm_state
= 1;
4403 /* If using 32-bit addresses the cc is not preserved over
4410 /* If this is an unconditional branch to the same label, succeed.
4411 If it is to another label, do nothing. If it is conditional,
4413 /* XXX Probably, the test for the SET and the PC are unnecessary. */
4415 if (GET_CODE (scanbody
) == SET
4416 && GET_CODE (SET_DEST (scanbody
)) == PC
)
4418 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
4419 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
4421 arm_ccfsm_state
= 2;
4424 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
4427 else if (GET_CODE (scanbody
) == RETURN
4430 arm_ccfsm_state
= 2;
4433 else if (GET_CODE (scanbody
) == PARALLEL
)
4435 switch (get_attr_conds (this_insn
))
4447 /* Instructions using or affecting the condition codes make it
4449 if ((GET_CODE (scanbody
) == SET
4450 || GET_CODE (scanbody
) == PARALLEL
)
4451 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
4461 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
4462 arm_target_label
= CODE_LABEL_NUMBER (label
);
4463 else if (seeking_return
|| arm_ccfsm_state
== 2)
4465 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
4467 this_insn
= next_nonnote_insn (this_insn
);
4468 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
4469 || GET_CODE (this_insn
) == CODE_LABEL
))
4474 /* Oh, dear! we ran off the end.. give up */
4475 recog (PATTERN (insn
), insn
, NULL_PTR
);
4476 arm_ccfsm_state
= 0;
4477 arm_target_insn
= NULL
;
4480 arm_target_insn
= this_insn
;
4489 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
4491 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
4492 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
4493 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
4494 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
4498 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
4501 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
4505 if (reverse
|| then_not_else
)
4506 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
4508 /* restore recog_operand (getting the attributes of other insns can
4509 destroy this array, but final.c assumes that it remains intact
4510 across this call; since the insn has been recognized already we
4511 call recog direct). */
4512 recog (PATTERN (insn
), insn
, NULL_PTR
);
4516 #ifdef AOF_ASSEMBLER
4517 /* Special functions only needed when producing AOF syntax assembler. */
4519 int arm_text_section_count
= 1;
4522 aof_text_section (in_readonly
)
4525 static char buf
[100];
4528 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
4529 arm_text_section_count
++);
4531 strcat (buf
, ", PIC, REENTRANT");
/* Counter giving each emitted data AREA a unique name.  */
static int arm_data_section_count = 1;

/* Return the assembler directive that opens a fresh data AREA.
   The returned string lives in a static buffer and is only valid
   until the next call.
   NOTE(review): the function header line was lost to extraction
   damage and has been reconstructed -- verify against a pristine
   copy of the file.  */
char *
aof_data_section ()
{
  static char buf[100];

  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

/* One symbol still awaiting an IMPORT directive.  */
struct import
{
  struct import *next;		/* next pending import, or NULL */
  char *name;			/* symbol name (not owned here; compared by pointer identity) */
};

/* Head of the pending-import list; drained by aof_dump_imports.  */
static struct import *imports_list = NULL;
4564 aof_add_import (name
)
4569 for (new = imports_list
; new; new = new->next
)
4570 if (new->name
== name
)
4573 new = (struct import
*) xmalloc (sizeof (struct import
));
4574 new->next
= imports_list
;
4580 aof_delete_import (name
)
4583 struct import
**old
;
4585 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
4587 if ((*old
)->name
== name
)
4589 *old
= (*old
)->next
;
4595 int arm_main_function
= 0;
4598 aof_dump_imports (f
)
4601 /* The AOF assembler needs this to cause the startup code to be extracted
4602 from the library. Brining in __main causes the whole thing to work
4604 if (arm_main_function
)
4607 fputs ("\tIMPORT __main\n", f
);
4608 fputs ("\tDCD __main\n", f
);
4611 /* Now dump the remaining imports. */
4612 while (imports_list
)
4614 fprintf (f
, "\tIMPORT\t");
4615 assemble_name (f
, imports_list
->name
);
4617 imports_list
= imports_list
->next
;
4620 #endif /* AOF_ASSEMBLER */