1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 1993, 1994, 1995 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "insn-flags.h"
36 #include "insn-attr.h"
/* The maximum number of insns skipped which will be conditionalised if
   possible.  */
#define MAX_INSNS_SKIPPED  5
46 /* Some function declarations. */
47 extern FILE *asm_out_file
;
48 extern char *output_multi_immediate ();
49 extern void arm_increase_location ();
51 HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
52 static int get_prologue_size
PROTO ((void));
53 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
54 HOST_WIDE_INT
, rtx
, rtx
, int, int));
56 /* Define the information needed to generate branch insns. This is
57 stored from the compare operation. */
59 rtx arm_compare_op0
, arm_compare_op1
;
62 /* What type of cpu are we compiling for? */
63 enum processor_type arm_cpu
;
65 /* What type of floating point are we compiling for? */
66 enum floating_point_type arm_fpu
;
68 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
69 enum prog_mode_type arm_prgmode
;
71 char *target_cpu_name
= ARM_CPU_NAME
;
72 char *target_fpe_name
= NULL
;
/* Nonzero if this is an "M" variant of the processor.  */
int arm_fast_multiply = 0;
/* Nonzero if this chip supports the ARM Architecture 4 extensions */
80 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
81 must report the mode of the memory reference from PRINT_OPERAND to
82 PRINT_OPERAND_ADDRESS. */
83 enum machine_mode output_memory_reference_mode
;
85 /* Nonzero if the prologue must setup `fp'. */
86 int current_function_anonymous_args
;
/* Location counter of .text segment.  */
int arm_text_location = 0;

/* Set to one if we think that lr is only saved because of subroutine calls,
   but all of these can be `put after' return insns */
int lr_save_eliminated;
/* A hash table is used to store text segment labels and their associated
   offset from the start of the text segment.  */
struct label_offset
{
  char *name;
  int offset;
  struct label_offset *cdr;
};

#define LABEL_HASH_SIZE  257

static struct label_offset *offset_table[LABEL_HASH_SIZE];
/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
static int return_used_this_function;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;
115 /* For an explanation of these variables, see final_prescan_insn below. */
119 int arm_target_label
;
/* The condition codes of the ARM, and the inverse function.  */
char *arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

/* ARM condition codes come in complementary pairs, so the inverse of a
   condition is obtained by flipping the low bit of its encoding.  */
#define ARM_INVERSE_CONDITION_CODE(X)  ((X) ^ 1)
131 /* Initialization code */
/* Per-processor capability flags for the all_procs table.  */
#define FL_CO_PROC    0x01            /* Has external co-processor bus */
#define FL_FAST_MULT  0x02            /* Fast multiply */
#define FL_MODE26     0x04            /* 26-bit mode support */
#define FL_MODE32     0x08            /* 32-bit mode support */
#define FL_ARCH4      0x10            /* Architecture rel 4 */
#define FL_THUMB      0x20            /* Thumb aware */
142 enum processor_type type
;
146 /* Not all of these give usefully different compilation alternatives,
147 but there is no simple way of generalizing them. */
148 static struct processors all_procs
[] =
150 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
151 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
152 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
153 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
154 {"arm60", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
155 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
156 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
157 {"arm620", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
158 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
159 {"arm70", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
160 {"arm7d", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
161 {"arm7di", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
162 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
164 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
166 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
167 {"arm700i", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
168 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
169 {"arm710c", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
170 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
171 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
172 | FL_ARCH4
| FL_THUMB
)},
176 /* Fix up any incompatible options that the user has specified.
177 This has now turned into a maze. */
179 arm_override_options ()
181 int arm_thumb_aware
= 0;
183 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
184 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
186 if (TARGET_POKE_FUNCTION_NAME
)
187 target_flags
|= ARM_FLAG_APCS_FRAME
;
191 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu-<proc>");
192 target_flags
|= ARM_FLAG_APCS_32
;
193 arm_cpu
= PROCESSOR_ARM6
;
198 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu-<proc>");
199 target_flags
&= ~ARM_FLAG_APCS_32
;
200 arm_cpu
= PROCESSOR_ARM2
;
203 if ((TARGET_3
|| TARGET_6
) && target_cpu_name
!= NULL
)
204 fatal ("Incompatible mix of old and new options. -m%d and -mcpu-%s",
205 TARGET_3
? 3 : 6, target_cpu_name
);
207 if (TARGET_APCS_REENT
&& flag_pic
)
208 fatal ("-fpic and -mapcs-reent are incompatible");
210 if (TARGET_APCS_REENT
)
211 warning ("APCS reentrant code not supported. Ignored");
214 warning ("Position independent code not supported. Ignored");
216 if (TARGET_APCS_FLOAT
)
217 warning ("Passing floating point arguments in fp regs not yet supported");
219 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
221 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
222 target_flags
|= ARM_FLAG_APCS_FRAME
;
225 arm_cpu
= TARGET_6
? PROCESSOR_ARM6
: PROCESSOR_ARM2
;
228 if (target_cpu_name
!= NULL
)
230 char *c
= target_cpu_name
;
231 struct processors
*proc
;
233 /* Match against the supported types. */
234 for (proc
= all_procs
; proc
->name
!= NULL
; proc
++)
236 if (strcmp (proc
->name
, c
) == 0)
242 arm_cpu
= proc
->type
;
244 /* Default value for floating point code... if no co-processor
245 bus, then schedule for emulated floating point. Otherwise,
246 assume the user has an FPA, unless overridden with -mfpe-... */
247 if (proc
->flags
& FL_CO_PROC
== 0)
251 arm_fast_multiply
= (proc
->flags
& FL_FAST_MULT
) != 0;
252 arm_arch4
= (proc
->flags
& FL_ARCH4
) != 0;
253 arm_thumb_aware
= (proc
->flags
& FL_THUMB
) != 0;
254 /* Processors with a load delay slot can load constants faster,
255 from the pool than it takes to construct them, so reduce the
256 complexity of the constant that we will try to generate
260 fatal ("Unrecognized cpu type: %s", target_cpu_name
);
265 if (strcmp (target_fpe_name
, "2") == 0)
267 else if (strcmp (target_fpe_name
, "3") == 0)
270 fatal ("Invalid floating point emulation option: -mfpe-%s",
274 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
276 warning ("This processor variant does not support Thumb interworking");
277 target_flags
&= ~ARM_FLAG_THUMB
;
280 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
283 /* For arm2/3 there is no need to do any scheduling if there is only
284 a floating point emulator, or we are doing software floating-point. */
285 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
286 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
288 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
292 /* Return 1 if it is possible to return using a single instruction */
299 if (!reload_completed
||current_function_pretend_args_size
300 || current_function_anonymous_args
301 || (get_frame_size () && !(TARGET_APCS
|| frame_pointer_needed
)))
304 /* Can't be done if any of the FPU regs are pushed, since this also
306 for (regno
= 20; regno
< 24; regno
++)
307 if (regs_ever_live
[regno
])
313 /* Return TRUE if int I is a valid immediate ARM constant. */
319 unsigned HOST_WIDE_INT mask
= ~0xFF;
321 /* Fast return for 0 and powers of 2 */
322 if ((i
& (i
- 1)) == 0)
327 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
330 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
331 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
332 } while (mask
!= ~0xFF);
337 /* Return true if I is a valid constant for the operation CODE. */
339 const_ok_for_op (i
, code
, mode
)
342 enum machine_mode mode
;
344 if (const_ok_for_arm (i
))
350 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
352 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
358 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
365 /* Emit a sequence of insns to handle a large constant.
366 CODE is the code of the operation required, it can be any of SET, PLUS,
367 IOR, AND, XOR, MINUS;
368 MODE is the mode in which the operation is being performed;
369 VAL is the integer to operate on;
370 SOURCE is the other operand (a register, or a null-pointer for SET);
371 SUBTARGETS means it is safe to create scratch registers if that will
372 either produce a simpler sequence, or we will want to cse the values.
373 Return value is the number of insns emitted. */
376 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
378 enum machine_mode mode
;
384 if (subtargets
|| code
== SET
385 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
386 && REGNO (target
) != REGNO (source
)))
390 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
391 > arm_constant_limit
+ (code
!= SET
))
395 /* Currently SET is the only monadic value for CODE, all
396 the rest are diadic. */
397 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
402 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
404 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
405 /* For MINUS, the value is subtracted from, since we never
406 have subtraction of a constant. */
408 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
409 gen_rtx (code
, mode
, temp
, source
)));
411 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
412 gen_rtx (code
, mode
, source
, temp
)));
418 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
421 /* As above, but extra parameter GENERATE which, if clear, suppresses
424 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
426 enum machine_mode mode
;
436 int can_negate_initial
= 0;
439 int num_bits_set
= 0;
440 int set_sign_bit_copies
= 0;
441 int clear_sign_bit_copies
= 0;
442 int clear_zero_bit_copies
= 0;
443 int set_zero_bit_copies
= 0;
446 unsigned HOST_WIDE_INT temp1
, temp2
;
447 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
449 /* find out which operations are safe for a given CODE. Also do a quick
450 check for degenerate cases; these can occur when DImode operations
462 can_negate_initial
= 1;
466 if (remainder
== 0xffffffff)
469 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
470 GEN_INT (ARM_SIGN_EXTEND (val
))));
475 if (reload_completed
&& rtx_equal_p (target
, source
))
478 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
487 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
490 if (remainder
== 0xffffffff)
492 if (reload_completed
&& rtx_equal_p (target
, source
))
495 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
504 if (reload_completed
&& rtx_equal_p (target
, source
))
507 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
510 if (remainder
== 0xffffffff)
513 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
514 gen_rtx (NOT
, mode
, source
)));
518 /* We don't know how to handle this yet below. */
522 /* We treat MINUS as (val - source), since (source - val) is always
523 passed as (source + (-val)). */
527 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
528 gen_rtx (NEG
, mode
, source
)));
531 if (const_ok_for_arm (val
))
534 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
535 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
546 /* If we can do it in one insn get out quickly */
547 if (const_ok_for_arm (val
)
548 || (can_negate_initial
&& const_ok_for_arm (-val
))
549 || (can_invert
&& const_ok_for_arm (~val
)))
552 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
553 (source
? gen_rtx (code
, mode
, source
,
560 /* Calculate a few attributes that may be useful for specific
563 for (i
= 31; i
>= 0; i
--)
565 if ((remainder
& (1 << i
)) == 0)
566 clear_sign_bit_copies
++;
571 for (i
= 31; i
>= 0; i
--)
573 if ((remainder
& (1 << i
)) != 0)
574 set_sign_bit_copies
++;
579 for (i
= 0; i
<= 31; i
++)
581 if ((remainder
& (1 << i
)) == 0)
582 clear_zero_bit_copies
++;
587 for (i
= 0; i
<= 31; i
++)
589 if ((remainder
& (1 << i
)) != 0)
590 set_zero_bit_copies
++;
598 /* See if we can do this by sign_extending a constant that is known
599 to be negative. This is a good, way of doing it, since the shift
600 may well merge into a subsequent insn. */
601 if (set_sign_bit_copies
> 1)
604 (temp1
= ARM_SIGN_EXTEND (remainder
605 << (set_sign_bit_copies
- 1))))
609 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
610 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
612 emit_insn (gen_ashrsi3 (target
, new_src
,
613 GEN_INT (set_sign_bit_copies
- 1)));
617 /* For an inverted constant, we will need to set the low bits,
618 these will be shifted out of harm's way. */
619 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
620 if (const_ok_for_arm (~temp1
))
624 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
625 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
627 emit_insn (gen_ashrsi3 (target
, new_src
,
628 GEN_INT (set_sign_bit_copies
- 1)));
634 /* See if we can generate this by setting the bottom (or the top)
635 16 bits, and then shifting these into the other half of the
636 word. We only look for the simplest cases, to do more would cost
637 too much. Be careful, however, not to generate this when the
638 alternative would take fewer insns. */
639 if (val
& 0xffff0000)
641 temp1
= remainder
& 0xffff0000;
642 temp2
= remainder
& 0x0000ffff;
644 /* Overlaps outside this range are best done using other methods. */
645 for (i
= 9; i
< 24; i
++)
647 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
648 && ! const_ok_for_arm (temp2
))
650 insns
= arm_gen_constant (code
, mode
, temp2
,
651 new_src
= (subtargets
654 source
, subtargets
, generate
);
657 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
659 gen_rtx (ASHIFT
, mode
, source
,
666 /* Don't duplicate cases already considered. */
667 for (i
= 17; i
< 24; i
++)
669 if (((temp1
| (temp1
>> i
)) == remainder
)
670 && ! const_ok_for_arm (temp1
))
672 insns
= arm_gen_constant (code
, mode
, temp1
,
673 new_src
= (subtargets
676 source
, subtargets
, generate
);
679 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
681 gen_rtx (LSHIFTRT
, mode
,
682 source
, GEN_INT (i
)),
692 /* If we have IOR or XOR, and the inverse of the constant can be loaded
693 in a single instruction, and we can find a temporary to put it in,
694 then this can be done in two instructions instead of 3-4. */
696 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
698 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
702 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
704 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
705 GEN_INT (ARM_SIGN_EXTEND (~ val
))));
706 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
707 gen_rtx (code
, mode
, source
, sub
)));
716 if (set_sign_bit_copies
> 8
717 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
721 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
722 rtx shift
= GEN_INT (set_sign_bit_copies
);
724 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
726 gen_rtx (ASHIFT
, mode
, source
,
728 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
730 gen_rtx (LSHIFTRT
, mode
, sub
,
736 if (set_zero_bit_copies
> 8
737 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
741 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
742 rtx shift
= GEN_INT (set_zero_bit_copies
);
744 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
746 gen_rtx (LSHIFTRT
, mode
, source
,
748 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
750 gen_rtx (ASHIFT
, mode
, sub
,
756 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
760 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
761 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
762 gen_rtx (NOT
, mode
, source
)));
765 sub
= gen_reg_rtx (mode
);
766 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
767 gen_rtx (AND
, mode
, source
,
769 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
770 gen_rtx (NOT
, mode
, sub
)));
777 /* See if two shifts will do 2 or more insn's worth of work. */
778 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
780 HOST_WIDE_INT shift_mask
= ((0xffffffff
781 << (32 - clear_sign_bit_copies
))
786 if ((remainder
| shift_mask
) != 0xffffffff)
790 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
791 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
792 new_source
, source
, subtargets
, 1);
796 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
797 new_source
, source
, subtargets
, 0);
802 shift
= GEN_INT (clear_sign_bit_copies
);
803 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
804 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
805 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
811 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
813 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
817 if ((remainder
| shift_mask
) != 0xffffffff)
821 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
822 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
823 new_source
, source
, subtargets
, 1);
827 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
828 new_source
, source
, subtargets
, 0);
833 shift
= GEN_INT (clear_zero_bit_copies
);
834 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
835 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
836 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
848 for (i
= 0; i
< 32; i
++)
849 if (remainder
& (1 << i
))
852 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
853 remainder
= (~remainder
) & 0xffffffff;
854 else if (code
== PLUS
&& num_bits_set
> 16)
855 remainder
= (-remainder
) & 0xffffffff;
862 /* Now try and find a way of doing the job in either two or three
864 We start by looking for the largest block of zeros that are aligned on
865 a 2-bit boundary, we then fill up the temps, wrapping around to the
866 top of the word when we drop off the bottom.
867 In the worst case this code should produce no more than four insns. */
870 int best_consecutive_zeros
= 0;
872 for (i
= 0; i
< 32; i
+= 2)
874 int consecutive_zeros
= 0;
876 if (! (remainder
& (3 << i
)))
878 while ((i
< 32) && ! (remainder
& (3 << i
)))
880 consecutive_zeros
+= 2;
883 if (consecutive_zeros
> best_consecutive_zeros
)
885 best_consecutive_zeros
= consecutive_zeros
;
886 best_start
= i
- consecutive_zeros
;
892 /* Now start emitting the insns, starting with the one with the highest
893 bit set: we do this so that the smallest number will be emitted last;
894 this is more likely to be combinable with addressing insns. */
902 if (remainder
& (3 << (i
- 2)))
907 temp1
= remainder
& ((0x0ff << end
)
908 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
914 emit_insn (gen_rtx (SET
, VOIDmode
,
915 new_src
= (subtargets
918 GEN_INT (can_invert
? ~temp1
: temp1
)));
922 else if (code
== MINUS
)
925 emit_insn (gen_rtx (SET
, VOIDmode
,
926 new_src
= (subtargets
929 gen_rtx (code
, mode
, GEN_INT (temp1
),
936 emit_insn (gen_rtx (SET
, VOIDmode
,
942 gen_rtx (code
, mode
, source
,
943 GEN_INT (can_invert
? ~temp1
959 /* Handle aggregates that are not laid out in a BLKmode element.
960 This is a sub-element of RETURN_IN_MEMORY. */
962 arm_return_in_memory (type
)
965 if (TREE_CODE (type
) == RECORD_TYPE
)
969 /* For a struct, we can return in a register if every element was a
971 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
972 if (TREE_CODE (field
) != FIELD_DECL
973 || ! DECL_BIT_FIELD_TYPE (field
))
978 else if (TREE_CODE (type
) == UNION_TYPE
)
982 /* Unions can be returned in registers if every element is
983 integral, or can be returned in an integer register. */
984 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
986 if (TREE_CODE (field
) != FIELD_DECL
987 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
988 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
989 || FLOAT_TYPE_P (TREE_TYPE (field
)))
994 /* XXX Not sure what should be done for other aggregates, so put them in
999 #define REG_OR_SUBREG_REG(X) \
1000 (GET_CODE (X) == REG \
1001 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
1003 #define REG_OR_SUBREG_RTX(X) \
1004 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
1006 #define ARM_FRAME_RTX(X) \
1007 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1008 || (X) == arg_pointer_rtx)
1011 arm_rtx_costs (x
, code
, outer_code
)
1013 enum rtx_code code
, outer_code
;
1015 enum machine_mode mode
= GET_MODE (x
);
1016 enum rtx_code subcode
;
1022 /* Memory costs quite a lot for the first word, but subsequent words
1023 load at the equivalent of a single insn each. */
1024 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1025 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1032 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1039 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1041 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1042 + ((GET_CODE (XEXP (x
, 0)) == REG
1043 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1044 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1046 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1047 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1048 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1050 + ((GET_CODE (XEXP (x
, 1)) == REG
1051 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1052 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1053 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1058 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1059 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1060 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1061 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1064 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1065 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1066 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1067 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1069 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1070 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1071 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1074 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1075 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1076 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1077 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1078 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1079 || subcode
== ROTATE
|| subcode
== ROTATERT
1081 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1082 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1083 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1084 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1085 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1086 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1087 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1092 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1093 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1094 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1095 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1096 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1100 case AND
: case XOR
: case IOR
:
1103 /* Normally the frame registers will be spilt into reg+const during
1104 reload, so it is a bad idea to combine them with other instructions,
1105 since then they might not be moved outside of loops. As a compromise
1106 we allow integration with ops that have a constant as their second
1108 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1109 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1110 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1111 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1112 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1116 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1117 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1118 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1119 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1122 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1123 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1124 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1125 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1126 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1129 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1130 return (1 + extra_cost
1131 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1132 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1133 || subcode
== ROTATE
|| subcode
== ROTATERT
1135 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1136 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1137 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1138 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1139 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1140 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1146 if (arm_fast_multiply
&& mode
== DImode
1147 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1148 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1149 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1152 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1156 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1158 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1159 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1160 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1162 int booth_unit_size
= (arm_fast_multiply
? 8 : 2);
1164 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1166 i
>>= booth_unit_size
;
1173 return ((arm_fast_multiply
? 8 : 30)
1174 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1175 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1178 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1179 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1183 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1185 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1188 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1196 return 4 + (mode
== DImode
? 4 : 0);
1199 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1200 return (4 + (mode
== DImode
? 4 : 0)
1201 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1204 switch (GET_MODE (XEXP (x
, 0)))
1207 return (1 + (mode
== DImode
? 4 : 0)
1208 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1211 return (4 + (mode
== DImode
? 4 : 0)
1212 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1215 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1224 /* This code has been fixed for cross compilation. */
1226 static int fpa_consts_inited
= 0;
1228 char *strings_fpa
[8] = {
1230 "4", "5", "0.5", "10"
1233 static REAL_VALUE_TYPE values_fpa
[8];
1241 for (i
= 0; i
< 8; i
++)
1243 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1247 fpa_consts_inited
= 1;
1250 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1253 const_double_rtx_ok_for_fpu (x
)
1259 if (!fpa_consts_inited
)
1262 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1263 if (REAL_VALUE_MINUS_ZERO (r
))
1266 for (i
= 0; i
< 8; i
++)
1267 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1273 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1276 neg_const_double_rtx_ok_for_fpu (x
)
1282 if (!fpa_consts_inited
)
1285 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1286 r
= REAL_VALUE_NEGATE (r
);
1287 if (REAL_VALUE_MINUS_ZERO (r
))
1290 for (i
= 0; i
< 8; i
++)
1291 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1297 /* Predicates for `match_operand' and `match_operator'. */
1299 /* s_register_operand is the same as register_operand, but it doesn't accept
1300 (SUBREG (MEM)...). */
1303 s_register_operand (op
, mode
)
1305 enum machine_mode mode
;
1307 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1310 if (GET_CODE (op
) == SUBREG
)
1311 op
= SUBREG_REG (op
);
1313 /* We don't consider registers whose class is NO_REGS
1314 to be a register operand. */
1315 return (GET_CODE (op
) == REG
1316 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1317 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1320 /* Only accept reg, subreg(reg), const_int. */
1323 reg_or_int_operand (op
, mode
)
1325 enum machine_mode mode
;
1327 if (GET_CODE (op
) == CONST_INT
)
1330 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1333 if (GET_CODE (op
) == SUBREG
)
1334 op
= SUBREG_REG (op
);
1336 /* We don't consider registers whose class is NO_REGS
1337 to be a register operand. */
1338 return (GET_CODE (op
) == REG
1339 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1340 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1343 /* Return 1 if OP is an item in memory, given that we are in reload. */
1346 reload_memory_operand (op
, mode
)
1348 enum machine_mode mode
;
1350 int regno
= true_regnum (op
);
1352 return (! CONSTANT_P (op
)
1354 || (GET_CODE (op
) == REG
1355 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1358 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1361 arm_rhs_operand (op
, mode
)
1363 enum machine_mode mode
;
1365 return (s_register_operand (op
, mode
)
1366 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1369 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1373 arm_rhsm_operand (op
, mode
)
1375 enum machine_mode mode
;
1377 return (s_register_operand (op
, mode
)
1378 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1379 || memory_operand (op
, mode
));
1382 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1383 constant that is valid when negated. */
1386 arm_add_operand (op
, mode
)
1388 enum machine_mode mode
;
1390 return (s_register_operand (op
, mode
)
1391 || (GET_CODE (op
) == CONST_INT
1392 && (const_ok_for_arm (INTVAL (op
))
1393 || const_ok_for_arm (-INTVAL (op
)))));
1397 arm_not_operand (op
, mode
)
1399 enum machine_mode mode
;
1401 return (s_register_operand (op
, mode
)
1402 || (GET_CODE (op
) == CONST_INT
1403 && (const_ok_for_arm (INTVAL (op
))
1404 || const_ok_for_arm (~INTVAL (op
)))));
1407 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1410 fpu_rhs_operand (op
, mode
)
1412 enum machine_mode mode
;
1414 if (s_register_operand (op
, mode
))
1416 else if (GET_CODE (op
) == CONST_DOUBLE
)
1417 return (const_double_rtx_ok_for_fpu (op
));
1423 fpu_add_operand (op
, mode
)
1425 enum machine_mode mode
;
1427 if (s_register_operand (op
, mode
))
1429 else if (GET_CODE (op
) == CONST_DOUBLE
)
1430 return (const_double_rtx_ok_for_fpu (op
)
1431 || neg_const_double_rtx_ok_for_fpu (op
));
1436 /* Return nonzero if OP is a constant power of two. */
1439 power_of_two_operand (op
, mode
)
1441 enum machine_mode mode
;
1443 if (GET_CODE (op
) == CONST_INT
)
1445 HOST_WIDE_INT value
= INTVAL(op
);
1446 return value
!= 0 && (value
& (value
- 1)) == 0;
1451 /* Return TRUE for a valid operand of a DImode operation.
1452 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1453 Note that this disallows MEM(REG+REG), but allows
1454 MEM(PRE/POST_INC/DEC(REG)). */
1457 di_operand (op
, mode
)
1459 enum machine_mode mode
;
1461 if (s_register_operand (op
, mode
))
1464 switch (GET_CODE (op
))
1471 return memory_address_p (DImode
, XEXP (op
, 0));
1478 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1479 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1480 Note that this disallows MEM(REG+REG), but allows
1481 MEM(PRE/POST_INC/DEC(REG)). */
1484 soft_df_operand (op
, mode
)
1486 enum machine_mode mode
;
1488 if (s_register_operand (op
, mode
))
1491 switch (GET_CODE (op
))
1497 return memory_address_p (DFmode
, XEXP (op
, 0));
1504 /* Return TRUE for valid index operands. */
1507 index_operand (op
, mode
)
1509 enum machine_mode mode
;
1511 return (s_register_operand(op
, mode
)
1512 || (immediate_operand (op
, mode
)
1513 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1516 /* Return TRUE for valid shifts by a constant. This also accepts any
1517 power of two on the (somewhat overly relaxed) assumption that the
1518 shift operator in this case was a mult. */
1521 const_shift_operand (op
, mode
)
1523 enum machine_mode mode
;
1525 return (power_of_two_operand (op
, mode
)
1526 || (immediate_operand (op
, mode
)
1527 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1530 /* Return TRUE for arithmetic operators which can be combined with a multiply
1534 shiftable_operator (x
, mode
)
1536 enum machine_mode mode
;
1538 if (GET_MODE (x
) != mode
)
1542 enum rtx_code code
= GET_CODE (x
);
1544 return (code
== PLUS
|| code
== MINUS
1545 || code
== IOR
|| code
== XOR
|| code
== AND
);
1549 /* Return TRUE for shift operators. */
1552 shift_operator (x
, mode
)
1554 enum machine_mode mode
;
1556 if (GET_MODE (x
) != mode
)
1560 enum rtx_code code
= GET_CODE (x
);
1563 return power_of_two_operand (XEXP (x
, 1));
1565 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
1566 || code
== ROTATERT
);
1570 int equality_operator (x
, mode
)
1572 enum machine_mode mode
;
1574 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
1577 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
1580 minmax_operator (x
, mode
)
1582 enum machine_mode mode
;
1584 enum rtx_code code
= GET_CODE (x
);
1586 if (GET_MODE (x
) != mode
)
1589 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
1592 /* return TRUE if x is EQ or NE */
1594 /* Return TRUE if this is the condition code register, if we aren't given
1595 a mode, accept any class CCmode register */
1598 cc_register (x
, mode
)
1600 enum machine_mode mode
;
1602 if (mode
== VOIDmode
)
1604 mode
= GET_MODE (x
);
1605 if (GET_MODE_CLASS (mode
) != MODE_CC
)
1609 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1615 /* Return TRUE if this is the condition code register, if we aren't given
1616 a mode, accept any mode in class CC_MODE that is reversible */
1619 reversible_cc_register (x
, mode
)
1621 enum machine_mode mode
;
1623 if (mode
== VOIDmode
)
1625 mode
= GET_MODE (x
);
1626 if (GET_MODE_CLASS (mode
) != MODE_CC
1627 && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1629 if (GET_MODE_CLASS (mode
) != MODE_CC
1630 || (! flag_fast_math
&& ! REVERSIBLE_CC_MODE (mode
)))
1634 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1640 /* Return TRUE if X references a SYMBOL_REF. */
1642 symbol_mentioned_p (x
)
1648 if (GET_CODE (x
) == SYMBOL_REF
)
1651 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1652 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1658 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1659 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
1662 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
1669 /* Return TRUE if X references a LABEL_REF. */
1671 label_mentioned_p (x
)
1677 if (GET_CODE (x
) == LABEL_REF
)
1680 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1681 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1687 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1688 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
1691 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
1702 enum rtx_code code
= GET_CODE (x
);
1706 else if (code
== SMIN
)
1708 else if (code
== UMIN
)
1710 else if (code
== UMAX
)
1716 /* Return 1 if memory locations are adjacent */
1719 adjacent_mem_locations (a
, b
)
1722 int val0
= 0, val1
= 0;
1725 if ((GET_CODE (XEXP (a
, 0)) == REG
1726 || (GET_CODE (XEXP (a
, 0)) == PLUS
1727 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
1728 && (GET_CODE (XEXP (b
, 0)) == REG
1729 || (GET_CODE (XEXP (b
, 0)) == PLUS
1730 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
1732 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
1734 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
1735 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
1738 reg0
= REGNO (XEXP (a
, 0));
1739 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
1741 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
1742 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
1745 reg1
= REGNO (XEXP (b
, 0));
1746 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
1751 /* Return 1 if OP is a load multiple operation. It is known to be
1752 parallel and the first section will be tested. */
1755 load_multiple_operation (op
, mode
)
1757 enum machine_mode mode
;
1759 HOST_WIDE_INT count
= XVECLEN (op
, 0);
1762 HOST_WIDE_INT i
= 1, base
= 0;
1766 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1769 /* Check to see if this might be a write-back */
1770 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
1775 /* Now check it more carefully */
1776 if (GET_CODE (SET_DEST (elt
)) != REG
1777 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
1778 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
1779 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
1780 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
1781 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
1782 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
1783 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
1784 != REGNO (SET_DEST (elt
)))
1790 /* Perform a quick check so we don't blow up below. */
1792 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
1793 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
1794 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
1797 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
1798 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
1800 for (; i
< count
; i
++)
1802 rtx elt
= XVECEXP (op
, 0, i
);
1804 if (GET_CODE (elt
) != SET
1805 || GET_CODE (SET_DEST (elt
)) != REG
1806 || GET_MODE (SET_DEST (elt
)) != SImode
1807 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
1808 || GET_CODE (SET_SRC (elt
)) != MEM
1809 || GET_MODE (SET_SRC (elt
)) != SImode
1810 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
1811 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
1812 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
1813 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
1820 /* Return 1 if OP is a store multiple operation. It is known to be
1821 parallel and the first section will be tested. */
1824 store_multiple_operation (op
, mode
)
1826 enum machine_mode mode
;
1828 HOST_WIDE_INT count
= XVECLEN (op
, 0);
1831 HOST_WIDE_INT i
= 1, base
= 0;
1835 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1838 /* Check to see if this might be a write-back */
1839 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
1844 /* Now check it more carefully */
1845 if (GET_CODE (SET_DEST (elt
)) != REG
1846 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
1847 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
1848 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
1849 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
1850 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
1851 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
1852 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
1853 != REGNO (SET_DEST (elt
)))
1859 /* Perform a quick check so we don't blow up below. */
1861 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
1862 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
1863 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
1866 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
1867 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
1869 for (; i
< count
; i
++)
1871 elt
= XVECEXP (op
, 0, i
);
1873 if (GET_CODE (elt
) != SET
1874 || GET_CODE (SET_SRC (elt
)) != REG
1875 || GET_MODE (SET_SRC (elt
)) != SImode
1876 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
1877 || GET_CODE (SET_DEST (elt
)) != MEM
1878 || GET_MODE (SET_DEST (elt
)) != SImode
1879 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
1880 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
1881 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
1882 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
1890 multi_register_push (op
, mode
)
1892 enum machine_mode mode
;
1894 if (GET_CODE (op
) != PARALLEL
1895 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1896 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
1897 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
1904 /* Routines for use with attributes */
1907 const_pool_offset (symbol
)
1910 return get_pool_offset (symbol
) - get_pool_size () - get_prologue_size ();
1913 /* Routines for use in generating RTL */
1916 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
)
1925 int sign
= up
? 1 : -1;
1927 result
= gen_rtx (PARALLEL
, VOIDmode
,
1928 rtvec_alloc (count
+ (write_back
? 2 : 0)));
1931 XVECEXP (result
, 0, 0)
1932 = gen_rtx (SET
, GET_MODE (from
), from
,
1933 plus_constant (from
, count
* 4 * sign
));
1938 for (j
= 0; i
< count
; i
++, j
++)
1940 XVECEXP (result
, 0, i
)
1941 = gen_rtx (SET
, VOIDmode
, gen_rtx (REG
, SImode
, base_regno
+ j
),
1942 gen_rtx (MEM
, SImode
,
1943 plus_constant (from
, j
* 4 * sign
)));
1947 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
1953 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
)
1962 int sign
= up
? 1 : -1;
1964 result
= gen_rtx (PARALLEL
, VOIDmode
,
1965 rtvec_alloc (count
+ (write_back
? 2 : 0)));
1968 XVECEXP (result
, 0, 0)
1969 = gen_rtx (SET
, GET_MODE (to
), to
,
1970 plus_constant (to
, count
* 4 * sign
));
1975 for (j
= 0; i
< count
; i
++, j
++)
1977 XVECEXP (result
, 0, i
)
1978 = gen_rtx (SET
, VOIDmode
,
1979 gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
)),
1980 gen_rtx (REG
, SImode
, base_regno
+ j
));
1984 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
1990 arm_gen_movstrqi (operands
)
1993 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
1996 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
1997 rtx part_bytes_reg
= NULL
;
1998 extern int optimize
;
2000 if (GET_CODE (operands
[2]) != CONST_INT
2001 || GET_CODE (operands
[3]) != CONST_INT
2002 || INTVAL (operands
[2]) > 64
2003 || INTVAL (operands
[3]) & 3)
2006 st_dst
= XEXP (operands
[0], 0);
2007 st_src
= XEXP (operands
[1], 0);
2008 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2009 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2011 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2012 out_words_to_go
= INTVAL (operands
[2]) / 4;
2013 last_bytes
= INTVAL (operands
[2]) & 3;
2015 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2016 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2018 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2020 emit_insn (arm_gen_load_multiple (0, (in_words_to_go
> 4
2021 ? 4 : in_words_to_go
),
2023 if (out_words_to_go
)
2025 if (out_words_to_go
!= 1)
2026 emit_insn (arm_gen_store_multiple (0, (out_words_to_go
> 4
2027 ? 4 : out_words_to_go
),
2031 emit_move_insn (gen_rtx (MEM
, SImode
, dst
),
2032 gen_rtx (REG
, SImode
, 0));
2033 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2037 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2038 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2041 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2042 if (out_words_to_go
)
2046 emit_move_insn (sreg
= gen_reg_rtx (SImode
), gen_rtx (MEM
, SImode
, src
));
2047 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2048 emit_move_insn (gen_rtx (MEM
, SImode
, dst
), sreg
);
2049 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2052 if (in_words_to_go
) /* Sanity check */
2058 if (in_words_to_go
< 0)
2061 part_bytes_reg
= copy_to_mode_reg (SImode
, gen_rtx (MEM
, SImode
, src
));
2062 emit_insn (gen_addsi3 (src
, src
, GEN_INT (4)));
2065 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2067 rtx tmp
= gen_reg_rtx (SImode
);
2069 if (part_bytes_reg
== NULL
)
2072 /* The bytes we want are in the top end of the word */
2073 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2074 GEN_INT (8 * (4 - last_bytes
))));
2075 part_bytes_reg
= tmp
;
2079 emit_move_insn (gen_rtx (MEM
, QImode
,
2080 plus_constant (dst
, last_bytes
- 1)),
2081 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2084 tmp
= gen_reg_rtx (SImode
);
2085 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2086 part_bytes_reg
= tmp
;
2095 if (part_bytes_reg
== NULL
)
2098 emit_move_insn (gen_rtx (MEM
, QImode
, dst
),
2099 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2100 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
2103 rtx tmp
= gen_reg_rtx (SImode
);
2104 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2105 part_bytes_reg
= tmp
;
2113 /* X and Y are two things to compare using CODE. Emit the compare insn and
2114 return the rtx for register 0 in the proper mode. FP means this is a
2115 floating point compare: I don't think that it is needed on the arm. */
2118 gen_compare_reg (code
, x
, y
, fp
)
2122 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
2123 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
2125 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
2126 gen_rtx (COMPARE
, mode
, x
, y
)));
2132 arm_reload_in_hi (operands
)
2135 rtx base
= find_replacement (&XEXP (operands
[1], 0));
2137 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
2138 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
2139 gen_rtx (MEM
, QImode
,
2140 plus_constant (base
, 1))));
2141 if (BYTES_BIG_ENDIAN
)
2142 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
2144 gen_rtx (IOR
, SImode
,
2145 gen_rtx (ASHIFT
, SImode
,
2146 gen_rtx (SUBREG
, SImode
,
2151 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
2153 gen_rtx (IOR
, SImode
,
2154 gen_rtx (ASHIFT
, SImode
,
2157 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
2161 arm_reload_out_hi (operands
)
2164 rtx base
= find_replacement (&XEXP (operands
[0], 0));
2166 if (BYTES_BIG_ENDIAN
)
2168 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
2169 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
2170 emit_insn (gen_lshrsi3 (operands
[2],
2171 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
2173 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
2174 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
2178 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
2179 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
2180 emit_insn (gen_lshrsi3 (operands
[2],
2181 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
2183 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
2184 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
2188 /* Check to see if a branch is forwards or backwards. Return TRUE if it
2192 arm_backwards_branch (from
, to
)
2195 return insn_addresses
[to
] <= insn_addresses
[from
];
2198 /* Check to see if a branch is within the distance that can be done using
2199 an arithmetic expression. */
2201 short_branch (from
, to
)
2204 int delta
= insn_addresses
[from
] + 8 - insn_addresses
[to
];
2206 return abs (delta
) < 980; /* A small margin for safety */
2209 /* Check to see that the insn isn't the target of the conditionalizing
2212 arm_insn_not_targeted (insn
)
2215 return insn
!= arm_target_insn
;
2219 /* Routines for manipulation of the constant pool. */
2220 /* This is unashamedly hacked from the version in sh.c, since the problem is
2221 extremely similar. */
2223 /* Arm instructions cannot load a large constant into a register,
2224 constants have to come from a pc relative load. The reference of a pc
2225 relative load instruction must be less than 1k in front of the instruction.
2226 This means that we often have to dump a constant inside a function, and
2227 generate code to branch around it.
2229 It is important to minimize this, since the branches will slow things
2230 down and make things bigger.
2232 Worst case code looks like:
2248 We fix this by performing a scan before scheduling, which notices which
2249 instructions need to have their operands fetched from the constant table
2250 and builds the table.
2255 scan, find an instruction which needs a pcrel move.  Look forward, find the
2256 last barrier which is within MAX_COUNT bytes of the requirement.
2257 If there isn't one, make one. Process all the instructions between
2258 the find and the barrier.
2260 In the above example, we can tell that L3 is within 1k of L1, so
2261 the first move can be shrunk from the 2 insn+constant sequence into
2262 just 1 insn, and the constant moved to L3 to make:
2273 Then the second move becomes the target for the shortening process.
2279 rtx value
; /* Value in table */
2280 HOST_WIDE_INT next_offset
;
2281 enum machine_mode mode
; /* Mode of value */
2284 /* The maximum number of constants that can fit into one pool, since
2285 the pc relative range is 0...1020 bytes and constants are at least 4
2288 #define MAX_POOL_SIZE (1020/4)
2289 static pool_node pool_vector
[MAX_POOL_SIZE
];
2290 static int pool_size
;
2291 static rtx pool_vector_label
;
2293 /* Add a constant to the pool and return its label. */
2294 static HOST_WIDE_INT
2295 add_constant (x
, mode
)
2297 enum machine_mode mode
;
2301 HOST_WIDE_INT offset
;
2303 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
2304 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
2305 x
= get_pool_constant (XEXP (x
, 0));
2306 #ifndef AOF_ASSEMBLER
2307 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
2308 x
= XVECEXP (x
, 0, 0);
2311 /* First see if we've already got it */
2312 for (i
= 0; i
< pool_size
; i
++)
2314 if (GET_CODE (x
) == pool_vector
[i
].value
->code
2315 && mode
== pool_vector
[i
].mode
)
2317 if (GET_CODE (x
) == CODE_LABEL
)
2319 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
2322 if (rtx_equal_p (x
, pool_vector
[i
].value
))
2323 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
2327 /* Need a new one */
2328 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
2331 pool_vector_label
= gen_label_rtx ();
2333 pool_vector
[pool_size
].next_offset
2334 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
2336 pool_vector
[pool_size
].value
= x
;
2337 pool_vector
[pool_size
].mode
= mode
;
2342 /* Output the literal table */
2349 scan
= emit_label_after (gen_label_rtx (), scan
);
2350 scan
= emit_insn_after (gen_align_4 (), scan
);
2351 scan
= emit_label_after (pool_vector_label
, scan
);
2353 for (i
= 0; i
< pool_size
; i
++)
2355 pool_node
*p
= pool_vector
+ i
;
2357 switch (GET_MODE_SIZE (p
->mode
))
2360 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
2364 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
2373 scan
= emit_insn_after (gen_consttable_end (), scan
);
2374 scan
= emit_barrier_after (scan
);
2378 /* Non zero if the src operand needs to be fixed up */
2380 fixit (src
, mode
, destreg
)
2382 enum machine_mode mode
;
2385 if (CONSTANT_P (src
))
2387 if (GET_CODE (src
) == CONST_INT
)
2388 return (! const_ok_for_arm (INTVAL (src
))
2389 && ! const_ok_for_arm (~INTVAL (src
)));
2390 if (GET_CODE (src
) == CONST_DOUBLE
)
2391 return (GET_MODE (src
) == VOIDmode
2393 || (! const_double_rtx_ok_for_fpu (src
)
2394 && ! neg_const_double_rtx_ok_for_fpu (src
)));
2395 return symbol_mentioned_p (src
);
2397 #ifndef AOF_ASSEMBLER
2398 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
2402 return (mode
== SImode
&& GET_CODE (src
) == MEM
2403 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
2404 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
2407 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
2409 find_barrier (from
, max_count
)
2414 rtx found_barrier
= 0;
2416 while (from
&& count
< max_count
)
2418 if (GET_CODE (from
) == BARRIER
)
2419 found_barrier
= from
;
2421 /* Count the length of this insn */
2422 if (GET_CODE (from
) == INSN
2423 && GET_CODE (PATTERN (from
)) == SET
2424 && CONSTANT_P (SET_SRC (PATTERN (from
)))
2425 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
2427 rtx src
= SET_SRC (PATTERN (from
));
2431 count
+= get_attr_length (from
);
2433 from
= NEXT_INSN (from
);
2438 /* We didn't find a barrier in time to
2439 dump our stuff, so we'll make one */
2440 rtx label
= gen_label_rtx ();
2443 from
= PREV_INSN (from
);
2445 from
= get_last_insn ();
2447 /* Walk back to be just before any jump */
2448 while (GET_CODE (from
) == JUMP_INSN
2449 || GET_CODE (from
) == NOTE
2450 || GET_CODE (from
) == CODE_LABEL
)
2451 from
= PREV_INSN (from
);
2453 from
= emit_jump_insn_after (gen_jump (label
), from
);
2454 JUMP_LABEL (from
) = label
;
2455 found_barrier
= emit_barrier_after (from
);
2456 emit_label_after (label
, found_barrier
);
2457 return found_barrier
;
2460 return found_barrier
;
2463 /* Non zero if the insn is a move instruction which needs to be fixed. */
2468 if (!INSN_DELETED_P (insn
)
2469 && GET_CODE (insn
) == INSN
2470 && GET_CODE (PATTERN (insn
)) == SET
)
2472 rtx pat
= PATTERN (insn
);
2473 rtx src
= SET_SRC (pat
);
2474 rtx dst
= SET_DEST (pat
);
2476 enum machine_mode mode
= GET_MODE (dst
);
2480 if (GET_CODE (dst
) == REG
)
2481 destreg
= REGNO (dst
);
2482 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
2483 destreg
= REGNO (SUBREG_REG (dst
));
2485 return fixit (src
, mode
, destreg
);
2499 /* The ldr instruction can work with up to a 4k offset, and most constants
2500 will be loaded with one of these instructions; however, the adr
2501 instruction and the ldf instructions only work with a 1k offset. This
2502 code needs to be rewritten to use the 4k offset when possible, and to
2503 adjust when a 1k offset is needed. For now we just use a 1k offset
2507 /* Floating point operands can't work further than 1024 bytes from the
2508 PC, so to make things simple we restrict all loads for such functions.
2510 if (TARGET_HARD_FLOAT
)
2511 for (regno
= 16; regno
< 24; regno
++)
2512 if (regs_ever_live
[regno
])
2521 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
2523 if (broken_move (insn
))
2525 /* This is a broken move instruction, scan ahead looking for
2526 a barrier to stick the constant table behind */
2528 rtx barrier
= find_barrier (insn
, count_size
);
2530 /* Now find all the moves between the points and modify them */
2531 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
2533 if (broken_move (scan
))
2535 /* This is a broken move instruction, add it to the pool */
2536 rtx pat
= PATTERN (scan
);
2537 rtx src
= SET_SRC (pat
);
2538 rtx dst
= SET_DEST (pat
);
2539 enum machine_mode mode
= GET_MODE (dst
);
2540 HOST_WIDE_INT offset
;
2546 /* If this is an HImode constant load, convert it into
2547 an SImode constant load. Since the register is always
2548 32 bits this is safe. We have to do this, since the
2549 load pc-relative instruction only does a 32-bit load. */
2553 if (GET_CODE (dst
) != REG
)
2555 PUT_MODE (dst
, SImode
);
2558 offset
= add_constant (src
, mode
);
2559 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
2563 /* For wide moves to integer regs we need to split the
2564 address calculation off into a separate insn, so that
2565 the load can then be done with a load-multiple. This is
2566 safe, since we have already noted the length of such
2567 insns to be 8, and we are immediately over-writing the
2568 scratch we have grabbed with the final result. */
2569 if (GET_MODE_SIZE (mode
) > 4
2570 && (scratch
= REGNO (dst
)) < 16)
2572 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
2573 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
2578 newsrc
= gen_rtx (MEM
, mode
, addr
);
2580 /* Build a jump insn wrapper around the move instead
2581 of an ordinary insn, because we want to have room for
2582 the target label rtx in fld[7], which an ordinary
2583 insn doesn't have. */
2584 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
2587 JUMP_LABEL (newinsn
) = pool_vector_label
;
2589 /* But it's still an ordinary insn */
2590 PUT_CODE (newinsn
, INSN
);
2597 dump_table (barrier
);
2604 /* Routines to output assembly language. */
2606 /* If the rtx is the correct value then return the string of the number.
2607 In this way we can ensure that valid double constants are generated even
2608 when cross compiling. */
2610 fp_immediate_constant (x
)
2616 if (!fpa_consts_inited
)
2619 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
2620 for (i
= 0; i
< 8; i
++)
2621 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
2622 return strings_fpa
[i
];
2627 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
2629 fp_const_from_val (r
)
2634 if (! fpa_consts_inited
)
2637 for (i
= 0; i
< 8; i
++)
2638 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
2639 return strings_fpa
[i
];
2644 /* Output the operands of a LDM/STM instruction to STREAM.
2645 MASK is the ARM register set mask of which only bits 0-15 are important.
2646 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
2647 must follow the register list. */
2650 print_multi_reg (stream
, instr
, mask
, hat
)
2656 int not_first
= FALSE
;
2658 fputc ('\t', stream
);
2659 fprintf (stream
, instr
, REGISTER_PREFIX
);
2660 fputs (", {", stream
);
2661 for (i
= 0; i
< 16; i
++)
2662 if (mask
& (1 << i
))
2665 fprintf (stream
, ", ");
2666 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
2670 fprintf (stream
, "}%s\n", hat
? "^" : "");
2673 /* Output a 'call' insn. */
2676 output_call (operands
)
2679 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
2681 if (REGNO (operands
[0]) == 14)
2683 operands
[0] = gen_rtx (REG
, SImode
, 12);
2684 output_asm_insn ("mov%?\t%0, %|lr", operands
);
2686 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
2687 output_asm_insn ("mov%?\t%|pc, %0", operands
);
2695 int something_changed
= 0;
2697 int code
= GET_CODE (x0
);
2704 if (REGNO (x0
) == 14)
2706 *x
= gen_rtx (REG
, SImode
, 12);
2711 /* Scan through the sub-elements and change any references there */
2712 fmt
= GET_RTX_FORMAT (code
);
2713 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2715 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
2716 else if (fmt
[i
] == 'E')
2717 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
2718 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
2719 return something_changed
;
2723 /* Output a 'call' insn that is a reference in memory. */
2726 output_call_mem (operands
)
2729 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
2730 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
2732 if (eliminate_lr2ip (&operands
[0]))
2733 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
2735 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
2736 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
2741 /* Output a move from arm registers to an fpu registers.
2742 OPERANDS[0] is an fpu register.
2743 OPERANDS[1] is the first registers of an arm register pair. */
2746 output_mov_long_double_fpu_from_arm (operands
)
2749 int arm_reg0
= REGNO (operands
[1]);
2755 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2756 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2757 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
2759 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
2760 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
2764 /* Output a move from an fpu register to arm registers.
2765 OPERANDS[0] is the first registers of an arm register pair.
2766 OPERANDS[1] is an fpu register. */
2769 output_mov_long_double_arm_from_fpu (operands
)
2772 int arm_reg0
= REGNO (operands
[0]);
2778 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2779 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2780 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
2782 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
2783 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
2787 /* Output a move from arm registers to arm registers of a long double
2788 OPERANDS[0] is the destination.
2789 OPERANDS[1] is the source. */
2791 output_mov_long_double_arm_from_arm (operands
)
2794 /* We have to be careful here because the two might overlap */
2795 int dest_start
= REGNO (operands
[0]);
2796 int src_start
= REGNO (operands
[1]);
2800 if (dest_start
< src_start
)
2802 for (i
= 0; i
< 3; i
++)
2804 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
2805 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
2806 output_asm_insn ("mov%?\t%0, %1", ops
);
2811 for (i
= 2; i
>= 0; i
--)
2813 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
2814 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
2815 output_asm_insn ("mov%?\t%0, %1", ops
);
2823 /* Output a move from arm registers to an fpu registers.
2824 OPERANDS[0] is an fpu register.
2825 OPERANDS[1] is the first registers of an arm register pair. */
2828 output_mov_double_fpu_from_arm (operands
)
2831 int arm_reg0
= REGNO (operands
[1]);
2836 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2837 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2838 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
2839 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
2843 /* Output a move from an fpu register to arm registers.
2844 OPERANDS[0] is the first registers of an arm register pair.
2845 OPERANDS[1] is an fpu register. */
2848 output_mov_double_arm_from_fpu (operands
)
2851 int arm_reg0
= REGNO (operands
[0]);
2857 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
2858 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
2859 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
2860 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
2864 /* Output a move between double words.
2865 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
2866 or MEM<-REG and all MEMs must be offsettable addresses. */
/* NOTE(review): extraction has dropped interior lines here (case labels
   of the address-code switches, some conditions and braces -- the fused
   original line numbers jump).  Comments below describe only what the
   surviving lines show; do not trust inferred control flow.  */
2869 output_move_double (operands
)
2872 enum rtx_code code0
= GET_CODE (operands
[0]);
2873 enum rtx_code code1
= GET_CODE (operands
[1]);
2878 int reg0
= REGNO (operands
[0]);
/* otherops[0] is the second (high) register of the destination pair.  */
2880 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
2883 int reg1
= REGNO (operands
[1]);
2887 /* Ensure the second source is not overwritten */
/* REG<-REG: if the pairs overlap, emit the two movs in whichever order
   copies the endangered word first.  */
2888 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
2889 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
2891 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
/* REG<-CONST_DOUBLE: split the constant into two SImode immediates,
   ordering low/high by WORDS_BIG_ENDIAN, then emit two immediate moves.  */
2893 else if (code1
== CONST_DOUBLE
)
2895 if (GET_MODE (operands
[1]) == DFmode
)
2898 union real_extract u
;
2900 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
2902 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
2903 otherops
[1] = GEN_INT(l
[1]);
2904 operands
[1] = GEN_INT(l
[0]);
2906 else if (GET_MODE (operands
[1]) != VOIDmode
)
2908 else if (WORDS_BIG_ENDIAN
)
2911 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
2912 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
2917 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
2918 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
2920 output_mov_immediate (operands
);
2921 output_mov_immediate (otherops
);
/* REG<-CONST_INT: the other word is the sign extension (all-ones or
   zero) of the 32-bit value.  */
2923 else if (code1
== CONST_INT
)
2925 /* sign extend the intval into the high-order word */
2926 if (WORDS_BIG_ENDIAN
)
2928 otherops
[1] = operands
[1];
2929 operands
[1] = (INTVAL (operands
[1]) < 0
2930 ? constm1_rtx
: const0_rtx
);
2933 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
2934 output_mov_immediate (otherops
);
2935 output_mov_immediate (operands
);
/* REG<-MEM: dispatch on the address form.  The case labels were lost in
   extraction; presumably REG / pre-dec / post-inc / LABEL_REF / PLUS,
   as in the historical arm.c -- TODO confirm against a full copy.  */
2937 else if (code1
== MEM
)
2939 switch (GET_CODE (XEXP (operands
[1], 0)))
2942 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
2946 abort (); /* Should never happen now */
2950 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
2954 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
2958 abort (); /* Should never happen now */
2963 output_asm_insn ("adr%?\t%0, %1", operands
);
2964 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
2968 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
2970 otherops
[0] = operands
[0];
2971 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
2972 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
2973 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
2975 if (GET_CODE (otherops
[2]) == CONST_INT
)
/* Small fixed offsets (-8/-4/+4, to judge from the ldm modes) can use a
   single ldm with db/da/ib addressing; otherwise materialise the
   address into %0 first, then ldmia from it.  */
2977 switch (INTVAL (otherops
[2]))
2980 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
2983 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
2986 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
2989 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
2990 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
2992 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
2995 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
2998 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
2999 return "ldm%?ia\t%0, %M0";
/* Fallback: two single-word ldrs via an offsettable address.  */
3003 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
3004 /* Take care of overlapping base/data reg. */
3005 if (reg_mentioned_p (operands
[0], operands
[1]))
3007 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3008 output_asm_insn ("ldr%?\t%0, %1", operands
);
3012 output_asm_insn ("ldr%?\t%0, %1", operands
);
3013 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3019 abort(); /* Constraints should prevent this */
/* MEM<-REG: the mirror-image store cases.  */
3021 else if (code0
== MEM
&& code1
== REG
)
3023 if (REGNO (operands
[1]) == 12)
3026 switch (GET_CODE (XEXP (operands
[0], 0)))
3029 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
3033 abort (); /* Should never happen now */
3037 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
3041 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
3045 abort (); /* Should never happen now */
3049 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
3051 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
3054 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
3058 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
3062 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
/* Fallback: two single-word strs, high word at base+4.  */
3069 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
3070 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
3071 output_asm_insn ("str%?\t%1, %0", operands
);
3072 output_asm_insn ("str%?\t%1, %0", otherops
);
3076 abort(); /* Constraints should prevent this */
3082 /* Output an arbitrary MOV reg, #n.
3083 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
/* Strategy: one MOV if the constant is encodable as an ARM immediate;
   else one MVN of the complement; else build it from multiple 8-bit
   chunks via output_multi_immediate (MOV+ORRs, or MVN+BICs when more
   than 16 bits are set, which needs fewer chunks).  */
3086 output_mov_immediate (operands
)
3089 HOST_WIDE_INT n
= INTVAL (operands
[1]);
3093 /* Try to use one MOV */
3094 if (const_ok_for_arm (n
))
3096 output_asm_insn ("mov%?\t%0, %1", operands
);
3100 /* Try to use one MVN */
3101 if (const_ok_for_arm (~n
))
3103 operands
[1] = GEN_INT (~n
);
3104 output_asm_insn ("mvn%?\t%0, %1", operands
);
3108 /* If all else fails, make it out of ORRs or BICs as appropriate. */
/* Count the set bits (loop body elided by extraction; presumably
   accumulates n_ones).  */
3110 for (i
=0; i
< 32; i
++)
3114 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
3115 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
3118 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
3125 /* Output an ADD r, s, #n where n may be too big for one instruction. If
3126 adding zero to one register, output nothing. */
/* Negative n is emitted as SUBs of -n; positive as ADDs.  The immediate
   lives in OPERANDS[2] (immed_op index 2 below).  The condition and the
   negation of n are partly elided by extraction.  */
3129 output_add_immediate (operands
)
3132 HOST_WIDE_INT n
= INTVAL (operands
[2]);
/* Nothing to do for "add rX, rX, #0".  */
3134 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
3137 output_multi_immediate (operands
,
3138 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
3141 output_multi_immediate (operands
,
3142 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
3149 /* Output a multiple immediate operation.
3150 OPERANDS is the vector of operands referred to in the output patterns.
3151 INSTR1 is the output pattern to use for the first constant.
3152 INSTR2 is the output pattern to use for subsequent constants.
3153 IMMED_OP is the index of the constant slot in OPERANDS.
3154 N is the constant value. */
/* Emits INSTR1 once, then INSTR2 for each further 8-bit chunk of N.
   Chunks are taken at even bit positions (i += 2) to match the ARM
   rotated-immediate encoding.  The switch from instr1 to instr2 is in
   lines elided by extraction.  */
3157 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
3159 char *instr1
, *instr2
;
3163 #if HOST_BITS_PER_WIDE_INT > 32
/* N == 0 special case: one instruction with a zero immediate.  */
3169 operands
[immed_op
] = const0_rtx
;
3170 output_asm_insn (instr1
, operands
); /* Quick and easy output */
3175 char *instr
= instr1
;
3177 /* Note that n is never zero here (which would give no output) */
3178 for (i
= 0; i
< 32; i
+= 2)
3182 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
3183 output_asm_insn (instr
, operands
);
3193 /* Return the appropriate ARM instruction for the operation code.
3194 The returned result should not be overwritten. OP is the rtx of the
3195 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
/* Maps an rtx arithmetic code to its ARM mnemonic.  Only the MINUS arm
   survives extraction: rsb (reverse subtract) when the shifted operand
   is the first argument, sub otherwise.  */
3199 arithmetic_instr (op
, shift_first_arg
)
3201 int shift_first_arg
;
3203 switch (GET_CODE (op
))
3209 return shift_first_arg
? "rsb" : "sub";
3226 /* Ensure valid constant shifts and return the appropriate shift mnemonic
3227 for the operation code. The returned result should not be overwritten.
3228 OP is the rtx code of the shift.
3229 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
/* Returns the shift mnemonic string and writes the shift amount through
   AMOUNTP.  Much of the per-code dispatch is elided by extraction; the
   surviving lines cover amount classification and range clamping.  */
3233 shift_op (op
, amountp
)
3235 HOST_WIDE_INT
*amountp
;
3238 enum rtx_code code
= GET_CODE (op
);
/* Register (or subreg) shift amount => *amountp stays -1 per contract.  */
3240 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
3242 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
3243 *amountp
= INTVAL (XEXP (op
, 1));
3266 /* We never have to worry about the amount being other than a
3267 power of 2, since this case can never be reloaded from a reg. */
/* MULT-by-power-of-two recognized as a shift: convert the multiplier to
   its log2.  */
3269 *amountp
= int_log2 (*amountp
);
3280 /* This is not 100% correct, but follows from the desire to merge
3281 multiplication by a power of 2 with the recognizer for a
3282 shift. >=32 is not a valid shift for "asl", so we must try and
3283 output a shift that produces the correct arithmetical result.
3284 Using lsr #32 is identical except for the fact that the carry bit
3285 is not set correctly if we set the flags; but we never use the
3286 carry bit from such an operation, so we can ignore that. */
3287 if (code
== ROTATERT
)
3288 *amountp
&= 31; /* Rotate is just modulo 32 */
3289 else if (*amountp
!= (*amountp
& 31))
3296 /* Shifts of 0 are no-ops. */
3305 /* Obtain the shift from the POWER of two. */
/* int_log2 (see prototype near top of file): returns the bit index of
   the single set bit in POWER by scanning upward.  The function-name
   line and loop body were elided by extraction; note the loop never
   terminates if POWER is 0 or not a power of two -- callers guarantee
   a power of two (see shift_op).  */
3309 HOST_WIDE_INT power
;
3311 HOST_WIDE_INT shift
= 0;
3313 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
3323 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
3324 /bin/as is horribly restrictive. */
/* Emits LEN bytes of P as one or more .ascii directives on STREAM,
   breaking the string roughly every 50 output characters, escaping
   quote/backslash and non-printing bytes, and reporting the emitted
   size to arm_increase_location so the text-location tracking stays
   consistent.  */
3327 output_ascii_pseudo_op (stream
, p
, len
)
/* len_so_far starts over the 50-char limit so the first iteration
   always opens a fresh .ascii directive.  */
3333 int len_so_far
= 1000;
3334 int chars_so_far
= 0;
3336 for (i
= 0; i
< len
; i
++)
3338 register int c
= p
[i
];
3340 if (len_so_far
> 50)
3343 fputs ("\"\n", stream
);
3344 fputs ("\t.ascii\t\"", stream
);
3346 arm_increase_location (chars_so_far
);
/* Quote and backslash need escaping; the escape output itself is in
   lines elided by extraction.  */
3350 if (c
== '\"' || c
== '\\')
/* Printable ASCII emitted literally; everything else as \ooo octal.  */
3356 if (c
>= ' ' && c
< 0177)
3363 fprintf (stream
, "\\%03o", c
);
3370 fputs ("\"\n", stream
);
3371 arm_increase_location (chars_so_far
);
3375 /* Try to determine whether a pattern really clobbers the link register.
3376 This information is useful when peepholing, so that lr need not be pushed
3377 if we combine a call followed by a return.
3378 NOTE: This code does not check for side-effect expressions in a SET_SRC:
3379 such a check should not be needed because these only update an existing
3380 value within a register; the register must still be set elsewhere within
/* Recursively inspects pattern X; the tests all compare against hard
   register 14, which is lr on ARM.  SETs (directly, through a SUBREG,
   or auto-modified MEM address), PARALLEL members, and CLOBBERs are
   examined; case labels between the switches were elided by
   extraction.  */
3384 pattern_really_clobbers_lr (x
)
3389 switch (GET_CODE (x
))
3392 switch (GET_CODE (SET_DEST (x
)))
3395 return REGNO (SET_DEST (x
)) == 14;
3398 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
3399 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
3401 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
/* PARALLEL: clobbers lr if any member does.  */
3410 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
3411 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
3416 switch (GET_CODE (XEXP (x
, 0)))
3419 return REGNO (XEXP (x
, 0)) == 14;
3422 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
3423 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
/* Scan the whole insn chain starting at FIRST and decide whether lr is
   genuinely clobbered anywhere, i.e. whether the prologue must save it.
   Ordinary insns are checked with pattern_really_clobbers_lr; CALL_INSNs
   get special treatment (a call followed immediately by a return --
   optionally with an intervening USE of the call's result -- can be
   peepholed so lr survives).  Several case labels and early returns
   were elided by extraction.  */
3439 function_really_clobbers_lr (first
)
3444 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
3446 switch (GET_CODE (insn
))
3451 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
3456 if (pattern_really_clobbers_lr (PATTERN (insn
)))
3461 /* Don't yet know how to handle those calls that are not to a
/* Only PARALLEL-form call patterns are analysable here; anything else
   is conservatively treated as clobbering lr (exact fallout elided).  */
3463 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
3466 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
3469 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
3475 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
3481 default: /* Don't recognize it, be safe */
3485 /* A call can be made (by peepholing) not to clobber lr iff it is
3486 followed by a return. There may, however, be a use insn iff
3487 we are returning the result of the call.
3488 If we run off the end of the insn chain, then that means the
3489 call was at the end of the function. Unfortunately we don't
3490 have a return insn for the peephole to recognize, so we
3491 must reject this. (Can this be fixed by adding our own insn?) */
3492 if ((next
= next_nonnote_insn (insn
)) == NULL
)
/* Skip a USE of the call's result register, if present.  */
3495 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
3496 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
3497 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
3498 == REGNO (XEXP (PATTERN (next
), 0))))
3499 if ((next
= next_nonnote_insn (next
)) == NULL
)
3502 if (GET_CODE (next
) == JUMP_INSN
3503 && GET_CODE (PATTERN (next
)) == RETURN
)
3512 /* We have reached the end of the chain so lr was _not_ clobbered */
/* Emit the assembler for a function return.  OPERAND carries the
   condition for %d0 substitution; REALLY_RETURN is false for a tail
   call, in which case lr (r14) is loaded instead of pc (r15).  Builds
   an ldm instruction string by strcat when registers must be restored,
   otherwise a plain mov pc, lr.  Volatile (non-returning) functions
   get a trapping bl abort instead.  Several conditions/braces were
   elided by extraction; reg_names[11]/[13]/[14]/[15] are fp, sp, lr,
   pc respectively.  */
3517 output_return_instruction (operand
, really_return
)
3522 int reg
, live_regs
= 0;
3523 int volatile_func
= (optimize
> 0
3524 && TREE_THIS_VOLATILE (current_function_decl
));
/* Tell output_func_epilogue a return has already been emitted.  */
3526 return_used_this_function
= 1;
3531 /* If this function was declared non-returning, and we have found a tail
3532 call, then we have to trust that the called function won't return. */
3533 if (! really_return
)
3536 /* Otherwise, trap an attempted return by aborting. */
3538 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
3539 assemble_external_libcall (ops
[1]);
3540 output_asm_insn ("bl%d0\t%a1", ops
);
3544 if (current_function_calls_alloca
&& ! really_return
)
/* Count call-saved registers r0-r10 that are live.  */
3547 for (reg
= 0; reg
<= 10; reg
++)
3548 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3551 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
3554 if (frame_pointer_needed
)
3559 if (lr_save_eliminated
|| ! regs_ever_live
[14])
/* ldm..ea from fp when a frame pointer exists, else ldm..fd popping sp.  */
3562 if (frame_pointer_needed
)
3563 strcpy (instr
, "ldm%?%d0ea\t%|fp, {");
3565 strcpy (instr
, "ldm%?%d0fd\t%|sp!, {");
3567 for (reg
= 0; reg
<= 10; reg
++)
3568 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3570 strcat (instr
, "%|");
3571 strcat (instr
, reg_names
[reg
]);
3573 strcat (instr
, ", ");
3576 if (frame_pointer_needed
)
3578 strcat (instr
, "%|");
3579 strcat (instr
, reg_names
[11]);
3580 strcat (instr
, ", ");
3581 strcat (instr
, "%|");
3582 strcat (instr
, reg_names
[13]);
3583 strcat (instr
, ", ");
3584 strcat (instr
, "%|");
/* Load pc to return, or lr when this is a tail call.  */
3585 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
3589 strcat (instr
, "%|");
3590 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
/* "}^" restores CPSR from SPSR on 26-bit (non-APCS-32) returns.  */
3592 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
3593 output_asm_insn (instr
, &operand
);
3595 else if (really_return
)
3597 strcpy (instr
, (TARGET_APCS_32
3598 ? "mov%?%d0\t%|pc, %|lr" : "mov%?%d0s\t%|pc, %|lr"));
3599 output_asm_insn (instr
, &operand
);
3605 /* Return nonzero if optimizing and the current function is volatile.
3606 Such functions never return, and many memory cycles can be saved
3607 by not storing register values that will never be needed again.
3608 This optimization was added to speed up context switching in a
3609 kernel application. */
/* Same test as the local volatile_func computations in the prologue and
   epilogue routines; keep them in sync.  */
3612 arm_volatile_func ()
3614 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
3617 /* Return the size of the prologue. It's not too bad if we slightly
/* 12 bytes when profiling (the mcount call sequence), else 0.  Used by
   output_func_epilogue when updating arm_text_location.  */
3621 get_prologue_size ()
3623 return profile_flag
? 12 : 0;
3626 /* The amount of stack adjustment that happens here, in output_return and in
3627 output_epilogue must be exactly the same as was calculated during reload,
3628 or things will point to the wrong place. The only time we can safely
3629 ignore this constraint is when a function has no arguments on the stack,
3630 no stack frame requirement and no live registers execpt for `lr'. If we
3631 can guarantee that by making all function calls into tail calls and that
3632 lr is not clobbered in any other way, then there is no need to push lr
/* Prologue text output: resets per-function state, prints diagnostic
   comments about frame layout to the asm file F, computes the mask of
   registers the prologue saves, and decides whether the lr save can be
   eliminated.  Mask bits: 1<<reg for r0-r10; 0xD800 adds fp(r11),
   ip(r12), lr(r14), pc(r15) for frame-pointer functions; 0x4000 is lr
   alone.  Some conditions (e.g. the DImode/r3 check around line 3690)
   were elided by extraction.  */
3636 output_func_prologue (f
, frame_size
)
3640 int reg
, live_regs_mask
= 0;
3642 int volatile_func
= (optimize
> 0
3643 && TREE_THIS_VOLATILE (current_function_decl
));
3645 /* Nonzero if we must stuff some register arguments onto the stack as if
3646 they were passed there. */
3647 int store_arg_regs
= 0;
3649 if (arm_ccfsm_state
|| arm_target_insn
)
3650 abort (); /* Sanity check */
3652 return_used_this_function
= 0;
3653 lr_save_eliminated
= 0;
3655 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
3656 ASM_COMMENT_START
, current_function_args_size
,
3657 current_function_pretend_args_size
, frame_size
);
3658 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
3659 ASM_COMMENT_START
, frame_pointer_needed
,
3660 current_function_anonymous_args
);
3663 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
3665 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
3668 for (reg
= 0; reg
<= 10; reg
++)
3669 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3670 live_regs_mask
|= (1 << reg
);
3672 if (frame_pointer_needed
)
3673 live_regs_mask
|= 0xD800;
3674 else if (regs_ever_live
[14])
/* lr save can be skipped only when no args live on the stack and no
   insn truly clobbers lr.  */
3676 if (! current_function_args_size
3677 && ! function_really_clobbers_lr (get_insns ()))
3678 lr_save_eliminated
= 1;
3680 live_regs_mask
|= 0x4000;
3685 /* if a di mode load/store multiple is used, and the base register
3686 is r3, then r4 can become an ever live register without lr
3687 doing so, in this case we need to push lr as well, or we
3688 will fail to get a proper return. */
3690 live_regs_mask
|= 0x4000;
3691 lr_save_eliminated
= 0;
3695 if (lr_save_eliminated
)
3696 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
/* Epilogue text output: restores FPU registers (ldfe), pops the integer
   register mask saved by the prologue, unwinds any pretend-args
   adjustment, and returns via ldm ..., pc or mov pc, lr.  The mask
   arithmetic mirrors output_func_prologue: 0xA800 = fp(r11), sp(r13),
   pc(r15); 0x8000 = pc; 0x4000 = lr.  Volatile functions emit a
   trapping bl abort instead of a return.  Some conditions and braces
   were elided by extraction.  */
3702 output_func_epilogue (f
, frame_size
)
3706 int reg
, live_regs_mask
= 0, code_size
= 0;
3707 /* If we need this then it will always be at lesat this much */
3708 int floats_offset
= 24;
3710 int volatile_func
= (optimize
> 0
3711 && TREE_THIS_VOLATILE (current_function_decl
));
/* If output_return_instruction already emitted the return, skip most of
   this (exact early-exit elided).  */
3713 if (use_return_insn() && return_used_this_function
)
3715 if (frame_size
&& !(frame_pointer_needed
|| TARGET_APCS
))
3722 /* A volatile function should never return. Call abort. */
3725 rtx op
= gen_rtx (SYMBOL_REF
, Pmode
, "abort");
3726 assemble_external_libcall (op
);
3727 output_asm_insn ("bl\t%a0", &op
);
3732 for (reg
= 0; reg
<= 10; reg
++)
3733 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3735 live_regs_mask
|= (1 << reg
);
3739 if (frame_pointer_needed
)
/* Frame-pointer case: FPU regs f4-f7 (hard regs 16-23) reload from
   fixed fp-relative slots, 12 bytes each.  */
3741 for (reg
= 23; reg
> 15; reg
--)
3742 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3744 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
3745 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
3746 floats_offset
+= 12;
3750 live_regs_mask
|= 0xA800;
3751 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
3752 TARGET_APCS_32
? FALSE
: TRUE
);
3757 /* Restore stack pointer if necessary. */
3760 operands
[0] = operands
[1] = stack_pointer_rtx
;
3761 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
, frame_size
);
3762 output_add_immediate (operands
);
/* No frame pointer: pop FPU regs with post-increment addressing.  */
3765 for (reg
= 16; reg
< 24; reg
++)
3766 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3768 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
3769 reg_names
[reg
], REGISTER_PREFIX
);
3772 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
/* Pop straight into pc (mask | 0x8000) -- combined restore and return.  */
3774 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
3775 TARGET_APCS_32
? FALSE
: TRUE
);
3780 if (live_regs_mask
|| regs_ever_live
[14])
3782 live_regs_mask
|= 0x4000;
3783 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
3786 if (current_function_pretend_args_size
)
3788 operands
[0] = operands
[1] = stack_pointer_rtx
;
3789 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
,
3790 current_function_pretend_args_size
);
3791 output_add_immediate (operands
);
/* NOTE(review): this fprintf passes a trailing `f' beyond the format's
   two %s conversions -- harmless extra vararg, but looks like a paste
   error; confirm against a pristine copy of the file.  */
3793 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
3794 : "\tmovs\t%spc, %slr\n"),
3795 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
3802 /* insn_addresses isn't allocated when not optimizing */
3805 arm_increase_location (code_size
3806 + insn_addresses
[INSN_UID (get_last_insn ())]
3807 + get_prologue_size ());
3809 current_function_anonymous_args
= 0;
/* Build and emit a PARALLEL rtx representing "stmfd sp!, {regs}" for
   the registers named in MASK (bit i => ri).  The first element is a
   SET of a pre-decremented stack MEM from an UNSPEC wrapping the first
   register; the remaining elements are USEs of the other registers.
   Lines elided by extraction include the num_regs accumulation and the
   final emit.  */
3813 emit_multi_reg_push (mask
)
3820 for (i
= 0; i
< 16; i
++)
3821 if (mask
& (1 << i
))
/* Sanity: an empty or impossible mask is a compiler bug.  */
3824 if (num_regs
== 0 || num_regs
> 16)
3827 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (num_regs
));
3829 for (i
= 0; i
< 16; i
++)
3831 if (mask
& (1 << i
))
3834 = gen_rtx (SET
, VOIDmode
, gen_rtx (MEM
, BLKmode
,
3835 gen_rtx (PRE_DEC
, BLKmode
,
3836 stack_pointer_rtx
)),
3837 gen_rtx (UNSPEC
, BLKmode
,
3838 gen_rtvec (1, gen_rtx (REG
, SImode
, i
)),
/* Remaining registers appear as USEs; i resumes after the first hit.  */
3844 for (j
= 1, i
++; j
< num_regs
; i
++)
3846 if (mask
& (1 << i
))
3849 = gen_rtx (USE
, VOIDmode
, gen_rtx (REG
, SImode
, i
));
/* RTL prologue expansion (as opposed to the textual output_func_prologue):
   emits insns to save ip, push pretend args, push the live-register
   mask (including lr, 0x4000), spill FPU regs f4-f7 via XFmode
   pre-decrement stores, establish the frame pointer, and drop sp by the
   frame size.  Mask constants match output_func_prologue (0xD800 =
   fp/ip/lr/pc).  Some conditions and intermediate lines were elided by
   extraction.  */
3857 arm_expand_prologue ()
3860 rtx amount
= GEN_INT (- get_frame_size ());
3863 int live_regs_mask
= 0;
3864 int store_arg_regs
= 0;
3865 int volatile_func
= (optimize
> 0
3866 && TREE_THIS_VOLATILE (current_function_decl
));
3868 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
/* Volatile (never-returning) functions skip all register saving.  */
3871 if (! volatile_func
)
3872 for (reg
= 0; reg
<= 10; reg
++)
3873 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3874 live_regs_mask
|= 1 << reg
;
3876 if (! volatile_func
&& regs_ever_live
[14])
3877 live_regs_mask
|= 0x4000;
3879 if (frame_pointer_needed
)
3881 live_regs_mask
|= 0xD800;
/* ip (r12) snapshots the incoming sp; the frame pointer is derived
   from it below.  */
3882 emit_insn (gen_movsi (gen_rtx (REG
, SImode
, 12),
3883 stack_pointer_rtx
));
3886 if (current_function_pretend_args_size
)
/* Push the tail of r0-r3 that must look like stack-passed args.  */
3889 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
3892 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
3893 GEN_INT (-current_function_pretend_args_size
)));
3898 /* If we have to push any regs, then we must push lr as well, or
3899 we won't get a proper return. */
3900 live_regs_mask
|= 0x4000;
3901 emit_multi_reg_push (live_regs_mask
);
3904 /* For now the integer regs are still pushed in output_func_epilogue (). */
3906 if (! volatile_func
)
3907 for (reg
= 23; reg
> 15; reg
--)
3908 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
3909 emit_insn (gen_rtx (SET
, VOIDmode
,
3910 gen_rtx (MEM
, XFmode
,
3911 gen_rtx (PRE_DEC
, XFmode
,
3912 stack_pointer_rtx
)),
3913 gen_rtx (REG
, XFmode
, reg
)));
3915 if (frame_pointer_needed
)
3916 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx (REG
, SImode
, 12),
3918 (-(4 + current_function_pretend_args_size
)))));
3920 if (amount
!= const0_rtx
)
3922 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
/* Clobber of (mem sp) stops the scheduler moving frame accesses above
   the stack adjustment.  */
3923 emit_insn (gen_rtx (CLOBBER
, VOIDmode
,
3924 gen_rtx (MEM
, BLKmode
, stack_pointer_rtx
)));
3927 /* If we are profiling, make sure no instructions are scheduled before
3928 the call to mcount. */
3929 if (profile_flag
|| profile_block_flag
)
3930 emit_insn (gen_blockage ());
3934 /* If CODE is 'd', then the X is a condition operand and the instruction
3935 should only be executed if the condition is true.
3936 if CODE is 'D', then the X is a condition operand and the instruction
3937 should only be executed if the condition is false: however, if the mode
3938 of the comparison is CCFPEmode, then always execute the instruction -- we
3939 do this because in these circumstances !GE does not necessarily imply LT;
3940 in these cases the instruction pattern will take care to make sure that
3941 an instruction containing %d will follow, thereby undoing the effects of
3942 doing this instruction unconditionally.
3943 If CODE is 'N' then X is a floating point operand that must be negated
3945 If CODE is 'B' then output a bitwise inverted value of X (a const int).
3946 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
/* Operand-print hook for ARM asm templates.  The case labels of the
   big switch on CODE were elided by extraction; the surviving arms are
   annotated below with the %-letter they presumably implement (match
   each against the comment above before trusting it).  */
3949 arm_print_operand (stream
, x
, code
)
3957 fputs (ASM_COMMENT_START
, stream
);
3961 fputs (REGISTER_PREFIX
, stream
);
/* '?' conditionalisation: print current condition while the ccfsm is
   in a conditionalised region (states 3/4).  */
3965 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
3966 fputs (arm_condition_codes
[arm_current_cc
], stream
);
/* 'N': negated FP constant.  */
3972 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3973 r
= REAL_VALUE_NEGATE (r
);
3974 fprintf (stream
, "%s", fp_const_from_val (&r
));
/* 'B': bitwise inverse of a const_int; else plain address constant.  */
3979 if (GET_CODE (x
) == CONST_INT
)
3981 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
3986 ARM_SIGN_EXTEND (~ INTVAL (x
)));
3990 output_addr_const (stream
, x
);
3995 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
3999 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
/* Shift operand: mnemonic plus amount (val handling partly elided).  */
4005 char *shift
= shift_op (x
, &val
);
4009 fprintf (stream
, ", %s ", shift_op (x
, &val
));
4011 arm_print_operand (stream
, XEXP (x
, 1), 0);
4014 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
/* 'Q'/'R': least/most significant register of a pair, per endianness.  */
4027 fputs (REGISTER_PREFIX
, stream
);
4028 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
4034 fputs (REGISTER_PREFIX
, stream
);
4035 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
/* 'm': base register of a MEM address (plain or auto-modified).  */
4039 fputs (REGISTER_PREFIX
, stream
);
4040 if (GET_CODE (XEXP (x
, 0)) == REG
)
4041 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
4043 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
/* 'M': ldm/stm register range {rN-rM} sized from the operand's mode.  */
4047 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
4048 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
4049 + ((GET_MODE_SIZE (GET_MODE (x
))
4050 + GET_MODE_SIZE (SImode
) - 1)
4051 / GET_MODE_SIZE (SImode
))]);
/* 'd': condition string; 'D': inverted condition, guarded against
   CCFPEmode where inversion is unsound (see header comment).  */
4056 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
4061 if (x
&& (flag_fast_math
4062 || GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
4063 || (GET_MODE (XEXP (x
, 0)) != CCFPEmode
4064 && (GET_MODE_CLASS (GET_MODE (XEXP (x
, 0)))
4066 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
4067 (get_arm_condition_code (x
))],
/* Default: registers, memory references, FP constants, then '#'-prefixed
   immediates/addresses.  */
4075 if (GET_CODE (x
) == REG
)
4077 fputs (REGISTER_PREFIX
, stream
);
4078 fputs (reg_names
[REGNO (x
)], stream
);
4080 else if (GET_CODE (x
) == MEM
)
4082 output_memory_reference_mode
= GET_MODE (x
);
4083 output_address (XEXP (x
, 0));
4085 else if (GET_CODE (x
) == CONST_DOUBLE
)
4086 fprintf (stream
, "#%s", fp_immediate_constant (x
));
4087 else if (GET_CODE (x
) == NEG
)
4088 abort (); /* This should never happen now. */
4091 fputc ('#', stream
);
4092 output_addr_const (stream
, x
);
4097 /* Increase the `arm_text_location' by AMOUNT if we're in the text
/* No-op outside the text section; callers (ascii output, epilogue) pass
   byte counts of emitted code/data.  */
4101 arm_increase_location (amount
)
4104 if (in_text_section ())
4105 arm_text_location
+= amount
;
4109 /* Output a label definition. If this label is within the .text segment, it
4110 is stored in OFFSET_TABLE, to be used when building `llc' instructions.
4111 Maybe GCC remembers names not starting with a `*' for a long time, but this
4112 is a minority anyway, so we just make a copy. Do not store the leading `*'
4113 if the name starts with one. */
/* Emits the label, then (text section only) records a copy of its
   user-visible name with the current arm_text_location in the
   offset_table hash chain.  The '*'-prefix test and the hash
   accumulation body were elided by extraction.  Memory note: the
   xmalloc'd name and node are intentionally never freed -- they live
   for the whole compilation.  */
4116 arm_asm_output_label (stream
, name
)
4120 char *real_name
, *s
;
4121 struct label_offset
*cur
;
4124 ARM_OUTPUT_LABEL (stream
, name
);
4125 if (! in_text_section ())
/* Name starting with '*': store without the leading star.  */
4130 real_name
= xmalloc (1 + strlen (&name
[1]));
4131 strcpy (real_name
, &name
[1]);
/* Otherwise prepend USER_LABEL_PREFIX.  */
4135 real_name
= xmalloc (2 + strlen (name
));
4136 strcpy (real_name
, USER_LABEL_PREFIX
);
4137 strcat (real_name
, name
);
4139 for (s
= real_name
; *s
; s
++)
4142 hash
= hash
% LABEL_HASH_SIZE
;
4143 cur
= (struct label_offset
*) xmalloc (sizeof (struct label_offset
));
4144 cur
->name
= real_name
;
4145 cur
->offset
= arm_text_location
;
4146 cur
->cdr
= offset_table
[hash
];
4147 offset_table
[hash
] = cur
;
4150 /* Output code resembling an .lcomm directive. /bin/as doesn't have this
4151 directive hence this hack, which works by reserving some `.space' in the
4152 bss segment directly.
4154 XXX This is a severe hack, which is guaranteed NOT to work since it doesn't
4155 define STATIC COMMON space but merely STATIC BSS space. */
/* Switches to .bss, reserves ROUNDED bytes under NAME, then switches
   back to whichever of .text/.data we were in.  SIZE is accepted but
   unused in the visible lines -- presumably kept for the macro
   interface; confirm against config headers.  */
4158 output_lcomm_directive (stream
, name
, size
, rounded
)
4163 fprintf (stream
, "\n\t.bss\t%s .lcomm\n", ASM_COMMENT_START
);
4164 assemble_name (stream
, name
);
4165 fprintf (stream
, ":\t.space\t%d\n", rounded
);
4166 if (in_text_section ())
4167 fputs ("\n\t.text\n", stream
);
4169 fputs ("\n\t.data\n", stream
);
4172 /* A finite state machine takes care of noticing whether or not instructions
4173 can be conditionally executed, and thus decrease execution time and code
4174 size by deleting branch instructions. The fsm is controlled by
4175 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
4177 /* The state of the fsm controlling condition codes are:
4178 0: normal, do nothing special
4179 1: make ASM_OUTPUT_OPCODE not output this instruction
4180 2: make ASM_OUTPUT_OPCODE not output this instruction
4181 3: make instructions conditional
4182 4: make instructions conditional
4184 State transitions (state->state by whom under condition):
4185 0 -> 1 final_prescan_insn if the `target' is a label
4186 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
4187 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
4188 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
4189 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
4190 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
4191 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
4192 (the target insn is arm_target_insn).
4194 If the jump clobbers the conditions then we use states 2 and 4.
4196 A similar thing can be done with conditional return insns.
4198 XXX In case the `target' is an unconditional branch, this conditionalising
4199 of the instructions always reduces code size, but not always execution
4200 time. But then, I want to reduce the code size to somewhere near what
4201 /bin/cc produces. */
4203 /* Returns the index of the ARM condition code string in
4204 `arm_condition_codes'. COMPARISON should be an rtx like
4205 `(eq (...) (...))'. */
/* The returned indices match the ARM condition-field order:
   0=eq 1=ne 2=cs/hs 3=cc/lo 8=hi 9=ls 10=ge 11=lt 12=gt 13=le --
   assuming arm_condition_codes[] follows the architectural encoding
   (table not visible in this chunk; confirm).  */
4208 get_arm_condition_code (comparison
)
4211 switch (GET_CODE (comparison
))
4213 case NE
: return (1);
4214 case EQ
: return (0);
4215 case GE
: return (10);
4216 case GT
: return (12);
4217 case LE
: return (13);
4218 case LT
: return (11);
4219 case GEU
: return (2);
4220 case GTU
: return (8);
4221 case LEU
: return (9);
4222 case LTU
: return (3);
4231 final_prescan_insn (insn
, opvec
, noperands
)
4236 /* BODY will hold the body of INSN. */
4237 register rtx body
= PATTERN (insn
);
4239 /* This will be 1 if trying to repeat the trick, and things need to be
4240 reversed if it appears to fail. */
4243 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
4244 taken are clobbered, even if the rtl suggests otherwise. It also
4245 means that we have to grub around within the jump expression to find
4246 out what the conditions are when the jump isn't taken. */
4247 int jump_clobbers
= 0;
4249 /* If we start with a return insn, we only succeed if we find another one. */
4250 int seeking_return
= 0;
4252 /* START_INSN will hold the insn from where we start looking. This is the
4253 first insn after the following code_label if REVERSE is true. */
4254 rtx start_insn
= insn
;
4256 /* If in state 4, check if the target branch is reached, in order to
4257 change back to state 0. */
4258 if (arm_ccfsm_state
== 4)
4260 if (insn
== arm_target_insn
)
4262 arm_target_insn
= NULL
;
4263 arm_ccfsm_state
= 0;
4268 /* If in state 3, it is possible to repeat the trick, if this insn is an
4269 unconditional branch to a label, and immediately following this branch
4270 is the previous target label which is only used once, and the label this
4271 branch jumps to is not too far off. */
4272 if (arm_ccfsm_state
== 3)
4274 if (simplejump_p (insn
))
4276 start_insn
= next_nonnote_insn (start_insn
);
4277 if (GET_CODE (start_insn
) == BARRIER
)
4279 /* XXX Isn't this always a barrier? */
4280 start_insn
= next_nonnote_insn (start_insn
);
4282 if (GET_CODE (start_insn
) == CODE_LABEL
4283 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
4284 && LABEL_NUSES (start_insn
) == 1)
4289 else if (GET_CODE (body
) == RETURN
)
4291 start_insn
= next_nonnote_insn (start_insn
);
4292 if (GET_CODE (start_insn
) == BARRIER
)
4293 start_insn
= next_nonnote_insn (start_insn
);
4294 if (GET_CODE (start_insn
) == CODE_LABEL
4295 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
4296 && LABEL_NUSES (start_insn
) == 1)
4308 if (arm_ccfsm_state
!= 0 && !reverse
)
4310 if (GET_CODE (insn
) != JUMP_INSN
)
4313 /* This jump might be paralleled with a clobber of the condition codes
4314 the jump should always come first */
4315 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
4316 body
= XVECEXP (body
, 0, 0);
4319 /* If this is a conditional return then we don't want to know */
4320 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
4321 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
4322 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
4323 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
4328 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
4329 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
4331 int insns_skipped
= 0, fail
= FALSE
, succeed
= FALSE
;
4332 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
4333 int then_not_else
= TRUE
;
4334 rtx this_insn
= start_insn
, label
= 0;
4336 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
4338 /* The code below is wrong for these, and I haven't time to
4339 fix it now. So we just do the safe thing and return. This
4340 whole function needs re-writing anyway. */
4345 /* Register the insn jumped to. */
4348 if (!seeking_return
)
4349 label
= XEXP (SET_SRC (body
), 0);
4351 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
4352 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
4353 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
4355 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
4356 then_not_else
= FALSE
;
4358 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
4360 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
4363 then_not_else
= FALSE
;
4368 /* See how many insns this branch skips, and what kind of insns. If all
4369 insns are okay, and the label or unconditional branch to the same
4370 label is not too far away, succeed. */
4371 for (insns_skipped
= 0;
4372 !fail
&& !succeed
&& insns_skipped
< MAX_INSNS_SKIPPED
;
4377 this_insn
= next_nonnote_insn (this_insn
);
4381 scanbody
= PATTERN (this_insn
);
4383 switch (GET_CODE (this_insn
))
4386 /* Succeed if it is the target label, otherwise fail since
4387 control falls in from somewhere else. */
4388 if (this_insn
== label
)
4392 arm_ccfsm_state
= 2;
4393 this_insn
= next_nonnote_insn (this_insn
);
4396 arm_ccfsm_state
= 1;
4404 /* Succeed if the following insn is the target label.
4406 If return insns are used then the last insn in a function
4407 will be a barrier. */
4408 this_insn
= next_nonnote_insn (this_insn
);
4409 if (this_insn
&& this_insn
== label
)
4413 arm_ccfsm_state
= 2;
4414 this_insn
= next_nonnote_insn (this_insn
);
4417 arm_ccfsm_state
= 1;
4425 /* If using 32-bit addresses the cc is not preserved over
4432 /* If this is an unconditional branch to the same label, succeed.
4433 If it is to another label, do nothing. If it is conditional,
4435 /* XXX Probably, the test for the SET and the PC are unnecessary. */
4437 if (GET_CODE (scanbody
) == SET
4438 && GET_CODE (SET_DEST (scanbody
)) == PC
)
4440 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
4441 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
4443 arm_ccfsm_state
= 2;
4446 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
4449 else if (GET_CODE (scanbody
) == RETURN
4452 arm_ccfsm_state
= 2;
4455 else if (GET_CODE (scanbody
) == PARALLEL
)
4457 switch (get_attr_conds (this_insn
))
4469 /* Instructions using or affecting the condition codes make it
4471 if ((GET_CODE (scanbody
) == SET
4472 || GET_CODE (scanbody
) == PARALLEL
)
4473 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
4483 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
4484 arm_target_label
= CODE_LABEL_NUMBER (label
);
4485 else if (seeking_return
|| arm_ccfsm_state
== 2)
4487 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
4489 this_insn
= next_nonnote_insn (this_insn
);
4490 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
4491 || GET_CODE (this_insn
) == CODE_LABEL
))
4496 /* Oh, dear! we ran off the end.. give up */
4497 recog (PATTERN (insn
), insn
, NULL_PTR
);
4498 arm_ccfsm_state
= 0;
4499 arm_target_insn
= NULL
;
4502 arm_target_insn
= this_insn
;
4511 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
4513 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
4514 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
4515 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
4516 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
4520 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
4523 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
4527 if (reverse
|| then_not_else
)
4528 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
4530 /* restore recog_operand (getting the attributes of other insns can
4531 destroy this array, but final.c assumes that it remains intact
4532 across this call; since the insn has been recognized already we
4533 call recog direct). */
4534 recog (PATTERN (insn
), insn
, NULL_PTR
);
4538 #ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

/* Counter used to generate a unique AREA name for each text section
   emitted (C$$code1, C$$code2, ...).  */
int arm_text_section_count = 1;
4544 aof_text_section (in_readonly
)
4547 static char buf
[100];
4550 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
4551 arm_text_section_count
++);
4553 strcat (buf
, ", PIC, REENTRANT");
/* Counter used to generate a unique AREA name for each data section
   emitted (C$$data1, C$$data2, ...).  */
static int arm_data_section_count = 1;

/* Return the assembler directive used to switch to the data section.
   Each call generates a new, uniquely numbered DATA AREA.  */
char *
aof_data_section ()
{
  static char buf[100];

  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import *next;		/* Next symbol on the import list.  */
  char *name;			/* Symbol name; comparisons elsewhere use
				   pointer equality, so names are assumed
				   to be shared/interned strings.  */
};

/* Head of the list of symbols that still need an IMPORT directive.  */
static struct import *imports_list = NULL;
4586 aof_add_import (name
)
4591 for (new = imports_list
; new; new = new->next
)
4592 if (new->name
== name
)
4595 new = (struct import
*) xmalloc (sizeof (struct import
));
4596 new->next
= imports_list
;
4602 aof_delete_import (name
)
4605 struct import
**old
;
4607 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
4609 if ((*old
)->name
== name
)
4611 *old
= (*old
)->next
;
/* Nonzero if a `main' function has been compiled in this file; if so,
   aof_dump_imports must pull in __main so the library startup code is
   extracted.  */
int arm_main_function = 0;
4620 aof_dump_imports (f
)
4623 /* The AOF assembler needs this to cause the startup code to be extracted
4624 from the library. Brining in __main causes the whole thing to work
4626 if (arm_main_function
)
4629 fputs ("\tIMPORT __main\n", f
);
4630 fputs ("\tDCD __main\n", f
);
4633 /* Now dump the remaining imports. */
4634 while (imports_list
)
4636 fprintf (f
, "\tIMPORT\t");
4637 assemble_name (f
, imports_list
->name
);
4639 imports_list
= imports_list
->next
;
4642 #endif /* AOF_ASSEMBLER */