1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "insn-flags.h"
36 #include "insn-attr.h"
42 /* The maximum number of insns skipped which will be conditionalised if
44 #define MAX_INSNS_SKIPPED 5
46 /* Some function declarations. */
47 extern FILE *asm_out_file
;
48 extern char *output_multi_immediate ();
49 extern void arm_increase_location ();
51 HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
52 static int get_prologue_size
PROTO ((void));
53 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
54 HOST_WIDE_INT
, rtx
, rtx
, int, int));
56 /* Define the information needed to generate branch insns. This is
57 stored from the compare operation. */
59 rtx arm_compare_op0
, arm_compare_op1
;
62 /* What type of cpu are we compiling for? */
63 enum processor_type arm_cpu
;
65 /* What type of floating point are we compiling for? */
66 enum floating_point_type arm_fpu
;
68 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
69 enum prog_mode_type arm_prgmode
;
71 char *target_cpu_name
= ARM_CPU_NAME
;
72 char *target_fpe_name
= NULL
;
74 /* Nonzero if this is an "M" variant of the processor. */
75 int arm_fast_multiply
= 0;
77 /* Nonzero if this chip support the ARM Architecture 4 extensions */
80 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
81 must report the mode of the memory reference from PRINT_OPERAND to
82 PRINT_OPERAND_ADDRESS. */
83 enum machine_mode output_memory_reference_mode
;
85 /* Nonzero if the prologue must setup `fp'. */
86 int current_function_anonymous_args
;
88 /* Location counter of .text segment. */
89 int arm_text_location
= 0;
91 /* Set to one if we think that lr is only saved because of subroutine calls,
92 but all of these can be `put after' return insns */
93 int lr_save_eliminated
;
95 /* A hash table is used to store text segment labels and their associated
96 offset from the start of the text segment. */
101 struct label_offset
*cdr
;
104 #define LABEL_HASH_SIZE 257
106 static struct label_offset
*offset_table
[LABEL_HASH_SIZE
];
108 /* Set to 1 when a return insn is output, this means that the epilogue
111 static int return_used_this_function
;
113 static int arm_constant_limit
= 3;
115 /* For an explanation of these variables, see final_prescan_insn below. */
117 enum arm_cond_code arm_current_cc
;
119 int arm_target_label
;
121 /* The condition codes of the ARM, and the inverse function. */
122 char *arm_condition_codes
[] =
124 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
125 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
128 static enum arm_cond_code
get_arm_condition_code ();
131 /* Initialization code */
133 struct arm_cpu_select arm_select
[3] =
135 /* switch name, tune arch */
136 { (char *)0, "--with-cpu=", 1, 1 },
137 { (char *)0, "-mcpu=", 1, 1 },
138 { (char *)0, "-mtune=", 1, 0 },
141 #define FL_CO_PROC 0x01 /* Has external co-processor bus */
142 #define FL_FAST_MULT 0x02 /* Fast multiply */
143 #define FL_MODE26 0x04 /* 26-bit mode support */
144 #define FL_MODE32 0x08 /* 32-bit mode support */
145 #define FL_ARCH4 0x10 /* Architecture rel 4 */
146 #define FL_THUMB 0x20 /* Thumb aware */
150 enum processor_type type
;
154 /* Not all of these give usefully different compilation alternatives,
155 but there is no simple way of generalizing them. */
156 static struct processors all_procs
[] =
158 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
159 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
160 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
161 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
162 {"arm60", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
163 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
164 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
165 {"arm620", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
166 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
167 {"arm70", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
168 {"arm7d", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
169 {"arm7di", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
170 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
172 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
174 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
175 {"arm700i", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
176 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
177 {"arm710c", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
178 {"arm7100", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
179 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
180 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
181 | FL_ARCH4
| FL_THUMB
)},
185 /* Fix up any incompatible options that the user has specified.
186 This has now turned into a maze. */
188 arm_override_options ()
190 int arm_thumb_aware
= 0;
193 struct arm_cpu_select
*ptr
;
195 arm_cpu
= PROCESSOR_DEFAULT
;
196 arm_select
[0].string
= TARGET_CPU_DEFAULT
;
198 for (i
= 0; i
< sizeof (arm_select
) / sizeof (arm_select
[0]); i
++)
200 ptr
= &arm_select
[i
];
201 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
203 struct processors
*sel
;
205 for (sel
= all_procs
; sel
->name
!= NULL
; sel
++)
206 if (! strcmp (ptr
->string
, sel
->name
))
216 if (sel
->name
== NULL
)
217 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
221 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
222 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
224 if (TARGET_POKE_FUNCTION_NAME
)
225 target_flags
|= ARM_FLAG_APCS_FRAME
;
229 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>");
230 target_flags
|= ARM_FLAG_APCS_32
;
231 arm_cpu
= PROCESSOR_ARM6
;
236 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>");
237 target_flags
&= ~ARM_FLAG_APCS_32
;
238 arm_cpu
= PROCESSOR_ARM2
;
241 if (TARGET_APCS_REENT
&& flag_pic
)
242 fatal ("-fpic and -mapcs-reent are incompatible");
244 if (TARGET_APCS_REENT
)
245 warning ("APCS reentrant code not supported. Ignored");
248 warning ("Position independent code not supported. Ignored");
250 if (TARGET_APCS_FLOAT
)
251 warning ("Passing floating point arguments in fp regs not yet supported");
253 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
255 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
256 target_flags
|= ARM_FLAG_APCS_FRAME
;
261 /* Default value for floating point code... if no co-processor
262 bus, then schedule for emulated floating point. Otherwise,
263 assume the user has an FPA, unless overridden with -mfpe-... */
264 if (flags
& FL_CO_PROC
== 0)
268 arm_fast_multiply
= (flags
& FL_FAST_MULT
) != 0;
269 arm_arch4
= (flags
& FL_ARCH4
) != 0;
270 arm_thumb_aware
= (flags
& FL_THUMB
) != 0;
274 if (strcmp (target_fpe_name
, "2") == 0)
276 else if (strcmp (target_fpe_name
, "3") == 0)
279 fatal ("Invalid floating point emulation option: -mfpe-%s",
283 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
285 warning ("This processor variant does not support Thumb interworking");
286 target_flags
&= ~ARM_FLAG_THUMB
;
289 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
292 /* For arm2/3 there is no need to do any scheduling if there is only
293 a floating point emulator, or we are doing software floating-point. */
294 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
295 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
297 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
300 /* Return 1 if it is possible to return using a single instruction */
307 if (!reload_completed
||current_function_pretend_args_size
308 || current_function_anonymous_args
309 || (get_frame_size () && !(TARGET_APCS
|| frame_pointer_needed
)))
312 /* Can't be done if any of the FPU regs are pushed, since this also
314 for (regno
= 20; regno
< 24; regno
++)
315 if (regs_ever_live
[regno
])
318 /* If a function is naked, don't use the "return" insn. */
319 if (arm_naked_function_p (current_function_decl
))
325 /* Return TRUE if int I is a valid immediate ARM constant. */
331 unsigned HOST_WIDE_INT mask
= ~0xFF;
333 /* Fast return for 0 and powers of 2 */
334 if ((i
& (i
- 1)) == 0)
339 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
342 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
343 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
344 } while (mask
!= ~0xFF);
349 /* Return true if I is a valid constant for the operation CODE. */
351 const_ok_for_op (i
, code
, mode
)
354 enum machine_mode mode
;
356 if (const_ok_for_arm (i
))
362 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
364 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
370 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
377 /* Emit a sequence of insns to handle a large constant.
378 CODE is the code of the operation required, it can be any of SET, PLUS,
379 IOR, AND, XOR, MINUS;
380 MODE is the mode in which the operation is being performed;
381 VAL is the integer to operate on;
382 SOURCE is the other operand (a register, or a null-pointer for SET);
383 SUBTARGETS means it is safe to create scratch registers if that will
384 either produce a simpler sequence, or we will want to cse the values.
385 Return value is the number of insns emitted. */
388 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
390 enum machine_mode mode
;
396 if (subtargets
|| code
== SET
397 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
398 && REGNO (target
) != REGNO (source
)))
402 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
403 > arm_constant_limit
+ (code
!= SET
))
407 /* Currently SET is the only monadic value for CODE, all
408 the rest are diadic. */
409 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
414 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
416 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
417 /* For MINUS, the value is subtracted from, since we never
418 have subtraction of a constant. */
420 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
421 gen_rtx (code
, mode
, temp
, source
)));
423 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
424 gen_rtx (code
, mode
, source
, temp
)));
430 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
433 /* As above, but extra parameter GENERATE which, if clear, suppresses
436 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
438 enum machine_mode mode
;
448 int can_negate_initial
= 0;
451 int num_bits_set
= 0;
452 int set_sign_bit_copies
= 0;
453 int clear_sign_bit_copies
= 0;
454 int clear_zero_bit_copies
= 0;
455 int set_zero_bit_copies
= 0;
458 unsigned HOST_WIDE_INT temp1
, temp2
;
459 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
461 /* find out which operations are safe for a given CODE. Also do a quick
462 check for degenerate cases; these can occur when DImode operations
474 can_negate_initial
= 1;
478 if (remainder
== 0xffffffff)
481 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
482 GEN_INT (ARM_SIGN_EXTEND (val
))));
487 if (reload_completed
&& rtx_equal_p (target
, source
))
490 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
499 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
502 if (remainder
== 0xffffffff)
504 if (reload_completed
&& rtx_equal_p (target
, source
))
507 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
516 if (reload_completed
&& rtx_equal_p (target
, source
))
519 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
522 if (remainder
== 0xffffffff)
525 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
526 gen_rtx (NOT
, mode
, source
)));
530 /* We don't know how to handle this yet below. */
534 /* We treat MINUS as (val - source), since (source - val) is always
535 passed as (source + (-val)). */
539 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
540 gen_rtx (NEG
, mode
, source
)));
543 if (const_ok_for_arm (val
))
546 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
547 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
558 /* If we can do it in one insn get out quickly */
559 if (const_ok_for_arm (val
)
560 || (can_negate_initial
&& const_ok_for_arm (-val
))
561 || (can_invert
&& const_ok_for_arm (~val
)))
564 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
565 (source
? gen_rtx (code
, mode
, source
,
572 /* Calculate a few attributes that may be useful for specific
575 for (i
= 31; i
>= 0; i
--)
577 if ((remainder
& (1 << i
)) == 0)
578 clear_sign_bit_copies
++;
583 for (i
= 31; i
>= 0; i
--)
585 if ((remainder
& (1 << i
)) != 0)
586 set_sign_bit_copies
++;
591 for (i
= 0; i
<= 31; i
++)
593 if ((remainder
& (1 << i
)) == 0)
594 clear_zero_bit_copies
++;
599 for (i
= 0; i
<= 31; i
++)
601 if ((remainder
& (1 << i
)) != 0)
602 set_zero_bit_copies
++;
610 /* See if we can do this by sign_extending a constant that is known
611 to be negative. This is a good, way of doing it, since the shift
612 may well merge into a subsequent insn. */
613 if (set_sign_bit_copies
> 1)
616 (temp1
= ARM_SIGN_EXTEND (remainder
617 << (set_sign_bit_copies
- 1))))
621 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
622 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
624 emit_insn (gen_ashrsi3 (target
, new_src
,
625 GEN_INT (set_sign_bit_copies
- 1)));
629 /* For an inverted constant, we will need to set the low bits,
630 these will be shifted out of harm's way. */
631 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
632 if (const_ok_for_arm (~temp1
))
636 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
637 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
639 emit_insn (gen_ashrsi3 (target
, new_src
,
640 GEN_INT (set_sign_bit_copies
- 1)));
646 /* See if we can generate this by setting the bottom (or the top)
647 16 bits, and then shifting these into the other half of the
648 word. We only look for the simplest cases, to do more would cost
649 too much. Be careful, however, not to generate this when the
650 alternative would take fewer insns. */
651 if (val
& 0xffff0000)
653 temp1
= remainder
& 0xffff0000;
654 temp2
= remainder
& 0x0000ffff;
656 /* Overlaps outside this range are best done using other methods. */
657 for (i
= 9; i
< 24; i
++)
659 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
660 && ! const_ok_for_arm (temp2
))
662 insns
= arm_gen_constant (code
, mode
, temp2
,
663 new_src
= (subtargets
666 source
, subtargets
, generate
);
669 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
671 gen_rtx (ASHIFT
, mode
, source
,
678 /* Don't duplicate cases already considered. */
679 for (i
= 17; i
< 24; i
++)
681 if (((temp1
| (temp1
>> i
)) == remainder
)
682 && ! const_ok_for_arm (temp1
))
684 insns
= arm_gen_constant (code
, mode
, temp1
,
685 new_src
= (subtargets
688 source
, subtargets
, generate
);
691 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
693 gen_rtx (LSHIFTRT
, mode
,
694 source
, GEN_INT (i
)),
704 /* If we have IOR or XOR, and the constant can be loaded in a
705 single instruction, and we can find a temporary to put it in,
706 then this can be done in two instructions instead of 3-4. */
708 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
710 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
714 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
716 emit_insn (gen_rtx (SET
, VOIDmode
, sub
, GEN_INT (val
)));
717 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
718 gen_rtx (code
, mode
, source
, sub
)));
727 if (set_sign_bit_copies
> 8
728 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
732 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
733 rtx shift
= GEN_INT (set_sign_bit_copies
);
735 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
737 gen_rtx (ASHIFT
, mode
, source
,
739 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
741 gen_rtx (LSHIFTRT
, mode
, sub
,
747 if (set_zero_bit_copies
> 8
748 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
752 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
753 rtx shift
= GEN_INT (set_zero_bit_copies
);
755 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
757 gen_rtx (LSHIFTRT
, mode
, source
,
759 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
761 gen_rtx (ASHIFT
, mode
, sub
,
767 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
771 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
772 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
773 gen_rtx (NOT
, mode
, source
)));
776 sub
= gen_reg_rtx (mode
);
777 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
778 gen_rtx (AND
, mode
, source
,
780 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
781 gen_rtx (NOT
, mode
, sub
)));
788 /* See if two shifts will do 2 or more insn's worth of work. */
789 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
791 HOST_WIDE_INT shift_mask
= ((0xffffffff
792 << (32 - clear_sign_bit_copies
))
797 if ((remainder
| shift_mask
) != 0xffffffff)
801 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
802 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
803 new_source
, source
, subtargets
, 1);
807 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
808 new_source
, source
, subtargets
, 0);
813 shift
= GEN_INT (clear_sign_bit_copies
);
814 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
815 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
816 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
822 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
824 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
828 if ((remainder
| shift_mask
) != 0xffffffff)
832 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
833 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
834 new_source
, source
, subtargets
, 1);
838 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
839 new_source
, source
, subtargets
, 0);
844 shift
= GEN_INT (clear_zero_bit_copies
);
845 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
846 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
847 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
859 for (i
= 0; i
< 32; i
++)
860 if (remainder
& (1 << i
))
863 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
864 remainder
= (~remainder
) & 0xffffffff;
865 else if (code
== PLUS
&& num_bits_set
> 16)
866 remainder
= (-remainder
) & 0xffffffff;
873 /* Now try and find a way of doing the job in either two or three
875 We start by looking for the largest block of zeros that are aligned on
876 a 2-bit boundary, we then fill up the temps, wrapping around to the
877 top of the word when we drop off the bottom.
878 In the worst case this code should produce no more than four insns. */
881 int best_consecutive_zeros
= 0;
883 for (i
= 0; i
< 32; i
+= 2)
885 int consecutive_zeros
= 0;
887 if (! (remainder
& (3 << i
)))
889 while ((i
< 32) && ! (remainder
& (3 << i
)))
891 consecutive_zeros
+= 2;
894 if (consecutive_zeros
> best_consecutive_zeros
)
896 best_consecutive_zeros
= consecutive_zeros
;
897 best_start
= i
- consecutive_zeros
;
903 /* Now start emitting the insns, starting with the one with the highest
904 bit set: we do this so that the smallest number will be emitted last;
905 this is more likely to be combinable with addressing insns. */
913 if (remainder
& (3 << (i
- 2)))
918 temp1
= remainder
& ((0x0ff << end
)
919 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
925 emit_insn (gen_rtx (SET
, VOIDmode
,
926 new_src
= (subtargets
929 GEN_INT (can_invert
? ~temp1
: temp1
)));
933 else if (code
== MINUS
)
936 emit_insn (gen_rtx (SET
, VOIDmode
,
937 new_src
= (subtargets
940 gen_rtx (code
, mode
, GEN_INT (temp1
),
947 emit_insn (gen_rtx (SET
, VOIDmode
,
953 gen_rtx (code
, mode
, source
,
954 GEN_INT (can_invert
? ~temp1
970 /* Canonicalize a comparison so that we are more likely to recognize it.
971 This can be done for a few constant compares, where we can make the
972 immediate value easier to load. */
974 arm_canonicalize_comparison (code
, op1
)
978 HOST_WIDE_INT i
= INTVAL (*op1
);
988 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1) - 1)
989 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
991 *op1
= GEN_INT (i
+1);
992 return code
== GT
? GE
: LT
;
998 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1))
999 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1001 *op1
= GEN_INT (i
-1);
1002 return code
== GE
? GT
: LE
;
1009 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1011 *op1
= GEN_INT (i
+ 1);
1012 return code
== GTU
? GEU
: LTU
;
1019 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1021 *op1
= GEN_INT (i
- 1);
1022 return code
== GEU
? GTU
: LEU
;
1034 /* Handle aggregates that are not laid out in a BLKmode element.
1035 This is a sub-element of RETURN_IN_MEMORY. */
1037 arm_return_in_memory (type
)
1040 if (TREE_CODE (type
) == RECORD_TYPE
)
1044 /* For a struct, we can return in a register if every element was a
1046 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1047 if (TREE_CODE (field
) != FIELD_DECL
1048 || ! DECL_BIT_FIELD_TYPE (field
))
1053 else if (TREE_CODE (type
) == UNION_TYPE
)
1057 /* Unions can be returned in registers if every element is
1058 integral, or can be returned in an integer register. */
1059 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1061 if (TREE_CODE (field
) != FIELD_DECL
1062 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
1063 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
1064 || FLOAT_TYPE_P (TREE_TYPE (field
)))
1069 /* XXX Not sure what should be done for other aggregates, so put them in
1074 #define REG_OR_SUBREG_REG(X) \
1075 (GET_CODE (X) == REG \
1076 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
1078 #define REG_OR_SUBREG_RTX(X) \
1079 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
1081 #define ARM_FRAME_RTX(X) \
1082 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1083 || (X) == arg_pointer_rtx)
1086 arm_rtx_costs (x
, code
, outer_code
)
1088 enum rtx_code code
, outer_code
;
1090 enum machine_mode mode
= GET_MODE (x
);
1091 enum rtx_code subcode
;
1097 /* Memory costs quite a lot for the first word, but subsequent words
1098 load at the equivalent of a single insn each. */
1099 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1100 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1107 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1114 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1116 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1117 + ((GET_CODE (XEXP (x
, 0)) == REG
1118 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1119 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1121 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1122 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1123 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1125 + ((GET_CODE (XEXP (x
, 1)) == REG
1126 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1127 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1128 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1133 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1134 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1135 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1136 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1139 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1140 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1141 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1142 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1144 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1145 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1146 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1149 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1150 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1151 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1152 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1153 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1154 || subcode
== ROTATE
|| subcode
== ROTATERT
1156 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1157 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1158 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1159 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1160 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1161 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1162 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1167 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1168 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1169 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1170 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1171 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1175 case AND
: case XOR
: case IOR
:
1178 /* Normally the frame registers will be spilt into reg+const during
1179 reload, so it is a bad idea to combine them with other instructions,
1180 since then they might not be moved outside of loops. As a compromise
1181 we allow integration with ops that have a constant as their second
1183 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1184 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1185 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1186 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1187 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1191 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1192 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1193 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1194 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1197 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1198 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1199 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1200 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1201 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1204 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1205 return (1 + extra_cost
1206 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1207 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1208 || subcode
== ROTATE
|| subcode
== ROTATERT
1210 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1211 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1212 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1213 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1214 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1215 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1221 if (arm_fast_multiply
&& mode
== DImode
1222 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1223 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1224 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1227 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1231 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1233 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1234 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1235 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1237 int booth_unit_size
= (arm_fast_multiply
? 8 : 2);
1239 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1241 i
>>= booth_unit_size
;
1248 return ((arm_fast_multiply
? 8 : 30)
1249 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1250 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1253 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1254 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1258 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1260 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1263 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1271 return 4 + (mode
== DImode
? 4 : 0);
1274 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1275 return (4 + (mode
== DImode
? 4 : 0)
1276 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1279 switch (GET_MODE (XEXP (x
, 0)))
1282 return (1 + (mode
== DImode
? 4 : 0)
1283 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1286 return (4 + (mode
== DImode
? 4 : 0)
1287 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1290 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1299 /* This code has been fixed for cross compilation. */
1301 static int fpa_consts_inited
= 0;
1303 char *strings_fpa
[8] = {
1305 "4", "5", "0.5", "10"
1308 static REAL_VALUE_TYPE values_fpa
[8];
1316 for (i
= 0; i
< 8; i
++)
1318 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1322 fpa_consts_inited
= 1;
1325 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1328 const_double_rtx_ok_for_fpu (x
)
1334 if (!fpa_consts_inited
)
1337 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1338 if (REAL_VALUE_MINUS_ZERO (r
))
1341 for (i
= 0; i
< 8; i
++)
1342 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1348 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1351 neg_const_double_rtx_ok_for_fpu (x
)
1357 if (!fpa_consts_inited
)
1360 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1361 r
= REAL_VALUE_NEGATE (r
);
1362 if (REAL_VALUE_MINUS_ZERO (r
))
1365 for (i
= 0; i
< 8; i
++)
1366 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1372 /* Predicates for `match_operand' and `match_operator'. */
1374 /* s_register_operand is the same as register_operand, but it doesn't accept
1377 This function exists because at the time it was put in it led to better
1378 code. SUBREG(MEM) always needs a reload in the places where
1379 s_register_operand is used, and this seemed to lead to excessive
1383 s_register_operand (op
, mode
)
1385 enum machine_mode mode
;
1387 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1390 if (GET_CODE (op
) == SUBREG
)
1391 op
= SUBREG_REG (op
);
1393 /* We don't consider registers whose class is NO_REGS
1394 to be a register operand. */
1395 return (GET_CODE (op
) == REG
1396 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1397 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1400 /* Only accept reg, subreg(reg), const_int. */
1403 reg_or_int_operand (op
, mode
)
1405 enum machine_mode mode
;
1407 if (GET_CODE (op
) == CONST_INT
)
1410 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1413 if (GET_CODE (op
) == SUBREG
)
1414 op
= SUBREG_REG (op
);
1416 /* We don't consider registers whose class is NO_REGS
1417 to be a register operand. */
1418 return (GET_CODE (op
) == REG
1419 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1420 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1423 /* Return 1 if OP is an item in memory, given that we are in reload. */
1426 reload_memory_operand (op
, mode
)
1428 enum machine_mode mode
;
1430 int regno
= true_regnum (op
);
1432 return (! CONSTANT_P (op
)
1434 || (GET_CODE (op
) == REG
1435 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1438 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1441 arm_rhs_operand (op
, mode
)
1443 enum machine_mode mode
;
1445 return (s_register_operand (op
, mode
)
1446 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1449 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1453 arm_rhsm_operand (op
, mode
)
1455 enum machine_mode mode
;
1457 return (s_register_operand (op
, mode
)
1458 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1459 || memory_operand (op
, mode
));
1462 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1463 constant that is valid when negated. */
1466 arm_add_operand (op
, mode
)
1468 enum machine_mode mode
;
1470 return (s_register_operand (op
, mode
)
1471 || (GET_CODE (op
) == CONST_INT
1472 && (const_ok_for_arm (INTVAL (op
))
1473 || const_ok_for_arm (-INTVAL (op
)))));
1477 arm_not_operand (op
, mode
)
1479 enum machine_mode mode
;
1481 return (s_register_operand (op
, mode
)
1482 || (GET_CODE (op
) == CONST_INT
1483 && (const_ok_for_arm (INTVAL (op
))
1484 || const_ok_for_arm (~INTVAL (op
)))));
1487 /* Return TRUE if the operand is a memory reference which contains an
1488 offsettable address. */
1490 offsettable_memory_operand (op
, mode
)
1492 enum machine_mode mode
;
1494 if (mode
== VOIDmode
)
1495 mode
= GET_MODE (op
);
1497 return (mode
== GET_MODE (op
)
1498 && GET_CODE (op
) == MEM
1499 && offsettable_address_p (reload_completed
| reload_in_progress
,
1500 mode
, XEXP (op
, 0)));
1503 /* Return TRUE if the operand is a memory reference which is, or can be
1504 made word aligned by adjusting the offset. */
1506 alignable_memory_operand (op
, mode
)
1508 enum machine_mode mode
;
1512 if (mode
== VOIDmode
)
1513 mode
= GET_MODE (op
);
1515 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
1520 return ((GET_CODE (reg
= op
) == REG
1521 || (GET_CODE (op
) == SUBREG
1522 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
1523 || (GET_CODE (op
) == PLUS
1524 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1525 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
1526 || (GET_CODE (XEXP (op
, 0)) == SUBREG
1527 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
1528 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
1531 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1534 fpu_rhs_operand (op
, mode
)
1536 enum machine_mode mode
;
1538 if (s_register_operand (op
, mode
))
1540 else if (GET_CODE (op
) == CONST_DOUBLE
)
1541 return (const_double_rtx_ok_for_fpu (op
));
1547 fpu_add_operand (op
, mode
)
1549 enum machine_mode mode
;
1551 if (s_register_operand (op
, mode
))
1553 else if (GET_CODE (op
) == CONST_DOUBLE
)
1554 return (const_double_rtx_ok_for_fpu (op
)
1555 || neg_const_double_rtx_ok_for_fpu (op
));
1560 /* Return nonzero if OP is a constant power of two. */
1563 power_of_two_operand (op
, mode
)
1565 enum machine_mode mode
;
1567 if (GET_CODE (op
) == CONST_INT
)
1569 HOST_WIDE_INT value
= INTVAL(op
);
1570 return value
!= 0 && (value
& (value
- 1)) == 0;
1575 /* Return TRUE for a valid operand of a DImode operation.
1576 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1577 Note that this disallows MEM(REG+REG), but allows
1578 MEM(PRE/POST_INC/DEC(REG)). */
1581 di_operand (op
, mode
)
1583 enum machine_mode mode
;
1585 if (s_register_operand (op
, mode
))
1588 switch (GET_CODE (op
))
1595 return memory_address_p (DImode
, XEXP (op
, 0));
1602 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1603 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1604 Note that this disallows MEM(REG+REG), but allows
1605 MEM(PRE/POST_INC/DEC(REG)). */
1608 soft_df_operand (op
, mode
)
1610 enum machine_mode mode
;
1612 if (s_register_operand (op
, mode
))
1615 switch (GET_CODE (op
))
1621 return memory_address_p (DFmode
, XEXP (op
, 0));
1628 /* Return TRUE for valid index operands. */
1631 index_operand (op
, mode
)
1633 enum machine_mode mode
;
1635 return (s_register_operand(op
, mode
)
1636 || (immediate_operand (op
, mode
)
1637 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1640 /* Return TRUE for valid shifts by a constant. This also accepts any
1641 power of two on the (somewhat overly relaxed) assumption that the
1642 shift operator in this case was a mult. */
1645 const_shift_operand (op
, mode
)
1647 enum machine_mode mode
;
1649 return (power_of_two_operand (op
, mode
)
1650 || (immediate_operand (op
, mode
)
1651 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1654 /* Return TRUE for arithmetic operators which can be combined with a multiply
1658 shiftable_operator (x
, mode
)
1660 enum machine_mode mode
;
1662 if (GET_MODE (x
) != mode
)
1666 enum rtx_code code
= GET_CODE (x
);
1668 return (code
== PLUS
|| code
== MINUS
1669 || code
== IOR
|| code
== XOR
|| code
== AND
);
1673 /* Return TRUE for shift operators. */
1676 shift_operator (x
, mode
)
1678 enum machine_mode mode
;
1680 if (GET_MODE (x
) != mode
)
1684 enum rtx_code code
= GET_CODE (x
);
1687 return power_of_two_operand (XEXP (x
, 1));
1689 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
1690 || code
== ROTATERT
);
1694 int equality_operator (x
, mode
)
1696 enum machine_mode mode
;
1698 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
1701 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
1704 minmax_operator (x
, mode
)
1706 enum machine_mode mode
;
1708 enum rtx_code code
= GET_CODE (x
);
1710 if (GET_MODE (x
) != mode
)
1713 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
1716 /* return TRUE if x is EQ or NE */
1718 /* Return TRUE if this is the condition code register, if we aren't given
1719 a mode, accept any class CCmode register */
1722 cc_register (x
, mode
)
1724 enum machine_mode mode
;
1726 if (mode
== VOIDmode
)
1728 mode
= GET_MODE (x
);
1729 if (GET_MODE_CLASS (mode
) != MODE_CC
)
1733 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1739 /* Return TRUE if this is the condition code register, if we aren't given
1740 a mode, accept any class CCmode register which indicates a dominance
1744 dominant_cc_register (x
, mode
)
1746 enum machine_mode mode
;
1748 if (mode
== VOIDmode
)
1750 mode
= GET_MODE (x
);
1751 if (GET_MODE_CLASS (mode
) != MODE_CC
)
1755 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
1756 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
1757 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
1758 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
1759 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
1762 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
1768 /* Return TRUE if X references a SYMBOL_REF. */
1770 symbol_mentioned_p (x
)
1776 if (GET_CODE (x
) == SYMBOL_REF
)
1779 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1780 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1786 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1787 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
1790 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
1797 /* Return TRUE if X references a LABEL_REF. */
1799 label_mentioned_p (x
)
1805 if (GET_CODE (x
) == LABEL_REF
)
1808 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1809 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1815 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1816 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
1819 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
1830 enum rtx_code code
= GET_CODE (x
);
1834 else if (code
== SMIN
)
1836 else if (code
== UMIN
)
1838 else if (code
== UMAX
)
1844 /* Return 1 if memory locations are adjacent */
1847 adjacent_mem_locations (a
, b
)
1850 int val0
= 0, val1
= 0;
1853 if ((GET_CODE (XEXP (a
, 0)) == REG
1854 || (GET_CODE (XEXP (a
, 0)) == PLUS
1855 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
1856 && (GET_CODE (XEXP (b
, 0)) == REG
1857 || (GET_CODE (XEXP (b
, 0)) == PLUS
1858 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
1860 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
1862 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
1863 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
1866 reg0
= REGNO (XEXP (a
, 0));
1867 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
1869 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
1870 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
1873 reg1
= REGNO (XEXP (b
, 0));
1874 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
1879 /* Return 1 if OP is a load multiple operation. It is known to be
1880 parallel and the first section will be tested. */
1883 load_multiple_operation (op
, mode
)
1885 enum machine_mode mode
;
1887 HOST_WIDE_INT count
= XVECLEN (op
, 0);
1890 HOST_WIDE_INT i
= 1, base
= 0;
1894 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1897 /* Check to see if this might be a write-back */
1898 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
1903 /* Now check it more carefully */
1904 if (GET_CODE (SET_DEST (elt
)) != REG
1905 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
1906 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
1907 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
1908 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
1909 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
1910 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
1911 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
1912 != REGNO (SET_DEST (elt
)))
1918 /* Perform a quick check so we don't blow up below. */
1920 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
1921 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
1922 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
1925 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
1926 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
1928 for (; i
< count
; i
++)
1930 rtx elt
= XVECEXP (op
, 0, i
);
1932 if (GET_CODE (elt
) != SET
1933 || GET_CODE (SET_DEST (elt
)) != REG
1934 || GET_MODE (SET_DEST (elt
)) != SImode
1935 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
1936 || GET_CODE (SET_SRC (elt
)) != MEM
1937 || GET_MODE (SET_SRC (elt
)) != SImode
1938 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
1939 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
1940 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
1941 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
1948 /* Return 1 if OP is a store multiple operation. It is known to be
1949 parallel and the first section will be tested. */
1952 store_multiple_operation (op
, mode
)
1954 enum machine_mode mode
;
1956 HOST_WIDE_INT count
= XVECLEN (op
, 0);
1959 HOST_WIDE_INT i
= 1, base
= 0;
1963 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
1966 /* Check to see if this might be a write-back */
1967 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
1972 /* Now check it more carefully */
1973 if (GET_CODE (SET_DEST (elt
)) != REG
1974 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
1975 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
1976 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
1977 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
1978 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
1979 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
1980 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
1981 != REGNO (SET_DEST (elt
)))
1987 /* Perform a quick check so we don't blow up below. */
1989 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
1990 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
1991 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
1994 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
1995 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
1997 for (; i
< count
; i
++)
1999 elt
= XVECEXP (op
, 0, i
);
2001 if (GET_CODE (elt
) != SET
2002 || GET_CODE (SET_SRC (elt
)) != REG
2003 || GET_MODE (SET_SRC (elt
)) != SImode
2004 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2005 || GET_CODE (SET_DEST (elt
)) != MEM
2006 || GET_MODE (SET_DEST (elt
)) != SImode
2007 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2008 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2009 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2010 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2018 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2023 HOST_WIDE_INT
*load_offset
;
2025 int unsorted_regs
[4];
2026 HOST_WIDE_INT unsorted_offsets
[4];
2031 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2032 extended if required. */
2033 if (nops
< 2 || nops
> 4)
2036 /* Loop over the operands and check that the memory references are
2037 suitable (ie immediate offsets from the same base register). At
2038 the same time, extract the target register, and the memory
2040 for (i
= 0; i
< nops
; i
++)
2045 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2048 /* Don't reorder volatile memory references; it doesn't seem worth
2049 looking for the case where the order is ok anyway. */
2050 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2053 offset
= const0_rtx
;
2055 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2056 || (GET_CODE (reg
) == SUBREG
2057 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2058 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2059 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2061 || (GET_CODE (reg
) == SUBREG
2062 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2063 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2068 base_reg
= REGNO(reg
);
2069 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2070 ? REGNO (operands
[i
])
2071 : REGNO (SUBREG_REG (operands
[i
])));
2076 if (base_reg
!= REGNO (reg
))
2077 /* Not addressed from the same base register. */
2080 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2081 ? REGNO (operands
[i
])
2082 : REGNO (SUBREG_REG (operands
[i
])));
2083 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2087 /* If it isn't an integer register, or if it overwrites the
2088 base register but isn't the last insn in the list, then
2089 we can't do this. */
2090 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2091 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2094 unsorted_offsets
[i
] = INTVAL (offset
);
2097 /* Not a suitable memory address. */
2101 /* All the useful information has now been extracted from the
2102 operands into unsorted_regs and unsorted_offsets; additionally,
2103 order[0] has been set to the lowest numbered register in the
2104 list. Sort the registers into order, and check that the memory
2105 offsets are ascending and adjacent. */
2107 for (i
= 1; i
< nops
; i
++)
2111 order
[i
] = order
[i
- 1];
2112 for (j
= 0; j
< nops
; j
++)
2113 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2114 && (order
[i
] == order
[i
- 1]
2115 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2118 /* Have we found a suitable register? if not, one must be used more
2120 if (order
[i
] == order
[i
- 1])
2123 /* Is the memory address adjacent and ascending? */
2124 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2132 for (i
= 0; i
< nops
; i
++)
2133 regs
[i
] = unsorted_regs
[order
[i
]];
2135 *load_offset
= unsorted_offsets
[order
[0]];
2138 if (unsorted_offsets
[order
[0]] == 0)
2139 return 1; /* ldmia */
2141 if (unsorted_offsets
[order
[0]] == 4)
2142 return 2; /* ldmib */
2144 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2145 return 3; /* ldmda */
2147 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2148 return 4; /* ldmdb */
2150 /* Can't do it without setting up the offset, only do this if it takes
2151 no more than one insn. */
2152 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2153 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2157 emit_ldm_seq (operands
, nops
)
2163 HOST_WIDE_INT offset
;
2167 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2170 strcpy (buf
, "ldm%?ia\t");
2174 strcpy (buf
, "ldm%?ib\t");
2178 strcpy (buf
, "ldm%?da\t");
2182 strcpy (buf
, "ldm%?db\t");
2187 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2188 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2191 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2192 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2194 output_asm_insn (buf
, operands
);
2196 strcpy (buf
, "ldm%?ia\t");
2203 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2204 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2206 for (i
= 1; i
< nops
; i
++)
2207 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2208 reg_names
[regs
[i
]]);
2210 strcat (buf
, "}\t%@ phole ldm");
2212 output_asm_insn (buf
, operands
);
2217 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2222 HOST_WIDE_INT
*load_offset
;
2224 int unsorted_regs
[4];
2225 HOST_WIDE_INT unsorted_offsets
[4];
2230 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2231 extended if required. */
2232 if (nops
< 2 || nops
> 4)
2235 /* Loop over the operands and check that the memory references are
2236 suitable (ie immediate offsets from the same base register). At
2237 the same time, extract the target register, and the memory
2239 for (i
= 0; i
< nops
; i
++)
2244 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2247 /* Don't reorder volatile memory references; it doesn't seem worth
2248 looking for the case where the order is ok anyway. */
2249 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2252 offset
= const0_rtx
;
2254 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2255 || (GET_CODE (reg
) == SUBREG
2256 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2257 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2258 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2260 || (GET_CODE (reg
) == SUBREG
2261 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2262 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2267 base_reg
= REGNO(reg
);
2268 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2269 ? REGNO (operands
[i
])
2270 : REGNO (SUBREG_REG (operands
[i
])));
2275 if (base_reg
!= REGNO (reg
))
2276 /* Not addressed from the same base register. */
2279 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2280 ? REGNO (operands
[i
])
2281 : REGNO (SUBREG_REG (operands
[i
])));
2282 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2286 /* If it isn't an integer register, then we can't do this. */
2287 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2290 unsorted_offsets
[i
] = INTVAL (offset
);
2293 /* Not a suitable memory address. */
2297 /* All the useful information has now been extracted from the
2298 operands into unsorted_regs and unsorted_offsets; additionally,
2299 order[0] has been set to the lowest numbered register in the
2300 list. Sort the registers into order, and check that the memory
2301 offsets are ascending and adjacent. */
2303 for (i
= 1; i
< nops
; i
++)
2307 order
[i
] = order
[i
- 1];
2308 for (j
= 0; j
< nops
; j
++)
2309 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2310 && (order
[i
] == order
[i
- 1]
2311 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2314 /* Have we found a suitable register? if not, one must be used more
2316 if (order
[i
] == order
[i
- 1])
2319 /* Is the memory address adjacent and ascending? */
2320 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2328 for (i
= 0; i
< nops
; i
++)
2329 regs
[i
] = unsorted_regs
[order
[i
]];
2331 *load_offset
= unsorted_offsets
[order
[0]];
2334 if (unsorted_offsets
[order
[0]] == 0)
2335 return 1; /* stmia */
2337 if (unsorted_offsets
[order
[0]] == 4)
2338 return 2; /* stmib */
2340 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2341 return 3; /* stmda */
2343 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2344 return 4; /* stmdb */
2350 emit_stm_seq (operands
, nops
)
2356 HOST_WIDE_INT offset
;
2360 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2363 strcpy (buf
, "stm%?ia\t");
2367 strcpy (buf
, "stm%?ib\t");
2371 strcpy (buf
, "stm%?da\t");
2375 strcpy (buf
, "stm%?db\t");
2382 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2383 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2385 for (i
= 1; i
< nops
; i
++)
2386 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2387 reg_names
[regs
[i
]]);
2389 strcat (buf
, "}\t%@ phole stm");
2391 output_asm_insn (buf
, operands
);
2396 multi_register_push (op
, mode
)
2398 enum machine_mode mode
;
2400 if (GET_CODE (op
) != PARALLEL
2401 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2402 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
2403 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
2410 /* Routines for use with attributes */
2413 const_pool_offset (symbol
)
2416 return get_pool_offset (symbol
) - get_pool_size () - get_prologue_size ();
2419 /* Return nonzero if ATTR is a valid attribute for DECL.
2420 ATTRIBUTES are any existing attributes and ARGS are the arguments
2423 Supported attributes:
2425 naked: don't output any prologue or epilogue code, the user is assumed
2426 to do the right thing. */
2429 arm_valid_machine_decl_attribute (decl
, attributes
, attr
, args
)
2435 if (args
!= NULL_TREE
)
2438 if (is_attribute_p ("naked", attr
))
2439 return TREE_CODE (decl
) == FUNCTION_DECL
;
2443 /* Return non-zero if FUNC is a naked function. */
2446 arm_naked_function_p (func
)
2451 if (TREE_CODE (func
) != FUNCTION_DECL
)
2454 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
2455 return a
!= NULL_TREE
;
2458 /* Routines for use in generating RTL */
2461 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
)
2470 int sign
= up
? 1 : -1;
2472 result
= gen_rtx (PARALLEL
, VOIDmode
,
2473 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2476 XVECEXP (result
, 0, 0)
2477 = gen_rtx (SET
, GET_MODE (from
), from
,
2478 plus_constant (from
, count
* 4 * sign
));
2483 for (j
= 0; i
< count
; i
++, j
++)
2485 XVECEXP (result
, 0, i
)
2486 = gen_rtx (SET
, VOIDmode
, gen_rtx (REG
, SImode
, base_regno
+ j
),
2487 gen_rtx (MEM
, SImode
,
2488 plus_constant (from
, j
* 4 * sign
)));
2492 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
2498 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
)
2507 int sign
= up
? 1 : -1;
2509 result
= gen_rtx (PARALLEL
, VOIDmode
,
2510 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2513 XVECEXP (result
, 0, 0)
2514 = gen_rtx (SET
, GET_MODE (to
), to
,
2515 plus_constant (to
, count
* 4 * sign
));
2520 for (j
= 0; i
< count
; i
++, j
++)
2522 XVECEXP (result
, 0, i
)
2523 = gen_rtx (SET
, VOIDmode
,
2524 gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
)),
2525 gen_rtx (REG
, SImode
, base_regno
+ j
));
2529 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
2535 arm_gen_movstrqi (operands
)
2538 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
2541 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
2542 rtx part_bytes_reg
= NULL
;
2543 extern int optimize
;
2545 if (GET_CODE (operands
[2]) != CONST_INT
2546 || GET_CODE (operands
[3]) != CONST_INT
2547 || INTVAL (operands
[2]) > 64
2548 || INTVAL (operands
[3]) & 3)
2551 st_dst
= XEXP (operands
[0], 0);
2552 st_src
= XEXP (operands
[1], 0);
2553 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2554 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2556 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2557 out_words_to_go
= INTVAL (operands
[2]) / 4;
2558 last_bytes
= INTVAL (operands
[2]) & 3;
2560 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2561 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2563 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2565 if (in_words_to_go
> 4)
2566 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
));
2568 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
2571 if (out_words_to_go
)
2573 if (out_words_to_go
> 4)
2574 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
));
2575 else if (out_words_to_go
!= 1)
2576 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
2582 emit_move_insn (gen_rtx (MEM
, SImode
, dst
),
2583 gen_rtx (REG
, SImode
, 0));
2584 if (last_bytes
!= 0)
2585 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2589 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2590 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2593 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2594 if (out_words_to_go
)
2598 emit_move_insn (sreg
= gen_reg_rtx (SImode
), gen_rtx (MEM
, SImode
, src
));
2599 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2600 emit_move_insn (gen_rtx (MEM
, SImode
, dst
), sreg
);
2601 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2604 if (in_words_to_go
) /* Sanity check */
2610 if (in_words_to_go
< 0)
2613 part_bytes_reg
= copy_to_mode_reg (SImode
, gen_rtx (MEM
, SImode
, src
));
2616 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2618 rtx tmp
= gen_reg_rtx (SImode
);
2620 if (part_bytes_reg
== NULL
)
2623 /* The bytes we want are in the top end of the word */
2624 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2625 GEN_INT (8 * (4 - last_bytes
))));
2626 part_bytes_reg
= tmp
;
2630 emit_move_insn (gen_rtx (MEM
, QImode
,
2631 plus_constant (dst
, last_bytes
- 1)),
2632 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2635 tmp
= gen_reg_rtx (SImode
);
2636 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2637 part_bytes_reg
= tmp
;
2646 if (part_bytes_reg
== NULL
)
2649 emit_move_insn (gen_rtx (MEM
, QImode
, dst
),
2650 gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
2653 rtx tmp
= gen_reg_rtx (SImode
);
2655 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
2656 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
2657 part_bytes_reg
= tmp
;
2665 /* Generate a memory reference for a half word, such that it will be loaded
2666 into the top 16 bits of the word. We can assume that the address is
2667 known to be alignable and of the form reg, or plus (reg, const). */
2669 gen_rotated_half_load (memref
)
2672 HOST_WIDE_INT offset
= 0;
2673 rtx base
= XEXP (memref
, 0);
2675 if (GET_CODE (base
) == PLUS
)
2677 offset
= INTVAL (XEXP (base
, 1));
2678 base
= XEXP (base
, 0);
2681 /* If we aren't allowed to generate unalligned addresses, then fail. */
2682 if (TARGET_SHORT_BY_BYTES
2683 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
2686 base
= gen_rtx (MEM
, SImode
, plus_constant (base
, offset
& ~2));
2688 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
2691 return gen_rtx (ROTATE
, SImode
, base
, GEN_INT (16));
2694 static enum machine_mode
2695 select_dominance_cc_mode (op
, x
, y
, cond_or
)
2699 HOST_WIDE_INT cond_or
;
2701 enum rtx_code cond1
, cond2
;
2704 /* Currently we will probably get the wrong result if the individual
2705 comparisons are not simple. This also ensures that it is safe to
2706 reverse a comparions if necessary. */
2707 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
2709 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
2714 cond1
= reverse_condition (cond1
);
2716 /* If the comparisons are not equal, and one doesn't dominate the other,
2717 then we can't do this. */
2719 && ! comparison_dominates_p (cond1
, cond2
)
2720 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
2725 enum rtx_code temp
= cond1
;
2733 if (cond2
== EQ
|| ! cond_or
)
2738 case LE
: return CC_DLEmode
;
2739 case LEU
: return CC_DLEUmode
;
2740 case GE
: return CC_DGEmode
;
2741 case GEU
: return CC_DGEUmode
;
2747 if (cond2
== LT
|| ! cond_or
)
2756 if (cond2
== GT
|| ! cond_or
)
2765 if (cond2
== LTU
|| ! cond_or
)
2774 if (cond2
== GTU
|| ! cond_or
)
2782 /* The remaining cases only occur when both comparisons are the
2804 arm_select_cc_mode (op
, x
, y
)
2809 /* All floating point compares return CCFP if it is an equality
2810 comparison, and CCFPE otherwise. */
2811 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
2812 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
2814 /* A compare with a shifted operand. Because of canonicalization, the
2815 comparison will have to be swapped when we emit the assembler. */
2816 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
2817 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
2818 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
2819 || GET_CODE (x
) == ROTATERT
))
2822 /* This is a special case, that is used by combine to alow a
2823 comarison of a shifted byte load to be split into a zero-extend
2824 followed by a comparison of the shifted integer (only valid for
2825 equalities and unsigned inequalites. */
2826 if (GET_MODE (x
) == SImode
2827 && GET_CODE (x
) == ASHIFT
2828 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
2829 && GET_CODE (XEXP (x
, 0)) == SUBREG
2830 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
2831 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
2832 && (op
== EQ
|| op
== NE
2833 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
2834 && GET_CODE (y
) == CONST_INT
)
2837 /* An operation that sets the condition codes as a side-effect, the
2838 V flag is not set correctly, so we can only use comparisons where
2839 this doesn't matter. (For LT and GE we can use "mi" and "pl"
2841 if (GET_MODE (x
) == SImode
2843 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
2844 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
2845 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
2846 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
2847 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
2848 || GET_CODE (x
) == LSHIFTRT
2849 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
2850 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
2853 /* A construct for a conditional compare, if the false arm contains
2854 0, then both conditions must be true, otherwise either condition
2855 must be true. Not all conditions are possible, so CCmode is
2856 returned if it can't be done. */
2857 if (GET_CODE (x
) == IF_THEN_ELSE
2858 && (XEXP (x
, 2) == const0_rtx
2859 || XEXP (x
, 2) == const1_rtx
)
2860 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
2861 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
2862 return select_dominance_cc_mode (op
, XEXP (x
, 0), XEXP (x
, 1),
2863 INTVAL (XEXP (x
, 2)));
2865 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
2868 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
2869 && GET_CODE (x
) == PLUS
2870 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
2876 /* X and Y are two things to compare using CODE. Emit the compare insn and
2877 return the rtx for register 0 in the proper mode. FP means this is a
2878 floating point compare: I don't think that it is needed on the arm. */
2881 gen_compare_reg (code
, x
, y
, fp
)
2885 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
2886 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
2888 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
2889 gen_rtx (COMPARE
, mode
, x
, y
)));
2895 arm_reload_in_hi (operands
)
2898 rtx base
= find_replacement (&XEXP (operands
[1], 0));
2900 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
2901 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
2902 gen_rtx (MEM
, QImode
,
2903 plus_constant (base
, 1))));
2904 if (BYTES_BIG_ENDIAN
)
2905 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
2907 gen_rtx (IOR
, SImode
,
2908 gen_rtx (ASHIFT
, SImode
,
2909 gen_rtx (SUBREG
, SImode
,
2914 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
2916 gen_rtx (IOR
, SImode
,
2917 gen_rtx (ASHIFT
, SImode
,
2920 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
2924 arm_reload_out_hi (operands
)
2927 rtx base
= find_replacement (&XEXP (operands
[0], 0));
2929 if (BYTES_BIG_ENDIAN
)
2931 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
2932 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
2933 emit_insn (gen_lshrsi3 (operands
[2],
2934 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
2936 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
2937 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
2941 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
2942 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
2943 emit_insn (gen_lshrsi3 (operands
[2],
2944 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
2946 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
2947 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
2951 /* Check to see if a branch is forwards or backwards. Return TRUE if it
2955 arm_backwards_branch (from
, to
)
2958 return insn_addresses
[to
] <= insn_addresses
[from
];
2961 /* Check to see if a branch is within the distance that can be done using
2962 an arithmetic expression. */
2964 short_branch (from
, to
)
2967 int delta
= insn_addresses
[from
] + 8 - insn_addresses
[to
];
2969 return abs (delta
) < 980; /* A small margin for safety */
2972 /* Check to see that the insn isn't the target of the conditionalizing
2975 arm_insn_not_targeted (insn
)
2978 return insn
!= arm_target_insn
;
2982 /* Routines for manipulation of the constant pool. */
2983 /* This is unashamedly hacked from the version in sh.c, since the problem is
2984 extremely similar. */
2986 /* Arm instructions cannot load a large constant into a register,
2987 constants have to come from a pc relative load. The reference of a pc
2988 relative load instruction must be less than 1k infront of the instruction.
2989 This means that we often have to dump a constant inside a function, and
2990 generate code to branch around it.
2992 It is important to minimize this, since the branches will slow things
2993 down and make things bigger.
2995 Worst case code looks like:
3011 We fix this by performing a scan before scheduling, which notices which
3012 instructions need to have their operands fetched from the constant table
3013 and builds the table.
3018 scan, find an instruction which needs a pcrel move. Look forward, find th
3019 last barrier which is within MAX_COUNT bytes of the requirement.
3020 If there isn't one, make one. Process all the instructions between
3021 the find and the barrier.
3023 In the above example, we can tell that L3 is within 1k of L1, so
3024 the first move can be shrunk from the 2 insn+constant sequence into
3025 just 1 insn, and the constant moved to L3 to make:
3036 Then the second move becomes the target for the shortening process.
3042 rtx value
; /* Value in table */
3043 HOST_WIDE_INT next_offset
;
3044 enum machine_mode mode
; /* Mode of value */
3047 /* The maximum number of constants that can fit into one pool, since
3048 the pc relative range is 0...1020 bytes and constants are at least 4
3051 #define MAX_POOL_SIZE (1020/4)
3052 static pool_node pool_vector
[MAX_POOL_SIZE
];
3053 static int pool_size
;
3054 static rtx pool_vector_label
;
3056 /* Add a constant to the pool and return its label. */
3057 static HOST_WIDE_INT
3058 add_constant (x
, mode
)
3060 enum machine_mode mode
;
3064 HOST_WIDE_INT offset
;
3066 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3067 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3068 x
= get_pool_constant (XEXP (x
, 0));
3069 #ifndef AOF_ASSEMBLER
3070 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
3071 x
= XVECEXP (x
, 0, 0);
3074 /* First see if we've already got it */
3075 for (i
= 0; i
< pool_size
; i
++)
3077 if (GET_CODE (x
) == pool_vector
[i
].value
->code
3078 && mode
== pool_vector
[i
].mode
)
3080 if (GET_CODE (x
) == CODE_LABEL
)
3082 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
3085 if (rtx_equal_p (x
, pool_vector
[i
].value
))
3086 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
3090 /* Need a new one */
3091 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
3094 pool_vector_label
= gen_label_rtx ();
3096 pool_vector
[pool_size
].next_offset
3097 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
3099 pool_vector
[pool_size
].value
= x
;
3100 pool_vector
[pool_size
].mode
= mode
;
3105 /* Output the literal table */
3112 scan
= emit_label_after (gen_label_rtx (), scan
);
3113 scan
= emit_insn_after (gen_align_4 (), scan
);
3114 scan
= emit_label_after (pool_vector_label
, scan
);
3116 for (i
= 0; i
< pool_size
; i
++)
3118 pool_node
*p
= pool_vector
+ i
;
3120 switch (GET_MODE_SIZE (p
->mode
))
3123 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
3127 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
3136 scan
= emit_insn_after (gen_consttable_end (), scan
);
3137 scan
= emit_barrier_after (scan
);
3141 /* Non zero if the src operand needs to be fixed up */
3143 fixit (src
, mode
, destreg
)
3145 enum machine_mode mode
;
3148 if (CONSTANT_P (src
))
3150 if (GET_CODE (src
) == CONST_INT
)
3151 return (! const_ok_for_arm (INTVAL (src
))
3152 && ! const_ok_for_arm (~INTVAL (src
)));
3153 if (GET_CODE (src
) == CONST_DOUBLE
)
3154 return (GET_MODE (src
) == VOIDmode
3156 || (! const_double_rtx_ok_for_fpu (src
)
3157 && ! neg_const_double_rtx_ok_for_fpu (src
)));
3158 return symbol_mentioned_p (src
);
3160 #ifndef AOF_ASSEMBLER
3161 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
3165 return (mode
== SImode
&& GET_CODE (src
) == MEM
3166 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
3167 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
3170 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
3172 find_barrier (from
, max_count
)
3177 rtx found_barrier
= 0;
3179 while (from
&& count
< max_count
)
3181 if (GET_CODE (from
) == BARRIER
)
3182 found_barrier
= from
;
3184 /* Count the length of this insn */
3185 if (GET_CODE (from
) == INSN
3186 && GET_CODE (PATTERN (from
)) == SET
3187 && CONSTANT_P (SET_SRC (PATTERN (from
)))
3188 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
3190 rtx src
= SET_SRC (PATTERN (from
));
3194 count
+= get_attr_length (from
);
3196 from
= NEXT_INSN (from
);
3201 /* We didn't find a barrier in time to
3202 dump our stuff, so we'll make one */
3203 rtx label
= gen_label_rtx ();
3206 from
= PREV_INSN (from
);
3208 from
= get_last_insn ();
3210 /* Walk back to be just before any jump */
3211 while (GET_CODE (from
) == JUMP_INSN
3212 || GET_CODE (from
) == NOTE
3213 || GET_CODE (from
) == CODE_LABEL
)
3214 from
= PREV_INSN (from
);
3216 from
= emit_jump_insn_after (gen_jump (label
), from
);
3217 JUMP_LABEL (from
) = label
;
3218 found_barrier
= emit_barrier_after (from
);
3219 emit_label_after (label
, found_barrier
);
3220 return found_barrier
;
3223 return found_barrier
;
3226 /* Non zero if the insn is a move instruction which needs to be fixed. */
3231 if (!INSN_DELETED_P (insn
)
3232 && GET_CODE (insn
) == INSN
3233 && GET_CODE (PATTERN (insn
)) == SET
)
3235 rtx pat
= PATTERN (insn
);
3236 rtx src
= SET_SRC (pat
);
3237 rtx dst
= SET_DEST (pat
);
3239 enum machine_mode mode
= GET_MODE (dst
);
3243 if (GET_CODE (dst
) == REG
)
3244 destreg
= REGNO (dst
);
3245 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
3246 destreg
= REGNO (SUBREG_REG (dst
));
3248 return fixit (src
, mode
, destreg
);
3262 /* The ldr instruction can work with up to a 4k offset, and most constants
3263 will be loaded with one of these instructions; however, the adr
3264 instruction and the ldf instructions only work with a 1k offset. This
3265 code needs to be rewritten to use the 4k offset when possible, and to
3266 adjust when a 1k offset is needed. For now we just use a 1k offset
3270 /* Floating point operands can't work further than 1024 bytes from the
3271 PC, so to make things simple we restrict all loads for such functions.
3273 if (TARGET_HARD_FLOAT
)
3274 for (regno
= 16; regno
< 24; regno
++)
3275 if (regs_ever_live
[regno
])
3284 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
3286 if (broken_move (insn
))
3288 /* This is a broken move instruction, scan ahead looking for
3289 a barrier to stick the constant table behind */
3291 rtx barrier
= find_barrier (insn
, count_size
);
3293 /* Now find all the moves between the points and modify them */
3294 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
3296 if (broken_move (scan
))
3298 /* This is a broken move instruction, add it to the pool */
3299 rtx pat
= PATTERN (scan
);
3300 rtx src
= SET_SRC (pat
);
3301 rtx dst
= SET_DEST (pat
);
3302 enum machine_mode mode
= GET_MODE (dst
);
3303 HOST_WIDE_INT offset
;
3309 /* If this is an HImode constant load, convert it into
3310 an SImode constant load. Since the register is always
3311 32 bits this is safe. We have to do this, since the
3312 load pc-relative instruction only does a 32-bit load. */
3316 if (GET_CODE (dst
) != REG
)
3318 PUT_MODE (dst
, SImode
);
3321 offset
= add_constant (src
, mode
);
3322 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
3326 /* For wide moves to integer regs we need to split the
3327 address calculation off into a separate insn, so that
3328 the load can then be done with a load-multiple. This is
3329 safe, since we have already noted the length of such
3330 insns to be 8, and we are immediately over-writing the
3331 scratch we have grabbed with the final result. */
3332 if (GET_MODE_SIZE (mode
) > 4
3333 && (scratch
= REGNO (dst
)) < 16)
3335 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
3336 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
3341 newsrc
= gen_rtx (MEM
, mode
, addr
);
3343 /* Build a jump insn wrapper around the move instead
3344 of an ordinary insn, because we want to have room for
3345 the target label rtx in fld[7], which an ordinary
3346 insn doesn't have. */
3347 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
3350 JUMP_LABEL (newinsn
) = pool_vector_label
;
3352 /* But it's still an ordinary insn */
3353 PUT_CODE (newinsn
, INSN
);
3360 dump_table (barrier
);
3367 /* Routines to output assembly language. */
3369 /* If the rtx is the correct value then return the string of the number.
3370 In this way we can ensure that valid double constants are generated even
3371 when cross compiling. */
3373 fp_immediate_constant (x
)
3379 if (!fpa_consts_inited
)
3382 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3383 for (i
= 0; i
< 8; i
++)
3384 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
3385 return strings_fpa
[i
];
3390 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
3392 fp_const_from_val (r
)
3397 if (! fpa_consts_inited
)
3400 for (i
= 0; i
< 8; i
++)
3401 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
3402 return strings_fpa
[i
];
3407 /* Output the operands of a LDM/STM instruction to STREAM.
3408 MASK is the ARM register set mask of which only bits 0-15 are important.
3409 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
3410 must follow the register list. */
3413 print_multi_reg (stream
, instr
, mask
, hat
)
3419 int not_first
= FALSE
;
3421 fputc ('\t', stream
);
3422 fprintf (stream
, instr
, REGISTER_PREFIX
);
3423 fputs (", {", stream
);
3424 for (i
= 0; i
< 16; i
++)
3425 if (mask
& (1 << i
))
3428 fprintf (stream
, ", ");
3429 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
3433 fprintf (stream
, "}%s\n", hat
? "^" : "");
3436 /* Output a 'call' insn. */
3439 output_call (operands
)
3442 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
3444 if (REGNO (operands
[0]) == 14)
3446 operands
[0] = gen_rtx (REG
, SImode
, 12);
3447 output_asm_insn ("mov%?\t%0, %|lr", operands
);
3449 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3450 output_asm_insn ("mov%?\t%|pc, %0", operands
);
3458 int something_changed
= 0;
3460 int code
= GET_CODE (x0
);
3467 if (REGNO (x0
) == 14)
3469 *x
= gen_rtx (REG
, SImode
, 12);
3474 /* Scan through the sub-elements and change any references there */
3475 fmt
= GET_RTX_FORMAT (code
);
3476 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3478 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
3479 else if (fmt
[i
] == 'E')
3480 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
3481 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
3482 return something_changed
;
3486 /* Output a 'call' insn that is a reference in memory. */
3489 output_call_mem (operands
)
3492 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
3493 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
3495 if (eliminate_lr2ip (&operands
[0]))
3496 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
3498 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3499 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
3504 /* Output a move from arm registers to an fpu registers.
3505 OPERANDS[0] is an fpu register.
3506 OPERANDS[1] is the first registers of an arm register pair. */
3509 output_mov_long_double_fpu_from_arm (operands
)
3512 int arm_reg0
= REGNO (operands
[1]);
3518 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3519 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3520 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3522 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3523 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
3527 /* Output a move from an fpu register to arm registers.
3528 OPERANDS[0] is the first registers of an arm register pair.
3529 OPERANDS[1] is an fpu register. */
3532 output_mov_long_double_arm_from_fpu (operands
)
3535 int arm_reg0
= REGNO (operands
[0]);
3541 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3542 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3543 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3545 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
3546 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3550 /* Output a move from arm registers to arm registers of a long double
3551 OPERANDS[0] is the destination.
3552 OPERANDS[1] is the source. */
3554 output_mov_long_double_arm_from_arm (operands
)
3557 /* We have to be careful here because the two might overlap */
3558 int dest_start
= REGNO (operands
[0]);
3559 int src_start
= REGNO (operands
[1]);
3563 if (dest_start
< src_start
)
3565 for (i
= 0; i
< 3; i
++)
3567 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3568 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3569 output_asm_insn ("mov%?\t%0, %1", ops
);
3574 for (i
= 2; i
>= 0; i
--)
3576 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3577 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3578 output_asm_insn ("mov%?\t%0, %1", ops
);
3586 /* Output a move from arm registers to an fpu registers.
3587 OPERANDS[0] is an fpu register.
3588 OPERANDS[1] is the first registers of an arm register pair. */
3591 output_mov_double_fpu_from_arm (operands
)
3594 int arm_reg0
= REGNO (operands
[1]);
3599 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3600 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3601 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
3602 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
3606 /* Output a move from an fpu register to arm registers.
3607 OPERANDS[0] is the first registers of an arm register pair.
3608 OPERANDS[1] is an fpu register. */
3611 output_mov_double_arm_from_fpu (operands
)
3614 int arm_reg0
= REGNO (operands
[0]);
3620 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3621 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3622 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
3623 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
3627 /* Output a move between double words.
3628 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
3629 or MEM<-REG and all MEMs must be offsettable addresses. */
3632 output_move_double (operands
)
3635 enum rtx_code code0
= GET_CODE (operands
[0]);
3636 enum rtx_code code1
= GET_CODE (operands
[1]);
3641 int reg0
= REGNO (operands
[0]);
3643 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
3646 int reg1
= REGNO (operands
[1]);
3650 /* Ensure the second source is not overwritten */
3651 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
3652 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
3654 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
3656 else if (code1
== CONST_DOUBLE
)
3658 if (GET_MODE (operands
[1]) == DFmode
)
3661 union real_extract u
;
3663 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
3665 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
3666 otherops
[1] = GEN_INT(l
[1]);
3667 operands
[1] = GEN_INT(l
[0]);
3669 else if (GET_MODE (operands
[1]) != VOIDmode
)
3671 else if (WORDS_BIG_ENDIAN
)
3674 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
3675 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
3680 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
3681 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
3683 output_mov_immediate (operands
);
3684 output_mov_immediate (otherops
);
3686 else if (code1
== CONST_INT
)
3688 /* sign extend the intval into the high-order word */
3689 if (WORDS_BIG_ENDIAN
)
3691 otherops
[1] = operands
[1];
3692 operands
[1] = (INTVAL (operands
[1]) < 0
3693 ? constm1_rtx
: const0_rtx
);
3696 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
3697 output_mov_immediate (otherops
);
3698 output_mov_immediate (operands
);
3700 else if (code1
== MEM
)
3702 switch (GET_CODE (XEXP (operands
[1], 0)))
3705 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
3709 abort (); /* Should never happen now */
3713 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
3717 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
3721 abort (); /* Should never happen now */
3726 output_asm_insn ("adr%?\t%0, %1", operands
);
3727 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
3731 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
3733 otherops
[0] = operands
[0];
3734 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
3735 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
3736 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
3738 if (GET_CODE (otherops
[2]) == CONST_INT
)
3740 switch (INTVAL (otherops
[2]))
3743 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
3746 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
3749 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
3752 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
3753 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
3755 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
3758 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
3761 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
3762 return "ldm%?ia\t%0, %M0";
3766 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
3767 /* Take care of overlapping base/data reg. */
3768 if (reg_mentioned_p (operands
[0], operands
[1]))
3770 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3771 output_asm_insn ("ldr%?\t%0, %1", operands
);
3775 output_asm_insn ("ldr%?\t%0, %1", operands
);
3776 output_asm_insn ("ldr%?\t%0, %1", otherops
);
3782 abort(); /* Constraints should prevent this */
3784 else if (code0
== MEM
&& code1
== REG
)
3786 if (REGNO (operands
[1]) == 12)
3789 switch (GET_CODE (XEXP (operands
[0], 0)))
3792 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
3796 abort (); /* Should never happen now */
3800 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
3804 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
3808 abort (); /* Should never happen now */
3812 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
3814 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
3817 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
3821 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
3825 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
3832 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
3833 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
3834 output_asm_insn ("str%?\t%1, %0", operands
);
3835 output_asm_insn ("str%?\t%1, %0", otherops
);
3839 abort(); /* Constraints should prevent this */
3845 /* Output an arbitrary MOV reg, #n.
3846 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
3849 output_mov_immediate (operands
)
3852 HOST_WIDE_INT n
= INTVAL (operands
[1]);
3856 /* Try to use one MOV */
3857 if (const_ok_for_arm (n
))
3859 output_asm_insn ("mov%?\t%0, %1", operands
);
3863 /* Try to use one MVN */
3864 if (const_ok_for_arm (~n
))
3866 operands
[1] = GEN_INT (~n
);
3867 output_asm_insn ("mvn%?\t%0, %1", operands
);
3871 /* If all else fails, make it out of ORRs or BICs as appropriate. */
3873 for (i
=0; i
< 32; i
++)
3877 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
3878 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
3881 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
3888 /* Output an ADD r, s, #n where n may be too big for one instruction. If
3889 adding zero to one register, output nothing. */
3892 output_add_immediate (operands
)
3895 HOST_WIDE_INT n
= INTVAL (operands
[2]);
3897 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
3900 output_multi_immediate (operands
,
3901 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
3904 output_multi_immediate (operands
,
3905 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
3912 /* Output a multiple immediate operation.
3913 OPERANDS is the vector of operands referred to in the output patterns.
3914 INSTR1 is the output pattern to use for the first constant.
3915 INSTR2 is the output pattern to use for subsequent constants.
3916 IMMED_OP is the index of the constant slot in OPERANDS.
3917 N is the constant value. */
3920 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
3922 char *instr1
, *instr2
;
3926 #if HOST_BITS_PER_WIDE_INT > 32
3932 operands
[immed_op
] = const0_rtx
;
3933 output_asm_insn (instr1
, operands
); /* Quick and easy output */
3938 char *instr
= instr1
;
3940 /* Note that n is never zero here (which would give no output) */
3941 for (i
= 0; i
< 32; i
+= 2)
3945 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
3946 output_asm_insn (instr
, operands
);
3956 /* Return the appropriate ARM instruction for the operation code.
3957 The returned result should not be overwritten. OP is the rtx of the
3958 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
3962 arithmetic_instr (op
, shift_first_arg
)
3964 int shift_first_arg
;
3966 switch (GET_CODE (op
))
3972 return shift_first_arg
? "rsb" : "sub";
3989 /* Ensure valid constant shifts and return the appropriate shift mnemonic
3990 for the operation code. The returned result should not be overwritten.
3991 OP is the rtx code of the shift.
3992 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
3996 shift_op (op
, amountp
)
3998 HOST_WIDE_INT
*amountp
;
4001 enum rtx_code code
= GET_CODE (op
);
4003 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
4005 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
4006 *amountp
= INTVAL (XEXP (op
, 1));
4029 /* We never have to worry about the amount being other than a
4030 power of 2, since this case can never be reloaded from a reg. */
4032 *amountp
= int_log2 (*amountp
);
4043 /* This is not 100% correct, but follows from the desire to merge
4044 multiplication by a power of 2 with the recognizer for a
4045 shift. >=32 is not a valid shift for "asl", so we must try and
4046 output a shift that produces the correct arithmetical result.
4047 Using lsr #32 is identical except for the fact that the carry bit
4048 is not set correctly if we set the flags; but we never use the
4049 carry bit from such an operation, so we can ignore that. */
4050 if (code
== ROTATERT
)
4051 *amountp
&= 31; /* Rotate is just modulo 32 */
4052 else if (*amountp
!= (*amountp
& 31))
4059 /* Shifts of 0 are no-ops. */
4068 /* Obtain the shift from the POWER of two. */
4072 HOST_WIDE_INT power
;
4074 HOST_WIDE_INT shift
= 0;
4076 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
4086 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
4087 /bin/as is horribly restrictive. */
4090 output_ascii_pseudo_op (stream
, p
, len
)
4096 int len_so_far
= 1000;
4097 int chars_so_far
= 0;
4099 for (i
= 0; i
< len
; i
++)
4101 register int c
= p
[i
];
4103 if (len_so_far
> 50)
4106 fputs ("\"\n", stream
);
4107 fputs ("\t.ascii\t\"", stream
);
4109 arm_increase_location (chars_so_far
);
4113 if (c
== '\"' || c
== '\\')
4119 if (c
>= ' ' && c
< 0177)
4126 fprintf (stream
, "\\%03o", c
);
4133 fputs ("\"\n", stream
);
4134 arm_increase_location (chars_so_far
);
4138 /* Try to determine whether a pattern really clobbers the link register.
4139 This information is useful when peepholing, so that lr need not be pushed
4140 if we combine a call followed by a return.
4141 NOTE: This code does not check for side-effect expressions in a SET_SRC:
4142 such a check should not be needed because these only update an existing
4143 value within a register; the register must still be set elsewhere within
4147 pattern_really_clobbers_lr (x
)
4152 switch (GET_CODE (x
))
4155 switch (GET_CODE (SET_DEST (x
)))
4158 return REGNO (SET_DEST (x
)) == 14;
4161 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
4162 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
4164 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
4173 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
4174 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
4179 switch (GET_CODE (XEXP (x
, 0)))
4182 return REGNO (XEXP (x
, 0)) == 14;
4185 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
4186 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
4202 function_really_clobbers_lr (first
)
4207 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
4209 switch (GET_CODE (insn
))
4214 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
4219 if (pattern_really_clobbers_lr (PATTERN (insn
)))
4224 /* Don't yet know how to handle those calls that are not to a
4226 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
4229 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
4232 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
4238 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
4244 default: /* Don't recognize it, be safe */
4248 /* A call can be made (by peepholing) not to clobber lr iff it is
4249 followed by a return. There may, however, be a use insn iff
4250 we are returning the result of the call.
4251 If we run off the end of the insn chain, then that means the
4252 call was at the end of the function. Unfortunately we don't
4253 have a return insn for the peephole to recognize, so we
4254 must reject this. (Can this be fixed by adding our own insn?) */
4255 if ((next
= next_nonnote_insn (insn
)) == NULL
)
4258 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
4259 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
4260 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
4261 == REGNO (XEXP (PATTERN (next
), 0))))
4262 if ((next
= next_nonnote_insn (next
)) == NULL
)
4265 if (GET_CODE (next
) == JUMP_INSN
4266 && GET_CODE (PATTERN (next
)) == RETURN
)
4275 /* We have reached the end of the chain so lr was _not_ clobbered */
4280 output_return_instruction (operand
, really_return
, reverse
)
4286 int reg
, live_regs
= 0;
4287 int volatile_func
= (optimize
> 0
4288 && TREE_THIS_VOLATILE (current_function_decl
));
4290 return_used_this_function
= 1;
4295 /* If this function was declared non-returning, and we have found a tail
4296 call, then we have to trust that the called function won't return. */
4297 if (! really_return
)
4300 /* Otherwise, trap an attempted return by aborting. */
4302 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4303 assemble_external_libcall (ops
[1]);
4304 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
4308 if (current_function_calls_alloca
&& ! really_return
)
4311 for (reg
= 0; reg
<= 10; reg
++)
4312 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4315 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
4318 if (frame_pointer_needed
)
4323 if (lr_save_eliminated
|| ! regs_ever_live
[14])
4326 if (frame_pointer_needed
)
4328 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
4331 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
4333 for (reg
= 0; reg
<= 10; reg
++)
4334 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4336 strcat (instr
, "%|");
4337 strcat (instr
, reg_names
[reg
]);
4339 strcat (instr
, ", ");
4342 if (frame_pointer_needed
)
4344 strcat (instr
, "%|");
4345 strcat (instr
, reg_names
[11]);
4346 strcat (instr
, ", ");
4347 strcat (instr
, "%|");
4348 strcat (instr
, reg_names
[13]);
4349 strcat (instr
, ", ");
4350 strcat (instr
, "%|");
4351 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4355 strcat (instr
, "%|");
4356 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4358 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
4359 output_asm_insn (instr
, &operand
);
4361 else if (really_return
)
4363 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
4364 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
4365 output_asm_insn (instr
, &operand
);
4371 /* Return nonzero if optimizing and the current function is volatile.
4372 Such functions never return, and many memory cycles can be saved
4373 by not storing register values that will never be needed again.
4374 This optimization was added to speed up context switching in a
4375 kernel application. */
4378 arm_volatile_func ()
4380 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
4383 /* Return the size of the prologue. It's not too bad if we slightly
4387 get_prologue_size ()
4389 return profile_flag
? 12 : 0;
4392 /* The amount of stack adjustment that happens here, in output_return and in
4393 output_epilogue must be exactly the same as was calculated during reload,
4394 or things will point to the wrong place. The only time we can safely
4395 ignore this constraint is when a function has no arguments on the stack,
4396 no stack frame requirement and no live registers execpt for `lr'. If we
4397 can guarantee that by making all function calls into tail calls and that
4398 lr is not clobbered in any other way, then there is no need to push lr
4402 output_func_prologue (f
, frame_size
)
4406 int reg
, live_regs_mask
= 0;
4408 int volatile_func
= (optimize
> 0
4409 && TREE_THIS_VOLATILE (current_function_decl
));
4411 /* Nonzero if we must stuff some register arguments onto the stack as if
4412 they were passed there. */
4413 int store_arg_regs
= 0;
4415 if (arm_ccfsm_state
|| arm_target_insn
)
4416 abort (); /* Sanity check */
4418 if (arm_naked_function_p (current_function_decl
))
4421 return_used_this_function
= 0;
4422 lr_save_eliminated
= 0;
4424 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
4425 ASM_COMMENT_START
, current_function_args_size
,
4426 current_function_pretend_args_size
, frame_size
);
4427 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
4428 ASM_COMMENT_START
, frame_pointer_needed
,
4429 current_function_anonymous_args
);
4432 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
4434 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
4437 for (reg
= 0; reg
<= 10; reg
++)
4438 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4439 live_regs_mask
|= (1 << reg
);
4441 if (frame_pointer_needed
)
4442 live_regs_mask
|= 0xD800;
4443 else if (regs_ever_live
[14])
4445 if (! current_function_args_size
4446 && ! function_really_clobbers_lr (get_insns ()))
4447 lr_save_eliminated
= 1;
4449 live_regs_mask
|= 0x4000;
4454 /* if a di mode load/store multiple is used, and the base register
4455 is r3, then r4 can become an ever live register without lr
4456 doing so, in this case we need to push lr as well, or we
4457 will fail to get a proper return. */
4459 live_regs_mask
|= 0x4000;
4460 lr_save_eliminated
= 0;
4464 if (lr_save_eliminated
)
4465 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
4471 output_func_epilogue (f
, frame_size
)
4475 int reg
, live_regs_mask
= 0, code_size
= 0;
4476 /* If we need this then it will always be at lesat this much */
4477 int floats_offset
= 24;
4479 int volatile_func
= (optimize
> 0
4480 && TREE_THIS_VOLATILE (current_function_decl
));
4482 if (use_return_insn() && return_used_this_function
)
4484 if (frame_size
&& !(frame_pointer_needed
|| TARGET_APCS
))
4491 /* Naked functions don't have epilogues. */
4492 if (arm_naked_function_p (current_function_decl
))
4495 /* A volatile function should never return. Call abort. */
4498 rtx op
= gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4499 assemble_external_libcall (op
);
4500 output_asm_insn ("bl\t%a0", &op
);
4505 for (reg
= 0; reg
<= 10; reg
++)
4506 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4508 live_regs_mask
|= (1 << reg
);
4512 if (frame_pointer_needed
)
4514 for (reg
= 23; reg
> 15; reg
--)
4515 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4517 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
4518 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
4519 floats_offset
+= 12;
4523 live_regs_mask
|= 0xA800;
4524 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
4525 TARGET_APCS_32
? FALSE
: TRUE
);
4530 /* Restore stack pointer if necessary. */
4533 operands
[0] = operands
[1] = stack_pointer_rtx
;
4534 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
, frame_size
);
4535 output_add_immediate (operands
);
4538 for (reg
= 16; reg
< 24; reg
++)
4539 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4541 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
4542 reg_names
[reg
], REGISTER_PREFIX
);
4545 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
4547 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
4548 TARGET_APCS_32
? FALSE
: TRUE
);
4553 if (live_regs_mask
|| regs_ever_live
[14])
4555 live_regs_mask
|= 0x4000;
4556 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
4559 if (current_function_pretend_args_size
)
4561 operands
[0] = operands
[1] = stack_pointer_rtx
;
4562 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
,
4563 current_function_pretend_args_size
);
4564 output_add_immediate (operands
);
4566 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
4567 : "\tmovs\t%spc, %slr\n"),
4568 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
4575 /* insn_addresses isn't allocated when not optimizing */
4576 /* ??? The previous comment is incorrect. Clarify. */
4579 arm_increase_location (code_size
4580 + insn_addresses
[INSN_UID (get_last_insn ())]
4581 + get_prologue_size ());
4583 current_function_anonymous_args
= 0;
4587 emit_multi_reg_push (mask
)
4594 for (i
= 0; i
< 16; i
++)
4595 if (mask
& (1 << i
))
4598 if (num_regs
== 0 || num_regs
> 16)
4601 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (num_regs
));
4603 for (i
= 0; i
< 16; i
++)
4605 if (mask
& (1 << i
))
4608 = gen_rtx (SET
, VOIDmode
, gen_rtx (MEM
, BLKmode
,
4609 gen_rtx (PRE_DEC
, BLKmode
,
4610 stack_pointer_rtx
)),
4611 gen_rtx (UNSPEC
, BLKmode
,
4612 gen_rtvec (1, gen_rtx (REG
, SImode
, i
)),
4618 for (j
= 1, i
++; j
< num_regs
; i
++)
4620 if (mask
& (1 << i
))
4623 = gen_rtx (USE
, VOIDmode
, gen_rtx (REG
, SImode
, i
));
4631 arm_expand_prologue ()
4634 rtx amount
= GEN_INT (- get_frame_size ());
4637 int live_regs_mask
= 0;
4638 int store_arg_regs
= 0;
4639 int volatile_func
= (optimize
> 0
4640 && TREE_THIS_VOLATILE (current_function_decl
));
4642 /* Naked functions don't have prologues. */
4643 if (arm_naked_function_p (current_function_decl
))
4646 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
4649 if (! volatile_func
)
4650 for (reg
= 0; reg
<= 10; reg
++)
4651 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4652 live_regs_mask
|= 1 << reg
;
4654 if (! volatile_func
&& regs_ever_live
[14])
4655 live_regs_mask
|= 0x4000;
4657 if (frame_pointer_needed
)
4659 live_regs_mask
|= 0xD800;
4660 emit_insn (gen_movsi (gen_rtx (REG
, SImode
, 12),
4661 stack_pointer_rtx
));
4664 if (current_function_pretend_args_size
)
4667 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
4670 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
4671 GEN_INT (-current_function_pretend_args_size
)));
4676 /* If we have to push any regs, then we must push lr as well, or
4677 we won't get a proper return. */
4678 live_regs_mask
|= 0x4000;
4679 emit_multi_reg_push (live_regs_mask
);
4682 /* For now the integer regs are still pushed in output_func_epilogue (). */
4684 if (! volatile_func
)
4685 for (reg
= 23; reg
> 15; reg
--)
4686 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4687 emit_insn (gen_rtx (SET
, VOIDmode
,
4688 gen_rtx (MEM
, XFmode
,
4689 gen_rtx (PRE_DEC
, XFmode
,
4690 stack_pointer_rtx
)),
4691 gen_rtx (REG
, XFmode
, reg
)));
4693 if (frame_pointer_needed
)
4694 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx (REG
, SImode
, 12),
4696 (-(4 + current_function_pretend_args_size
)))));
4698 if (amount
!= const0_rtx
)
4700 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
4701 emit_insn (gen_rtx (CLOBBER
, VOIDmode
,
4702 gen_rtx (MEM
, BLKmode
, stack_pointer_rtx
)));
4705 /* If we are profiling, make sure no instructions are scheduled before
4706 the call to mcount. */
4707 if (profile_flag
|| profile_block_flag
)
4708 emit_insn (gen_blockage ());
4712 /* If CODE is 'd', then the X is a condition operand and the instruction
4713 should only be executed if the condition is true.
4714 if CODE is 'D', then the X is a condition operand and the instruction
4715 should only be executed if the condition is false: however, if the mode
4716 of the comparison is CCFPEmode, then always execute the instruction -- we
4717 do this because in these circumstances !GE does not necessarily imply LT;
4718 in these cases the instruction pattern will take care to make sure that
4719 an instruction containing %d will follow, thereby undoing the effects of
4720 doing this instruction unconditionally.
4721 If CODE is 'N' then X is a floating point operand that must be negated
4723 If CODE is 'B' then output a bitwise inverted value of X (a const int).
4724 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
4727 arm_print_operand (stream
, x
, code
)
4735 fputs (ASM_COMMENT_START
, stream
);
4739 fputs (REGISTER_PREFIX
, stream
);
4743 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
4744 fputs (arm_condition_codes
[arm_current_cc
], stream
);
4750 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4751 r
= REAL_VALUE_NEGATE (r
);
4752 fprintf (stream
, "%s", fp_const_from_val (&r
));
4757 if (GET_CODE (x
) == CONST_INT
)
4759 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
4764 ARM_SIGN_EXTEND (~ INTVAL (x
)));
4768 output_addr_const (stream
, x
);
4773 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
4777 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
4783 char *shift
= shift_op (x
, &val
);
4787 fprintf (stream
, ", %s ", shift_op (x
, &val
));
4789 arm_print_operand (stream
, XEXP (x
, 1), 0);
4792 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
4805 fputs (REGISTER_PREFIX
, stream
);
4806 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
4812 fputs (REGISTER_PREFIX
, stream
);
4813 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
4817 fputs (REGISTER_PREFIX
, stream
);
4818 if (GET_CODE (XEXP (x
, 0)) == REG
)
4819 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
4821 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
4825 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
4826 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
4827 + ((GET_MODE_SIZE (GET_MODE (x
))
4828 + GET_MODE_SIZE (SImode
) - 1)
4829 / GET_MODE_SIZE (SImode
))]);
4834 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
4840 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
4841 (get_arm_condition_code (x
))],
4849 if (GET_CODE (x
) == REG
)
4851 fputs (REGISTER_PREFIX
, stream
);
4852 fputs (reg_names
[REGNO (x
)], stream
);
4854 else if (GET_CODE (x
) == MEM
)
4856 output_memory_reference_mode
= GET_MODE (x
);
4857 output_address (XEXP (x
, 0));
4859 else if (GET_CODE (x
) == CONST_DOUBLE
)
4860 fprintf (stream
, "#%s", fp_immediate_constant (x
));
4861 else if (GET_CODE (x
) == NEG
)
4862 abort (); /* This should never happen now. */
4865 fputc ('#', stream
);
4866 output_addr_const (stream
, x
);
4871 /* Increase the `arm_text_location' by AMOUNT if we're in the text
4875 arm_increase_location (amount
)
4878 if (in_text_section ())
4879 arm_text_location
+= amount
;
4883 /* Output a label definition. If this label is within the .text segment, it
4884 is stored in OFFSET_TABLE, to be used when building `llc' instructions.
4885 Maybe GCC remembers names not starting with a `*' for a long time, but this
4886 is a minority anyway, so we just make a copy. Do not store the leading `*'
4887 if the name starts with one. */
4890 arm_asm_output_label (stream
, name
)
4894 char *real_name
, *s
;
4895 struct label_offset
*cur
;
4898 ARM_OUTPUT_LABEL (stream
, name
);
4899 if (! in_text_section ())
4904 real_name
= xmalloc (1 + strlen (&name
[1]));
4905 strcpy (real_name
, &name
[1]);
4909 real_name
= xmalloc (2 + strlen (name
));
4910 strcpy (real_name
, USER_LABEL_PREFIX
);
4911 strcat (real_name
, name
);
4913 for (s
= real_name
; *s
; s
++)
4916 hash
= hash
% LABEL_HASH_SIZE
;
4917 cur
= (struct label_offset
*) xmalloc (sizeof (struct label_offset
));
4918 cur
->name
= real_name
;
4919 cur
->offset
= arm_text_location
;
4920 cur
->cdr
= offset_table
[hash
];
4921 offset_table
[hash
] = cur
;
4924 /* Output code resembling an .lcomm directive. /bin/as doesn't have this
4925 directive hence this hack, which works by reserving some `.space' in the
4926 bss segment directly.
4928 XXX This is a severe hack, which is guaranteed NOT to work since it doesn't
4929 define STATIC COMMON space but merely STATIC BSS space. */
4932 output_lcomm_directive (stream
, name
, size
, align
)
4938 ASM_OUTPUT_ALIGN (stream
, floor_log2 (align
/ BITS_PER_UNIT
));
4939 ARM_OUTPUT_LABEL (stream
, name
);
4940 fprintf (stream
, "\t.space\t%d\n", size
);
/* A finite state machine takes care of noticing whether or not instructions
   can be conditionally executed, and thus decrease execution time and code
   size by deleting branch instructions.  The fsm is controlled by
   final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */

/* The state of the fsm controlling condition codes are:
   0: normal, do nothing special
   1: make ASM_OUTPUT_OPCODE not output this instruction
   2: make ASM_OUTPUT_OPCODE not output this instruction
   3: make instructions conditional
   4: make instructions conditional

   State transitions (state->state by whom under condition):
   0 -> 1 final_prescan_insn if the `target' is a label
   0 -> 2 final_prescan_insn if the `target' is an unconditional branch
   1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
   2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
   3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
          (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
   4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
          (the target insn is arm_target_insn).

   If the jump clobbers the conditions then we use states 2 and 4.

   A similar thing can be done with conditional return insns.

   XXX In case the `target' is an unconditional branch, this conditionalising
   of the instructions always reduces code size, but not always execution
   time.  But then, I want to reduce the code size to somewhere near what
   /bin/cc produces.  */
4974 /* Returns the index of the ARM condition code string in
4975 `arm_condition_codes'. COMPARISON should be an rtx like
4976 `(eq (...) (...))'. */
4978 static enum arm_cond_code
4979 get_arm_condition_code (comparison
)
4982 enum machine_mode mode
= GET_MODE (XEXP (comparison
, 0));
4984 register enum rtx_code comp_code
= GET_CODE (comparison
);
4986 if (GET_MODE_CLASS (mode
) != MODE_CC
)
4987 mode
= SELECT_CC_MODE (comp_code
, XEXP (comparison
, 0),
4988 XEXP (comparison
, 1));
4992 case CC_DNEmode
: code
= ARM_NE
; goto dominance
;
4993 case CC_DEQmode
: code
= ARM_EQ
; goto dominance
;
4994 case CC_DGEmode
: code
= ARM_GE
; goto dominance
;
4995 case CC_DGTmode
: code
= ARM_GT
; goto dominance
;
4996 case CC_DLEmode
: code
= ARM_LE
; goto dominance
;
4997 case CC_DLTmode
: code
= ARM_LT
; goto dominance
;
4998 case CC_DGEUmode
: code
= ARM_CS
; goto dominance
;
4999 case CC_DGTUmode
: code
= ARM_HI
; goto dominance
;
5000 case CC_DLEUmode
: code
= ARM_LS
; goto dominance
;
5001 case CC_DLTUmode
: code
= ARM_CC
;
5004 if (comp_code
!= EQ
&& comp_code
!= NE
)
5007 if (comp_code
== EQ
)
5008 return ARM_INVERSE_CONDITION_CODE (code
);
5014 case NE
: return ARM_NE
;
5015 case EQ
: return ARM_EQ
;
5016 case GE
: return ARM_PL
;
5017 case LT
: return ARM_MI
;
5025 case NE
: return ARM_NE
;
5026 case EQ
: return ARM_EQ
;
5033 case GE
: return ARM_GE
;
5034 case GT
: return ARM_GT
;
5035 case LE
: return ARM_LS
;
5036 case LT
: return ARM_MI
;
5043 case NE
: return ARM_NE
;
5044 case EQ
: return ARM_EQ
;
5045 case GE
: return ARM_LE
;
5046 case GT
: return ARM_LT
;
5047 case LE
: return ARM_GE
;
5048 case LT
: return ARM_GT
;
5049 case GEU
: return ARM_LS
;
5050 case GTU
: return ARM_CC
;
5051 case LEU
: return ARM_CS
;
5052 case LTU
: return ARM_HI
;
5059 case LTU
: return ARM_CS
;
5060 case GEU
: return ARM_CC
;
5067 case NE
: return ARM_NE
;
5068 case EQ
: return ARM_EQ
;
5069 case GE
: return ARM_GE
;
5070 case GT
: return ARM_GT
;
5071 case LE
: return ARM_LE
;
5072 case LT
: return ARM_LT
;
5073 case GEU
: return ARM_CS
;
5074 case GTU
: return ARM_HI
;
5075 case LEU
: return ARM_LS
;
5076 case LTU
: return ARM_CC
;
5088 final_prescan_insn (insn
, opvec
, noperands
)
5093 /* BODY will hold the body of INSN. */
5094 register rtx body
= PATTERN (insn
);
5096 /* This will be 1 if trying to repeat the trick, and things need to be
5097 reversed if it appears to fail. */
5100 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
5101 taken are clobbered, even if the rtl suggests otherwise. It also
5102 means that we have to grub around within the jump expression to find
5103 out what the conditions are when the jump isn't taken. */
5104 int jump_clobbers
= 0;
5106 /* If we start with a return insn, we only succeed if we find another one. */
5107 int seeking_return
= 0;
5109 /* START_INSN will hold the insn from where we start looking. This is the
5110 first insn after the following code_label if REVERSE is true. */
5111 rtx start_insn
= insn
;
5113 /* If in state 4, check if the target branch is reached, in order to
5114 change back to state 0. */
5115 if (arm_ccfsm_state
== 4)
5117 if (insn
== arm_target_insn
)
5119 arm_target_insn
= NULL
;
5120 arm_ccfsm_state
= 0;
5125 /* If in state 3, it is possible to repeat the trick, if this insn is an
5126 unconditional branch to a label, and immediately following this branch
5127 is the previous target label which is only used once, and the label this
5128 branch jumps to is not too far off. */
5129 if (arm_ccfsm_state
== 3)
5131 if (simplejump_p (insn
))
5133 start_insn
= next_nonnote_insn (start_insn
);
5134 if (GET_CODE (start_insn
) == BARRIER
)
5136 /* XXX Isn't this always a barrier? */
5137 start_insn
= next_nonnote_insn (start_insn
);
5139 if (GET_CODE (start_insn
) == CODE_LABEL
5140 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5141 && LABEL_NUSES (start_insn
) == 1)
5146 else if (GET_CODE (body
) == RETURN
)
5148 start_insn
= next_nonnote_insn (start_insn
);
5149 if (GET_CODE (start_insn
) == BARRIER
)
5150 start_insn
= next_nonnote_insn (start_insn
);
5151 if (GET_CODE (start_insn
) == CODE_LABEL
5152 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5153 && LABEL_NUSES (start_insn
) == 1)
5165 if (arm_ccfsm_state
!= 0 && !reverse
)
5167 if (GET_CODE (insn
) != JUMP_INSN
)
5170 /* This jump might be paralleled with a clobber of the condition codes
5171 the jump should always come first */
5172 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5173 body
= XVECEXP (body
, 0, 0);
5176 /* If this is a conditional return then we don't want to know */
5177 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5178 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
5179 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
5180 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
5185 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5186 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
5189 int fail
= FALSE
, succeed
= FALSE
;
5190 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
5191 int then_not_else
= TRUE
;
5192 rtx this_insn
= start_insn
, label
= 0;
5194 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
5196 /* The code below is wrong for these, and I haven't time to
5197 fix it now. So we just do the safe thing and return. This
5198 whole function needs re-writing anyway. */
5203 /* Register the insn jumped to. */
5206 if (!seeking_return
)
5207 label
= XEXP (SET_SRC (body
), 0);
5209 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
5210 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
5211 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
5213 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
5214 then_not_else
= FALSE
;
5216 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
5218 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
5221 then_not_else
= FALSE
;
5226 /* See how many insns this branch skips, and what kind of insns. If all
5227 insns are okay, and the label or unconditional branch to the same
5228 label is not too far away, succeed. */
5229 for (insns_skipped
= 0;
5230 !fail
&& !succeed
&& insns_skipped
++ < MAX_INSNS_SKIPPED
;)
5234 this_insn
= next_nonnote_insn (this_insn
);
5238 scanbody
= PATTERN (this_insn
);
5240 switch (GET_CODE (this_insn
))
5243 /* Succeed if it is the target label, otherwise fail since
5244 control falls in from somewhere else. */
5245 if (this_insn
== label
)
5249 arm_ccfsm_state
= 2;
5250 this_insn
= next_nonnote_insn (this_insn
);
5253 arm_ccfsm_state
= 1;
5261 /* Succeed if the following insn is the target label.
5263 If return insns are used then the last insn in a function
5264 will be a barrier. */
5265 this_insn
= next_nonnote_insn (this_insn
);
5266 if (this_insn
&& this_insn
== label
)
5270 arm_ccfsm_state
= 2;
5271 this_insn
= next_nonnote_insn (this_insn
);
5274 arm_ccfsm_state
= 1;
5282 /* If using 32-bit addresses the cc is not preserved over
5286 /* Succeed if the following insn is the target label,
5287 or if the following two insns are a barrier and
5288 the target label. */
5289 this_insn
= next_nonnote_insn (this_insn
);
5290 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
5291 this_insn
= next_nonnote_insn (this_insn
);
5293 if (this_insn
&& this_insn
== label
5294 && insns_skipped
< MAX_INSNS_SKIPPED
)
5298 arm_ccfsm_state
= 2;
5299 this_insn
= next_nonnote_insn (this_insn
);
5302 arm_ccfsm_state
= 1;
5311 /* If this is an unconditional branch to the same label, succeed.
5312 If it is to another label, do nothing. If it is conditional,
5314 /* XXX Probably, the test for the SET and the PC are unnecessary. */
5316 if (GET_CODE (scanbody
) == SET
5317 && GET_CODE (SET_DEST (scanbody
)) == PC
)
5319 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
5320 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
5322 arm_ccfsm_state
= 2;
5325 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
5328 else if (GET_CODE (scanbody
) == RETURN
5331 arm_ccfsm_state
= 2;
5334 else if (GET_CODE (scanbody
) == PARALLEL
)
5336 switch (get_attr_conds (this_insn
))
5348 /* Instructions using or affecting the condition codes make it
5350 if ((GET_CODE (scanbody
) == SET
5351 || GET_CODE (scanbody
) == PARALLEL
)
5352 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
5362 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
5363 arm_target_label
= CODE_LABEL_NUMBER (label
);
5364 else if (seeking_return
|| arm_ccfsm_state
== 2)
5366 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
5368 this_insn
= next_nonnote_insn (this_insn
);
5369 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
5370 || GET_CODE (this_insn
) == CODE_LABEL
))
5375 /* Oh, dear! we ran off the end.. give up */
5376 recog (PATTERN (insn
), insn
, NULL_PTR
);
5377 arm_ccfsm_state
= 0;
5378 arm_target_insn
= NULL
;
5381 arm_target_insn
= this_insn
;
5390 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
5392 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
5393 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5394 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
5395 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5399 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
5402 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
5406 if (reverse
|| then_not_else
)
5407 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5409 /* restore recog_operand (getting the attributes of other insns can
5410 destroy this array, but final.c assumes that it remains intact
5411 across this call; since the insn has been recognized already we
5412 call recog direct). */
5413 recog (PATTERN (insn
), insn
, NULL_PTR
);
#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.
   NOTE(review): reconstructed from corrupted source text.  */

int arm_text_section_count = 1;

/* Return the AREA directive introducing a fresh numbered code section.  */
char *
aof_text_section ()
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  /* NOTE(review): the condition guarding the PIC attributes was on a
     dropped line; restored as flag_pic -- confirm.  */
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

/* Return the AREA directive introducing a fresh numbered data section.  */
char *
aof_data_section ()
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}

/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import *next;
  char *name;
};

static struct import *imports_list = NULL;

/* Record NAME as needing an IMPORT directive, unless already listed.
   Names are compared by pointer, relying on the identifier table
   returning a unique string per name.  */
void
aof_add_import (name)
     char *name;
{
  struct import *new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  new->name = name;
  imports_list = new;
}

/* Remove NAME from the pending-import list (it has been defined here).  */
void
aof_delete_import (name)
     char *name;
{
  struct import **old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}

int arm_main_function = 0;

/* Emit the IMPORT directives for all still-pending imports to F.  */
void
aof_dump_imports (f)
     FILE *f;
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Brining in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}
#endif /* AOF_ASSEMBLER */