1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
28 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
34 #include "insn-attr.h"
40 /* The maximum number of insns skipped which will be conditionalised if
   possible.  (NOTE(review): the closing line of this comment was lost in
   extraction; the word "possible" is restored from context -- confirm
   against the original source.)  */
42 #define MAX_INSNS_SKIPPED 5
44 /* Some function declarations. */
/* The stream the assembler output is written to.  Defined elsewhere in
   the compiler; declared here so the output routines in this file can
   write to it.  */
45 extern FILE *asm_out_file
;
/* Forward declarations of the static helpers defined later in this file
   (old-style PROTO macro wraps the parameter lists for pre-ANSI
   compatibility).  NOTE(review): two of the PROTO argument lists below
   were truncated by the extraction that produced this chunk; the
   surviving text is kept byte-for-byte and the gaps are marked inline.  */
47 static HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
48 static char *output_multi_immediate
PROTO ((rtx
*, char *, char *, int,
/* NOTE(review): the remainder of output_multi_immediate's parameter list
   (original line 49) is missing from this extraction.  */
50 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
51 HOST_WIDE_INT
, rtx
, rtx
, int, int));
52 static int arm_naked_function_p
PROTO ((tree
));
53 static void init_fpa_table
PROTO ((void));
54 static enum machine_mode select_dominance_cc_mode
PROTO ((enum rtx_code
, rtx
,
/* NOTE(review): the remainder of select_dominance_cc_mode's parameter
   list (original line 55) is missing from this extraction.  */
56 static HOST_WIDE_INT add_constant
PROTO ((rtx
, enum machine_mode
));
57 static void dump_table
PROTO ((rtx
));
58 static int fixit
PROTO ((rtx
, enum machine_mode
, int));
59 static rtx find_barrier
PROTO ((rtx
, int));
60 static int broken_move
PROTO ((rtx
));
61 static char *fp_const_from_val
PROTO ((REAL_VALUE_TYPE
*));
62 static int eliminate_lr2ip
PROTO ((rtx
*));
63 static char *shift_op
PROTO ((rtx
, HOST_WIDE_INT
*));
64 static int pattern_really_clobbers_lr
PROTO ((rtx
));
65 static int function_really_clobbers_lr
PROTO ((rtx
));
66 static void emit_multi_reg_push
PROTO ((int));
67 static void emit_sfm
PROTO ((int, int));
68 static enum arm_cond_code get_arm_condition_code
PROTO ((rtx
));
70 /* Define the information needed to generate branch insns. This is
71 stored from the compare operation. */
73 rtx arm_compare_op0
, arm_compare_op1
;
76 /* What type of cpu are we compiling for? */
77 enum processor_type arm_cpu
;
79 /* What type of floating point are we tuning for? */
80 enum floating_point_type arm_fpu
;
82 /* What type of floating point instructions are available? */
83 enum floating_point_type arm_fpu_arch
;
85 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
86 enum prog_mode_type arm_prgmode
;
88 /* Set by the -mfp=... option */
89 char *target_fp_name
= NULL
;
91 /* Nonzero if this is an "M" variant of the processor. */
92 int arm_fast_multiply
= 0;
94 /* Nonzero if this chip supports the ARM Architecture 4 extensions */
/* NOTE(review): the variable definitions belonging to the comment above
   and the one below (original lines 95-99; presumably arm_arch4 and
   tune_flags, both of which are assigned in arm_override_options further
   down this file) were lost in extraction.  */
97 /* Set to the features we should tune the code for (multiply speed etc). */
100 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
101 must report the mode of the memory reference from PRINT_OPERAND to
102 PRINT_OPERAND_ADDRESS. */
103 enum machine_mode output_memory_reference_mode
;
105 /* Nonzero if the prologue must setup `fp'. */
106 int current_function_anonymous_args
;
108 /* The register number to be used for the PIC offset register. */
109 int arm_pic_register
= 9;
111 /* Location counter of .text segment. */
112 int arm_text_location
= 0;
114 /* Set to one if we think that lr is only saved because of subroutine calls,
115 but all of these can be `put after' return insns */
116 int lr_save_eliminated
;
118 /* Set to 1 when a return insn is output, this means that the epilogue
121 static int return_used_this_function
;
/* Insn-count budget for synthesizing an integer constant inline; compared
   against arm_gen_constant's count in arm_split_constant below.  */
123 static int arm_constant_limit
= 3;
125 /* For an explanation of these variables, see final_prescan_insn below. */
127 enum arm_cond_code arm_current_cc
;
129 int arm_target_label
;
131 /* The condition codes of the ARM, and the inverse function. */
/* NOTE(review): the brace lines of this array initializer (original lines
   133 and 136) were lost in extraction; only the two rows of condition
   mnemonics survive.  */
132 char *arm_condition_codes
[] =
134 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
135 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
138 static enum arm_cond_code
get_arm_condition_code ();
141 /* Initialization code */
143 struct arm_cpu_select arm_select
[4] =
145 /* switch name, tune arch */
146 { (char *)0, "--with-cpu=", 1, 1 },
147 { (char *)0, "-mcpu=", 1, 1 },
148 { (char *)0, "-march=", 0, 1 },
149 { (char *)0, "-mtune=", 1, 0 },
/* Capability flag bits for the processor table (all_procs) below: each
   entry ORs together the features of that chip, and arm_override_options
   masks them to set arm_fast_multiply, arm_arch4, thumb-awareness etc.  */
152 #define FL_CO_PROC 0x01 /* Has external co-processor bus */
153 #define FL_FAST_MULT 0x02 /* Fast multiply */
154 #define FL_MODE26 0x04 /* 26-bit mode support */
155 #define FL_MODE32 0x08 /* 32-bit mode support */
156 #define FL_ARCH4 0x10 /* Architecture rel 4 */
157 #define FL_THUMB 0x20 /* Thumb aware */
162 enum processor_type type
;
166 /* Not all of these give usefully different compilation alternatives,
167 but there is no simple way of generalizing them. */
168 static struct processors all_procs
[] =
170 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
171 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
172 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
173 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
174 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
175 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
176 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
177 /* arm7m doesn't exist on its own, only in conjunction with D, (and I), but
178 those don't alter the code, so it is sometimes known as the arm7m */
179 {"arm7m", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
181 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
183 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
185 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
186 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
187 {"arm7100", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
188 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
189 /* Doesn't really have an external co-proc, but does have embedded fpu */
190 {"arm7500fe", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
191 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
192 | FL_ARCH4
| FL_THUMB
)},
193 {"arm8", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
195 {"arm810", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
197 {"strongarm", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
199 {"strongarm110", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
201 {"armv2", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
202 {"armv2a", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
203 {"armv3", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
204 {"armv3m", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
206 {"armv4", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
207 | FL_MODE26
| FL_ARCH4
)},
208 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
209 implementations that support it, so we will leave it out for now. */
210 {"armv4t", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
215 /* Fix up any incompatible options that the user has specified.
216 This has now turned into a maze. */
218 arm_override_options ()
220 int arm_thumb_aware
= 0;
223 struct arm_cpu_select
*ptr
;
224 static struct cpu_default
{
228 { TARGET_CPU_arm2
, "arm2" },
229 { TARGET_CPU_arm6
, "arm6" },
230 { TARGET_CPU_arm610
, "arm610" },
231 { TARGET_CPU_arm7dm
, "arm7dm" },
232 { TARGET_CPU_arm7500fe
, "arm7500fe" },
233 { TARGET_CPU_arm7tdmi
, "arm7tdmi" },
234 { TARGET_CPU_arm8
, "arm8" },
235 { TARGET_CPU_arm810
, "arm810" },
236 { TARGET_CPU_strongarm
, "strongarm" },
239 struct cpu_default
*def
;
241 /* Set the default. */
242 for (def
= &cpu_defaults
[0]; def
->name
; ++def
)
243 if (def
->cpu
== TARGET_CPU_DEFAULT
)
248 arm_select
[0].string
= def
->name
;
250 for (i
= 0; i
< sizeof (arm_select
) / sizeof (arm_select
[0]); i
++)
252 ptr
= &arm_select
[i
];
253 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
255 struct processors
*sel
;
257 for (sel
= all_procs
; sel
->name
!= NULL
; sel
++)
258 if (! strcmp (ptr
->string
, sel
->name
))
260 /* -march= is the only flag that can take an architecture
261 type, so if we match when the tune bit is set, the
262 option was invalid. */
265 if (sel
->type
== PROCESSOR_NONE
)
266 continue; /* Its an architecture, not a cpu */
269 tune_flags
= sel
->flags
;
278 if (sel
->name
== NULL
)
279 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
283 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
284 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
286 if (TARGET_POKE_FUNCTION_NAME
)
287 target_flags
|= ARM_FLAG_APCS_FRAME
;
290 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>");
293 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>");
295 if (TARGET_APCS_REENT
&& flag_pic
)
296 fatal ("-fpic and -mapcs-reent are incompatible");
298 if (TARGET_APCS_REENT
)
299 warning ("APCS reentrant code not supported.");
301 /* If stack checking is disabled, we can use r10 as the PIC register,
302 which keeps r9 available. */
303 if (flag_pic
&& ! TARGET_APCS_STACK
)
304 arm_pic_register
= 10;
306 /* Well, I'm about to have a go, but pic is NOT going to be compatible
307 with APCS reentrancy, since that requires too much support in the
308 assembler and linker, and the ARMASM assembler seems to lack some
309 required directives. */
311 warning ("Position independent code not supported. Ignored");
313 if (TARGET_APCS_FLOAT
)
314 warning ("Passing floating point arguments in fp regs not yet supported");
316 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
318 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
319 target_flags
|= ARM_FLAG_APCS_FRAME
;
322 /* Default is to tune for an FPA */
325 /* Default value for floating point code... if no co-processor
326 bus, then schedule for emulated floating point. Otherwise,
327 assume the user has an FPA.
328 Note: this does not prevent use of floating point instructions,
329 -msoft-float does that. */
330 if (tune_flags
& FL_CO_PROC
== 0)
333 arm_fast_multiply
= (flags
& FL_FAST_MULT
) != 0;
334 arm_arch4
= (flags
& FL_ARCH4
) != 0;
335 arm_thumb_aware
= (flags
& FL_THUMB
) != 0;
339 if (strcmp (target_fp_name
, "2") == 0)
340 arm_fpu_arch
= FP_SOFT2
;
341 else if (strcmp (target_fp_name
, "3") == 0)
342 arm_fpu_arch
= FP_HARD
;
344 fatal ("Invalid floating point emulation option: -mfpe=%s",
348 arm_fpu_arch
= FP_DEFAULT
;
350 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
352 warning ("This processor variant does not support Thumb interworking");
353 target_flags
&= ~ARM_FLAG_THUMB
;
356 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
359 /* For arm2/3 there is no need to do any scheduling if there is only
360 a floating point emulator, or we are doing software floating-point. */
361 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
362 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
364 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
368 /* Return 1 if it is possible to return using a single instruction */
375 if (!reload_completed
||current_function_pretend_args_size
376 || current_function_anonymous_args
377 || ((get_frame_size () + current_function_outgoing_args_size
!= 0)
378 && !(TARGET_APCS
|| frame_pointer_needed
)))
381 /* Can't be done if interworking with Thumb, and any registers have been
383 if (TARGET_THUMB_INTERWORK
)
384 for (regno
= 0; regno
< 16; regno
++)
385 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
388 /* Can't be done if any of the FPU regs are pushed, since this also
390 for (regno
= 16; regno
< 24; regno
++)
391 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
394 /* If a function is naked, don't use the "return" insn. */
395 if (arm_naked_function_p (current_function_decl
))
401 /* Return TRUE if int I is a valid immediate ARM constant. */
407 unsigned HOST_WIDE_INT mask
= ~0xFF;
409 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
410 be all zero, or all one. */
411 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff) != 0
412 && ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff)
413 != (((HOST_WIDE_INT
) -1) & ~(unsigned HOST_WIDE_INT
) 0xffffffff)))
416 /* Fast return for 0 and powers of 2 */
417 if ((i
& (i
- 1)) == 0)
422 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
425 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
426 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
427 } while (mask
!= ~0xFF);
432 /* Return true if I is a valid constant for the operation CODE. */
434 const_ok_for_op (i
, code
, mode
)
437 enum machine_mode mode
;
439 if (const_ok_for_arm (i
))
445 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
447 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
453 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
460 /* Emit a sequence of insns to handle a large constant.
461 CODE is the code of the operation required, it can be any of SET, PLUS,
462 IOR, AND, XOR, MINUS;
463 MODE is the mode in which the operation is being performed;
464 VAL is the integer to operate on;
465 SOURCE is the other operand (a register, or a null-pointer for SET);
466 SUBTARGETS means it is safe to create scratch registers if that will
467 either produce a simpler sequence, or we will want to cse the values.
468 Return value is the number of insns emitted. */
471 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
473 enum machine_mode mode
;
479 if (subtargets
|| code
== SET
480 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
481 && REGNO (target
) != REGNO (source
)))
485 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
486 > arm_constant_limit
+ (code
!= SET
))
490 /* Currently SET is the only monadic value for CODE, all
491 the rest are diadic. */
492 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (val
)));
497 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
499 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (val
)));
500 /* For MINUS, the value is subtracted from, since we never
501 have subtraction of a constant. */
503 emit_insn (gen_rtx_SET (VOIDmode
, target
,
504 gen_rtx (code
, mode
, temp
, source
)));
506 emit_insn (gen_rtx_SET (VOIDmode
, target
,
507 gen_rtx (code
, mode
, source
, temp
)));
513 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
516 /* As above, but extra parameter GENERATE which, if clear, suppresses
519 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
521 enum machine_mode mode
;
531 int can_negate_initial
= 0;
534 int num_bits_set
= 0;
535 int set_sign_bit_copies
= 0;
536 int clear_sign_bit_copies
= 0;
537 int clear_zero_bit_copies
= 0;
538 int set_zero_bit_copies
= 0;
541 unsigned HOST_WIDE_INT temp1
, temp2
;
542 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
544 /* find out which operations are safe for a given CODE. Also do a quick
545 check for degenerate cases; these can occur when DImode operations
557 can_negate_initial
= 1;
561 if (remainder
== 0xffffffff)
564 emit_insn (gen_rtx_SET (VOIDmode
, target
,
565 GEN_INT (ARM_SIGN_EXTEND (val
))));
570 if (reload_completed
&& rtx_equal_p (target
, source
))
573 emit_insn (gen_rtx_SET (VOIDmode
, target
, source
));
582 emit_insn (gen_rtx_SET (VOIDmode
, target
, const0_rtx
));
585 if (remainder
== 0xffffffff)
587 if (reload_completed
&& rtx_equal_p (target
, source
))
590 emit_insn (gen_rtx_SET (VOIDmode
, target
, source
));
599 if (reload_completed
&& rtx_equal_p (target
, source
))
602 emit_insn (gen_rtx_SET (VOIDmode
, target
, source
));
605 if (remainder
== 0xffffffff)
608 emit_insn (gen_rtx_SET (VOIDmode
, target
,
609 gen_rtx_NOT (mode
, source
)));
613 /* We don't know how to handle this yet below. */
617 /* We treat MINUS as (val - source), since (source - val) is always
618 passed as (source + (-val)). */
622 emit_insn (gen_rtx_SET (VOIDmode
, target
,
623 gen_rtx_NEG (mode
, source
)));
626 if (const_ok_for_arm (val
))
629 emit_insn (gen_rtx_SET (VOIDmode
, target
,
630 gen_rtx_MINUS (mode
, GEN_INT (val
),
642 /* If we can do it in one insn get out quickly */
643 if (const_ok_for_arm (val
)
644 || (can_negate_initial
&& const_ok_for_arm (-val
))
645 || (can_invert
&& const_ok_for_arm (~val
)))
648 emit_insn (gen_rtx_SET (VOIDmode
, target
,
649 (source
? gen_rtx (code
, mode
, source
,
656 /* Calculate a few attributes that may be useful for specific
659 for (i
= 31; i
>= 0; i
--)
661 if ((remainder
& (1 << i
)) == 0)
662 clear_sign_bit_copies
++;
667 for (i
= 31; i
>= 0; i
--)
669 if ((remainder
& (1 << i
)) != 0)
670 set_sign_bit_copies
++;
675 for (i
= 0; i
<= 31; i
++)
677 if ((remainder
& (1 << i
)) == 0)
678 clear_zero_bit_copies
++;
683 for (i
= 0; i
<= 31; i
++)
685 if ((remainder
& (1 << i
)) != 0)
686 set_zero_bit_copies
++;
694 /* See if we can do this by sign_extending a constant that is known
695 to be negative. This is a good, way of doing it, since the shift
696 may well merge into a subsequent insn. */
697 if (set_sign_bit_copies
> 1)
700 (temp1
= ARM_SIGN_EXTEND (remainder
701 << (set_sign_bit_copies
- 1))))
705 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
706 emit_insn (gen_rtx_SET (VOIDmode
, new_src
,
708 emit_insn (gen_ashrsi3 (target
, new_src
,
709 GEN_INT (set_sign_bit_copies
- 1)));
713 /* For an inverted constant, we will need to set the low bits,
714 these will be shifted out of harm's way. */
715 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
716 if (const_ok_for_arm (~temp1
))
720 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
721 emit_insn (gen_rtx_SET (VOIDmode
, new_src
,
723 emit_insn (gen_ashrsi3 (target
, new_src
,
724 GEN_INT (set_sign_bit_copies
- 1)));
730 /* See if we can generate this by setting the bottom (or the top)
731 16 bits, and then shifting these into the other half of the
732 word. We only look for the simplest cases, to do more would cost
733 too much. Be careful, however, not to generate this when the
734 alternative would take fewer insns. */
735 if (val
& 0xffff0000)
737 temp1
= remainder
& 0xffff0000;
738 temp2
= remainder
& 0x0000ffff;
740 /* Overlaps outside this range are best done using other methods. */
741 for (i
= 9; i
< 24; i
++)
743 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
744 && ! const_ok_for_arm (temp2
))
746 insns
= arm_gen_constant (code
, mode
, temp2
,
747 new_src
= (subtargets
750 source
, subtargets
, generate
);
753 emit_insn (gen_rtx_SET
756 gen_rtx_ASHIFT (mode
, source
,
763 /* Don't duplicate cases already considered. */
764 for (i
= 17; i
< 24; i
++)
766 if (((temp1
| (temp1
>> i
)) == remainder
)
767 && ! const_ok_for_arm (temp1
))
769 insns
= arm_gen_constant (code
, mode
, temp1
,
770 new_src
= (subtargets
773 source
, subtargets
, generate
);
777 (gen_rtx_SET (VOIDmode
, target
,
780 gen_rtx_LSHIFTRT (mode
, source
,
791 /* If we have IOR or XOR, and the constant can be loaded in a
792 single instruction, and we can find a temporary to put it in,
793 then this can be done in two instructions instead of 3-4. */
795 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
797 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
801 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
803 emit_insn (gen_rtx_SET (VOIDmode
, sub
, GEN_INT (val
)));
804 emit_insn (gen_rtx_SET (VOIDmode
, target
,
805 gen_rtx (code
, mode
, source
, sub
)));
814 if (set_sign_bit_copies
> 8
815 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
819 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
820 rtx shift
= GEN_INT (set_sign_bit_copies
);
822 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
824 gen_rtx_ASHIFT (mode
,
827 emit_insn (gen_rtx_SET (VOIDmode
, target
,
829 gen_rtx_LSHIFTRT (mode
, sub
,
835 if (set_zero_bit_copies
> 8
836 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
840 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
841 rtx shift
= GEN_INT (set_zero_bit_copies
);
843 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
845 gen_rtx_LSHIFTRT (mode
,
848 emit_insn (gen_rtx_SET (VOIDmode
, target
,
850 gen_rtx_ASHIFT (mode
, sub
,
856 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
860 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
861 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
862 gen_rtx_NOT (mode
, source
)));
865 sub
= gen_reg_rtx (mode
);
866 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
867 gen_rtx_AND (mode
, source
,
869 emit_insn (gen_rtx_SET (VOIDmode
, target
,
870 gen_rtx_NOT (mode
, sub
)));
877 /* See if two shifts will do 2 or more insn's worth of work. */
878 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
880 HOST_WIDE_INT shift_mask
= ((0xffffffff
881 << (32 - clear_sign_bit_copies
))
886 if ((remainder
| shift_mask
) != 0xffffffff)
890 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
891 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
892 new_source
, source
, subtargets
, 1);
896 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
897 new_source
, source
, subtargets
, 0);
902 shift
= GEN_INT (clear_sign_bit_copies
);
903 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
904 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
905 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
911 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
913 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
917 if ((remainder
| shift_mask
) != 0xffffffff)
921 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
922 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
923 new_source
, source
, subtargets
, 1);
927 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
928 new_source
, source
, subtargets
, 0);
933 shift
= GEN_INT (clear_zero_bit_copies
);
934 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
935 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
936 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
948 for (i
= 0; i
< 32; i
++)
949 if (remainder
& (1 << i
))
952 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
953 remainder
= (~remainder
) & 0xffffffff;
954 else if (code
== PLUS
&& num_bits_set
> 16)
955 remainder
= (-remainder
) & 0xffffffff;
962 /* Now try and find a way of doing the job in either two or three
964 We start by looking for the largest block of zeros that are aligned on
965 a 2-bit boundary, we then fill up the temps, wrapping around to the
966 top of the word when we drop off the bottom.
967 In the worst case this code should produce no more than four insns. */
970 int best_consecutive_zeros
= 0;
972 for (i
= 0; i
< 32; i
+= 2)
974 int consecutive_zeros
= 0;
976 if (! (remainder
& (3 << i
)))
978 while ((i
< 32) && ! (remainder
& (3 << i
)))
980 consecutive_zeros
+= 2;
983 if (consecutive_zeros
> best_consecutive_zeros
)
985 best_consecutive_zeros
= consecutive_zeros
;
986 best_start
= i
- consecutive_zeros
;
992 /* Now start emitting the insns, starting with the one with the highest
993 bit set: we do this so that the smallest number will be emitted last;
994 this is more likely to be combinable with addressing insns. */
1002 if (remainder
& (3 << (i
- 2)))
1007 temp1
= remainder
& ((0x0ff << end
)
1008 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
1009 remainder
&= ~temp1
;
1014 emit_insn (gen_rtx_SET (VOIDmode
,
1015 new_src
= (subtargets
1016 ? gen_reg_rtx (mode
)
1019 ? ~temp1
: temp1
)));
1023 else if (code
== MINUS
)
1026 emit_insn (gen_rtx_SET (VOIDmode
,
1027 new_src
= (subtargets
1028 ? gen_reg_rtx (mode
)
1030 gen_rtx (code
, mode
, GEN_INT (temp1
),
1037 emit_insn (gen_rtx_SET (VOIDmode
,
1038 new_src
= (remainder
1040 ? gen_reg_rtx (mode
)
1043 gen_rtx (code
, mode
, source
,
1044 GEN_INT (can_invert
? ~temp1
1055 } while (remainder
);
1060 /* Canonicalize a comparison so that we are more likely to recognize it.
1061 This can be done for a few constant compares, where we can make the
1062 immediate value easier to load. */
1064 arm_canonicalize_comparison (code
, op1
)
1068 HOST_WIDE_INT i
= INTVAL (*op1
);
1078 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1) - 1)
1079 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1081 *op1
= GEN_INT (i
+1);
1082 return code
== GT
? GE
: LT
;
1088 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1))
1089 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1091 *op1
= GEN_INT (i
-1);
1092 return code
== GE
? GT
: LE
;
1099 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1101 *op1
= GEN_INT (i
+ 1);
1102 return code
== GTU
? GEU
: LTU
;
1109 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1111 *op1
= GEN_INT (i
- 1);
1112 return code
== GEU
? GTU
: LEU
;
1124 /* Handle aggregates that are not laid out in a BLKmode element.
1125 This is a sub-element of RETURN_IN_MEMORY. */
1127 arm_return_in_memory (type
)
1130 if (TREE_CODE (type
) == RECORD_TYPE
)
1134 /* For a struct, we can return in a register if every element was a
1136 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1137 if (TREE_CODE (field
) != FIELD_DECL
1138 || ! DECL_BIT_FIELD_TYPE (field
))
1143 else if (TREE_CODE (type
) == UNION_TYPE
)
1147 /* Unions can be returned in registers if every element is
1148 integral, or can be returned in an integer register. */
1149 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1151 if (TREE_CODE (field
) != FIELD_DECL
1152 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
1153 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
1154 || FLOAT_TYPE_P (TREE_TYPE (field
)))
1159 /* XXX Not sure what should be done for other aggregates, so put them in
1165 legitimate_pic_operand_p (x
)
1168 if (CONSTANT_P (x
) && flag_pic
1169 && (GET_CODE (x
) == SYMBOL_REF
1170 || (GET_CODE (x
) == CONST
1171 && GET_CODE (XEXP (x
, 0)) == PLUS
1172 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
1179 legitimize_pic_address (orig
, mode
, reg
)
1181 enum machine_mode mode
;
1184 if (GET_CODE (orig
) == SYMBOL_REF
)
1186 rtx pic_ref
, address
;
1192 if (reload_in_progress
|| reload_completed
)
1195 reg
= gen_reg_rtx (Pmode
);
1200 #ifdef AOF_ASSEMBLER
1201 /* The AOF assembler can generate relocations for these directly, and
1202 understands that the PIC register has to be added into the offset.
1204 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
1207 address
= gen_reg_rtx (Pmode
);
1211 emit_insn (gen_pic_load_addr (address
, orig
));
1213 pic_ref
= gen_rtx_MEM (Pmode
,
1214 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
1216 RTX_UNCHANGING_P (pic_ref
) = 1;
1217 insn
= emit_move_insn (reg
, pic_ref
);
1219 current_function_uses_pic_offset_table
= 1;
1220 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1222 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EQUAL
, orig
,
1226 else if (GET_CODE (orig
) == CONST
)
1230 if (GET_CODE (XEXP (orig
, 0)) == PLUS
1231 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
1236 if (reload_in_progress
|| reload_completed
)
1239 reg
= gen_reg_rtx (Pmode
);
1242 if (GET_CODE (XEXP (orig
, 0)) == PLUS
)
1244 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
1245 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
1246 base
== reg
? 0 : reg
);
1251 if (GET_CODE (offset
) == CONST_INT
)
1253 /* The base register doesn't really matter, we only want to
1254 test the index for the appropriate mode. */
1255 GO_IF_LEGITIMATE_INDEX (mode
, 0, offset
, win
);
1257 if (! reload_in_progress
&& ! reload_completed
)
1258 offset
= force_reg (Pmode
, offset
);
1263 if (GET_CODE (offset
) == CONST_INT
)
1264 return plus_constant_for_output (base
, INTVAL (offset
));
1267 if (GET_MODE_SIZE (mode
) > 4
1268 && (GET_MODE_CLASS (mode
) == MODE_INT
1269 || TARGET_SOFT_FLOAT
))
1271 emit_insn (gen_addsi3 (reg
, base
, offset
));
1275 return gen_rtx_PLUS (Pmode
, base
, offset
);
1277 else if (GET_CODE (orig
) == LABEL_REF
)
1278 current_function_uses_pic_offset_table
= 1;
1297 #ifndef AOF_ASSEMBLER
1298 rtx l1
, pic_tmp
, pic_tmp2
, seq
;
1299 rtx global_offset_table
;
1301 if (current_function_uses_pic_offset_table
== 0)
1308 l1
= gen_label_rtx ();
1310 global_offset_table
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
1311 /* The PC contains 'dot'+8, but the label L1 is on the next
1312 instruction, so the offset is only 'dot'+4. */
1313 pic_tmp
= plus_constant (gen_rtx_LABEL_REF (Pmode
, l1
),
1315 pic_tmp2
= gen_rtx_CONST (VOIDmode
,
1316 gen_rtx_PLUS (Pmode
, global_offset_table
, pc_rtx
));
1318 pic_rtx
= gen_rtx_CONST (Pmode
, gen_rtx_MINUS (Pmode
, pic_tmp2
, pic_tmp
));
1320 emit_insn (gen_pic_load_addr (pic_offset_table_rtx
, pic_rtx
));
1321 emit_jump_insn (gen_pic_add_dot_plus_eight(l1
, pic_offset_table_rtx
));
1324 seq
= gen_sequence ();
1326 emit_insn_after (seq
, get_insns ());
1328 /* Need to emit this whether or not we obey regdecls,
1329 since setjmp/longjmp can cause life info to screw up. */
1330 emit_insn (gen_rtx_USE (VOIDmode
, pic_offset_table_rtx
));
1331 #endif /* AOF_ASSEMBLER */
1334 #define REG_OR_SUBREG_REG(X) \
1335 (GET_CODE (X) == REG \
1336 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
1338 #define REG_OR_SUBREG_RTX(X) \
1339 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
1341 #define ARM_FRAME_RTX(X) \
1342 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1343 || (X) == arg_pointer_rtx)
1346 arm_rtx_costs (x
, code
, outer_code
)
1348 enum rtx_code code
, outer_code
;
1350 enum machine_mode mode
= GET_MODE (x
);
1351 enum rtx_code subcode
;
1357 /* Memory costs quite a lot for the first word, but subsequent words
1358 load at the equivalent of a single insn each. */
1359 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1360 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1367 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1374 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1376 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1377 + ((GET_CODE (XEXP (x
, 0)) == REG
1378 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1379 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1381 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1382 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1383 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1385 + ((GET_CODE (XEXP (x
, 1)) == REG
1386 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1387 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1388 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1393 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1394 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1395 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1396 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1399 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1400 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1401 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1402 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1404 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1405 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1406 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1409 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1410 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1411 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1412 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1413 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1414 || subcode
== ROTATE
|| subcode
== ROTATERT
1416 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1417 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1418 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1419 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1420 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1421 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1422 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1427 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1428 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1429 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1430 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1431 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1435 case AND
: case XOR
: case IOR
:
1438 /* Normally the frame registers will be spilt into reg+const during
1439 reload, so it is a bad idea to combine them with other instructions,
1440 since then they might not be moved outside of loops. As a compromise
1441 we allow integration with ops that have a constant as their second
1443 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1444 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1445 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1446 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1447 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1451 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1452 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1453 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1454 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1457 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1458 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1459 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1460 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1461 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1464 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1465 return (1 + extra_cost
1466 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1467 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1468 || subcode
== ROTATE
|| subcode
== ROTATERT
1470 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1471 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1472 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1473 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1474 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1475 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1481 /* There is no point basing this on the tuning, since it is always the
1482 fast variant if it exists at all */
1483 if (arm_fast_multiply
&& mode
== DImode
1484 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1485 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1486 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1489 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1493 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1495 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1496 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1497 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1499 /* Tune as appropriate */
1500 int booth_unit_size
= ((tune_flags
& FL_FAST_MULT
) ? 8 : 2);
1502 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1504 i
>>= booth_unit_size
;
1511 return (((tune_flags
& FL_FAST_MULT
) ? 8 : 30)
1512 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1513 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1516 if (arm_fast_multiply
&& mode
== SImode
1517 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
1518 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
1519 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
1520 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
1521 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
1522 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
1527 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1528 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1532 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1534 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1537 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1545 return 4 + (mode
== DImode
? 4 : 0);
1548 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1549 return (4 + (mode
== DImode
? 4 : 0)
1550 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1553 switch (GET_MODE (XEXP (x
, 0)))
1556 return (1 + (mode
== DImode
? 4 : 0)
1557 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1560 return (4 + (mode
== DImode
? 4 : 0)
1561 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1564 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1574 arm_adjust_cost (insn
, link
, dep
, cost
)
1582 if ((i_pat
= single_set (insn
)) != NULL
1583 && GET_CODE (SET_SRC (i_pat
)) == MEM
1584 && (d_pat
= single_set (dep
)) != NULL
1585 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
1587 /* This is a load after a store, there is no conflict if the load reads
1588 from a cached area. Assume that loads from the stack, and from the
1589 constant pool are cached, and that others will miss. This is a
1592 /* debug_rtx (insn);
1595 fprintf (stderr, "costs %d\n", cost); */
1597 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat
), 0))
1598 || reg_mentioned_p (stack_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1599 || reg_mentioned_p (frame_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1600 || reg_mentioned_p (hard_frame_pointer_rtx
,
1601 XEXP (SET_SRC (i_pat
), 0)))
1603 /* fprintf (stderr, "***** Now 1\n"); */
1611 /* This code has been fixed for cross compilation. */
1613 static int fpa_consts_inited
= 0;
1615 char *strings_fpa
[8] = {
1617 "4", "5", "0.5", "10"
1620 static REAL_VALUE_TYPE values_fpa
[8];
1628 for (i
= 0; i
< 8; i
++)
1630 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1634 fpa_consts_inited
= 1;
1637 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1640 const_double_rtx_ok_for_fpu (x
)
1646 if (!fpa_consts_inited
)
1649 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1650 if (REAL_VALUE_MINUS_ZERO (r
))
1653 for (i
= 0; i
< 8; i
++)
1654 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1660 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1663 neg_const_double_rtx_ok_for_fpu (x
)
1669 if (!fpa_consts_inited
)
1672 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1673 r
= REAL_VALUE_NEGATE (r
);
1674 if (REAL_VALUE_MINUS_ZERO (r
))
1677 for (i
= 0; i
< 8; i
++)
1678 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1684 /* Predicates for `match_operand' and `match_operator'. */
1686 /* s_register_operand is the same as register_operand, but it doesn't accept
1689 This function exists because at the time it was put in it led to better
1690 code. SUBREG(MEM) always needs a reload in the places where
1691 s_register_operand is used, and this seemed to lead to excessive
1695 s_register_operand (op
, mode
)
1697 enum machine_mode mode
;
1699 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1702 if (GET_CODE (op
) == SUBREG
)
1703 op
= SUBREG_REG (op
);
1705 /* We don't consider registers whose class is NO_REGS
1706 to be a register operand. */
1707 return (GET_CODE (op
) == REG
1708 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1709 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1712 /* Only accept reg, subreg(reg), const_int. */
1715 reg_or_int_operand (op
, mode
)
1717 enum machine_mode mode
;
1719 if (GET_CODE (op
) == CONST_INT
)
1722 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1725 if (GET_CODE (op
) == SUBREG
)
1726 op
= SUBREG_REG (op
);
1728 /* We don't consider registers whose class is NO_REGS
1729 to be a register operand. */
1730 return (GET_CODE (op
) == REG
1731 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1732 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1735 /* Return 1 if OP is an item in memory, given that we are in reload. */
1738 reload_memory_operand (op
, mode
)
1740 enum machine_mode mode
;
1742 int regno
= true_regnum (op
);
1744 return (! CONSTANT_P (op
)
1746 || (GET_CODE (op
) == REG
1747 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1750 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1753 arm_rhs_operand (op
, mode
)
1755 enum machine_mode mode
;
1757 return (s_register_operand (op
, mode
)
1758 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1761 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1765 arm_rhsm_operand (op
, mode
)
1767 enum machine_mode mode
;
1769 return (s_register_operand (op
, mode
)
1770 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1771 || memory_operand (op
, mode
));
1774 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1775 constant that is valid when negated. */
1778 arm_add_operand (op
, mode
)
1780 enum machine_mode mode
;
1782 return (s_register_operand (op
, mode
)
1783 || (GET_CODE (op
) == CONST_INT
1784 && (const_ok_for_arm (INTVAL (op
))
1785 || const_ok_for_arm (-INTVAL (op
)))));
1789 arm_not_operand (op
, mode
)
1791 enum machine_mode mode
;
1793 return (s_register_operand (op
, mode
)
1794 || (GET_CODE (op
) == CONST_INT
1795 && (const_ok_for_arm (INTVAL (op
))
1796 || const_ok_for_arm (~INTVAL (op
)))));
1799 /* Return TRUE if the operand is a memory reference which contains an
1800 offsettable address. */
1802 offsettable_memory_operand (op
, mode
)
1804 enum machine_mode mode
;
1806 if (mode
== VOIDmode
)
1807 mode
= GET_MODE (op
);
1809 return (mode
== GET_MODE (op
)
1810 && GET_CODE (op
) == MEM
1811 && offsettable_address_p (reload_completed
| reload_in_progress
,
1812 mode
, XEXP (op
, 0)));
1815 /* Return TRUE if the operand is a memory reference which is, or can be
1816 made word aligned by adjusting the offset. */
1818 alignable_memory_operand (op
, mode
)
1820 enum machine_mode mode
;
1824 if (mode
== VOIDmode
)
1825 mode
= GET_MODE (op
);
1827 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
1832 return ((GET_CODE (reg
= op
) == REG
1833 || (GET_CODE (op
) == SUBREG
1834 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
1835 || (GET_CODE (op
) == PLUS
1836 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1837 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
1838 || (GET_CODE (XEXP (op
, 0)) == SUBREG
1839 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
1840 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
1843 /* Similar to s_register_operand, but does not allow hard integer
1846 f_register_operand (op
, mode
)
1848 enum machine_mode mode
;
1850 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1853 if (GET_CODE (op
) == SUBREG
)
1854 op
= SUBREG_REG (op
);
1856 /* We don't consider registers whose class is NO_REGS
1857 to be a register operand. */
1858 return (GET_CODE (op
) == REG
1859 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1860 || REGNO_REG_CLASS (REGNO (op
)) == FPU_REGS
));
1863 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1866 fpu_rhs_operand (op
, mode
)
1868 enum machine_mode mode
;
1870 if (s_register_operand (op
, mode
))
1872 else if (GET_CODE (op
) == CONST_DOUBLE
)
1873 return (const_double_rtx_ok_for_fpu (op
));
1879 fpu_add_operand (op
, mode
)
1881 enum machine_mode mode
;
1883 if (s_register_operand (op
, mode
))
1885 else if (GET_CODE (op
) == CONST_DOUBLE
)
1886 return (const_double_rtx_ok_for_fpu (op
)
1887 || neg_const_double_rtx_ok_for_fpu (op
));
1892 /* Return nonzero if OP is a constant power of two. */
1895 power_of_two_operand (op
, mode
)
1897 enum machine_mode mode
;
1899 if (GET_CODE (op
) == CONST_INT
)
1901 HOST_WIDE_INT value
= INTVAL(op
);
1902 return value
!= 0 && (value
& (value
- 1)) == 0;
1907 /* Return TRUE for a valid operand of a DImode operation.
1908 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1909 Note that this disallows MEM(REG+REG), but allows
1910 MEM(PRE/POST_INC/DEC(REG)). */
1913 di_operand (op
, mode
)
1915 enum machine_mode mode
;
1917 if (s_register_operand (op
, mode
))
1920 switch (GET_CODE (op
))
1927 return memory_address_p (DImode
, XEXP (op
, 0));
1934 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1935 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1936 Note that this disallows MEM(REG+REG), but allows
1937 MEM(PRE/POST_INC/DEC(REG)). */
1940 soft_df_operand (op
, mode
)
1942 enum machine_mode mode
;
1944 if (s_register_operand (op
, mode
))
1947 switch (GET_CODE (op
))
1953 return memory_address_p (DFmode
, XEXP (op
, 0));
1960 /* Return TRUE for valid index operands. */
1963 index_operand (op
, mode
)
1965 enum machine_mode mode
;
1967 return (s_register_operand(op
, mode
)
1968 || (immediate_operand (op
, mode
)
1969 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1972 /* Return TRUE for valid shifts by a constant. This also accepts any
1973 power of two on the (somewhat overly relaxed) assumption that the
1974 shift operator in this case was a mult. */
1977 const_shift_operand (op
, mode
)
1979 enum machine_mode mode
;
1981 return (power_of_two_operand (op
, mode
)
1982 || (immediate_operand (op
, mode
)
1983 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1986 /* Return TRUE for arithmetic operators which can be combined with a multiply
1990 shiftable_operator (x
, mode
)
1992 enum machine_mode mode
;
1994 if (GET_MODE (x
) != mode
)
1998 enum rtx_code code
= GET_CODE (x
);
2000 return (code
== PLUS
|| code
== MINUS
2001 || code
== IOR
|| code
== XOR
|| code
== AND
);
2005 /* Return TRUE for shift operators. */
2008 shift_operator (x
, mode
)
2010 enum machine_mode mode
;
2012 if (GET_MODE (x
) != mode
)
2016 enum rtx_code code
= GET_CODE (x
);
2019 return power_of_two_operand (XEXP (x
, 1));
2021 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
2022 || code
== ROTATERT
);
2026 int equality_operator (x
, mode
)
2028 enum machine_mode mode
;
2030 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
2033 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
2036 minmax_operator (x
, mode
)
2038 enum machine_mode mode
;
2040 enum rtx_code code
= GET_CODE (x
);
2042 if (GET_MODE (x
) != mode
)
2045 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
2048 /* return TRUE if x is EQ or NE */
2050 /* Return TRUE if this is the condition code register, if we aren't given
2051 a mode, accept any class CCmode register */
2054 cc_register (x
, mode
)
2056 enum machine_mode mode
;
2058 if (mode
== VOIDmode
)
2060 mode
= GET_MODE (x
);
2061 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2065 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2071 /* Return TRUE if this is the condition code register, if we aren't given
2072 a mode, accept any class CCmode register which indicates a dominance
2076 dominant_cc_register (x
, mode
)
2078 enum machine_mode mode
;
2080 if (mode
== VOIDmode
)
2082 mode
= GET_MODE (x
);
2083 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2087 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
2088 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
2089 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
2090 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
2091 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
2094 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2100 /* Return TRUE if X references a SYMBOL_REF. */
2102 symbol_mentioned_p (x
)
2108 if (GET_CODE (x
) == SYMBOL_REF
)
2111 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2112 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2118 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2119 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
2122 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
2129 /* Return TRUE if X references a LABEL_REF. */
2131 label_mentioned_p (x
)
2137 if (GET_CODE (x
) == LABEL_REF
)
2140 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2141 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2147 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2148 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
2151 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
2162 enum rtx_code code
= GET_CODE (x
);
2166 else if (code
== SMIN
)
2168 else if (code
== UMIN
)
2170 else if (code
== UMAX
)
2176 /* Return 1 if memory locations are adjacent */
2179 adjacent_mem_locations (a
, b
)
2182 int val0
= 0, val1
= 0;
2185 if ((GET_CODE (XEXP (a
, 0)) == REG
2186 || (GET_CODE (XEXP (a
, 0)) == PLUS
2187 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
2188 && (GET_CODE (XEXP (b
, 0)) == REG
2189 || (GET_CODE (XEXP (b
, 0)) == PLUS
2190 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
2192 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
2194 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
2195 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
2198 reg0
= REGNO (XEXP (a
, 0));
2199 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
2201 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
2202 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
2205 reg1
= REGNO (XEXP (b
, 0));
2206 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
2211 /* Return 1 if OP is a load multiple operation. It is known to be
2212 parallel and the first section will be tested. */
2215 load_multiple_operation (op
, mode
)
2217 enum machine_mode mode
;
2219 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2222 HOST_WIDE_INT i
= 1, base
= 0;
2226 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2229 /* Check to see if this might be a write-back */
2230 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2235 /* Now check it more carefully */
2236 if (GET_CODE (SET_DEST (elt
)) != REG
2237 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2238 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2239 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2240 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2241 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2242 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2243 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2244 != REGNO (SET_DEST (elt
)))
2250 /* Perform a quick check so we don't blow up below. */
2252 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2253 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
2254 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
2257 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
2258 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
2260 for (; i
< count
; i
++)
2262 rtx elt
= XVECEXP (op
, 0, i
);
2264 if (GET_CODE (elt
) != SET
2265 || GET_CODE (SET_DEST (elt
)) != REG
2266 || GET_MODE (SET_DEST (elt
)) != SImode
2267 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
2268 || GET_CODE (SET_SRC (elt
)) != MEM
2269 || GET_MODE (SET_SRC (elt
)) != SImode
2270 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
2271 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
2272 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
2273 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
2280 /* Return 1 if OP is a store multiple operation. It is known to be
2281 parallel and the first section will be tested. */
2284 store_multiple_operation (op
, mode
)
2286 enum machine_mode mode
;
2288 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2291 HOST_WIDE_INT i
= 1, base
= 0;
2295 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2298 /* Check to see if this might be a write-back */
2299 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2304 /* Now check it more carefully */
2305 if (GET_CODE (SET_DEST (elt
)) != REG
2306 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2307 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2308 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2309 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2310 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2311 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2312 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2313 != REGNO (SET_DEST (elt
)))
2319 /* Perform a quick check so we don't blow up below. */
2321 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2322 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
2323 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
2326 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
2327 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
2329 for (; i
< count
; i
++)
2331 elt
= XVECEXP (op
, 0, i
);
2333 if (GET_CODE (elt
) != SET
2334 || GET_CODE (SET_SRC (elt
)) != REG
2335 || GET_MODE (SET_SRC (elt
)) != SImode
2336 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2337 || GET_CODE (SET_DEST (elt
)) != MEM
2338 || GET_MODE (SET_DEST (elt
)) != SImode
2339 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2340 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2341 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2342 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2350 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2355 HOST_WIDE_INT
*load_offset
;
2357 int unsorted_regs
[4];
2358 HOST_WIDE_INT unsorted_offsets
[4];
2363 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2364 extended if required. */
2365 if (nops
< 2 || nops
> 4)
2368 /* Loop over the operands and check that the memory references are
2369 suitable (ie immediate offsets from the same base register). At
2370 the same time, extract the target register, and the memory
2372 for (i
= 0; i
< nops
; i
++)
2377 /* Convert a subreg of a mem into the mem itself. */
2378 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2379 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2381 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2384 /* Don't reorder volatile memory references; it doesn't seem worth
2385 looking for the case where the order is ok anyway. */
2386 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2389 offset
= const0_rtx
;
2391 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2392 || (GET_CODE (reg
) == SUBREG
2393 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2394 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2395 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2397 || (GET_CODE (reg
) == SUBREG
2398 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2399 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2404 base_reg
= REGNO(reg
);
2405 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2406 ? REGNO (operands
[i
])
2407 : REGNO (SUBREG_REG (operands
[i
])));
2412 if (base_reg
!= REGNO (reg
))
2413 /* Not addressed from the same base register. */
2416 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2417 ? REGNO (operands
[i
])
2418 : REGNO (SUBREG_REG (operands
[i
])));
2419 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2423 /* If it isn't an integer register, or if it overwrites the
2424 base register but isn't the last insn in the list, then
2425 we can't do this. */
2426 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2427 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2430 unsorted_offsets
[i
] = INTVAL (offset
);
2433 /* Not a suitable memory address. */
2437 /* All the useful information has now been extracted from the
2438 operands into unsorted_regs and unsorted_offsets; additionally,
2439 order[0] has been set to the lowest numbered register in the
2440 list. Sort the registers into order, and check that the memory
2441 offsets are ascending and adjacent. */
2443 for (i
= 1; i
< nops
; i
++)
2447 order
[i
] = order
[i
- 1];
2448 for (j
= 0; j
< nops
; j
++)
2449 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2450 && (order
[i
] == order
[i
- 1]
2451 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2454 /* Have we found a suitable register? if not, one must be used more
2456 if (order
[i
] == order
[i
- 1])
2459 /* Is the memory address adjacent and ascending? */
2460 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2468 for (i
= 0; i
< nops
; i
++)
2469 regs
[i
] = unsorted_regs
[order
[i
]];
2471 *load_offset
= unsorted_offsets
[order
[0]];
2474 if (unsorted_offsets
[order
[0]] == 0)
2475 return 1; /* ldmia */
2477 if (unsorted_offsets
[order
[0]] == 4)
2478 return 2; /* ldmib */
2480 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2481 return 3; /* ldmda */
2483 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2484 return 4; /* ldmdb */
2486 /* Can't do it without setting up the offset, only do this if it takes
2487 no more than one insn. */
2488 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2489 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2493 emit_ldm_seq (operands
, nops
)
2499 HOST_WIDE_INT offset
;
2503 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2506 strcpy (buf
, "ldm%?ia\t");
2510 strcpy (buf
, "ldm%?ib\t");
2514 strcpy (buf
, "ldm%?da\t");
2518 strcpy (buf
, "ldm%?db\t");
2523 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2524 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2527 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2528 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2530 output_asm_insn (buf
, operands
);
2532 strcpy (buf
, "ldm%?ia\t");
2539 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2540 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2542 for (i
= 1; i
< nops
; i
++)
2543 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2544 reg_names
[regs
[i
]]);
2546 strcat (buf
, "}\t%@ phole ldm");
2548 output_asm_insn (buf
, operands
);
2553 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2558 HOST_WIDE_INT
*load_offset
;
2560 int unsorted_regs
[4];
2561 HOST_WIDE_INT unsorted_offsets
[4];
2566 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2567 extended if required. */
2568 if (nops
< 2 || nops
> 4)
2571 /* Loop over the operands and check that the memory references are
2572 suitable (ie immediate offsets from the same base register). At
2573 the same time, extract the target register, and the memory
2575 for (i
= 0; i
< nops
; i
++)
2580 /* Convert a subreg of a mem into the mem itself. */
2581 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2582 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2584 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2587 /* Don't reorder volatile memory references; it doesn't seem worth
2588 looking for the case where the order is ok anyway. */
2589 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2592 offset
= const0_rtx
;
2594 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2595 || (GET_CODE (reg
) == SUBREG
2596 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2597 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2598 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2600 || (GET_CODE (reg
) == SUBREG
2601 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2602 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2607 base_reg
= REGNO(reg
);
2608 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2609 ? REGNO (operands
[i
])
2610 : REGNO (SUBREG_REG (operands
[i
])));
2615 if (base_reg
!= REGNO (reg
))
2616 /* Not addressed from the same base register. */
2619 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2620 ? REGNO (operands
[i
])
2621 : REGNO (SUBREG_REG (operands
[i
])));
2622 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2626 /* If it isn't an integer register, then we can't do this. */
2627 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2630 unsorted_offsets
[i
] = INTVAL (offset
);
2633 /* Not a suitable memory address. */
2637 /* All the useful information has now been extracted from the
2638 operands into unsorted_regs and unsorted_offsets; additionally,
2639 order[0] has been set to the lowest numbered register in the
2640 list. Sort the registers into order, and check that the memory
2641 offsets are ascending and adjacent. */
2643 for (i
= 1; i
< nops
; i
++)
2647 order
[i
] = order
[i
- 1];
2648 for (j
= 0; j
< nops
; j
++)
2649 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2650 && (order
[i
] == order
[i
- 1]
2651 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2654 /* Have we found a suitable register? if not, one must be used more
2656 if (order
[i
] == order
[i
- 1])
2659 /* Is the memory address adjacent and ascending? */
2660 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2668 for (i
= 0; i
< nops
; i
++)
2669 regs
[i
] = unsorted_regs
[order
[i
]];
2671 *load_offset
= unsorted_offsets
[order
[0]];
2674 if (unsorted_offsets
[order
[0]] == 0)
2675 return 1; /* stmia */
2677 if (unsorted_offsets
[order
[0]] == 4)
2678 return 2; /* stmib */
2680 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2681 return 3; /* stmda */
2683 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2684 return 4; /* stmdb */
2690 emit_stm_seq (operands
, nops
)
2696 HOST_WIDE_INT offset
;
2700 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2703 strcpy (buf
, "stm%?ia\t");
2707 strcpy (buf
, "stm%?ib\t");
2711 strcpy (buf
, "stm%?da\t");
2715 strcpy (buf
, "stm%?db\t");
2722 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2723 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2725 for (i
= 1; i
< nops
; i
++)
2726 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2727 reg_names
[regs
[i
]]);
2729 strcat (buf
, "}\t%@ phole stm");
2731 output_asm_insn (buf
, operands
);
2736 multi_register_push (op
, mode
)
2738 enum machine_mode mode
;
2740 if (GET_CODE (op
) != PARALLEL
2741 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2742 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
2743 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
2750 /* Routines for use with attributes */
2752 /* Return nonzero if ATTR is a valid attribute for DECL.
2753 ATTRIBUTES are any existing attributes and ARGS are the arguments
2756 Supported attributes:
2758 naked: don't output any prologue or epilogue code, the user is assumed
2759 to do the right thing. */
2762 arm_valid_machine_decl_attribute (decl
, attributes
, attr
, args
)
2768 if (args
!= NULL_TREE
)
2771 if (is_attribute_p ("naked", attr
))
2772 return TREE_CODE (decl
) == FUNCTION_DECL
;
2776 /* Return non-zero if FUNC is a naked function. */
2779 arm_naked_function_p (func
)
2784 if (TREE_CODE (func
) != FUNCTION_DECL
)
2787 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
2788 return a
!= NULL_TREE
;
2791 /* Routines for use in generating RTL */
2794 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
, unchanging_p
,
2806 int sign
= up
? 1 : -1;
2809 result
= gen_rtx_PARALLEL (VOIDmode
,
2810 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2813 XVECEXP (result
, 0, 0)
2814 = gen_rtx_SET (GET_MODE (from
), from
,
2815 plus_constant (from
, count
* 4 * sign
));
2820 for (j
= 0; i
< count
; i
++, j
++)
2822 mem
= gen_rtx_MEM (SImode
, plus_constant (from
, j
* 4 * sign
));
2823 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2824 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2826 XVECEXP (result
, 0, i
)
2827 = gen_rtx_SET (VOIDmode
, gen_rtx_REG (SImode
, base_regno
+ j
), mem
);
2831 XVECEXP (result
, 0, i
) = gen_rtx_CLOBBER (SImode
, from
);
2837 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
, unchanging_p
,
2849 int sign
= up
? 1 : -1;
2852 result
= gen_rtx_PARALLEL (VOIDmode
,
2853 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2856 XVECEXP (result
, 0, 0)
2857 = gen_rtx_SET (GET_MODE (to
), to
,
2858 plus_constant (to
, count
* 4 * sign
));
2863 for (j
= 0; i
< count
; i
++, j
++)
2865 mem
= gen_rtx_MEM (SImode
, plus_constant (to
, j
* 4 * sign
));
2866 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2867 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2869 XVECEXP (result
, 0, i
)
2870 = gen_rtx_SET (VOIDmode
, mem
, gen_rtx_REG (SImode
, base_regno
+ j
));
2874 XVECEXP (result
, 0, i
) = gen_rtx_CLOBBER (SImode
, to
);
2880 arm_gen_movstrqi (operands
)
2883 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
2886 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
2887 rtx part_bytes_reg
= NULL
;
2889 int dst_unchanging_p
, dst_in_struct_p
, src_unchanging_p
, src_in_struct_p
;
2890 extern int optimize
;
2892 if (GET_CODE (operands
[2]) != CONST_INT
2893 || GET_CODE (operands
[3]) != CONST_INT
2894 || INTVAL (operands
[2]) > 64
2895 || INTVAL (operands
[3]) & 3)
2898 st_dst
= XEXP (operands
[0], 0);
2899 st_src
= XEXP (operands
[1], 0);
2901 dst_unchanging_p
= RTX_UNCHANGING_P (operands
[0]);
2902 dst_in_struct_p
= MEM_IN_STRUCT_P (operands
[0]);
2903 src_unchanging_p
= RTX_UNCHANGING_P (operands
[1]);
2904 src_in_struct_p
= MEM_IN_STRUCT_P (operands
[1]);
2906 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2907 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2909 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2910 out_words_to_go
= INTVAL (operands
[2]) / 4;
2911 last_bytes
= INTVAL (operands
[2]) & 3;
2913 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2914 part_bytes_reg
= gen_rtx_REG (SImode
, (in_words_to_go
- 1) & 3);
2916 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2918 if (in_words_to_go
> 4)
2919 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
2920 src_unchanging_p
, src_in_struct_p
));
2922 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
2923 FALSE
, src_unchanging_p
,
2926 if (out_words_to_go
)
2928 if (out_words_to_go
> 4)
2929 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
2932 else if (out_words_to_go
!= 1)
2933 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
2941 mem
= gen_rtx_MEM (SImode
, dst
);
2942 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2943 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2944 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
2945 if (last_bytes
!= 0)
2946 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2950 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2951 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2954 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2955 if (out_words_to_go
)
2959 mem
= gen_rtx_MEM (SImode
, src
);
2960 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2961 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2962 emit_move_insn (sreg
= gen_reg_rtx (SImode
), mem
);
2963 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2965 mem
= gen_rtx_MEM (SImode
, dst
);
2966 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2967 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2968 emit_move_insn (mem
, sreg
);
2969 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2972 if (in_words_to_go
) /* Sanity check */
2978 if (in_words_to_go
< 0)
2981 mem
= gen_rtx_MEM (SImode
, src
);
2982 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2983 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2984 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
2987 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2989 rtx tmp
= gen_reg_rtx (SImode
);
2991 if (part_bytes_reg
== NULL
)
2994 /* The bytes we want are in the top end of the word */
2995 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2996 GEN_INT (8 * (4 - last_bytes
))));
2997 part_bytes_reg
= tmp
;
3001 mem
= gen_rtx_MEM (QImode
, plus_constant (dst
, last_bytes
- 1));
3002 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3003 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3004 emit_move_insn (mem
, gen_rtx_SUBREG (QImode
, part_bytes_reg
, 0));
3007 tmp
= gen_reg_rtx (SImode
);
3008 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3009 part_bytes_reg
= tmp
;
3018 if (part_bytes_reg
== NULL
)
3021 mem
= gen_rtx_MEM (QImode
, dst
);
3022 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3023 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3024 emit_move_insn (mem
, gen_rtx_SUBREG (QImode
, part_bytes_reg
, 0));
3027 rtx tmp
= gen_reg_rtx (SImode
);
3029 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
3030 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3031 part_bytes_reg
= tmp
;
3039 /* Generate a memory reference for a half word, such that it will be loaded
3040 into the top 16 bits of the word. We can assume that the address is
3041 known to be alignable and of the form reg, or plus (reg, const). */
3043 gen_rotated_half_load (memref
)
3046 HOST_WIDE_INT offset
= 0;
3047 rtx base
= XEXP (memref
, 0);
3049 if (GET_CODE (base
) == PLUS
)
3051 offset
= INTVAL (XEXP (base
, 1));
3052 base
= XEXP (base
, 0);
3055 /* If we aren't allowed to generate unaligned addresses, then fail. */
3056 if (TARGET_SHORT_BY_BYTES
3057 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
3060 base
= gen_rtx_MEM (SImode
, plus_constant (base
, offset
& ~2));
3062 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
3065 return gen_rtx_ROTATE (SImode
, base
, GEN_INT (16));
3068 static enum machine_mode
3069 select_dominance_cc_mode (op
, x
, y
, cond_or
)
3073 HOST_WIDE_INT cond_or
;
3075 enum rtx_code cond1
, cond2
;
3078 /* Currently we will probably get the wrong result if the individual
3079 comparisons are not simple. This also ensures that it is safe to
3080 reverse a comparison if necessary. */
3081 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
3083 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
3088 cond1
= reverse_condition (cond1
);
3090 /* If the comparisons are not equal, and one doesn't dominate the other,
3091 then we can't do this. */
3093 && ! comparison_dominates_p (cond1
, cond2
)
3094 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
3099 enum rtx_code temp
= cond1
;
3107 if (cond2
== EQ
|| ! cond_or
)
3112 case LE
: return CC_DLEmode
;
3113 case LEU
: return CC_DLEUmode
;
3114 case GE
: return CC_DGEmode
;
3115 case GEU
: return CC_DGEUmode
;
3121 if (cond2
== LT
|| ! cond_or
)
3130 if (cond2
== GT
|| ! cond_or
)
3139 if (cond2
== LTU
|| ! cond_or
)
3148 if (cond2
== GTU
|| ! cond_or
)
3156 /* The remaining cases only occur when both comparisons are the
3178 arm_select_cc_mode (op
, x
, y
)
3183 /* All floating point compares return CCFP if it is an equality
3184 comparison, and CCFPE otherwise. */
3185 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3186 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
3188 /* A compare with a shifted operand. Because of canonicalization, the
3189 comparison will have to be swapped when we emit the assembler. */
3190 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
3191 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3192 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
3193 || GET_CODE (x
) == ROTATERT
))
3196 /* This is a special case that is used by combine to allow a
3197 comparison of a shifted byte load to be split into a zero-extend
3198 followed by a comparison of the shifted integer (only valid for
3199 equalities and unsigned inequalities). */
3200 if (GET_MODE (x
) == SImode
3201 && GET_CODE (x
) == ASHIFT
3202 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
3203 && GET_CODE (XEXP (x
, 0)) == SUBREG
3204 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
3205 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
3206 && (op
== EQ
|| op
== NE
3207 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
3208 && GET_CODE (y
) == CONST_INT
)
3211 /* An operation that sets the condition codes as a side-effect, the
3212 V flag is not set correctly, so we can only use comparisons where
3213 this doesn't matter. (For LT and GE we can use "mi" and "pl"
3215 if (GET_MODE (x
) == SImode
3217 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
3218 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
3219 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
3220 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
3221 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
3222 || GET_CODE (x
) == LSHIFTRT
3223 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3224 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
3227 /* A construct for a conditional compare, if the false arm contains
3228 0, then both conditions must be true, otherwise either condition
3229 must be true. Not all conditions are possible, so CCmode is
3230 returned if it can't be done. */
3231 if (GET_CODE (x
) == IF_THEN_ELSE
3232 && (XEXP (x
, 2) == const0_rtx
3233 || XEXP (x
, 2) == const1_rtx
)
3234 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
3235 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
3236 return select_dominance_cc_mode (op
, XEXP (x
, 0), XEXP (x
, 1),
3237 INTVAL (XEXP (x
, 2)));
3239 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
3242 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
3243 && GET_CODE (x
) == PLUS
3244 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
3250 /* X and Y are two things to compare using CODE. Emit the compare insn and
3251 return the rtx for register 0 in the proper mode. FP means this is a
3252 floating point compare: I don't think that it is needed on the arm. */
3255 gen_compare_reg (code
, x
, y
, fp
)
3259 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
3260 rtx cc_reg
= gen_rtx_REG (mode
, 24);
3262 emit_insn (gen_rtx_SET (VOIDmode
, cc_reg
,
3263 gen_rtx_COMPARE (mode
, x
, y
)));
3269 arm_reload_in_hi (operands
)
3272 rtx base
= find_replacement (&XEXP (operands
[1], 0));
3274 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx_MEM (QImode
, base
)));
3275 /* Handle the case where the address is too complex to be offset by 1. */
3276 if (GET_CODE (base
) == MINUS
3277 || (GET_CODE (base
) == PLUS
&& GET_CODE (XEXP (base
, 1)) != CONST_INT
))
3279 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[0]));
3281 emit_insn (gen_rtx_SET (VOIDmode
, base_plus
, base
));
3285 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode
, operands
[0], 0),
3286 gen_rtx_MEM (QImode
,
3287 plus_constant (base
, 1))));
3288 if (BYTES_BIG_ENDIAN
)
3289 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_SUBREG (SImode
, operands
[0], 0),
3290 gen_rtx_IOR (SImode
,
3293 gen_rtx_SUBREG (SImode
, operands
[0], 0),
3297 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_SUBREG (SImode
, operands
[0], 0),
3298 gen_rtx_IOR (SImode
,
3299 gen_rtx_ASHIFT (SImode
, operands
[2],
3301 gen_rtx_SUBREG (SImode
, operands
[0],
3306 arm_reload_out_hi (operands
)
3309 rtx base
= find_replacement (&XEXP (operands
[0], 0));
3311 if (BYTES_BIG_ENDIAN
)
3313 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (base
, 1)),
3314 gen_rtx_SUBREG (QImode
, operands
[1], 0)));
3315 emit_insn (gen_lshrsi3 (operands
[2],
3316 gen_rtx_SUBREG (SImode
, operands
[1], 0),
3318 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, base
),
3319 gen_rtx_SUBREG (QImode
, operands
[2], 0)));
3323 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, base
),
3324 gen_rtx_SUBREG (QImode
, operands
[1], 0)));
3325 emit_insn (gen_lshrsi3 (operands
[2],
3326 gen_rtx_SUBREG (SImode
, operands
[1], 0),
3328 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (base
, 1)),
3329 gen_rtx_SUBREG (QImode
, operands
[2], 0)));
3333 /* Routines for manipulation of the constant pool. */
3334 /* This is unashamedly hacked from the version in sh.c, since the problem is
3335 extremely similar. */
3337 /* Arm instructions cannot load a large constant into a register,
3338 constants have to come from a pc relative load. The reference of a pc
3339 relative load instruction must be less than 1k infront of the instruction.
3340 This means that we often have to dump a constant inside a function, and
3341 generate code to branch around it.
3343 It is important to minimize this, since the branches will slow things
3344 down and make things bigger.
3346 Worst case code looks like:
3362 We fix this by performing a scan before scheduling, which notices which
3363 instructions need to have their operands fetched from the constant table
3364 and builds the table.
3369 scan, find an instruction which needs a pcrel move. Look forward, find th
3370 last barrier which is within MAX_COUNT bytes of the requirement.
3371 If there isn't one, make one. Process all the instructions between
3372 the find and the barrier.
3374 In the above example, we can tell that L3 is within 1k of L1, so
3375 the first move can be shrunk from the 2 insn+constant sequence into
3376 just 1 insn, and the constant moved to L3 to make:
3387 Then the second move becomes the target for the shortening process.
3393 rtx value
; /* Value in table */
3394 HOST_WIDE_INT next_offset
;
3395 enum machine_mode mode
; /* Mode of value */
3398 /* The maximum number of constants that can fit into one pool, since
3399 the pc relative range is 0...1020 bytes and constants are at least 4
3402 #define MAX_POOL_SIZE (1020/4)
3403 static pool_node pool_vector
[MAX_POOL_SIZE
];
3404 static int pool_size
;
3405 static rtx pool_vector_label
;
3407 /* Add a constant to the pool and return its label. */
3408 static HOST_WIDE_INT
3409 add_constant (x
, mode
)
3411 enum machine_mode mode
;
3415 HOST_WIDE_INT offset
;
3417 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3418 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3419 x
= get_pool_constant (XEXP (x
, 0));
3420 #ifndef AOF_ASSEMBLER
3421 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
3422 x
= XVECEXP (x
, 0, 0);
3425 #ifdef AOF_ASSEMBLER
3426 /* PIC Symbol references need to be converted into offsets into the
3428 if (flag_pic
&& GET_CODE (x
) == SYMBOL_REF
)
3429 x
= aof_pic_entry (x
);
3430 #endif /* AOF_ASSEMBLER */
3432 /* First see if we've already got it */
3433 for (i
= 0; i
< pool_size
; i
++)
3435 if (GET_CODE (x
) == pool_vector
[i
].value
->code
3436 && mode
== pool_vector
[i
].mode
)
3438 if (GET_CODE (x
) == CODE_LABEL
)
3440 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
3443 if (rtx_equal_p (x
, pool_vector
[i
].value
))
3444 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
3448 /* Need a new one */
3449 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
3452 pool_vector_label
= gen_label_rtx ();
3454 pool_vector
[pool_size
].next_offset
3455 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
3457 pool_vector
[pool_size
].value
= x
;
3458 pool_vector
[pool_size
].mode
= mode
;
3463 /* Output the literal table */
3470 scan
= emit_label_after (gen_label_rtx (), scan
);
3471 scan
= emit_insn_after (gen_align_4 (), scan
);
3472 scan
= emit_label_after (pool_vector_label
, scan
);
3474 for (i
= 0; i
< pool_size
; i
++)
3476 pool_node
*p
= pool_vector
+ i
;
3478 switch (GET_MODE_SIZE (p
->mode
))
3481 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
3485 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
3494 scan
= emit_insn_after (gen_consttable_end (), scan
);
3495 scan
= emit_barrier_after (scan
);
3499 /* Non zero if the src operand needs to be fixed up */
3501 fixit (src
, mode
, destreg
)
3503 enum machine_mode mode
;
3506 if (CONSTANT_P (src
))
3508 if (GET_CODE (src
) == CONST_INT
)
3509 return (! const_ok_for_arm (INTVAL (src
))
3510 && ! const_ok_for_arm (~INTVAL (src
)));
3511 if (GET_CODE (src
) == CONST_DOUBLE
)
3512 return (GET_MODE (src
) == VOIDmode
3514 || (! const_double_rtx_ok_for_fpu (src
)
3515 && ! neg_const_double_rtx_ok_for_fpu (src
)));
3516 return symbol_mentioned_p (src
);
3518 #ifndef AOF_ASSEMBLER
3519 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
3523 return (mode
== SImode
&& GET_CODE (src
) == MEM
3524 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
3525 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
3528 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
3530 find_barrier (from
, max_count
)
3535 rtx found_barrier
= 0;
3538 while (from
&& count
< max_count
)
3540 if (GET_CODE (from
) == BARRIER
)
3541 found_barrier
= from
;
3543 /* Count the length of this insn */
3544 if (GET_CODE (from
) == INSN
3545 && GET_CODE (PATTERN (from
)) == SET
3546 && CONSTANT_P (SET_SRC (PATTERN (from
)))
3547 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
3549 rtx src
= SET_SRC (PATTERN (from
));
3553 count
+= get_attr_length (from
);
3556 from
= NEXT_INSN (from
);
3561 /* We didn't find a barrier in time to
3562 dump our stuff, so we'll make one */
3563 rtx label
= gen_label_rtx ();
3566 from
= PREV_INSN (last
);
3568 from
= get_last_insn ();
3570 /* Walk back to be just before any jump */
3571 while (GET_CODE (from
) == JUMP_INSN
3572 || GET_CODE (from
) == NOTE
3573 || GET_CODE (from
) == CODE_LABEL
)
3574 from
= PREV_INSN (from
);
3576 from
= emit_jump_insn_after (gen_jump (label
), from
);
3577 JUMP_LABEL (from
) = label
;
3578 found_barrier
= emit_barrier_after (from
);
3579 emit_label_after (label
, found_barrier
);
3580 return found_barrier
;
3583 return found_barrier
;
3586 /* Non zero if the insn is a move instruction which needs to be fixed. */
3591 if (!INSN_DELETED_P (insn
)
3592 && GET_CODE (insn
) == INSN
3593 && GET_CODE (PATTERN (insn
)) == SET
)
3595 rtx pat
= PATTERN (insn
);
3596 rtx src
= SET_SRC (pat
);
3597 rtx dst
= SET_DEST (pat
);
3599 enum machine_mode mode
= GET_MODE (dst
);
3603 if (GET_CODE (dst
) == REG
)
3604 destreg
= REGNO (dst
);
3605 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
3606 destreg
= REGNO (SUBREG_REG (dst
));
3608 return fixit (src
, mode
, destreg
);
3622 /* The ldr instruction can work with up to a 4k offset, and most constants
3623 will be loaded with one of these instructions; however, the adr
3624 instruction and the ldf instructions only work with a 1k offset. This
3625 code needs to be rewritten to use the 4k offset when possible, and to
3626 adjust when a 1k offset is needed. For now we just use a 1k offset
3630 /* Floating point operands can't work further than 1024 bytes from the
3631 PC, so to make things simple we restrict all loads for such functions.
3633 if (TARGET_HARD_FLOAT
)
3634 for (regno
= 16; regno
< 24; regno
++)
3635 if (regs_ever_live
[regno
])
3644 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
3646 if (broken_move (insn
))
3648 /* This is a broken move instruction, scan ahead looking for
3649 a barrier to stick the constant table behind */
3651 rtx barrier
= find_barrier (insn
, count_size
);
3653 /* Now find all the moves between the points and modify them */
3654 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
3656 if (broken_move (scan
))
3658 /* This is a broken move instruction, add it to the pool */
3659 rtx pat
= PATTERN (scan
);
3660 rtx src
= SET_SRC (pat
);
3661 rtx dst
= SET_DEST (pat
);
3662 enum machine_mode mode
= GET_MODE (dst
);
3663 HOST_WIDE_INT offset
;
3669 /* If this is an HImode constant load, convert it into
3670 an SImode constant load. Since the register is always
3671 32 bits this is safe. We have to do this, since the
3672 load pc-relative instruction only does a 32-bit load. */
3676 if (GET_CODE (dst
) != REG
)
3678 PUT_MODE (dst
, SImode
);
3681 offset
= add_constant (src
, mode
);
3682 addr
= plus_constant (gen_rtx_LABEL_REF (VOIDmode
,
3686 /* For wide moves to integer regs we need to split the
3687 address calculation off into a separate insn, so that
3688 the load can then be done with a load-multiple. This is
3689 safe, since we have already noted the length of such
3690 insns to be 8, and we are immediately over-writing the
3691 scratch we have grabbed with the final result. */
3692 if (GET_MODE_SIZE (mode
) > 4
3693 && (scratch
= REGNO (dst
)) < 16)
3695 rtx reg
= gen_rtx_REG (SImode
, scratch
);
3696 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
3701 newsrc
= gen_rtx_MEM (mode
, addr
);
3703 /* Build a jump insn wrapper around the move instead
3704 of an ordinary insn, because we want to have room for
3705 the target label rtx in fld[7], which an ordinary
3706 insn doesn't have. */
3707 newinsn
= emit_jump_insn_after (gen_rtx_SET (VOIDmode
,
3710 JUMP_LABEL (newinsn
) = pool_vector_label
;
3712 /* But it's still an ordinary insn */
3713 PUT_CODE (newinsn
, INSN
);
3720 dump_table (barrier
);
3727 /* Routines to output assembly language. */
3729 /* If the rtx is the correct value then return the string of the number.
3730 In this way we can ensure that valid double constants are generated even
3731 when cross compiling. */
3733 fp_immediate_constant (x
)
3739 if (!fpa_consts_inited
)
3742 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3743 for (i
= 0; i
< 8; i
++)
3744 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
3745 return strings_fpa
[i
];
3750 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
3752 fp_const_from_val (r
)
3757 if (! fpa_consts_inited
)
3760 for (i
= 0; i
< 8; i
++)
3761 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
3762 return strings_fpa
[i
];
3767 /* Output the operands of a LDM/STM instruction to STREAM.
3768 MASK is the ARM register set mask of which only bits 0-15 are important.
3769 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
3770 must follow the register list. */
3773 print_multi_reg (stream
, instr
, mask
, hat
)
3779 int not_first
= FALSE
;
3781 fputc ('\t', stream
);
3782 fprintf (stream
, instr
, REGISTER_PREFIX
);
3783 fputs (", {", stream
);
3784 for (i
= 0; i
< 16; i
++)
3785 if (mask
& (1 << i
))
3788 fprintf (stream
, ", ");
3789 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
3793 fprintf (stream
, "}%s\n", hat
? "^" : "");
3796 /* Output a 'call' insn. */
3799 output_call (operands
)
3802 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
3804 if (REGNO (operands
[0]) == 14)
3806 operands
[0] = gen_rtx_REG (SImode
, 12);
3807 output_asm_insn ("mov%?\t%0, %|lr", operands
);
3809 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3810 output_asm_insn ("mov%?\t%|pc, %0", operands
);
3818 int something_changed
= 0;
3820 int code
= GET_CODE (x0
);
3827 if (REGNO (x0
) == 14)
3829 *x
= gen_rtx_REG (SImode
, 12);
3834 /* Scan through the sub-elements and change any references there */
3835 fmt
= GET_RTX_FORMAT (code
);
3836 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3838 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
3839 else if (fmt
[i
] == 'E')
3840 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
3841 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
3842 return something_changed
;
3846 /* Output a 'call' insn that is a reference in memory. */
3849 output_call_mem (operands
)
3852 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
3853 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
3855 if (eliminate_lr2ip (&operands
[0]))
3856 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
3858 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3859 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
3864 /* Output a move from arm registers to an fpu registers.
3865 OPERANDS[0] is an fpu register.
3866 OPERANDS[1] is the first registers of an arm register pair. */
3869 output_mov_long_double_fpu_from_arm (operands
)
3872 int arm_reg0
= REGNO (operands
[1]);
3878 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
3879 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
3880 ops
[2] = gen_rtx_REG (SImode
, 2 + arm_reg0
);
3882 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3883 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
3887 /* Output a move from an fpu register to arm registers.
3888 OPERANDS[0] is the first registers of an arm register pair.
3889 OPERANDS[1] is an fpu register. */
3892 output_mov_long_double_arm_from_fpu (operands
)
3895 int arm_reg0
= REGNO (operands
[0]);
3901 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
3902 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
3903 ops
[2] = gen_rtx_REG (SImode
, 2 + arm_reg0
);
3905 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
3906 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3910 /* Output a move from arm registers to arm registers of a long double
3911 OPERANDS[0] is the destination.
3912 OPERANDS[1] is the source. */
3914 output_mov_long_double_arm_from_arm (operands
)
3917 /* We have to be careful here because the two might overlap */
3918 int dest_start
= REGNO (operands
[0]);
3919 int src_start
= REGNO (operands
[1]);
3923 if (dest_start
< src_start
)
3925 for (i
= 0; i
< 3; i
++)
3927 ops
[0] = gen_rtx_REG (SImode
, dest_start
+ i
);
3928 ops
[1] = gen_rtx_REG (SImode
, src_start
+ i
);
3929 output_asm_insn ("mov%?\t%0, %1", ops
);
3934 for (i
= 2; i
>= 0; i
--)
3936 ops
[0] = gen_rtx_REG (SImode
, dest_start
+ i
);
3937 ops
[1] = gen_rtx_REG (SImode
, src_start
+ i
);
3938 output_asm_insn ("mov%?\t%0, %1", ops
);
3946 /* Output a move from arm registers to an fpu registers.
3947 OPERANDS[0] is an fpu register.
3948 OPERANDS[1] is the first registers of an arm register pair. */
3951 output_mov_double_fpu_from_arm (operands
)
3954 int arm_reg0
= REGNO (operands
[1]);
3959 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
3960 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
3961 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
3962 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
3966 /* Output a move from an fpu register to arm registers.
3967 OPERANDS[0] is the first registers of an arm register pair.
3968 OPERANDS[1] is an fpu register. */
3971 output_mov_double_arm_from_fpu (operands
)
3974 int arm_reg0
= REGNO (operands
[0]);
3980 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
3981 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
3982 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
3983 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
3987 /* Output a move between double words.
3988 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
3989 or MEM<-REG and all MEMs must be offsettable addresses. */
3992 output_move_double (operands
)
3995 enum rtx_code code0
= GET_CODE (operands
[0]);
3996 enum rtx_code code1
= GET_CODE (operands
[1]);
4001 int reg0
= REGNO (operands
[0]);
4003 otherops
[0] = gen_rtx_REG (SImode
, 1 + reg0
);
4006 int reg1
= REGNO (operands
[1]);
4010 /* Ensure the second source is not overwritten */
4011 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
4012 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
4014 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
4016 else if (code1
== CONST_DOUBLE
)
4018 if (GET_MODE (operands
[1]) == DFmode
)
4021 union real_extract u
;
4023 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
4025 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
4026 otherops
[1] = GEN_INT(l
[1]);
4027 operands
[1] = GEN_INT(l
[0]);
4029 else if (GET_MODE (operands
[1]) != VOIDmode
)
4031 else if (WORDS_BIG_ENDIAN
)
4034 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4035 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4040 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4041 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4043 output_mov_immediate (operands
);
4044 output_mov_immediate (otherops
);
4046 else if (code1
== CONST_INT
)
4048 #if HOST_BITS_PER_WIDE_INT > 32
4049 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
4050 what the upper word is. */
4051 if (WORDS_BIG_ENDIAN
)
4053 otherops
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4054 operands
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4058 otherops
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4059 operands
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4062 /* Sign extend the intval into the high-order word */
4063 if (WORDS_BIG_ENDIAN
)
4065 otherops
[1] = operands
[1];
4066 operands
[1] = (INTVAL (operands
[1]) < 0
4067 ? constm1_rtx
: const0_rtx
);
4070 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
4072 output_mov_immediate (otherops
);
4073 output_mov_immediate (operands
);
4075 else if (code1
== MEM
)
4077 switch (GET_CODE (XEXP (operands
[1], 0)))
4080 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
4084 abort (); /* Should never happen now */
4088 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
4092 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
4096 abort (); /* Should never happen now */
4101 output_asm_insn ("adr%?\t%0, %1", operands
);
4102 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
4106 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
4108 otherops
[0] = operands
[0];
4109 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
4110 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
4111 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
4113 if (GET_CODE (otherops
[2]) == CONST_INT
)
4115 switch (INTVAL (otherops
[2]))
4118 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
4121 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
4124 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
4127 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
4128 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
4130 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4133 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4136 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
4137 return "ldm%?ia\t%0, %M0";
4141 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
4142 /* Take care of overlapping base/data reg. */
4143 if (reg_mentioned_p (operands
[0], operands
[1]))
4145 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4146 output_asm_insn ("ldr%?\t%0, %1", operands
);
4150 output_asm_insn ("ldr%?\t%0, %1", operands
);
4151 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4157 abort(); /* Constraints should prevent this */
4159 else if (code0
== MEM
&& code1
== REG
)
4161 if (REGNO (operands
[1]) == 12)
4164 switch (GET_CODE (XEXP (operands
[0], 0)))
4167 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
4171 abort (); /* Should never happen now */
4175 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
4179 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
4183 abort (); /* Should never happen now */
4187 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
4189 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
4192 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
4196 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
4200 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
4207 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
4208 otherops
[1] = gen_rtx_REG (SImode
, 1 + REGNO (operands
[1]));
4209 output_asm_insn ("str%?\t%1, %0", operands
);
4210 output_asm_insn ("str%?\t%1, %0", otherops
);
4214 abort(); /* Constraints should prevent this */
4220 /* Output an arbitrary MOV reg, #n.
4221 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4224 output_mov_immediate (operands
)
4227 HOST_WIDE_INT n
= INTVAL (operands
[1]);
4231 /* Try to use one MOV */
4232 if (const_ok_for_arm (n
))
4234 output_asm_insn ("mov%?\t%0, %1", operands
);
4238 /* Try to use one MVN */
4239 if (const_ok_for_arm (~n
))
4241 operands
[1] = GEN_INT (~n
);
4242 output_asm_insn ("mvn%?\t%0, %1", operands
);
4246 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4248 for (i
=0; i
< 32; i
++)
4252 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
4253 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4256 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4263 /* Output an ADD r, s, #n where n may be too big for one instruction. If
4264 adding zero to one register, output nothing. */
4267 output_add_immediate (operands
)
4270 HOST_WIDE_INT n
= INTVAL (operands
[2]);
4272 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
4275 output_multi_immediate (operands
,
4276 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4279 output_multi_immediate (operands
,
4280 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4287 /* Output a multiple immediate operation.
4288 OPERANDS is the vector of operands referred to in the output patterns.
4289 INSTR1 is the output pattern to use for the first constant.
4290 INSTR2 is the output pattern to use for subsequent constants.
4291 IMMED_OP is the index of the constant slot in OPERANDS.
4292 N is the constant value. */
4295 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
4297 char *instr1
, *instr2
;
4301 #if HOST_BITS_PER_WIDE_INT > 32
4307 operands
[immed_op
] = const0_rtx
;
4308 output_asm_insn (instr1
, operands
); /* Quick and easy output */
4313 char *instr
= instr1
;
4315 /* Note that n is never zero here (which would give no output) */
4316 for (i
= 0; i
< 32; i
+= 2)
4320 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
4321 output_asm_insn (instr
, operands
);
4331 /* Return the appropriate ARM instruction for the operation code.
4332 The returned result should not be overwritten. OP is the rtx of the
4333 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
4337 arithmetic_instr (op
, shift_first_arg
)
4339 int shift_first_arg
;
4341 switch (GET_CODE (op
))
4347 return shift_first_arg
? "rsb" : "sub";
4364 /* Ensure valid constant shifts and return the appropriate shift mnemonic
4365 for the operation code. The returned result should not be overwritten.
4366 OP is the rtx code of the shift.
4367 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
4371 shift_op (op
, amountp
)
4373 HOST_WIDE_INT
*amountp
;
4376 enum rtx_code code
= GET_CODE (op
);
4378 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
4380 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
4381 *amountp
= INTVAL (XEXP (op
, 1));
4404 /* We never have to worry about the amount being other than a
4405 power of 2, since this case can never be reloaded from a reg. */
4407 *amountp
= int_log2 (*amountp
);
4418 /* This is not 100% correct, but follows from the desire to merge
4419 multiplication by a power of 2 with the recognizer for a
4420 shift. >=32 is not a valid shift for "asl", so we must try and
4421 output a shift that produces the correct arithmetical result.
4422 Using lsr #32 is identical except for the fact that the carry bit
4423 is not set correctly if we set the flags; but we never use the
4424 carry bit from such an operation, so we can ignore that. */
4425 if (code
== ROTATERT
)
4426 *amountp
&= 31; /* Rotate is just modulo 32 */
4427 else if (*amountp
!= (*amountp
& 31))
4434 /* Shifts of 0 are no-ops. */
4443 /* Obtain the shift from the POWER of two. */
4445 static HOST_WIDE_INT
4447 HOST_WIDE_INT power
;
4449 HOST_WIDE_INT shift
= 0;
4451 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
4461 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
4462 /bin/as is horribly restrictive. */
4465 output_ascii_pseudo_op (stream
, p
, len
)
4471 int len_so_far
= 1000;
4472 int chars_so_far
= 0;
4474 for (i
= 0; i
< len
; i
++)
4476 register int c
= p
[i
];
4478 if (len_so_far
> 50)
4481 fputs ("\"\n", stream
);
4482 fputs ("\t.ascii\t\"", stream
);
4487 if (c
== '\"' || c
== '\\')
4493 if (c
>= ' ' && c
< 0177)
4500 fprintf (stream
, "\\%03o", c
);
4507 fputs ("\"\n", stream
);
4511 /* Try to determine whether a pattern really clobbers the link register.
4512 This information is useful when peepholing, so that lr need not be pushed
4513 if we combine a call followed by a return.
4514 NOTE: This code does not check for side-effect expressions in a SET_SRC:
4515 such a check should not be needed because these only update an existing
4516 value within a register; the register must still be set elsewhere within
4520 pattern_really_clobbers_lr (x
)
4525 switch (GET_CODE (x
))
4528 switch (GET_CODE (SET_DEST (x
)))
4531 return REGNO (SET_DEST (x
)) == 14;
4534 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
4535 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
4537 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
4546 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
4547 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
4552 switch (GET_CODE (XEXP (x
, 0)))
4555 return REGNO (XEXP (x
, 0)) == 14;
4558 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
4559 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
4575 function_really_clobbers_lr (first
)
4580 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
4582 switch (GET_CODE (insn
))
4587 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
4592 if (pattern_really_clobbers_lr (PATTERN (insn
)))
4597 /* Don't yet know how to handle those calls that are not to a
4599 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
4602 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
4605 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
4611 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
4617 default: /* Don't recognize it, be safe */
4621 /* A call can be made (by peepholing) not to clobber lr iff it is
4622 followed by a return. There may, however, be a use insn iff
4623 we are returning the result of the call.
4624 If we run off the end of the insn chain, then that means the
4625 call was at the end of the function. Unfortunately we don't
4626 have a return insn for the peephole to recognize, so we
4627 must reject this. (Can this be fixed by adding our own insn?) */
4628 if ((next
= next_nonnote_insn (insn
)) == NULL
)
4631 /* No need to worry about lr if the call never returns */
4632 if (GET_CODE (next
) == BARRIER
)
4635 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
4636 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
4637 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
4638 == REGNO (XEXP (PATTERN (next
), 0))))
4639 if ((next
= next_nonnote_insn (next
)) == NULL
)
4642 if (GET_CODE (next
) == JUMP_INSN
4643 && GET_CODE (PATTERN (next
)) == RETURN
)
4652 /* We have reached the end of the chain so lr was _not_ clobbered */
4657 output_return_instruction (operand
, really_return
, reverse
)
4663 int reg
, live_regs
= 0;
4664 int volatile_func
= (optimize
> 0
4665 && TREE_THIS_VOLATILE (current_function_decl
));
4667 return_used_this_function
= 1;
4672 /* If this function was declared non-returning, and we have found a tail
4673 call, then we have to trust that the called function won't return. */
4674 if (! really_return
)
4677 /* Otherwise, trap an attempted return by aborting. */
4679 ops
[1] = gen_rtx_SYMBOL_REF (Pmode
, "abort");
4680 assemble_external_libcall (ops
[1]);
4681 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
4685 if (current_function_calls_alloca
&& ! really_return
)
4688 for (reg
= 0; reg
<= 10; reg
++)
4689 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4692 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
4695 if (frame_pointer_needed
)
4700 if (lr_save_eliminated
|| ! regs_ever_live
[14])
4703 if (frame_pointer_needed
)
4705 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
4708 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
4710 for (reg
= 0; reg
<= 10; reg
++)
4711 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4713 strcat (instr
, "%|");
4714 strcat (instr
, reg_names
[reg
]);
4716 strcat (instr
, ", ");
4719 if (frame_pointer_needed
)
4721 strcat (instr
, "%|");
4722 strcat (instr
, reg_names
[11]);
4723 strcat (instr
, ", ");
4724 strcat (instr
, "%|");
4725 strcat (instr
, reg_names
[13]);
4726 strcat (instr
, ", ");
4727 strcat (instr
, "%|");
4728 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4732 strcat (instr
, "%|");
4733 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4735 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
4736 output_asm_insn (instr
, &operand
);
4738 else if (really_return
)
4740 if (TARGET_THUMB_INTERWORK
)
4741 sprintf (instr
, "bx%%?%%%s\t%%|lr", reverse
? "D" : "d");
4743 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
4744 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
4745 output_asm_insn (instr
, &operand
);
4751 /* Return nonzero if optimizing and the current function is volatile.
4752 Such functions never return, and many memory cycles can be saved
4753 by not storing register values that will never be needed again.
4754 This optimization was added to speed up context switching in a
4755 kernel application. */
4758 arm_volatile_func ()
4760 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
4763 /* The amount of stack adjustment that happens here, in output_return and in
4764 output_epilogue must be exactly the same as was calculated during reload,
4765 or things will point to the wrong place. The only time we can safely
4766 ignore this constraint is when a function has no arguments on the stack,
4767 no stack frame requirement and no live registers execpt for `lr'. If we
4768 can guarantee that by making all function calls into tail calls and that
4769 lr is not clobbered in any other way, then there is no need to push lr
4773 output_func_prologue (f
, frame_size
)
4777 int reg
, live_regs_mask
= 0;
4779 int volatile_func
= (optimize
> 0
4780 && TREE_THIS_VOLATILE (current_function_decl
));
4782 /* Nonzero if we must stuff some register arguments onto the stack as if
4783 they were passed there. */
4784 int store_arg_regs
= 0;
4786 if (arm_ccfsm_state
|| arm_target_insn
)
4787 abort (); /* Sanity check */
4789 if (arm_naked_function_p (current_function_decl
))
4792 return_used_this_function
= 0;
4793 lr_save_eliminated
= 0;
4795 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
4796 ASM_COMMENT_START
, current_function_args_size
,
4797 current_function_pretend_args_size
, frame_size
);
4798 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
4799 ASM_COMMENT_START
, frame_pointer_needed
,
4800 current_function_anonymous_args
);
4803 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
4805 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
4808 for (reg
= 0; reg
<= 10; reg
++)
4809 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4810 live_regs_mask
|= (1 << reg
);
4812 if (frame_pointer_needed
)
4813 live_regs_mask
|= 0xD800;
4814 else if (regs_ever_live
[14])
4816 if (! current_function_args_size
4817 && ! function_really_clobbers_lr (get_insns ()))
4818 lr_save_eliminated
= 1;
4820 live_regs_mask
|= 0x4000;
4825 /* if a di mode load/store multiple is used, and the base register
4826 is r3, then r4 can become an ever live register without lr
4827 doing so, in this case we need to push lr as well, or we
4828 will fail to get a proper return. */
4830 live_regs_mask
|= 0x4000;
4831 lr_save_eliminated
= 0;
4835 if (lr_save_eliminated
)
4836 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
4839 #ifdef AOF_ASSEMBLER
4841 fprintf (f
, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX
, REGISTER_PREFIX
,
4842 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
4848 output_func_epilogue (f
, frame_size
)
4852 int reg
, live_regs_mask
= 0;
4853 /* If we need this then it will always be at least this much */
4854 int floats_offset
= 12;
4856 int volatile_func
= (optimize
> 0
4857 && TREE_THIS_VOLATILE (current_function_decl
));
4859 if (use_return_insn() && return_used_this_function
)
4861 if ((frame_size
+ current_function_outgoing_args_size
) != 0
4862 && !(frame_pointer_needed
|| TARGET_APCS
))
4867 /* Naked functions don't have epilogues. */
4868 if (arm_naked_function_p (current_function_decl
))
4871 /* A volatile function should never return. Call abort. */
4874 rtx op
= gen_rtx_SYMBOL_REF (Pmode
, "abort");
4875 assemble_external_libcall (op
);
4876 output_asm_insn ("bl\t%a0", &op
);
4880 for (reg
= 0; reg
<= 10; reg
++)
4881 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4883 live_regs_mask
|= (1 << reg
);
4887 if (frame_pointer_needed
)
4889 if (arm_fpu_arch
== FP_SOFT2
)
4891 for (reg
= 23; reg
> 15; reg
--)
4892 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4894 floats_offset
+= 12;
4895 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
4896 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
4903 for (reg
= 23; reg
> 15; reg
--)
4905 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4907 floats_offset
+= 12;
4908 /* We can't unstack more than four registers at once */
4909 if (start_reg
- reg
== 3)
4911 fprintf (f
, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
4912 REGISTER_PREFIX
, reg_names
[reg
],
4913 REGISTER_PREFIX
, floats_offset
);
4914 start_reg
= reg
- 1;
4919 if (reg
!= start_reg
)
4920 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4921 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4922 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4924 start_reg
= reg
- 1;
4928 /* Just in case the last register checked also needs unstacking. */
4929 if (reg
!= start_reg
)
4930 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4931 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4932 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4935 if (TARGET_THUMB_INTERWORK
)
4937 live_regs_mask
|= 0x6800;
4938 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
, FALSE
);
4939 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
4943 live_regs_mask
|= 0xA800;
4944 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
4945 TARGET_APCS_32
? FALSE
: TRUE
);
4950 /* Restore stack pointer if necessary. */
4951 if (frame_size
+ current_function_outgoing_args_size
!= 0)
4953 operands
[0] = operands
[1] = stack_pointer_rtx
;
4954 operands
[2] = GEN_INT (frame_size
4955 + current_function_outgoing_args_size
);
4956 output_add_immediate (operands
);
4959 if (arm_fpu_arch
== FP_SOFT2
)
4961 for (reg
= 16; reg
< 24; reg
++)
4962 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4963 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
4964 reg_names
[reg
], REGISTER_PREFIX
);
4970 for (reg
= 16; reg
< 24; reg
++)
4972 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4974 if (reg
- start_reg
== 3)
4976 fprintf (f
, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
4977 REGISTER_PREFIX
, reg_names
[start_reg
],
4979 start_reg
= reg
+ 1;
4984 if (reg
!= start_reg
)
4985 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
4986 REGISTER_PREFIX
, reg_names
[start_reg
],
4987 reg
- start_reg
, REGISTER_PREFIX
);
4989 start_reg
= reg
+ 1;
4993 /* Just in case the last register checked also needs unstacking. */
4994 if (reg
!= start_reg
)
4995 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
4996 REGISTER_PREFIX
, reg_names
[start_reg
],
4997 reg
- start_reg
, REGISTER_PREFIX
);
5000 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
5002 if (TARGET_THUMB_INTERWORK
)
5004 if (! lr_save_eliminated
)
5005 print_multi_reg(f
, "ldmfd\t%ssp!", live_regs_mask
| 0x4000,
5008 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5010 else if (lr_save_eliminated
)
5011 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5012 : "\tmovs\t%spc, %slr\n"),
5013 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5015 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
5016 TARGET_APCS_32
? FALSE
: TRUE
);
5020 if (live_regs_mask
|| regs_ever_live
[14])
5022 /* Restore the integer regs, and the return address into lr */
5023 if (! lr_save_eliminated
)
5024 live_regs_mask
|= 0x4000;
5026 if (live_regs_mask
!= 0)
5027 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
5030 if (current_function_pretend_args_size
)
5032 /* Unwind the pre-pushed regs */
5033 operands
[0] = operands
[1] = stack_pointer_rtx
;
5034 operands
[2] = GEN_INT (current_function_pretend_args_size
);
5035 output_add_immediate (operands
);
5037 /* And finally, go home */
5038 if (TARGET_THUMB_INTERWORK
)
5039 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5041 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5042 : "\tmovs\t%spc, %slr\n"),
5043 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5049 current_function_anonymous_args
= 0;
5053 emit_multi_reg_push (mask
)
5060 for (i
= 0; i
< 16; i
++)
5061 if (mask
& (1 << i
))
5064 if (num_regs
== 0 || num_regs
> 16)
5067 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (num_regs
));
5069 for (i
= 0; i
< 16; i
++)
5071 if (mask
& (1 << i
))
5074 = gen_rtx_SET (VOIDmode
,
5075 gen_rtx_MEM (BLKmode
,
5076 gen_rtx_PRE_DEC (BLKmode
,
5077 stack_pointer_rtx
)),
5078 gen_rtx_UNSPEC (BLKmode
,
5080 gen_rtx_REG (SImode
, i
)),
5086 for (j
= 1, i
++; j
< num_regs
; i
++)
5088 if (mask
& (1 << i
))
5091 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, i
));
5100 emit_sfm (base_reg
, count
)
5107 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (count
));
5110 = gen_rtx_SET (VOIDmode
,
5111 gen_rtx_MEM (BLKmode
,
5112 gen_rtx_PRE_DEC (BLKmode
, stack_pointer_rtx
)),
5113 gen_rtx_UNSPEC (BLKmode
,
5114 gen_rtvec (1, gen_rtx_REG (XFmode
,
5118 for (i
= 1; i
< count
; i
++)
5119 XVECEXP (par
, 0, i
) = gen_rtx_USE (VOIDmode
,
5120 gen_rtx_REG (XFmode
, base_reg
++));
5126 arm_expand_prologue ()
5129 rtx amount
= GEN_INT (-(get_frame_size ()
5130 + current_function_outgoing_args_size
));
5133 int live_regs_mask
= 0;
5134 int store_arg_regs
= 0;
5135 int volatile_func
= (optimize
> 0
5136 && TREE_THIS_VOLATILE (current_function_decl
));
5138 /* Naked functions don't have prologues. */
5139 if (arm_naked_function_p (current_function_decl
))
5142 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
5145 if (! volatile_func
)
5146 for (reg
= 0; reg
<= 10; reg
++)
5147 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5148 live_regs_mask
|= 1 << reg
;
5150 if (! volatile_func
&& regs_ever_live
[14])
5151 live_regs_mask
|= 0x4000;
5153 if (frame_pointer_needed
)
5155 live_regs_mask
|= 0xD800;
5156 emit_insn (gen_movsi (gen_rtx_REG (SImode
, 12),
5157 stack_pointer_rtx
));
5160 if (current_function_pretend_args_size
)
5163 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
5166 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5167 GEN_INT (-current_function_pretend_args_size
)));
5172 /* If we have to push any regs, then we must push lr as well, or
5173 we won't get a proper return. */
5174 live_regs_mask
|= 0x4000;
5175 emit_multi_reg_push (live_regs_mask
);
5178 /* For now the integer regs are still pushed in output_func_epilogue (). */
5180 if (! volatile_func
)
5182 if (arm_fpu_arch
== FP_SOFT2
)
5184 for (reg
= 23; reg
> 15; reg
--)
5185 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5186 emit_insn (gen_rtx_SET
5188 gen_rtx_MEM (XFmode
,
5189 gen_rtx_PRE_DEC (XFmode
,
5190 stack_pointer_rtx
)),
5191 gen_rtx_REG (XFmode
, reg
)));
5197 for (reg
= 23; reg
> 15; reg
--)
5199 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5201 if (start_reg
- reg
== 3)
5204 start_reg
= reg
- 1;
5209 if (start_reg
!= reg
)
5210 emit_sfm (reg
+ 1, start_reg
- reg
);
5211 start_reg
= reg
- 1;
5215 if (start_reg
!= reg
)
5216 emit_sfm (reg
+ 1, start_reg
- reg
);
5220 if (frame_pointer_needed
)
5221 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx_REG (SImode
, 12),
5223 (-(4 + current_function_pretend_args_size
)))));
5225 if (amount
!= const0_rtx
)
5227 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
5228 emit_insn (gen_rtx_CLOBBER (VOIDmode
,
5229 gen_rtx_MEM (BLKmode
, stack_pointer_rtx
)));
5232 /* If we are profiling, make sure no instructions are scheduled before
5233 the call to mcount. */
5234 if (profile_flag
|| profile_block_flag
)
5235 emit_insn (gen_blockage ());
5239 /* If CODE is 'd', then the X is a condition operand and the instruction
5240 should only be executed if the condition is true.
5241 if CODE is 'D', then the X is a condition operand and the instruction
5242 should only be executed if the condition is false: however, if the mode
5243 of the comparison is CCFPEmode, then always execute the instruction -- we
5244 do this because in these circumstances !GE does not necessarily imply LT;
5245 in these cases the instruction pattern will take care to make sure that
5246 an instruction containing %d will follow, thereby undoing the effects of
5247 doing this instruction unconditionally.
5248 If CODE is 'N' then X is a floating point operand that must be negated
5250 If CODE is 'B' then output a bitwise inverted value of X (a const int).
5251 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
5254 arm_print_operand (stream
, x
, code
)
5262 fputs (ASM_COMMENT_START
, stream
);
5266 fputs (REGISTER_PREFIX
, stream
);
5270 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
5271 fputs (arm_condition_codes
[arm_current_cc
], stream
);
5277 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
5278 r
= REAL_VALUE_NEGATE (r
);
5279 fprintf (stream
, "%s", fp_const_from_val (&r
));
5284 if (GET_CODE (x
) == CONST_INT
)
5286 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5291 ARM_SIGN_EXTEND (~ INTVAL (x
)));
5295 output_addr_const (stream
, x
);
5300 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
5304 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
5310 char *shift
= shift_op (x
, &val
);
5314 fprintf (stream
, ", %s ", shift_op (x
, &val
));
5316 arm_print_operand (stream
, XEXP (x
, 1), 0);
5319 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5332 fputs (REGISTER_PREFIX
, stream
);
5333 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
5339 fputs (REGISTER_PREFIX
, stream
);
5340 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
5344 fputs (REGISTER_PREFIX
, stream
);
5345 if (GET_CODE (XEXP (x
, 0)) == REG
)
5346 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
5348 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
5352 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
5353 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
5354 + ((GET_MODE_SIZE (GET_MODE (x
))
5355 + GET_MODE_SIZE (SImode
) - 1)
5356 / GET_MODE_SIZE (SImode
))]);
5361 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
5367 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
5368 (get_arm_condition_code (x
))],
5376 if (GET_CODE (x
) == REG
)
5378 fputs (REGISTER_PREFIX
, stream
);
5379 fputs (reg_names
[REGNO (x
)], stream
);
5381 else if (GET_CODE (x
) == MEM
)
5383 output_memory_reference_mode
= GET_MODE (x
);
5384 output_address (XEXP (x
, 0));
5386 else if (GET_CODE (x
) == CONST_DOUBLE
)
5387 fprintf (stream
, "#%s", fp_immediate_constant (x
));
5388 else if (GET_CODE (x
) == NEG
)
5389 abort (); /* This should never happen now. */
5392 fputc ('#', stream
);
5393 output_addr_const (stream
, x
);
5399 /* A finite state machine takes care of noticing whether or not instructions
5400 can be conditionally executed, and thus decrease execution time and code
5401 size by deleting branch instructions. The fsm is controlled by
5402 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
5404 /* The state of the fsm controlling condition codes are:
5405 0: normal, do nothing special
5406 1: make ASM_OUTPUT_OPCODE not output this instruction
5407 2: make ASM_OUTPUT_OPCODE not output this instruction
5408 3: make instructions conditional
5409 4: make instructions conditional
5411 State transitions (state->state by whom under condition):
5412 0 -> 1 final_prescan_insn if the `target' is a label
5413 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
5414 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
5415 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
5416 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
5417 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
5418 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
5419 (the target insn is arm_target_insn).
5421 If the jump clobbers the conditions then we use states 2 and 4.
5423 A similar thing can be done with conditional return insns.
5425 XXX In case the `target' is an unconditional branch, this conditionalising
5426 of the instructions always reduces code size, but not always execution
5427 time. But then, I want to reduce the code size to somewhere near what
5428 /bin/cc produces. */
5430 /* Returns the index of the ARM condition code string in
5431 `arm_condition_codes'. COMPARISON should be an rtx like
5432 `(eq (...) (...))'. */
5434 static enum arm_cond_code
5435 get_arm_condition_code (comparison
)
5438 enum machine_mode mode
= GET_MODE (XEXP (comparison
, 0));
5440 register enum rtx_code comp_code
= GET_CODE (comparison
);
5442 if (GET_MODE_CLASS (mode
) != MODE_CC
)
5443 mode
= SELECT_CC_MODE (comp_code
, XEXP (comparison
, 0),
5444 XEXP (comparison
, 1));
5448 case CC_DNEmode
: code
= ARM_NE
; goto dominance
;
5449 case CC_DEQmode
: code
= ARM_EQ
; goto dominance
;
5450 case CC_DGEmode
: code
= ARM_GE
; goto dominance
;
5451 case CC_DGTmode
: code
= ARM_GT
; goto dominance
;
5452 case CC_DLEmode
: code
= ARM_LE
; goto dominance
;
5453 case CC_DLTmode
: code
= ARM_LT
; goto dominance
;
5454 case CC_DGEUmode
: code
= ARM_CS
; goto dominance
;
5455 case CC_DGTUmode
: code
= ARM_HI
; goto dominance
;
5456 case CC_DLEUmode
: code
= ARM_LS
; goto dominance
;
5457 case CC_DLTUmode
: code
= ARM_CC
;
5460 if (comp_code
!= EQ
&& comp_code
!= NE
)
5463 if (comp_code
== EQ
)
5464 return ARM_INVERSE_CONDITION_CODE (code
);
5470 case NE
: return ARM_NE
;
5471 case EQ
: return ARM_EQ
;
5472 case GE
: return ARM_PL
;
5473 case LT
: return ARM_MI
;
5481 case NE
: return ARM_NE
;
5482 case EQ
: return ARM_EQ
;
5489 case GE
: return ARM_GE
;
5490 case GT
: return ARM_GT
;
5491 case LE
: return ARM_LS
;
5492 case LT
: return ARM_MI
;
5499 case NE
: return ARM_NE
;
5500 case EQ
: return ARM_EQ
;
5501 case GE
: return ARM_LE
;
5502 case GT
: return ARM_LT
;
5503 case LE
: return ARM_GE
;
5504 case LT
: return ARM_GT
;
5505 case GEU
: return ARM_LS
;
5506 case GTU
: return ARM_CC
;
5507 case LEU
: return ARM_CS
;
5508 case LTU
: return ARM_HI
;
5515 case LTU
: return ARM_CS
;
5516 case GEU
: return ARM_CC
;
5523 case NE
: return ARM_NE
;
5524 case EQ
: return ARM_EQ
;
5525 case GE
: return ARM_GE
;
5526 case GT
: return ARM_GT
;
5527 case LE
: return ARM_LE
;
5528 case LT
: return ARM_LT
;
5529 case GEU
: return ARM_CS
;
5530 case GTU
: return ARM_HI
;
5531 case LEU
: return ARM_LS
;
5532 case LTU
: return ARM_CC
;
5544 final_prescan_insn (insn
, opvec
, noperands
)
5549 /* BODY will hold the body of INSN. */
5550 register rtx body
= PATTERN (insn
);
5552 /* This will be 1 if trying to repeat the trick, and things need to be
5553 reversed if it appears to fail. */
5556 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
5557 taken are clobbered, even if the rtl suggests otherwise. It also
5558 means that we have to grub around within the jump expression to find
5559 out what the conditions are when the jump isn't taken. */
5560 int jump_clobbers
= 0;
5562 /* If we start with a return insn, we only succeed if we find another one. */
5563 int seeking_return
= 0;
5565 /* START_INSN will hold the insn from where we start looking. This is the
5566 first insn after the following code_label if REVERSE is true. */
5567 rtx start_insn
= insn
;
5569 /* If in state 4, check if the target branch is reached, in order to
5570 change back to state 0. */
5571 if (arm_ccfsm_state
== 4)
5573 if (insn
== arm_target_insn
)
5575 arm_target_insn
= NULL
;
5576 arm_ccfsm_state
= 0;
5581 /* If in state 3, it is possible to repeat the trick, if this insn is an
5582 unconditional branch to a label, and immediately following this branch
5583 is the previous target label which is only used once, and the label this
5584 branch jumps to is not too far off. */
5585 if (arm_ccfsm_state
== 3)
5587 if (simplejump_p (insn
))
5589 start_insn
= next_nonnote_insn (start_insn
);
5590 if (GET_CODE (start_insn
) == BARRIER
)
5592 /* XXX Isn't this always a barrier? */
5593 start_insn
= next_nonnote_insn (start_insn
);
5595 if (GET_CODE (start_insn
) == CODE_LABEL
5596 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5597 && LABEL_NUSES (start_insn
) == 1)
5602 else if (GET_CODE (body
) == RETURN
)
5604 start_insn
= next_nonnote_insn (start_insn
);
5605 if (GET_CODE (start_insn
) == BARRIER
)
5606 start_insn
= next_nonnote_insn (start_insn
);
5607 if (GET_CODE (start_insn
) == CODE_LABEL
5608 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5609 && LABEL_NUSES (start_insn
) == 1)
5621 if (arm_ccfsm_state
!= 0 && !reverse
)
5623 if (GET_CODE (insn
) != JUMP_INSN
)
5626 /* This jump might be paralleled with a clobber of the condition codes
5627 the jump should always come first */
5628 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5629 body
= XVECEXP (body
, 0, 0);
5632 /* If this is a conditional return then we don't want to know */
5633 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5634 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
5635 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
5636 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
5641 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5642 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
5645 int fail
= FALSE
, succeed
= FALSE
;
5646 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
5647 int then_not_else
= TRUE
;
5648 rtx this_insn
= start_insn
, label
= 0;
5650 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
5652 /* The code below is wrong for these, and I haven't time to
5653 fix it now. So we just do the safe thing and return. This
5654 whole function needs re-writing anyway. */
5659 /* Register the insn jumped to. */
5662 if (!seeking_return
)
5663 label
= XEXP (SET_SRC (body
), 0);
5665 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
5666 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
5667 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
5669 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
5670 then_not_else
= FALSE
;
5672 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
5674 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
5677 then_not_else
= FALSE
;
5682 /* See how many insns this branch skips, and what kind of insns. If all
5683 insns are okay, and the label or unconditional branch to the same
5684 label is not too far away, succeed. */
5685 for (insns_skipped
= 0;
5686 !fail
&& !succeed
&& insns_skipped
++ < MAX_INSNS_SKIPPED
;)
5690 this_insn
= next_nonnote_insn (this_insn
);
5694 scanbody
= PATTERN (this_insn
);
5696 switch (GET_CODE (this_insn
))
5699 /* Succeed if it is the target label, otherwise fail since
5700 control falls in from somewhere else. */
5701 if (this_insn
== label
)
5705 arm_ccfsm_state
= 2;
5706 this_insn
= next_nonnote_insn (this_insn
);
5709 arm_ccfsm_state
= 1;
5717 /* Succeed if the following insn is the target label.
5719 If return insns are used then the last insn in a function
5720 will be a barrier. */
5721 this_insn
= next_nonnote_insn (this_insn
);
5722 if (this_insn
&& this_insn
== label
)
5726 arm_ccfsm_state
= 2;
5727 this_insn
= next_nonnote_insn (this_insn
);
5730 arm_ccfsm_state
= 1;
5738 /* If using 32-bit addresses the cc is not preserved over
5742 /* Succeed if the following insn is the target label,
5743 or if the following two insns are a barrier and
5744 the target label. */
5745 this_insn
= next_nonnote_insn (this_insn
);
5746 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
5747 this_insn
= next_nonnote_insn (this_insn
);
5749 if (this_insn
&& this_insn
== label
5750 && insns_skipped
< MAX_INSNS_SKIPPED
)
5754 arm_ccfsm_state
= 2;
5755 this_insn
= next_nonnote_insn (this_insn
);
5758 arm_ccfsm_state
= 1;
5767 /* If this is an unconditional branch to the same label, succeed.
5768 If it is to another label, do nothing. If it is conditional,
5770 /* XXX Probably, the test for the SET and the PC are unnecessary. */
5772 if (GET_CODE (scanbody
) == SET
5773 && GET_CODE (SET_DEST (scanbody
)) == PC
)
5775 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
5776 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
5778 arm_ccfsm_state
= 2;
5781 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
5784 else if (GET_CODE (scanbody
) == RETURN
5787 arm_ccfsm_state
= 2;
5790 else if (GET_CODE (scanbody
) == PARALLEL
)
5792 switch (get_attr_conds (this_insn
))
5804 /* Instructions using or affecting the condition codes make it
5806 if ((GET_CODE (scanbody
) == SET
5807 || GET_CODE (scanbody
) == PARALLEL
)
5808 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
5818 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
5819 arm_target_label
= CODE_LABEL_NUMBER (label
);
5820 else if (seeking_return
|| arm_ccfsm_state
== 2)
5822 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
5824 this_insn
= next_nonnote_insn (this_insn
);
5825 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
5826 || GET_CODE (this_insn
) == CODE_LABEL
))
5831 /* Oh, dear! we ran off the end.. give up */
5832 recog (PATTERN (insn
), insn
, NULL_PTR
);
5833 arm_ccfsm_state
= 0;
5834 arm_target_insn
= NULL
;
5837 arm_target_insn
= this_insn
;
5846 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
5848 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
5849 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5850 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
5851 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5855 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
5858 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
5862 if (reverse
|| then_not_else
)
5863 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5865 /* restore recog_operand (getting the attributes of other insns can
5866 destroy this array, but final.c assumes that it remains intact
5867 across this call; since the insn has been recognized already we
5868 call recog direct). */
5869 recog (PATTERN (insn
), insn
, NULL_PTR
);
5873 #ifdef AOF_ASSEMBLER
5874 /* Special functions only needed when producing AOF syntax assembler. */
5876 rtx aof_pic_label
= NULL_RTX
;
5879 struct pic_chain
*next
;
5883 static struct pic_chain
*aof_pic_chain
= NULL
;
5889 struct pic_chain
**chainp
;
5892 if (aof_pic_label
== NULL_RTX
)
5894 /* This needs to persist throughout the compilation. */
5895 end_temporary_allocation ();
5896 aof_pic_label
= gen_rtx_SYMBOL_REF (Pmode
, "x$adcons");
5897 resume_temporary_allocation ();
5900 for (offset
= 0, chainp
= &aof_pic_chain
; *chainp
;
5901 offset
+= 4, chainp
= &(*chainp
)->next
)
5902 if ((*chainp
)->symname
== XSTR (x
, 0))
5903 return plus_constant (aof_pic_label
, offset
);
5905 *chainp
= (struct pic_chain
*) xmalloc (sizeof (struct pic_chain
));
5906 (*chainp
)->next
= NULL
;
5907 (*chainp
)->symname
= XSTR (x
, 0);
5908 return plus_constant (aof_pic_label
, offset
);
5912 aof_dump_pic_table (f
)
5915 struct pic_chain
*chain
;
5917 if (aof_pic_chain
== NULL
)
5920 fprintf (f
, "\tAREA |%s$$adcons|, BASED %s%s\n",
5921 reg_names
[PIC_OFFSET_TABLE_REGNUM
], REGISTER_PREFIX
,
5922 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
5923 fputs ("|x$adcons|\n", f
);
5925 for (chain
= aof_pic_chain
; chain
; chain
= chain
->next
)
5927 fputs ("\tDCD\t", f
);
5928 assemble_name (f
, chain
->symname
);
5933 int arm_text_section_count
= 1;
5938 static char buf
[100];
5939 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
5940 arm_text_section_count
++);
5942 strcat (buf
, ", PIC, REENTRANT");
/* Sequence number for named data AREAs.  */
static int arm_data_section_count = 1;

/* Return the AOF directive that switches to a new, uniquely named data
   AREA.  The result lives in a static buffer that is overwritten by
   the next call.  */
char *
aof_data_section ()
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */
/* One pending import: a symbol referenced but not (yet) defined in
   this translation unit.  */
struct import
{
  struct import *next;	/* Next pending import.  */
  char *name;		/* Symbol name; shared string, compared by pointer.  */
};

/* Chain of imports still outstanding.  */
static struct import *imports_list = NULL;

/* Record NAME as a symbol needing an IMPORT directive, unless it is
   already on the list.  NAME is a shared string, so pointer comparison
   suffices for the duplicate check.  */
void
aof_add_import (name)
     char *name;
{
  struct import *new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}
5991 aof_delete_import (name
)
5994 struct import
**old
;
5996 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
5998 if ((*old
)->name
== name
)
6000 *old
= (*old
)->next
;
6006 int arm_main_function
= 0;
6009 aof_dump_imports (f
)
6012 /* The AOF assembler needs this to cause the startup code to be extracted
6013 from the library. Brining in __main causes the whole thing to work
6015 if (arm_main_function
)
6018 fputs ("\tIMPORT __main\n", f
);
6019 fputs ("\tDCD __main\n", f
);
6022 /* Now dump the remaining imports. */
6023 while (imports_list
)
6025 fprintf (f
, "\tIMPORT\t");
6026 assemble_name (f
, imports_list
->name
);
6028 imports_list
= imports_list
->next
;
6031 #endif /* AOF_ASSEMBLER */