1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
12 any later version.
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-flags.h"
35 #include "insn-attr.h"
41 /* The maximum number of insns skipped which will be conditionalised if
43 #define MAX_INSNS_SKIPPED 5
45 /* Some function declarations. */
46 extern FILE *asm_out_file
;
48 static HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
49 static char *output_multi_immediate
PROTO ((rtx
*, char *, char *, int,
51 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
52 HOST_WIDE_INT
, rtx
, rtx
, int, int));
53 static int arm_naked_function_p
PROTO ((tree
));
54 static void init_fpa_table
PROTO ((void));
55 static enum machine_mode select_dominance_cc_mode
PROTO ((enum rtx_code
, rtx
,
57 static HOST_WIDE_INT add_constant
PROTO ((rtx
, enum machine_mode
));
58 static void dump_table
PROTO ((rtx
));
59 static int fixit
PROTO ((rtx
, enum machine_mode
, int));
60 static rtx find_barrier
PROTO ((rtx
, int));
61 static int broken_move
PROTO ((rtx
));
62 static char *fp_const_from_val
PROTO ((REAL_VALUE_TYPE
*));
63 static int eliminate_lr2ip
PROTO ((rtx
*));
64 static char *shift_op
PROTO ((rtx
, HOST_WIDE_INT
*));
65 static int pattern_really_clobbers_lr
PROTO ((rtx
));
66 static int function_really_clobbers_lr
PROTO ((rtx
));
67 static void emit_multi_reg_push
PROTO ((int));
68 static void emit_sfm
PROTO ((int, int));
69 static enum arm_cond_code get_arm_condition_code
PROTO ((rtx
));
71 /* Define the information needed to generate branch insns. This is
72 stored from the compare operation. */
74 rtx arm_compare_op0
, arm_compare_op1
;
77 /* What type of cpu are we compiling for? */
78 enum processor_type arm_cpu
;
80 /* What type of floating point are we tuning for? */
81 enum floating_point_type arm_fpu
;
83 /* What type of floating point instructions are available? */
84 enum floating_point_type arm_fpu_arch
;
86 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
87 enum prog_mode_type arm_prgmode
;
89 /* Set by the -mfp=... option */
90 char *target_fp_name
= NULL
;
92 /* Nonzero if this is an "M" variant of the processor. */
93 int arm_fast_multiply
= 0;
95 /* Nonzero if this chip supports the ARM Architecture 4 extensions */
98 /* Set to the features we should tune the code for (multiply speed etc). */
101 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
102 must report the mode of the memory reference from PRINT_OPERAND to
103 PRINT_OPERAND_ADDRESS. */
104 enum machine_mode output_memory_reference_mode
;
106 /* Nonzero if the prologue must setup `fp'. */
107 int current_function_anonymous_args
;
109 /* The register number to be used for the PIC offset register. */
110 int arm_pic_register
= 9;
112 /* Location counter of .text segment. */
113 int arm_text_location
= 0;
115 /* Set to one if we think that lr is only saved because of subroutine calls,
116 but all of these can be `put after' return insns */
117 int lr_save_eliminated
;
119 /* Set to 1 when a return insn is output, this means that the epilogue
122 static int return_used_this_function
;
124 static int arm_constant_limit
= 3;
126 /* For an explanation of these variables, see final_prescan_insn below. */
128 enum arm_cond_code arm_current_cc
;
130 int arm_target_label
;
132 /* The condition codes of the ARM, and the inverse function. */
133 char *arm_condition_codes
[] =
135 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
136 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
139 static enum arm_cond_code
get_arm_condition_code ();
142 /* Initialization code */
144 struct arm_cpu_select arm_select
[4] =
146 /* switch name, tune arch */
147 { (char *)0, "--with-cpu=", 1, 1 },
148 { (char *)0, "-mcpu=", 1, 1 },
149 { (char *)0, "-march=", 0, 1 },
150 { (char *)0, "-mtune=", 1, 0 },
/* Bit flags describing processor features.  These are OR-ed together in
   the `all_procs' table entries below and tested (via `tune_flags' /
   `flags') in arm_override_options to configure code generation.  */
153 #define FL_CO_PROC 0x01 /* Has external co-processor bus */
154 #define FL_FAST_MULT 0x02 /* Fast multiply */
155 #define FL_MODE26 0x04 /* 26-bit mode support */
156 #define FL_MODE32 0x08 /* 32-bit mode support */
157 #define FL_ARCH4 0x10 /* Architecture rel 4 */
158 #define FL_THUMB 0x20 /* Thumb aware */
163 enum processor_type type
;
167 /* Not all of these give usefully different compilation alternatives,
168 but there is no simple way of generalizing them. */
169 static struct processors all_procs
[] =
171 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
172 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
173 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
174 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
175 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
176 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
177 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
178 /* arm7m doesn't exist on its own, only in conjunction with D, (and I), but
179 those don't alter the code, so it is sometimes known as the arm7m */
180 {"arm7m", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
182 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
184 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
186 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
187 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
188 {"arm7100", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
189 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
190 /* Doesn't really have an external co-proc, but does have embedded fpu */
191 {"arm7500fe", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
192 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
193 | FL_ARCH4
| FL_THUMB
)},
194 {"arm8", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
196 {"arm810", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
198 {"strongarm", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
200 {"strongarm110", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
202 {"armv2", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
203 {"armv2a", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
204 {"armv3", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
205 {"armv3m", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
207 {"armv4", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
208 | FL_MODE26
| FL_ARCH4
)},
209 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
210 implementations that support it, so we will leave it out for now. */
211 {"armv4t", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
216 /* Fix up any incompatible options that the user has specified.
217 This has now turned into a maze. */
219 arm_override_options ()
221 int arm_thumb_aware
= 0;
224 struct arm_cpu_select
*ptr
;
225 static struct cpu_default
{
229 { TARGET_CPU_arm2
, "arm2" },
230 { TARGET_CPU_arm6
, "arm6" },
231 { TARGET_CPU_arm610
, "arm610" },
232 { TARGET_CPU_arm7dm
, "arm7dm" },
233 { TARGET_CPU_arm7500fe
, "arm7500fe" },
234 { TARGET_CPU_arm7tdmi
, "arm7tdmi" },
235 { TARGET_CPU_arm8
, "arm8" },
236 { TARGET_CPU_arm810
, "arm810" },
237 { TARGET_CPU_strongarm
, "strongarm" },
240 struct cpu_default
*def
;
242 /* Set the default. */
243 for (def
= &cpu_defaults
[0]; def
->name
; ++def
)
244 if (def
->cpu
== TARGET_CPU_DEFAULT
)
249 arm_select
[0].string
= def
->name
;
251 for (i
= 0; i
< sizeof (arm_select
) / sizeof (arm_select
[0]); i
++)
253 ptr
= &arm_select
[i
];
254 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
256 struct processors
*sel
;
258 for (sel
= all_procs
; sel
->name
!= NULL
; sel
++)
259 if (! strcmp (ptr
->string
, sel
->name
))
261 /* -march= is the only flag that can take an architecture
262 type, so if we match when the tune bit is set, the
263 option was invalid. */
266 if (sel
->type
== PROCESSOR_NONE
)
267 continue; /* Its an architecture, not a cpu */
270 tune_flags
= sel
->flags
;
279 if (sel
->name
== NULL
)
280 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
284 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
285 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
287 if (TARGET_POKE_FUNCTION_NAME
)
288 target_flags
|= ARM_FLAG_APCS_FRAME
;
291 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>");
294 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>");
296 if (TARGET_APCS_REENT
&& flag_pic
)
297 fatal ("-fpic and -mapcs-reent are incompatible");
299 if (TARGET_APCS_REENT
)
300 warning ("APCS reentrant code not supported.");
302 /* If stack checking is disabled, we can use r10 as the PIC register,
303 which keeps r9 available. */
304 if (flag_pic
&& ! TARGET_APCS_STACK
)
305 arm_pic_register
= 10;
307 /* Well, I'm about to have a go, but pic is NOT going to be compatible
308 with APCS reentrancy, since that requires too much support in the
309 assembler and linker, and the ARMASM assembler seems to lack some
310 required directives. */
312 warning ("Position independent code not supported. Ignored");
314 if (TARGET_APCS_FLOAT
)
315 warning ("Passing floating point arguments in fp regs not yet supported");
317 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
319 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
320 target_flags
|= ARM_FLAG_APCS_FRAME
;
323 /* Default is to tune for an FPA */
326 /* Default value for floating point code... if no co-processor
327 bus, then schedule for emulated floating point. Otherwise,
328 assume the user has an FPA.
329 Note: this does not prevent use of floating point instructions,
330 -msoft-float does that. */
331 if (tune_flags
& FL_CO_PROC
== 0)
334 arm_fast_multiply
= (flags
& FL_FAST_MULT
) != 0;
335 arm_arch4
= (flags
& FL_ARCH4
) != 0;
336 arm_thumb_aware
= (flags
& FL_THUMB
) != 0;
340 if (strcmp (target_fp_name
, "2") == 0)
341 arm_fpu_arch
= FP_SOFT2
;
342 else if (strcmp (target_fp_name
, "3") == 0)
343 arm_fpu_arch
= FP_HARD
;
345 fatal ("Invalid floating point emulation option: -mfpe=%s",
349 arm_fpu_arch
= FP_DEFAULT
;
351 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
353 warning ("This processor variant does not support Thumb interworking");
354 target_flags
&= ~ARM_FLAG_THUMB
;
357 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
360 /* For arm2/3 there is no need to do any scheduling if there is only
361 a floating point emulator, or we are doing software floating-point. */
362 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
363 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
365 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
369 /* Return 1 if it is possible to return using a single instruction */
376 if (!reload_completed
||current_function_pretend_args_size
377 || current_function_anonymous_args
378 || ((get_frame_size () + current_function_outgoing_args_size
!= 0)
379 && !(TARGET_APCS
|| frame_pointer_needed
)))
382 /* Can't be done if interworking with Thumb, and any registers have been
384 if (TARGET_THUMB_INTERWORK
)
385 for (regno
= 0; regno
< 16; regno
++)
386 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
389 /* Can't be done if any of the FPU regs are pushed, since this also
391 for (regno
= 16; regno
< 24; regno
++)
392 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
395 /* If a function is naked, don't use the "return" insn. */
396 if (arm_naked_function_p (current_function_decl
))
402 /* Return TRUE if int I is a valid immediate ARM constant. */
408 unsigned HOST_WIDE_INT mask
= ~0xFF;
410 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
411 be all zero, or all one. */
412 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff) != 0
413 && ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff)
414 != (((HOST_WIDE_INT
) -1) & ~(unsigned HOST_WIDE_INT
) 0xffffffff)))
417 /* Fast return for 0 and powers of 2 */
418 if ((i
& (i
- 1)) == 0)
423 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
426 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
427 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
428 } while (mask
!= ~0xFF);
433 /* Return true if I is a valid constant for the operation CODE. */
435 const_ok_for_op (i
, code
, mode
)
438 enum machine_mode mode
;
440 if (const_ok_for_arm (i
))
446 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
448 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
454 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
461 /* Emit a sequence of insns to handle a large constant.
462 CODE is the code of the operation required, it can be any of SET, PLUS,
463 IOR, AND, XOR, MINUS;
464 MODE is the mode in which the operation is being performed;
465 VAL is the integer to operate on;
466 SOURCE is the other operand (a register, or a null-pointer for SET);
467 SUBTARGETS means it is safe to create scratch registers if that will
468 either produce a simpler sequence, or we will want to cse the values.
469 Return value is the number of insns emitted. */
472 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
474 enum machine_mode mode
;
480 if (subtargets
|| code
== SET
481 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
482 && REGNO (target
) != REGNO (source
)))
486 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
487 > arm_constant_limit
+ (code
!= SET
))
491 /* Currently SET is the only monadic value for CODE, all
492 the rest are diadic. */
493 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
498 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
500 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
501 /* For MINUS, the value is subtracted from, since we never
502 have subtraction of a constant. */
504 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
505 gen_rtx (code
, mode
, temp
, source
)));
507 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
508 gen_rtx (code
, mode
, source
, temp
)));
514 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
517 /* As above, but extra parameter GENERATE which, if clear, suppresses
520 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
522 enum machine_mode mode
;
532 int can_negate_initial
= 0;
535 int num_bits_set
= 0;
536 int set_sign_bit_copies
= 0;
537 int clear_sign_bit_copies
= 0;
538 int clear_zero_bit_copies
= 0;
539 int set_zero_bit_copies
= 0;
542 unsigned HOST_WIDE_INT temp1
, temp2
;
543 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
545 /* find out which operations are safe for a given CODE. Also do a quick
546 check for degenerate cases; these can occur when DImode operations
558 can_negate_initial
= 1;
562 if (remainder
== 0xffffffff)
565 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
566 GEN_INT (ARM_SIGN_EXTEND (val
))));
571 if (reload_completed
&& rtx_equal_p (target
, source
))
574 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
583 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
586 if (remainder
== 0xffffffff)
588 if (reload_completed
&& rtx_equal_p (target
, source
))
591 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
600 if (reload_completed
&& rtx_equal_p (target
, source
))
603 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
606 if (remainder
== 0xffffffff)
609 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
610 gen_rtx (NOT
, mode
, source
)));
614 /* We don't know how to handle this yet below. */
618 /* We treat MINUS as (val - source), since (source - val) is always
619 passed as (source + (-val)). */
623 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
624 gen_rtx (NEG
, mode
, source
)));
627 if (const_ok_for_arm (val
))
630 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
631 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
642 /* If we can do it in one insn get out quickly */
643 if (const_ok_for_arm (val
)
644 || (can_negate_initial
&& const_ok_for_arm (-val
))
645 || (can_invert
&& const_ok_for_arm (~val
)))
648 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
649 (source
? gen_rtx (code
, mode
, source
,
656 /* Calculate a few attributes that may be useful for specific
659 for (i
= 31; i
>= 0; i
--)
661 if ((remainder
& (1 << i
)) == 0)
662 clear_sign_bit_copies
++;
667 for (i
= 31; i
>= 0; i
--)
669 if ((remainder
& (1 << i
)) != 0)
670 set_sign_bit_copies
++;
675 for (i
= 0; i
<= 31; i
++)
677 if ((remainder
& (1 << i
)) == 0)
678 clear_zero_bit_copies
++;
683 for (i
= 0; i
<= 31; i
++)
685 if ((remainder
& (1 << i
)) != 0)
686 set_zero_bit_copies
++;
694 /* See if we can do this by sign_extending a constant that is known
695 to be negative. This is a good way of doing it, since the shift
696 may well merge into a subsequent insn. */
697 if (set_sign_bit_copies
> 1)
700 (temp1
= ARM_SIGN_EXTEND (remainder
701 << (set_sign_bit_copies
- 1))))
705 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
706 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
708 emit_insn (gen_ashrsi3 (target
, new_src
,
709 GEN_INT (set_sign_bit_copies
- 1)));
713 /* For an inverted constant, we will need to set the low bits,
714 these will be shifted out of harm's way. */
715 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
716 if (const_ok_for_arm (~temp1
))
720 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
721 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
723 emit_insn (gen_ashrsi3 (target
, new_src
,
724 GEN_INT (set_sign_bit_copies
- 1)));
730 /* See if we can generate this by setting the bottom (or the top)
731 16 bits, and then shifting these into the other half of the
732 word. We only look for the simplest cases, to do more would cost
733 too much. Be careful, however, not to generate this when the
734 alternative would take fewer insns. */
735 if (val
& 0xffff0000)
737 temp1
= remainder
& 0xffff0000;
738 temp2
= remainder
& 0x0000ffff;
740 /* Overlaps outside this range are best done using other methods. */
741 for (i
= 9; i
< 24; i
++)
743 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
744 && ! const_ok_for_arm (temp2
))
746 insns
= arm_gen_constant (code
, mode
, temp2
,
747 new_src
= (subtargets
750 source
, subtargets
, generate
);
753 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
755 gen_rtx (ASHIFT
, mode
, source
,
762 /* Don't duplicate cases already considered. */
763 for (i
= 17; i
< 24; i
++)
765 if (((temp1
| (temp1
>> i
)) == remainder
)
766 && ! const_ok_for_arm (temp1
))
768 insns
= arm_gen_constant (code
, mode
, temp1
,
769 new_src
= (subtargets
772 source
, subtargets
, generate
);
775 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
777 gen_rtx (LSHIFTRT
, mode
,
778 source
, GEN_INT (i
)),
788 /* If we have IOR or XOR, and the constant can be loaded in a
789 single instruction, and we can find a temporary to put it in,
790 then this can be done in two instructions instead of 3-4. */
792 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
794 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
798 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
800 emit_insn (gen_rtx (SET
, VOIDmode
, sub
, GEN_INT (val
)));
801 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
802 gen_rtx (code
, mode
, source
, sub
)));
811 if (set_sign_bit_copies
> 8
812 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
816 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
817 rtx shift
= GEN_INT (set_sign_bit_copies
);
819 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
821 gen_rtx (ASHIFT
, mode
, source
,
823 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
825 gen_rtx (LSHIFTRT
, mode
, sub
,
831 if (set_zero_bit_copies
> 8
832 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
836 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
837 rtx shift
= GEN_INT (set_zero_bit_copies
);
839 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
841 gen_rtx (LSHIFTRT
, mode
, source
,
843 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
845 gen_rtx (ASHIFT
, mode
, sub
,
851 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
855 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
856 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
857 gen_rtx (NOT
, mode
, source
)));
860 sub
= gen_reg_rtx (mode
);
861 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
862 gen_rtx (AND
, mode
, source
,
864 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
865 gen_rtx (NOT
, mode
, sub
)));
872 /* See if two shifts will do 2 or more insn's worth of work. */
873 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
875 HOST_WIDE_INT shift_mask
= ((0xffffffff
876 << (32 - clear_sign_bit_copies
))
881 if ((remainder
| shift_mask
) != 0xffffffff)
885 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
886 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
887 new_source
, source
, subtargets
, 1);
891 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
892 new_source
, source
, subtargets
, 0);
897 shift
= GEN_INT (clear_sign_bit_copies
);
898 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
899 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
900 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
906 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
908 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
912 if ((remainder
| shift_mask
) != 0xffffffff)
916 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
917 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
918 new_source
, source
, subtargets
, 1);
922 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
923 new_source
, source
, subtargets
, 0);
928 shift
= GEN_INT (clear_zero_bit_copies
);
929 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
930 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
931 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
943 for (i
= 0; i
< 32; i
++)
944 if (remainder
& (1 << i
))
947 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
948 remainder
= (~remainder
) & 0xffffffff;
949 else if (code
== PLUS
&& num_bits_set
> 16)
950 remainder
= (-remainder
) & 0xffffffff;
957 /* Now try and find a way of doing the job in either two or three
958 insns.
959 We start by looking for the largest block of zeros that are aligned on
960 a 2-bit boundary, we then fill up the temps, wrapping around to the
961 top of the word when we drop off the bottom.
962 In the worst case this code should produce no more than four insns. */
965 int best_consecutive_zeros
= 0;
967 for (i
= 0; i
< 32; i
+= 2)
969 int consecutive_zeros
= 0;
971 if (! (remainder
& (3 << i
)))
973 while ((i
< 32) && ! (remainder
& (3 << i
)))
975 consecutive_zeros
+= 2;
978 if (consecutive_zeros
> best_consecutive_zeros
)
980 best_consecutive_zeros
= consecutive_zeros
;
981 best_start
= i
- consecutive_zeros
;
987 /* Now start emitting the insns, starting with the one with the highest
988 bit set: we do this so that the smallest number will be emitted last;
989 this is more likely to be combinable with addressing insns. */
997 if (remainder
& (3 << (i
- 2)))
1002 temp1
= remainder
& ((0x0ff << end
)
1003 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
1004 remainder
&= ~temp1
;
1009 emit_insn (gen_rtx (SET
, VOIDmode
,
1010 new_src
= (subtargets
1011 ? gen_reg_rtx (mode
)
1013 GEN_INT (can_invert
? ~temp1
: temp1
)));
1017 else if (code
== MINUS
)
1020 emit_insn (gen_rtx (SET
, VOIDmode
,
1021 new_src
= (subtargets
1022 ? gen_reg_rtx (mode
)
1024 gen_rtx (code
, mode
, GEN_INT (temp1
),
1031 emit_insn (gen_rtx (SET
, VOIDmode
,
1032 new_src
= (remainder
1034 ? gen_reg_rtx (mode
)
1037 gen_rtx (code
, mode
, source
,
1038 GEN_INT (can_invert
? ~temp1
1049 } while (remainder
);
1054 /* Canonicalize a comparison so that we are more likely to recognize it.
1055 This can be done for a few constant compares, where we can make the
1056 immediate value easier to load. */
1058 arm_canonicalize_comparison (code
, op1
)
1062 HOST_WIDE_INT i
= INTVAL (*op1
);
1072 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1) - 1)
1073 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1075 *op1
= GEN_INT (i
+1);
1076 return code
== GT
? GE
: LT
;
1082 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1))
1083 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1085 *op1
= GEN_INT (i
-1);
1086 return code
== GE
? GT
: LE
;
1093 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1095 *op1
= GEN_INT (i
+ 1);
1096 return code
== GTU
? GEU
: LTU
;
1103 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1105 *op1
= GEN_INT (i
- 1);
1106 return code
== GEU
? GTU
: LEU
;
1118 /* Handle aggregates that are not laid out in a BLKmode element.
1119 This is a sub-element of RETURN_IN_MEMORY. */
1121 arm_return_in_memory (type
)
1124 if (TREE_CODE (type
) == RECORD_TYPE
)
1128 /* For a struct, we can return in a register if every element was a
1130 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1131 if (TREE_CODE (field
) != FIELD_DECL
1132 || ! DECL_BIT_FIELD_TYPE (field
))
1137 else if (TREE_CODE (type
) == UNION_TYPE
)
1141 /* Unions can be returned in registers if every element is
1142 integral, or can be returned in an integer register. */
1143 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1145 if (TREE_CODE (field
) != FIELD_DECL
1146 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
1147 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
1148 || FLOAT_TYPE_P (TREE_TYPE (field
)))
1153 /* XXX Not sure what should be done for other aggregates, so put them in
1159 legitimate_pic_operand_p (x
)
1162 if (CONSTANT_P (x
) && flag_pic
1163 && (GET_CODE (x
) == SYMBOL_REF
1164 || (GET_CODE (x
) == CONST
1165 && GET_CODE (XEXP (x
, 0)) == PLUS
1166 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
1173 legitimize_pic_address (orig
, mode
, reg
)
1175 enum machine_mode mode
;
1178 if (GET_CODE (orig
) == SYMBOL_REF
)
1180 rtx pic_ref
, address
;
1186 if (reload_in_progress
|| reload_completed
)
1189 reg
= gen_reg_rtx (Pmode
);
1194 #ifdef AOF_ASSEMBLER
1195 /* The AOF assembler can generate relocations for these directly, and
1196 understands that the PIC register has to be added into the offset.
1198 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
1201 address
= gen_reg_rtx (Pmode
);
1205 emit_insn (gen_pic_load_addr (address
, orig
));
1207 pic_ref
= gen_rtx (MEM
, Pmode
,
1208 gen_rtx (PLUS
, Pmode
, pic_offset_table_rtx
, address
));
1209 RTX_UNCHANGING_P (pic_ref
) = 1;
1210 insn
= emit_move_insn (reg
, pic_ref
);
1212 current_function_uses_pic_offset_table
= 1;
1213 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1215 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
, orig
,
1219 else if (GET_CODE (orig
) == CONST
)
1223 if (GET_CODE (XEXP (orig
, 0)) == PLUS
1224 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
1229 if (reload_in_progress
|| reload_completed
)
1232 reg
= gen_reg_rtx (Pmode
);
1235 if (GET_CODE (XEXP (orig
, 0)) == PLUS
)
1237 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
1238 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
1239 base
== reg
? 0 : reg
);
1244 if (GET_CODE (offset
) == CONST_INT
)
1246 /* The base register doesn't really matter, we only want to
1247 test the index for the appropriate mode. */
1248 GO_IF_LEGITIMATE_INDEX (mode
, 0, offset
, win
);
1250 if (! reload_in_progress
&& ! reload_completed
)
1251 offset
= force_reg (Pmode
, offset
);
1256 if (GET_CODE (offset
) == CONST_INT
)
1257 return plus_constant_for_output (base
, INTVAL (offset
));
1260 if (GET_MODE_SIZE (mode
) > 4
1261 && (GET_MODE_CLASS (mode
) == MODE_INT
1262 || TARGET_SOFT_FLOAT
))
1264 emit_insn (gen_addsi3 (reg
, base
, offset
));
1268 return gen_rtx (PLUS
, Pmode
, base
, offset
);
1270 else if (GET_CODE (orig
) == LABEL_REF
)
1271 current_function_uses_pic_offset_table
= 1;
1290 #ifndef AOF_ASSEMBLER
1291 rtx l1
, pic_tmp
, pic_tmp2
, seq
;
1292 rtx global_offset_table
;
1294 if (current_function_uses_pic_offset_table
== 0)
1301 l1
= gen_label_rtx ();
1303 global_offset_table
= gen_rtx (SYMBOL_REF
, Pmode
, "_GLOBAL_OFFSET_TABLE_");
1304 /* The PC contains 'dot'+8, but the label L1 is on the next
1305 instruction, so the offset is only 'dot'+4. */
1306 pic_tmp
= gen_rtx (CONST
, VOIDmode
,
1307 gen_rtx (PLUS
, Pmode
,
1308 gen_rtx (LABEL_REF
, VOIDmode
, l1
),
1310 pic_tmp2
= gen_rtx (CONST
, VOIDmode
,
1311 gen_rtx (PLUS
, Pmode
,
1312 global_offset_table
,
1315 pic_rtx
= gen_rtx (CONST
, Pmode
,
1316 gen_rtx (MINUS
, Pmode
, pic_tmp2
, pic_tmp
));
1318 emit_insn (gen_pic_load_addr (pic_offset_table_rtx
, pic_rtx
));
1319 emit_jump_insn (gen_pic_add_dot_plus_eight(l1
, pic_offset_table_rtx
));
1322 seq
= gen_sequence ();
1324 emit_insn_after (seq
, get_insns ());
1326 /* Need to emit this whether or not we obey regdecls,
1327 since setjmp/longjmp can cause life info to screw up. */
1328 emit_insn (gen_rtx (USE
, VOIDmode
, pic_offset_table_rtx
));
1329 #endif /* AOF_ASSEMBLER */
/* Nonzero if X is a REG, or a SUBREG whose inner expression is a REG.
   Used by the rtx cost code below to decide whether an operand is
   register-like.  */
1332 #define REG_OR_SUBREG_REG(X) \
1333 (GET_CODE (X) == REG \
1334 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
/* The underlying REG of X: X itself when it is a REG, otherwise the
   SUBREG's inner rtx.  Assumes X already satisfies REG_OR_SUBREG_REG.  */
1336 #define REG_OR_SUBREG_RTX(X) \
1337 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
/* Nonzero if X is one of the frame-related pointer registers (frame,
   stack, or argument pointer), which are likely to be spilt into
   reg+const during reload.  */
1339 #define ARM_FRAME_RTX(X) \
1340 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1341 || (X) == arg_pointer_rtx)
1344 arm_rtx_costs (x
, code
, outer_code
)
1346 enum rtx_code code
, outer_code
;
1348 enum machine_mode mode
= GET_MODE (x
);
1349 enum rtx_code subcode
;
1355 /* Memory costs quite a lot for the first word, but subsequent words
1356 load at the equivalent of a single insn each. */
1357 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1358 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1365 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1372 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1374 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1375 + ((GET_CODE (XEXP (x
, 0)) == REG
1376 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1377 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1379 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1380 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1381 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1383 + ((GET_CODE (XEXP (x
, 1)) == REG
1384 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1385 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1386 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1391 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1392 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1393 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1394 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1397 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1398 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1399 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1400 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1402 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1403 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1404 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1407 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1408 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1409 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1410 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1411 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1412 || subcode
== ROTATE
|| subcode
== ROTATERT
1414 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1415 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1416 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1417 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1418 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1419 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1420 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1425 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1426 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1427 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1428 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1429 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1433 case AND
: case XOR
: case IOR
:
1436 /* Normally the frame registers will be spilt into reg+const during
1437 reload, so it is a bad idea to combine them with other instructions,
1438 since then they might not be moved outside of loops. As a compromise
1439 we allow integration with ops that have a constant as their second
1441 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1442 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1443 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1444 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1445 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1449 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1450 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1451 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1452 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1455 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1456 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1457 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1458 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1459 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1462 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1463 return (1 + extra_cost
1464 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1465 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1466 || subcode
== ROTATE
|| subcode
== ROTATERT
1468 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1469 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1470 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1471 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1472 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1473 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1479 /* There is no point basing this on the tuning, since it is always the
1480 fast variant if it exists at all */
1481 if (arm_fast_multiply
&& mode
== DImode
1482 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1483 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1484 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1487 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1491 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1493 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1494 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1495 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1497 /* Tune as appropriate */
1498 int booth_unit_size
= ((tune_flags
& FL_FAST_MULT
) ? 8 : 2);
1500 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1502 i
>>= booth_unit_size
;
1509 return (((tune_flags
& FL_FAST_MULT
) ? 8 : 30)
1510 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1511 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1514 if (arm_fast_multiply
&& mode
== SImode
1515 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
1516 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
1517 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
1518 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
1519 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
1520 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
1525 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1526 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1530 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1532 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1535 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1543 return 4 + (mode
== DImode
? 4 : 0);
1546 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1547 return (4 + (mode
== DImode
? 4 : 0)
1548 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1551 switch (GET_MODE (XEXP (x
, 0)))
1554 return (1 + (mode
== DImode
? 4 : 0)
1555 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1558 return (4 + (mode
== DImode
? 4 : 0)
1559 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1562 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
/* Scheduler hook: adjust the cost of the dependence LINK between INSN
   and DEP.  NOTE(review): this extract has lost several original lines
   (declarations of i_pat/d_pat and the return statements), so the exact
   returned costs cannot be confirmed from here -- verify against a
   complete copy of the file.  */
1572 arm_adjust_cost (insn
, link
, dep
, cost
)
/* Detect a load (SET_SRC is a MEM) that depends on a store
   (SET_DEST of DEP is a MEM).  */
1580 if ((i_pat
= single_set (insn
)) != NULL
1581 && GET_CODE (SET_SRC (i_pat
)) == MEM
1582 && (d_pat
= single_set (dep
)) != NULL
1583 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
1585 /* This is a load after a store, there is no conflict if the load reads
1586 from a cached area. Assume that loads from the stack, and from the
1587 constant pool are cached, and that others will miss. This is a
1590 /* debug_rtx (insn);
1593 fprintf (stderr, "costs %d\n", cost); */
/* Loads whose address involves the constant pool, the stack pointer or
   either frame pointer are assumed to hit the cache.  */
1595 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat
), 0))
1596 || reg_mentioned_p (stack_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1597 || reg_mentioned_p (frame_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1598 || reg_mentioned_p (hard_frame_pointer_rtx
,
1599 XEXP (SET_SRC (i_pat
), 0)))
1601 /* fprintf (stderr, "***** Now 1\n"); */
1609 /* This code has been fixed for cross compilation. */
/* Set once init_fpa_table has filled in values_fpa below.  */
1611 static int fpa_consts_inited
= 0;
/* Decimal spellings of the eight FPA immediate constants; parsed once
   into values_fpa.  NOTE(review): the extract shows only four of the
   eight strings -- the remainder were lost in extraction.  */
1613 char *strings_fpa
[8] = {
1615 "4", "5", "0.5", "10"
1618 static REAL_VALUE_TYPE values_fpa
[8];
/* init_fpa_table: parse each string into a REAL_VALUE_TYPE (done at run
   time so the table is correct when cross compiling).  */
1626 for (i
= 0; i
< 8; i
++)
1628 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1632 fpa_consts_inited
= 1;
1635 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1638 const_double_rtx_ok_for_fpu (x
)
1644 if (!fpa_consts_inited
)
1647 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1648 if (REAL_VALUE_MINUS_ZERO (r
))
1651 for (i
= 0; i
< 8; i
++)
1652 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1658 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1661 neg_const_double_rtx_ok_for_fpu (x
)
1667 if (!fpa_consts_inited
)
1670 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1671 r
= REAL_VALUE_NEGATE (r
);
1672 if (REAL_VALUE_MINUS_ZERO (r
))
1675 for (i
= 0; i
< 8; i
++)
1676 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1682 /* Predicates for `match_operand' and `match_operator'. */
1684 /* s_register_operand is the same as register_operand, but it doesn't accept
1687 This function exists because at the time it was put in it led to better
1688 code. SUBREG(MEM) always needs a reload in the places where
1689 s_register_operand is used, and this seemed to lead to excessive
1693 s_register_operand (op
, mode
)
1695 enum machine_mode mode
;
1697 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1700 if (GET_CODE (op
) == SUBREG
)
1701 op
= SUBREG_REG (op
);
1703 /* We don't consider registers whose class is NO_REGS
1704 to be a register operand. */
1705 return (GET_CODE (op
) == REG
1706 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1707 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1710 /* Only accept reg, subreg(reg), const_int. */
1713 reg_or_int_operand (op
, mode
)
1715 enum machine_mode mode
;
1717 if (GET_CODE (op
) == CONST_INT
)
1720 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1723 if (GET_CODE (op
) == SUBREG
)
1724 op
= SUBREG_REG (op
);
1726 /* We don't consider registers whose class is NO_REGS
1727 to be a register operand. */
1728 return (GET_CODE (op
) == REG
1729 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1730 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1733 /* Return 1 if OP is an item in memory, given that we are in reload. */
1736 reload_memory_operand (op
, mode
)
1738 enum machine_mode mode
;
1740 int regno
= true_regnum (op
);
1742 return (! CONSTANT_P (op
)
1744 || (GET_CODE (op
) == REG
1745 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1748 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
1749 memory access (architecture V4) */
1751 bad_signed_byte_operand (op
, mode
)
1753 enum machine_mode mode
;
1755 if (! memory_operand (op
, mode
) || GET_CODE (op
) != MEM
)
1760 /* A sum of anything more complex than reg + reg or reg + const is bad */
1761 if ((GET_CODE (op
) == PLUS
|| GET_CODE (op
) == MINUS
)
1762 && ! s_register_operand (XEXP (op
, 0), VOIDmode
))
1765 /* Big constants are also bad */
1766 if (GET_CODE (op
) == PLUS
&& GET_CODE (XEXP (op
, 1)) == CONST_INT
1767 && (INTVAL (XEXP (op
, 1)) > 0xff
1768 || -INTVAL (XEXP (op
, 1)) > 0xff))
1771 /* Everything else is good, or can will automatically be made so. */
1775 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1778 arm_rhs_operand (op
, mode
)
1780 enum machine_mode mode
;
1782 return (s_register_operand (op
, mode
)
1783 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1786 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1790 arm_rhsm_operand (op
, mode
)
1792 enum machine_mode mode
;
1794 return (s_register_operand (op
, mode
)
1795 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1796 || memory_operand (op
, mode
));
1799 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1800 constant that is valid when negated. */
1803 arm_add_operand (op
, mode
)
1805 enum machine_mode mode
;
1807 return (s_register_operand (op
, mode
)
1808 || (GET_CODE (op
) == CONST_INT
1809 && (const_ok_for_arm (INTVAL (op
))
1810 || const_ok_for_arm (-INTVAL (op
)))));
1814 arm_not_operand (op
, mode
)
1816 enum machine_mode mode
;
1818 return (s_register_operand (op
, mode
)
1819 || (GET_CODE (op
) == CONST_INT
1820 && (const_ok_for_arm (INTVAL (op
))
1821 || const_ok_for_arm (~INTVAL (op
)))));
1824 /* Return TRUE if the operand is a memory reference which contains an
1825 offsettable address. */
1827 offsettable_memory_operand (op
, mode
)
1829 enum machine_mode mode
;
1831 if (mode
== VOIDmode
)
1832 mode
= GET_MODE (op
);
1834 return (mode
== GET_MODE (op
)
1835 && GET_CODE (op
) == MEM
1836 && offsettable_address_p (reload_completed
| reload_in_progress
,
1837 mode
, XEXP (op
, 0)));
1840 /* Return TRUE if the operand is a memory reference which is, or can be
1841 made word aligned by adjusting the offset. */
1843 alignable_memory_operand (op
, mode
)
1845 enum machine_mode mode
;
1849 if (mode
== VOIDmode
)
1850 mode
= GET_MODE (op
);
1852 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
1857 return ((GET_CODE (reg
= op
) == REG
1858 || (GET_CODE (op
) == SUBREG
1859 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
1860 || (GET_CODE (op
) == PLUS
1861 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1862 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
1863 || (GET_CODE (XEXP (op
, 0)) == SUBREG
1864 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
1865 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
1868 /* Similar to s_register_operand, but does not allow hard integer
1871 f_register_operand (op
, mode
)
1873 enum machine_mode mode
;
1875 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1878 if (GET_CODE (op
) == SUBREG
)
1879 op
= SUBREG_REG (op
);
1881 /* We don't consider registers whose class is NO_REGS
1882 to be a register operand. */
1883 return (GET_CODE (op
) == REG
1884 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1885 || REGNO_REG_CLASS (REGNO (op
)) == FPU_REGS
));
1888 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1891 fpu_rhs_operand (op
, mode
)
1893 enum machine_mode mode
;
1895 if (s_register_operand (op
, mode
))
1897 else if (GET_CODE (op
) == CONST_DOUBLE
)
1898 return (const_double_rtx_ok_for_fpu (op
));
1904 fpu_add_operand (op
, mode
)
1906 enum machine_mode mode
;
1908 if (s_register_operand (op
, mode
))
1910 else if (GET_CODE (op
) == CONST_DOUBLE
)
1911 return (const_double_rtx_ok_for_fpu (op
)
1912 || neg_const_double_rtx_ok_for_fpu (op
));
1917 /* Return nonzero if OP is a constant power of two. */
1920 power_of_two_operand (op
, mode
)
1922 enum machine_mode mode
;
1924 if (GET_CODE (op
) == CONST_INT
)
1926 HOST_WIDE_INT value
= INTVAL(op
);
1927 return value
!= 0 && (value
& (value
- 1)) == 0;
1932 /* Return TRUE for a valid operand of a DImode operation.
1933 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1934 Note that this disallows MEM(REG+REG), but allows
1935 MEM(PRE/POST_INC/DEC(REG)). */
1938 di_operand (op
, mode
)
1940 enum machine_mode mode
;
1942 if (s_register_operand (op
, mode
))
1945 switch (GET_CODE (op
))
1952 return memory_address_p (DImode
, XEXP (op
, 0));
1959 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1960 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1961 Note that this disallows MEM(REG+REG), but allows
1962 MEM(PRE/POST_INC/DEC(REG)). */
1965 soft_df_operand (op
, mode
)
1967 enum machine_mode mode
;
1969 if (s_register_operand (op
, mode
))
1972 switch (GET_CODE (op
))
1978 return memory_address_p (DFmode
, XEXP (op
, 0));
1985 /* Return TRUE for valid index operands. */
1988 index_operand (op
, mode
)
1990 enum machine_mode mode
;
1992 return (s_register_operand(op
, mode
)
1993 || (immediate_operand (op
, mode
)
1994 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1997 /* Return TRUE for valid shifts by a constant. This also accepts any
1998 power of two on the (somewhat overly relaxed) assumption that the
1999 shift operator in this case was a mult. */
2002 const_shift_operand (op
, mode
)
2004 enum machine_mode mode
;
2006 return (power_of_two_operand (op
, mode
)
2007 || (immediate_operand (op
, mode
)
2008 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
2011 /* Return TRUE for arithmetic operators which can be combined with a multiply
2015 shiftable_operator (x
, mode
)
2017 enum machine_mode mode
;
2019 if (GET_MODE (x
) != mode
)
2023 enum rtx_code code
= GET_CODE (x
);
2025 return (code
== PLUS
|| code
== MINUS
2026 || code
== IOR
|| code
== XOR
|| code
== AND
);
2030 /* Return TRUE for shift operators. */
2033 shift_operator (x
, mode
)
2035 enum machine_mode mode
;
2037 if (GET_MODE (x
) != mode
)
2041 enum rtx_code code
= GET_CODE (x
);
2044 return power_of_two_operand (XEXP (x
, 1));
2046 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
2047 || code
== ROTATERT
);
2051 int equality_operator (x
, mode
)
2053 enum machine_mode mode
;
2055 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
2058 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
2061 minmax_operator (x
, mode
)
2063 enum machine_mode mode
;
2065 enum rtx_code code
= GET_CODE (x
);
2067 if (GET_MODE (x
) != mode
)
2070 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
2073 /* return TRUE if x is EQ or NE */
2075 /* Return TRUE if this is the condition code register, if we aren't given
2076 a mode, accept any class CCmode register */
2079 cc_register (x
, mode
)
2081 enum machine_mode mode
;
2083 if (mode
== VOIDmode
)
2085 mode
= GET_MODE (x
);
2086 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2090 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2096 /* Return TRUE if this is the condition code register, if we aren't given
2097 a mode, accept any class CCmode register which indicates a dominance
2101 dominant_cc_register (x
, mode
)
2103 enum machine_mode mode
;
2105 if (mode
== VOIDmode
)
2107 mode
= GET_MODE (x
);
2108 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2112 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
2113 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
2114 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
2115 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
2116 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
2119 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2125 /* Return TRUE if X references a SYMBOL_REF. */
2127 symbol_mentioned_p (x
)
2133 if (GET_CODE (x
) == SYMBOL_REF
)
2136 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2137 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2143 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2144 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
2147 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
2154 /* Return TRUE if X references a LABEL_REF. */
2156 label_mentioned_p (x
)
2162 if (GET_CODE (x
) == LABEL_REF
)
2165 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2166 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2172 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2173 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
2176 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
2187 enum rtx_code code
= GET_CODE (x
);
2191 else if (code
== SMIN
)
2193 else if (code
== UMIN
)
2195 else if (code
== UMAX
)
2201 /* Return 1 if memory locations are adjacent */
2204 adjacent_mem_locations (a
, b
)
2207 int val0
= 0, val1
= 0;
2210 if ((GET_CODE (XEXP (a
, 0)) == REG
2211 || (GET_CODE (XEXP (a
, 0)) == PLUS
2212 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
2213 && (GET_CODE (XEXP (b
, 0)) == REG
2214 || (GET_CODE (XEXP (b
, 0)) == PLUS
2215 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
2217 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
2219 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
2220 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
2223 reg0
= REGNO (XEXP (a
, 0));
2224 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
2226 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
2227 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
2230 reg1
= REGNO (XEXP (b
, 0));
2231 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
2236 /* Return 1 if OP is a load multiple operation. It is known to be
2237 parallel and the first section will be tested. */
2240 load_multiple_operation (op
, mode
)
2242 enum machine_mode mode
;
2244 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2247 HOST_WIDE_INT i
= 1, base
= 0;
2251 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2254 /* Check to see if this might be a write-back */
2255 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2260 /* Now check it more carefully */
2261 if (GET_CODE (SET_DEST (elt
)) != REG
2262 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2263 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2264 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2265 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2266 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2267 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2268 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2269 != REGNO (SET_DEST (elt
)))
2275 /* Perform a quick check so we don't blow up below. */
2277 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2278 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
2279 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
2282 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
2283 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
2285 for (; i
< count
; i
++)
2287 rtx elt
= XVECEXP (op
, 0, i
);
2289 if (GET_CODE (elt
) != SET
2290 || GET_CODE (SET_DEST (elt
)) != REG
2291 || GET_MODE (SET_DEST (elt
)) != SImode
2292 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
2293 || GET_CODE (SET_SRC (elt
)) != MEM
2294 || GET_MODE (SET_SRC (elt
)) != SImode
2295 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
2296 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
2297 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
2298 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
2305 /* Return 1 if OP is a store multiple operation. It is known to be
2306 parallel and the first section will be tested. */
2309 store_multiple_operation (op
, mode
)
2311 enum machine_mode mode
;
2313 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2316 HOST_WIDE_INT i
= 1, base
= 0;
2320 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2323 /* Check to see if this might be a write-back */
2324 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2329 /* Now check it more carefully */
2330 if (GET_CODE (SET_DEST (elt
)) != REG
2331 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2332 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2333 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2334 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2335 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2336 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2337 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2338 != REGNO (SET_DEST (elt
)))
2344 /* Perform a quick check so we don't blow up below. */
2346 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2347 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
2348 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
2351 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
2352 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
2354 for (; i
< count
; i
++)
2356 elt
= XVECEXP (op
, 0, i
);
2358 if (GET_CODE (elt
) != SET
2359 || GET_CODE (SET_SRC (elt
)) != REG
2360 || GET_MODE (SET_SRC (elt
)) != SImode
2361 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2362 || GET_CODE (SET_DEST (elt
)) != MEM
2363 || GET_MODE (SET_DEST (elt
)) != SImode
2364 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2365 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2366 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2367 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2375 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2380 HOST_WIDE_INT
*load_offset
;
2382 int unsorted_regs
[4];
2383 HOST_WIDE_INT unsorted_offsets
[4];
2388 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2389 extended if required. */
2390 if (nops
< 2 || nops
> 4)
2393 /* Loop over the operands and check that the memory references are
2394 suitable (ie immediate offsets from the same base register). At
2395 the same time, extract the target register, and the memory
2397 for (i
= 0; i
< nops
; i
++)
2402 /* Convert a subreg of a mem into the mem itself. */
2403 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2404 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2406 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2409 /* Don't reorder volatile memory references; it doesn't seem worth
2410 looking for the case where the order is ok anyway. */
2411 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2414 offset
= const0_rtx
;
2416 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2417 || (GET_CODE (reg
) == SUBREG
2418 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2419 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2420 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2422 || (GET_CODE (reg
) == SUBREG
2423 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2424 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2429 base_reg
= REGNO(reg
);
2430 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2431 ? REGNO (operands
[i
])
2432 : REGNO (SUBREG_REG (operands
[i
])));
2437 if (base_reg
!= REGNO (reg
))
2438 /* Not addressed from the same base register. */
2441 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2442 ? REGNO (operands
[i
])
2443 : REGNO (SUBREG_REG (operands
[i
])));
2444 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2448 /* If it isn't an integer register, or if it overwrites the
2449 base register but isn't the last insn in the list, then
2450 we can't do this. */
2451 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2452 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2455 unsorted_offsets
[i
] = INTVAL (offset
);
2458 /* Not a suitable memory address. */
2462 /* All the useful information has now been extracted from the
2463 operands into unsorted_regs and unsorted_offsets; additionally,
2464 order[0] has been set to the lowest numbered register in the
2465 list. Sort the registers into order, and check that the memory
2466 offsets are ascending and adjacent. */
2468 for (i
= 1; i
< nops
; i
++)
2472 order
[i
] = order
[i
- 1];
2473 for (j
= 0; j
< nops
; j
++)
2474 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2475 && (order
[i
] == order
[i
- 1]
2476 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2479 /* Have we found a suitable register? if not, one must be used more
2481 if (order
[i
] == order
[i
- 1])
2484 /* Is the memory address adjacent and ascending? */
2485 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2493 for (i
= 0; i
< nops
; i
++)
2494 regs
[i
] = unsorted_regs
[order
[i
]];
2496 *load_offset
= unsorted_offsets
[order
[0]];
2499 if (unsorted_offsets
[order
[0]] == 0)
2500 return 1; /* ldmia */
2502 if (unsorted_offsets
[order
[0]] == 4)
2503 return 2; /* ldmib */
2505 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2506 return 3; /* ldmda */
2508 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2509 return 4; /* ldmdb */
2511 /* Can't do it without setting up the offset, only do this if it takes
2512 no more than one insn. */
2513 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2514 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2518 emit_ldm_seq (operands
, nops
)
2524 HOST_WIDE_INT offset
;
2528 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2531 strcpy (buf
, "ldm%?ia\t");
2535 strcpy (buf
, "ldm%?ib\t");
2539 strcpy (buf
, "ldm%?da\t");
2543 strcpy (buf
, "ldm%?db\t");
2548 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2549 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2552 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2553 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2555 output_asm_insn (buf
, operands
);
2557 strcpy (buf
, "ldm%?ia\t");
2564 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2565 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2567 for (i
= 1; i
< nops
; i
++)
2568 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2569 reg_names
[regs
[i
]]);
2571 strcat (buf
, "}\t%@ phole ldm");
2573 output_asm_insn (buf
, operands
);
2578 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2583 HOST_WIDE_INT
*load_offset
;
2585 int unsorted_regs
[4];
2586 HOST_WIDE_INT unsorted_offsets
[4];
2591 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2592 extended if required. */
2593 if (nops
< 2 || nops
> 4)
2596 /* Loop over the operands and check that the memory references are
2597 suitable (ie immediate offsets from the same base register). At
2598 the same time, extract the target register, and the memory
2600 for (i
= 0; i
< nops
; i
++)
2605 /* Convert a subreg of a mem into the mem itself. */
2606 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2607 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2609 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2612 /* Don't reorder volatile memory references; it doesn't seem worth
2613 looking for the case where the order is ok anyway. */
2614 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2617 offset
= const0_rtx
;
2619 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2620 || (GET_CODE (reg
) == SUBREG
2621 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2622 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2623 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2625 || (GET_CODE (reg
) == SUBREG
2626 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2627 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2632 base_reg
= REGNO(reg
);
2633 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2634 ? REGNO (operands
[i
])
2635 : REGNO (SUBREG_REG (operands
[i
])));
2640 if (base_reg
!= REGNO (reg
))
2641 /* Not addressed from the same base register. */
2644 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2645 ? REGNO (operands
[i
])
2646 : REGNO (SUBREG_REG (operands
[i
])));
2647 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2651 /* If it isn't an integer register, then we can't do this. */
2652 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2655 unsorted_offsets
[i
] = INTVAL (offset
);
2658 /* Not a suitable memory address. */
2662 /* All the useful information has now been extracted from the
2663 operands into unsorted_regs and unsorted_offsets; additionally,
2664 order[0] has been set to the lowest numbered register in the
2665 list. Sort the registers into order, and check that the memory
2666 offsets are ascending and adjacent. */
2668 for (i
= 1; i
< nops
; i
++)
2672 order
[i
] = order
[i
- 1];
2673 for (j
= 0; j
< nops
; j
++)
2674 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2675 && (order
[i
] == order
[i
- 1]
2676 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2679 /* Have we found a suitable register? if not, one must be used more
2681 if (order
[i
] == order
[i
- 1])
2684 /* Is the memory address adjacent and ascending? */
2685 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2693 for (i
= 0; i
< nops
; i
++)
2694 regs
[i
] = unsorted_regs
[order
[i
]];
2696 *load_offset
= unsorted_offsets
[order
[0]];
2699 if (unsorted_offsets
[order
[0]] == 0)
2700 return 1; /* stmia */
2702 if (unsorted_offsets
[order
[0]] == 4)
2703 return 2; /* stmib */
2705 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2706 return 3; /* stmda */
2708 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2709 return 4; /* stmdb */
2715 emit_stm_seq (operands
, nops
)
2721 HOST_WIDE_INT offset
;
2725 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2728 strcpy (buf
, "stm%?ia\t");
2732 strcpy (buf
, "stm%?ib\t");
2736 strcpy (buf
, "stm%?da\t");
2740 strcpy (buf
, "stm%?db\t");
2747 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2748 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2750 for (i
= 1; i
< nops
; i
++)
2751 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2752 reg_names
[regs
[i
]]);
2754 strcat (buf
, "}\t%@ phole stm");
2756 output_asm_insn (buf
, operands
);
2761 multi_register_push (op
, mode
)
2763 enum machine_mode mode
;
2765 if (GET_CODE (op
) != PARALLEL
2766 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2767 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
2768 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
2775 /* Routines for use with attributes */
2777 /* Return nonzero if ATTR is a valid attribute for DECL.
2778 ATTRIBUTES are any existing attributes and ARGS are the arguments
2781 Supported attributes:
2783 naked: don't output any prologue or epilogue code, the user is assumed
2784 to do the right thing. */
2787 arm_valid_machine_decl_attribute (decl
, attributes
, attr
, args
)
2793 if (args
!= NULL_TREE
)
2796 if (is_attribute_p ("naked", attr
))
2797 return TREE_CODE (decl
) == FUNCTION_DECL
;
2801 /* Return non-zero if FUNC is a naked function. */
2804 arm_naked_function_p (func
)
2809 if (TREE_CODE (func
) != FUNCTION_DECL
)
2812 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
2813 return a
!= NULL_TREE
;
2816 /* Routines for use in generating RTL */
2819 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
, unchanging_p
,
2831 int sign
= up
? 1 : -1;
2834 result
= gen_rtx (PARALLEL
, VOIDmode
,
2835 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2838 XVECEXP (result
, 0, 0)
2839 = gen_rtx (SET
, GET_MODE (from
), from
,
2840 plus_constant (from
, count
* 4 * sign
));
2845 for (j
= 0; i
< count
; i
++, j
++)
2847 mem
= gen_rtx (MEM
, SImode
, plus_constant (from
, j
* 4 * sign
));
2848 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2849 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2851 XVECEXP (result
, 0, i
) = gen_rtx (SET
, VOIDmode
,
2852 gen_rtx (REG
, SImode
, base_regno
+ j
),
2857 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
2863 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
, unchanging_p
,
2875 int sign
= up
? 1 : -1;
2878 result
= gen_rtx (PARALLEL
, VOIDmode
,
2879 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2882 XVECEXP (result
, 0, 0)
2883 = gen_rtx (SET
, GET_MODE (to
), to
,
2884 plus_constant (to
, count
* 4 * sign
));
2889 for (j
= 0; i
< count
; i
++, j
++)
2891 mem
= gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
));
2892 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2893 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2895 XVECEXP (result
, 0, i
) = gen_rtx (SET
, VOIDmode
, mem
,
2896 gen_rtx (REG
, SImode
, base_regno
+ j
));
2900 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
2906 arm_gen_movstrqi (operands
)
2909 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
2912 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
2913 rtx part_bytes_reg
= NULL
;
2915 int dst_unchanging_p
, dst_in_struct_p
, src_unchanging_p
, src_in_struct_p
;
2916 extern int optimize
;
2918 if (GET_CODE (operands
[2]) != CONST_INT
2919 || GET_CODE (operands
[3]) != CONST_INT
2920 || INTVAL (operands
[2]) > 64
2921 || INTVAL (operands
[3]) & 3)
2924 st_dst
= XEXP (operands
[0], 0);
2925 st_src
= XEXP (operands
[1], 0);
2927 dst_unchanging_p
= RTX_UNCHANGING_P (operands
[0]);
2928 dst_in_struct_p
= MEM_IN_STRUCT_P (operands
[0]);
2929 src_unchanging_p
= RTX_UNCHANGING_P (operands
[1]);
2930 src_in_struct_p
= MEM_IN_STRUCT_P (operands
[1]);
2932 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2933 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2935 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2936 out_words_to_go
= INTVAL (operands
[2]) / 4;
2937 last_bytes
= INTVAL (operands
[2]) & 3;
2939 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2940 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2942 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2944 if (in_words_to_go
> 4)
2945 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
2946 src_unchanging_p
, src_in_struct_p
));
2948 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
2949 FALSE
, src_unchanging_p
,
2952 if (out_words_to_go
)
2954 if (out_words_to_go
> 4)
2955 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
2958 else if (out_words_to_go
!= 1)
2959 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
2967 mem
= gen_rtx (MEM
, SImode
, dst
);
2968 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2969 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2970 emit_move_insn (mem
, gen_rtx (REG
, SImode
, 0));
2971 if (last_bytes
!= 0)
2972 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2976 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2977 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2980 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2981 if (out_words_to_go
)
2985 mem
= gen_rtx (MEM
, SImode
, src
);
2986 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2987 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2988 emit_move_insn (sreg
= gen_reg_rtx (SImode
), mem
);
2989 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2991 mem
= gen_rtx (MEM
, SImode
, dst
);
2992 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2993 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2994 emit_move_insn (mem
, sreg
);
2995 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2998 if (in_words_to_go
) /* Sanity check */
3004 if (in_words_to_go
< 0)
3007 mem
= gen_rtx (MEM
, SImode
, src
);
3008 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
3009 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
3010 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
3013 if (BYTES_BIG_ENDIAN
&& last_bytes
)
3015 rtx tmp
= gen_reg_rtx (SImode
);
3017 if (part_bytes_reg
== NULL
)
3020 /* The bytes we want are in the top end of the word */
3021 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
3022 GEN_INT (8 * (4 - last_bytes
))));
3023 part_bytes_reg
= tmp
;
3027 mem
= gen_rtx (MEM
, QImode
, plus_constant (dst
, last_bytes
- 1));
3028 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3029 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3030 emit_move_insn (mem
, gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
3033 tmp
= gen_reg_rtx (SImode
);
3034 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3035 part_bytes_reg
= tmp
;
3044 if (part_bytes_reg
== NULL
)
3047 mem
= gen_rtx (MEM
, QImode
, dst
);
3048 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3049 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3050 emit_move_insn (mem
, gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
3053 rtx tmp
= gen_reg_rtx (SImode
);
3055 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
3056 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3057 part_bytes_reg
= tmp
;
3065 /* Generate a memory reference for a half word, such that it will be loaded
3066 into the top 16 bits of the word. We can assume that the address is
3067 known to be alignable and of the form reg, or plus (reg, const). */
3069 gen_rotated_half_load (memref
)
3072 HOST_WIDE_INT offset
= 0;
3073 rtx base
= XEXP (memref
, 0);
3075 if (GET_CODE (base
) == PLUS
)
3077 offset
= INTVAL (XEXP (base
, 1));
3078 base
= XEXP (base
, 0);
3081 /* If we aren't allowed to generate unaligned addresses, then fail. */
3082 if (TARGET_SHORT_BY_BYTES
3083 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
3086 base
= gen_rtx (MEM
, SImode
, plus_constant (base
, offset
& ~2));
3088 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
3091 return gen_rtx (ROTATE
, SImode
, base
, GEN_INT (16));
3094 static enum machine_mode
3095 select_dominance_cc_mode (op
, x
, y
, cond_or
)
3099 HOST_WIDE_INT cond_or
;
3101 enum rtx_code cond1
, cond2
;
3104 /* Currently we will probably get the wrong result if the individual
3105 comparisons are not simple. This also ensures that it is safe to
3106 reverse a comparison if necessary. */
3107 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
3109 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
3114 cond1
= reverse_condition (cond1
);
3116 /* If the comparisons are not equal, and one doesn't dominate the other,
3117 then we can't do this. */
3119 && ! comparison_dominates_p (cond1
, cond2
)
3120 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
3125 enum rtx_code temp
= cond1
;
3133 if (cond2
== EQ
|| ! cond_or
)
3138 case LE
: return CC_DLEmode
;
3139 case LEU
: return CC_DLEUmode
;
3140 case GE
: return CC_DGEmode
;
3141 case GEU
: return CC_DGEUmode
;
3147 if (cond2
== LT
|| ! cond_or
)
3156 if (cond2
== GT
|| ! cond_or
)
3165 if (cond2
== LTU
|| ! cond_or
)
3174 if (cond2
== GTU
|| ! cond_or
)
3182 /* The remaining cases only occur when both comparisons are the
3204 arm_select_cc_mode (op
, x
, y
)
3209 /* All floating point compares return CCFP if it is an equality
3210 comparison, and CCFPE otherwise. */
3211 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3212 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
3214 /* A compare with a shifted operand. Because of canonicalization, the
3215 comparison will have to be swapped when we emit the assembler. */
3216 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
3217 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3218 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
3219 || GET_CODE (x
) == ROTATERT
))
3222 /* This is a special case that is used by combine to allow a
3223 comparison of a shifted byte load to be split into a zero-extend
3224 followed by a comparison of the shifted integer (only valid for
3225 equalities and unsigned inequalities). */
3226 if (GET_MODE (x
) == SImode
3227 && GET_CODE (x
) == ASHIFT
3228 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
3229 && GET_CODE (XEXP (x
, 0)) == SUBREG
3230 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
3231 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
3232 && (op
== EQ
|| op
== NE
3233 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
3234 && GET_CODE (y
) == CONST_INT
)
3237 /* An operation that sets the condition codes as a side-effect, the
3238 V flag is not set correctly, so we can only use comparisons where
3239 this doesn't matter. (For LT and GE we can use "mi" and "pl"
3241 if (GET_MODE (x
) == SImode
3243 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
3244 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
3245 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
3246 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
3247 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
3248 || GET_CODE (x
) == LSHIFTRT
3249 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3250 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
3253 /* A construct for a conditional compare, if the false arm contains
3254 0, then both conditions must be true, otherwise either condition
3255 must be true. Not all conditions are possible, so CCmode is
3256 returned if it can't be done. */
3257 if (GET_CODE (x
) == IF_THEN_ELSE
3258 && (XEXP (x
, 2) == const0_rtx
3259 || XEXP (x
, 2) == const1_rtx
)
3260 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
3261 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
3262 return select_dominance_cc_mode (op
, XEXP (x
, 0), XEXP (x
, 1),
3263 INTVAL (XEXP (x
, 2)));
3265 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
3268 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
3269 && GET_CODE (x
) == PLUS
3270 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
3276 /* X and Y are two things to compare using CODE. Emit the compare insn and
3277 return the rtx for register 0 in the proper mode. FP means this is a
3278 floating point compare: I don't think that it is needed on the arm. */
3281 gen_compare_reg (code
, x
, y
, fp
)
3285 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
3286 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
3288 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
3289 gen_rtx (COMPARE
, mode
, x
, y
)));
3295 arm_reload_in_hi (operands
)
3298 rtx base
= find_replacement (&XEXP (operands
[1], 0));
3300 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
3301 /* Handle the case where the address is too complex to be offset by 1. */
3302 if (GET_CODE (base
) == MINUS
3303 || (GET_CODE (base
) == PLUS
&& GET_CODE (XEXP (base
, 1)) != CONST_INT
))
3305 rtx base_plus
= gen_rtx (REG
, SImode
, REGNO (operands
[0]));
3307 emit_insn (gen_rtx (SET
, VOIDmode
, base_plus
, base
));
3311 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
3312 gen_rtx (MEM
, QImode
,
3313 plus_constant (base
, 1))));
3314 if (BYTES_BIG_ENDIAN
)
3315 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3317 gen_rtx (IOR
, SImode
,
3318 gen_rtx (ASHIFT
, SImode
,
3319 gen_rtx (SUBREG
, SImode
,
3324 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3326 gen_rtx (IOR
, SImode
,
3327 gen_rtx (ASHIFT
, SImode
,
3330 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
3334 arm_reload_out_hi (operands
)
3337 rtx base
= find_replacement (&XEXP (operands
[0], 0));
3339 if (BYTES_BIG_ENDIAN
)
3341 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3342 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3343 emit_insn (gen_lshrsi3 (operands
[2],
3344 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3346 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3347 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3351 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3352 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3353 emit_insn (gen_lshrsi3 (operands
[2],
3354 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3356 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3357 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3361 /* Routines for manipulation of the constant pool. */
3362 /* This is unashamedly hacked from the version in sh.c, since the problem is
3363 extremely similar. */
3365 /* Arm instructions cannot load a large constant into a register,
3366 constants have to come from a pc relative load. The reference of a pc
relative load instruction must be less than 1k in front of the instruction.
3368 This means that we often have to dump a constant inside a function, and
3369 generate code to branch around it.
3371 It is important to minimize this, since the branches will slow things
3372 down and make things bigger.
3374 Worst case code looks like:
3390 We fix this by performing a scan before scheduling, which notices which
3391 instructions need to have their operands fetched from the constant table
3392 and builds the table.
scan, find an instruction which needs a pcrel move.  Look forward, find the
3398 last barrier which is within MAX_COUNT bytes of the requirement.
3399 If there isn't one, make one. Process all the instructions between
3400 the find and the barrier.
3402 In the above example, we can tell that L3 is within 1k of L1, so
3403 the first move can be shrunk from the 2 insn+constant sequence into
3404 just 1 insn, and the constant moved to L3 to make:
3415 Then the second move becomes the target for the shortening process.
3421 rtx value
; /* Value in table */
3422 HOST_WIDE_INT next_offset
;
3423 enum machine_mode mode
; /* Mode of value */
3426 /* The maximum number of constants that can fit into one pool, since
3427 the pc relative range is 0...1020 bytes and constants are at least 4
3430 #define MAX_POOL_SIZE (1020/4)
3431 static pool_node pool_vector
[MAX_POOL_SIZE
];
3432 static int pool_size
;
3433 static rtx pool_vector_label
;
3435 /* Add a constant to the pool and return its label. */
3436 static HOST_WIDE_INT
3437 add_constant (x
, mode
)
3439 enum machine_mode mode
;
3443 HOST_WIDE_INT offset
;
3445 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3446 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3447 x
= get_pool_constant (XEXP (x
, 0));
3448 #ifndef AOF_ASSEMBLER
3449 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
3450 x
= XVECEXP (x
, 0, 0);
3453 #ifdef AOF_ASSEMBLER
3454 /* PIC Symbol references need to be converted into offsets into the
3456 if (flag_pic
&& GET_CODE (x
) == SYMBOL_REF
)
3457 x
= aof_pic_entry (x
);
3458 #endif /* AOF_ASSEMBLER */
3460 /* First see if we've already got it */
3461 for (i
= 0; i
< pool_size
; i
++)
3463 if (GET_CODE (x
) == pool_vector
[i
].value
->code
3464 && mode
== pool_vector
[i
].mode
)
3466 if (GET_CODE (x
) == CODE_LABEL
)
3468 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
3471 if (rtx_equal_p (x
, pool_vector
[i
].value
))
3472 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
3476 /* Need a new one */
3477 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
3480 pool_vector_label
= gen_label_rtx ();
3482 pool_vector
[pool_size
].next_offset
3483 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
3485 pool_vector
[pool_size
].value
= x
;
3486 pool_vector
[pool_size
].mode
= mode
;
3491 /* Output the literal table */
3498 scan
= emit_label_after (gen_label_rtx (), scan
);
3499 scan
= emit_insn_after (gen_align_4 (), scan
);
3500 scan
= emit_label_after (pool_vector_label
, scan
);
3502 for (i
= 0; i
< pool_size
; i
++)
3504 pool_node
*p
= pool_vector
+ i
;
3506 switch (GET_MODE_SIZE (p
->mode
))
3509 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
3513 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
3522 scan
= emit_insn_after (gen_consttable_end (), scan
);
3523 scan
= emit_barrier_after (scan
);
3527 /* Non zero if the src operand needs to be fixed up */
3529 fixit (src
, mode
, destreg
)
3531 enum machine_mode mode
;
3534 if (CONSTANT_P (src
))
3536 if (GET_CODE (src
) == CONST_INT
)
3537 return (! const_ok_for_arm (INTVAL (src
))
3538 && ! const_ok_for_arm (~INTVAL (src
)));
3539 if (GET_CODE (src
) == CONST_DOUBLE
)
3540 return (GET_MODE (src
) == VOIDmode
3542 || (! const_double_rtx_ok_for_fpu (src
)
3543 && ! neg_const_double_rtx_ok_for_fpu (src
)));
3544 return symbol_mentioned_p (src
);
3546 #ifndef AOF_ASSEMBLER
3547 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
3551 return (mode
== SImode
&& GET_CODE (src
) == MEM
3552 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
3553 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
3556 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
3558 find_barrier (from
, max_count
)
3563 rtx found_barrier
= 0;
3566 while (from
&& count
< max_count
)
3568 if (GET_CODE (from
) == BARRIER
)
3571 /* Count the length of this insn */
3572 if (GET_CODE (from
) == INSN
3573 && GET_CODE (PATTERN (from
)) == SET
3574 && CONSTANT_P (SET_SRC (PATTERN (from
)))
3575 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
3577 rtx src
= SET_SRC (PATTERN (from
));
3581 count
+= get_attr_length (from
);
3584 from
= NEXT_INSN (from
);
3589 /* We didn't find a barrier in time to
3590 dump our stuff, so we'll make one */
3591 rtx label
= gen_label_rtx ();
3594 from
= PREV_INSN (last
);
3596 from
= get_last_insn ();
3598 /* Walk back to be just before any jump */
3599 while (GET_CODE (from
) == JUMP_INSN
3600 || GET_CODE (from
) == NOTE
3601 || GET_CODE (from
) == CODE_LABEL
)
3602 from
= PREV_INSN (from
);
3604 from
= emit_jump_insn_after (gen_jump (label
), from
);
3605 JUMP_LABEL (from
) = label
;
3606 found_barrier
= emit_barrier_after (from
);
3607 emit_label_after (label
, found_barrier
);
3608 return found_barrier
;
3611 return found_barrier
;
3614 /* Non zero if the insn is a move instruction which needs to be fixed. */
3619 if (!INSN_DELETED_P (insn
)
3620 && GET_CODE (insn
) == INSN
3621 && GET_CODE (PATTERN (insn
)) == SET
)
3623 rtx pat
= PATTERN (insn
);
3624 rtx src
= SET_SRC (pat
);
3625 rtx dst
= SET_DEST (pat
);
3627 enum machine_mode mode
= GET_MODE (dst
);
3631 if (GET_CODE (dst
) == REG
)
3632 destreg
= REGNO (dst
);
3633 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
3634 destreg
= REGNO (SUBREG_REG (dst
));
3636 return fixit (src
, mode
, destreg
);
3650 /* The ldr instruction can work with up to a 4k offset, and most constants
3651 will be loaded with one of these instructions; however, the adr
3652 instruction and the ldf instructions only work with a 1k offset. This
3653 code needs to be rewritten to use the 4k offset when possible, and to
3654 adjust when a 1k offset is needed. For now we just use a 1k offset
3658 /* Floating point operands can't work further than 1024 bytes from the
3659 PC, so to make things simple we restrict all loads for such functions.
3661 if (TARGET_HARD_FLOAT
)
3662 for (regno
= 16; regno
< 24; regno
++)
3663 if (regs_ever_live
[regno
])
3672 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
3674 if (broken_move (insn
))
3676 /* This is a broken move instruction, scan ahead looking for
3677 a barrier to stick the constant table behind */
3679 rtx barrier
= find_barrier (insn
, count_size
);
3681 /* Now find all the moves between the points and modify them */
3682 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
3684 if (broken_move (scan
))
3686 /* This is a broken move instruction, add it to the pool */
3687 rtx pat
= PATTERN (scan
);
3688 rtx src
= SET_SRC (pat
);
3689 rtx dst
= SET_DEST (pat
);
3690 enum machine_mode mode
= GET_MODE (dst
);
3691 HOST_WIDE_INT offset
;
3697 /* If this is an HImode constant load, convert it into
3698 an SImode constant load. Since the register is always
3699 32 bits this is safe. We have to do this, since the
3700 load pc-relative instruction only does a 32-bit load. */
3704 if (GET_CODE (dst
) != REG
)
3706 PUT_MODE (dst
, SImode
);
3709 offset
= add_constant (src
, mode
);
3710 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
3714 /* For wide moves to integer regs we need to split the
3715 address calculation off into a separate insn, so that
3716 the load can then be done with a load-multiple. This is
3717 safe, since we have already noted the length of such
3718 insns to be 8, and we are immediately over-writing the
3719 scratch we have grabbed with the final result. */
3720 if (GET_MODE_SIZE (mode
) > 4
3721 && (scratch
= REGNO (dst
)) < 16)
3723 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
3724 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
3729 newsrc
= gen_rtx (MEM
, mode
, addr
);
3731 /* Build a jump insn wrapper around the move instead
3732 of an ordinary insn, because we want to have room for
3733 the target label rtx in fld[7], which an ordinary
3734 insn doesn't have. */
3735 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
3738 JUMP_LABEL (newinsn
) = pool_vector_label
;
3740 /* But it's still an ordinary insn */
3741 PUT_CODE (newinsn
, INSN
);
3748 dump_table (barrier
);
3755 /* Routines to output assembly language. */
3757 /* If the rtx is the correct value then return the string of the number.
3758 In this way we can ensure that valid double constants are generated even
3759 when cross compiling. */
3761 fp_immediate_constant (x
)
3767 if (!fpa_consts_inited
)
3770 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3771 for (i
= 0; i
< 8; i
++)
3772 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
3773 return strings_fpa
[i
];
3778 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
3780 fp_const_from_val (r
)
3785 if (! fpa_consts_inited
)
3788 for (i
= 0; i
< 8; i
++)
3789 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
3790 return strings_fpa
[i
];
3795 /* Output the operands of a LDM/STM instruction to STREAM.
3796 MASK is the ARM register set mask of which only bits 0-15 are important.
3797 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
3798 must follow the register list. */
3801 print_multi_reg (stream
, instr
, mask
, hat
)
3807 int not_first
= FALSE
;
3809 fputc ('\t', stream
);
3810 fprintf (stream
, instr
, REGISTER_PREFIX
);
3811 fputs (", {", stream
);
3812 for (i
= 0; i
< 16; i
++)
3813 if (mask
& (1 << i
))
3816 fprintf (stream
, ", ");
3817 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
3821 fprintf (stream
, "}%s\n", hat
? "^" : "");
3824 /* Output a 'call' insn. */
3827 output_call (operands
)
3830 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
3832 if (REGNO (operands
[0]) == 14)
3834 operands
[0] = gen_rtx (REG
, SImode
, 12);
3835 output_asm_insn ("mov%?\t%0, %|lr", operands
);
3837 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3838 output_asm_insn ("mov%?\t%|pc, %0", operands
);
3846 int something_changed
= 0;
3848 int code
= GET_CODE (x0
);
3855 if (REGNO (x0
) == 14)
3857 *x
= gen_rtx (REG
, SImode
, 12);
3862 /* Scan through the sub-elements and change any references there */
3863 fmt
= GET_RTX_FORMAT (code
);
3864 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3866 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
3867 else if (fmt
[i
] == 'E')
3868 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
3869 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
3870 return something_changed
;
3874 /* Output a 'call' insn that is a reference in memory. */
3877 output_call_mem (operands
)
3880 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
3881 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
3883 if (eliminate_lr2ip (&operands
[0]))
3884 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
3886 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3887 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
3892 /* Output a move from arm registers to an fpu registers.
3893 OPERANDS[0] is an fpu register.
3894 OPERANDS[1] is the first registers of an arm register pair. */
3897 output_mov_long_double_fpu_from_arm (operands
)
3900 int arm_reg0
= REGNO (operands
[1]);
3906 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3907 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3908 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3910 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3911 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
3915 /* Output a move from an fpu register to arm registers.
3916 OPERANDS[0] is the first registers of an arm register pair.
3917 OPERANDS[1] is an fpu register. */
3920 output_mov_long_double_arm_from_fpu (operands
)
3923 int arm_reg0
= REGNO (operands
[0]);
3929 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3930 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3931 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3933 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
3934 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3938 /* Output a move from arm registers to arm registers of a long double
3939 OPERANDS[0] is the destination.
3940 OPERANDS[1] is the source. */
3942 output_mov_long_double_arm_from_arm (operands
)
3945 /* We have to be careful here because the two might overlap */
3946 int dest_start
= REGNO (operands
[0]);
3947 int src_start
= REGNO (operands
[1]);
3951 if (dest_start
< src_start
)
3953 for (i
= 0; i
< 3; i
++)
3955 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3956 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3957 output_asm_insn ("mov%?\t%0, %1", ops
);
3962 for (i
= 2; i
>= 0; i
--)
3964 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3965 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3966 output_asm_insn ("mov%?\t%0, %1", ops
);
3974 /* Output a move from arm registers to an fpu registers.
3975 OPERANDS[0] is an fpu register.
3976 OPERANDS[1] is the first registers of an arm register pair. */
3979 output_mov_double_fpu_from_arm (operands
)
3982 int arm_reg0
= REGNO (operands
[1]);
3987 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3988 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3989 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
3990 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
3994 /* Output a move from an fpu register to arm registers.
3995 OPERANDS[0] is the first registers of an arm register pair.
3996 OPERANDS[1] is an fpu register. */
3999 output_mov_double_arm_from_fpu (operands
)
4002 int arm_reg0
= REGNO (operands
[0]);
4008 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
4009 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
4010 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
4011 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
4015 /* Output a move between double words.
4016 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
4017 or MEM<-REG and all MEMs must be offsettable addresses. */
4020 output_move_double (operands
)
4023 enum rtx_code code0
= GET_CODE (operands
[0]);
4024 enum rtx_code code1
= GET_CODE (operands
[1]);
4029 int reg0
= REGNO (operands
[0]);
4031 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
4034 int reg1
= REGNO (operands
[1]);
4038 /* Ensure the second source is not overwritten */
4039 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
4040 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
4042 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
4044 else if (code1
== CONST_DOUBLE
)
4046 if (GET_MODE (operands
[1]) == DFmode
)
4049 union real_extract u
;
4051 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
4053 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
4054 otherops
[1] = GEN_INT(l
[1]);
4055 operands
[1] = GEN_INT(l
[0]);
4057 else if (GET_MODE (operands
[1]) != VOIDmode
)
4059 else if (WORDS_BIG_ENDIAN
)
4062 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4063 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4068 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4069 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4071 output_mov_immediate (operands
);
4072 output_mov_immediate (otherops
);
4074 else if (code1
== CONST_INT
)
4076 #if HOST_BITS_PER_WIDE_INT > 32
4077 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
4078 what the upper word is. */
4079 if (WORDS_BIG_ENDIAN
)
4081 otherops
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4082 operands
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4086 otherops
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4087 operands
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4090 /* Sign extend the intval into the high-order word */
4091 if (WORDS_BIG_ENDIAN
)
4093 otherops
[1] = operands
[1];
4094 operands
[1] = (INTVAL (operands
[1]) < 0
4095 ? constm1_rtx
: const0_rtx
);
4098 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
4100 output_mov_immediate (otherops
);
4101 output_mov_immediate (operands
);
4103 else if (code1
== MEM
)
4105 switch (GET_CODE (XEXP (operands
[1], 0)))
4108 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
4112 abort (); /* Should never happen now */
4116 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
4120 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
4124 abort (); /* Should never happen now */
4129 output_asm_insn ("adr%?\t%0, %1", operands
);
4130 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
4134 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
4136 otherops
[0] = operands
[0];
4137 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
4138 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
4139 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
4141 if (GET_CODE (otherops
[2]) == CONST_INT
)
4143 switch (INTVAL (otherops
[2]))
4146 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
4149 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
4152 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
4155 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
4156 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
4158 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4161 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4164 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
4165 return "ldm%?ia\t%0, %M0";
4169 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
4170 /* Take care of overlapping base/data reg. */
4171 if (reg_mentioned_p (operands
[0], operands
[1]))
4173 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4174 output_asm_insn ("ldr%?\t%0, %1", operands
);
4178 output_asm_insn ("ldr%?\t%0, %1", operands
);
4179 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4185 abort(); /* Constraints should prevent this */
4187 else if (code0
== MEM
&& code1
== REG
)
4189 if (REGNO (operands
[1]) == 12)
4192 switch (GET_CODE (XEXP (operands
[0], 0)))
4195 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
4199 abort (); /* Should never happen now */
4203 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
4207 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
4211 abort (); /* Should never happen now */
4215 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
4217 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
4220 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
4224 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
4228 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
4235 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
4236 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
4237 output_asm_insn ("str%?\t%1, %0", operands
);
4238 output_asm_insn ("str%?\t%1, %0", otherops
);
4242 abort(); /* Constraints should prevent this */
4248 /* Output an arbitrary MOV reg, #n.
4249 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4252 output_mov_immediate (operands
)
4255 HOST_WIDE_INT n
= INTVAL (operands
[1]);
4259 /* Try to use one MOV */
4260 if (const_ok_for_arm (n
))
4262 output_asm_insn ("mov%?\t%0, %1", operands
);
4266 /* Try to use one MVN */
4267 if (const_ok_for_arm (~n
))
4269 operands
[1] = GEN_INT (~n
);
4270 output_asm_insn ("mvn%?\t%0, %1", operands
);
4274 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4276 for (i
=0; i
< 32; i
++)
4280 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
4281 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4284 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4291 /* Output an ADD r, s, #n where n may be too big for one instruction. If
4292 adding zero to one register, output nothing. */
4295 output_add_immediate (operands
)
4298 HOST_WIDE_INT n
= INTVAL (operands
[2]);
4300 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
4303 output_multi_immediate (operands
,
4304 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4307 output_multi_immediate (operands
,
4308 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4315 /* Output a multiple immediate operation.
4316 OPERANDS is the vector of operands referred to in the output patterns.
4317 INSTR1 is the output pattern to use for the first constant.
4318 INSTR2 is the output pattern to use for subsequent constants.
4319 IMMED_OP is the index of the constant slot in OPERANDS.
4320 N is the constant value. */
4323 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
4325 char *instr1
, *instr2
;
4329 #if HOST_BITS_PER_WIDE_INT > 32
4335 operands
[immed_op
] = const0_rtx
;
4336 output_asm_insn (instr1
, operands
); /* Quick and easy output */
4341 char *instr
= instr1
;
4343 /* Note that n is never zero here (which would give no output) */
4344 for (i
= 0; i
< 32; i
+= 2)
4348 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
4349 output_asm_insn (instr
, operands
);
4359 /* Return the appropriate ARM instruction for the operation code.
4360 The returned result should not be overwritten. OP is the rtx of the
4361 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
4365 arithmetic_instr (op
, shift_first_arg
)
4367 int shift_first_arg
;
4369 switch (GET_CODE (op
))
4375 return shift_first_arg
? "rsb" : "sub";
4392 /* Ensure valid constant shifts and return the appropriate shift mnemonic
4393 for the operation code. The returned result should not be overwritten.
4394 OP is the rtx code of the shift.
4395 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
4399 shift_op (op
, amountp
)
4401 HOST_WIDE_INT
*amountp
;
4404 enum rtx_code code
= GET_CODE (op
);
4406 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
4408 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
4409 *amountp
= INTVAL (XEXP (op
, 1));
4432 /* We never have to worry about the amount being other than a
4433 power of 2, since this case can never be reloaded from a reg. */
4435 *amountp
= int_log2 (*amountp
);
4446 /* This is not 100% correct, but follows from the desire to merge
4447 multiplication by a power of 2 with the recognizer for a
4448 shift. >=32 is not a valid shift for "asl", so we must try and
4449 output a shift that produces the correct arithmetical result.
4450 Using lsr #32 is identical except for the fact that the carry bit
4451 is not set correctly if we set the flags; but we never use the
4452 carry bit from such an operation, so we can ignore that. */
4453 if (code
== ROTATERT
)
4454 *amountp
&= 31; /* Rotate is just modulo 32 */
4455 else if (*amountp
!= (*amountp
& 31))
4462 /* Shifts of 0 are no-ops. */
4471 /* Obtain the shift from the POWER of two. */
4473 static HOST_WIDE_INT
4475 HOST_WIDE_INT power
;
4477 HOST_WIDE_INT shift
= 0;
4479 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
4489 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
4490 /bin/as is horribly restrictive. */
4493 output_ascii_pseudo_op (stream
, p
, len
)
4499 int len_so_far
= 1000;
4500 int chars_so_far
= 0;
4502 for (i
= 0; i
< len
; i
++)
4504 register int c
= p
[i
];
4506 if (len_so_far
> 50)
4509 fputs ("\"\n", stream
);
4510 fputs ("\t.ascii\t\"", stream
);
4515 if (c
== '\"' || c
== '\\')
4521 if (c
>= ' ' && c
< 0177)
4528 fprintf (stream
, "\\%03o", c
);
4535 fputs ("\"\n", stream
);
4539 /* Try to determine whether a pattern really clobbers the link register.
4540 This information is useful when peepholing, so that lr need not be pushed
4541 if we combine a call followed by a return.
4542 NOTE: This code does not check for side-effect expressions in a SET_SRC:
4543 such a check should not be needed because these only update an existing
4544 value within a register; the register must still be set elsewhere within
4548 pattern_really_clobbers_lr (x
)
4553 switch (GET_CODE (x
))
4556 switch (GET_CODE (SET_DEST (x
)))
4559 return REGNO (SET_DEST (x
)) == 14;
4562 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
4563 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
4565 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
4574 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
4575 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
4580 switch (GET_CODE (XEXP (x
, 0)))
4583 return REGNO (XEXP (x
, 0)) == 14;
4586 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
4587 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
4603 function_really_clobbers_lr (first
)
4608 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
4610 switch (GET_CODE (insn
))
4615 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
4620 if (pattern_really_clobbers_lr (PATTERN (insn
)))
4625 /* Don't yet know how to handle those calls that are not to a
4627 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
4630 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
4633 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
4639 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
4645 default: /* Don't recognize it, be safe */
4649 /* A call can be made (by peepholing) not to clobber lr iff it is
4650 followed by a return. There may, however, be a use insn iff
4651 we are returning the result of the call.
4652 If we run off the end of the insn chain, then that means the
4653 call was at the end of the function. Unfortunately we don't
4654 have a return insn for the peephole to recognize, so we
4655 must reject this. (Can this be fixed by adding our own insn?) */
4656 if ((next
= next_nonnote_insn (insn
)) == NULL
)
4659 /* No need to worry about lr if the call never returns */
4660 if (GET_CODE (next
) == BARRIER
)
4663 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
4664 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
4665 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
4666 == REGNO (XEXP (PATTERN (next
), 0))))
4667 if ((next
= next_nonnote_insn (next
)) == NULL
)
4670 if (GET_CODE (next
) == JUMP_INSN
4671 && GET_CODE (PATTERN (next
)) == RETURN
)
4680 /* We have reached the end of the chain so lr was _not_ clobbered */
4685 output_return_instruction (operand
, really_return
, reverse
)
4691 int reg
, live_regs
= 0;
4692 int volatile_func
= (optimize
> 0
4693 && TREE_THIS_VOLATILE (current_function_decl
));
4695 return_used_this_function
= 1;
4700 /* If this function was declared non-returning, and we have found a tail
4701 call, then we have to trust that the called function won't return. */
4702 if (! really_return
)
4705 /* Otherwise, trap an attempted return by aborting. */
4707 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4708 assemble_external_libcall (ops
[1]);
4709 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
4713 if (current_function_calls_alloca
&& ! really_return
)
4716 for (reg
= 0; reg
<= 10; reg
++)
4717 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4720 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
4723 if (frame_pointer_needed
)
4728 if (lr_save_eliminated
|| ! regs_ever_live
[14])
4731 if (frame_pointer_needed
)
4733 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
4736 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
4738 for (reg
= 0; reg
<= 10; reg
++)
4739 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4741 strcat (instr
, "%|");
4742 strcat (instr
, reg_names
[reg
]);
4744 strcat (instr
, ", ");
4747 if (frame_pointer_needed
)
4749 strcat (instr
, "%|");
4750 strcat (instr
, reg_names
[11]);
4751 strcat (instr
, ", ");
4752 strcat (instr
, "%|");
4753 strcat (instr
, reg_names
[13]);
4754 strcat (instr
, ", ");
4755 strcat (instr
, "%|");
4756 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4760 strcat (instr
, "%|");
4761 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4763 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
4764 output_asm_insn (instr
, &operand
);
4766 else if (really_return
)
4768 if (TARGET_THUMB_INTERWORK
)
4769 sprintf (instr
, "bx%%?%%%s\t%%|lr", reverse
? "D" : "d");
4771 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
4772 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
4773 output_asm_insn (instr
, &operand
);
4779 /* Return nonzero if optimizing and the current function is volatile.
4780 Such functions never return, and many memory cycles can be saved
4781 by not storing register values that will never be needed again.
4782 This optimization was added to speed up context switching in a
4783 kernel application. */
4786 arm_volatile_func ()
4788 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
4791 /* The amount of stack adjustment that happens here, in output_return and in
4792 output_epilogue must be exactly the same as was calculated during reload,
4793 or things will point to the wrong place. The only time we can safely
4794 ignore this constraint is when a function has no arguments on the stack,
4795 no stack frame requirement and no live registers execpt for `lr'. If we
4796 can guarantee that by making all function calls into tail calls and that
4797 lr is not clobbered in any other way, then there is no need to push lr
4801 output_func_prologue (f
, frame_size
)
4805 int reg
, live_regs_mask
= 0;
4807 int volatile_func
= (optimize
> 0
4808 && TREE_THIS_VOLATILE (current_function_decl
));
4810 /* Nonzero if we must stuff some register arguments onto the stack as if
4811 they were passed there. */
4812 int store_arg_regs
= 0;
4814 if (arm_ccfsm_state
|| arm_target_insn
)
4815 abort (); /* Sanity check */
4817 if (arm_naked_function_p (current_function_decl
))
4820 return_used_this_function
= 0;
4821 lr_save_eliminated
= 0;
4823 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
4824 ASM_COMMENT_START
, current_function_args_size
,
4825 current_function_pretend_args_size
, frame_size
);
4826 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
4827 ASM_COMMENT_START
, frame_pointer_needed
,
4828 current_function_anonymous_args
);
4831 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
4833 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
4836 for (reg
= 0; reg
<= 10; reg
++)
4837 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4838 live_regs_mask
|= (1 << reg
);
4840 if (frame_pointer_needed
)
4841 live_regs_mask
|= 0xD800;
4842 else if (regs_ever_live
[14])
4844 if (! current_function_args_size
4845 && ! function_really_clobbers_lr (get_insns ()))
4846 lr_save_eliminated
= 1;
4848 live_regs_mask
|= 0x4000;
4853 /* if a di mode load/store multiple is used, and the base register
4854 is r3, then r4 can become an ever live register without lr
4855 doing so, in this case we need to push lr as well, or we
4856 will fail to get a proper return. */
4858 live_regs_mask
|= 0x4000;
4859 lr_save_eliminated
= 0;
4863 if (lr_save_eliminated
)
4864 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
4867 #ifdef AOF_ASSEMBLER
4869 fprintf (f
, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX
, REGISTER_PREFIX
,
4870 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
4876 output_func_epilogue (f
, frame_size
)
4880 int reg
, live_regs_mask
= 0;
4881 /* If we need this then it will always be at least this much */
4882 int floats_offset
= 12;
4884 int volatile_func
= (optimize
> 0
4885 && TREE_THIS_VOLATILE (current_function_decl
));
4887 if (use_return_insn() && return_used_this_function
)
4889 if ((frame_size
+ current_function_outgoing_args_size
) != 0
4890 && !(frame_pointer_needed
|| TARGET_APCS
))
4895 /* Naked functions don't have epilogues. */
4896 if (arm_naked_function_p (current_function_decl
))
4899 /* A volatile function should never return. Call abort. */
4902 rtx op
= gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4903 assemble_external_libcall (op
);
4904 output_asm_insn ("bl\t%a0", &op
);
4908 for (reg
= 0; reg
<= 10; reg
++)
4909 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4911 live_regs_mask
|= (1 << reg
);
4915 if (frame_pointer_needed
)
4917 if (arm_fpu_arch
== FP_SOFT2
)
4919 for (reg
= 23; reg
> 15; reg
--)
4920 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4922 floats_offset
+= 12;
4923 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
4924 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
4931 for (reg
= 23; reg
> 15; reg
--)
4933 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4935 floats_offset
+= 12;
4936 /* We can't unstack more than four registers at once */
4937 if (start_reg
- reg
== 3)
4939 fprintf (f
, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
4940 REGISTER_PREFIX
, reg_names
[reg
],
4941 REGISTER_PREFIX
, floats_offset
);
4942 start_reg
= reg
- 1;
4947 if (reg
!= start_reg
)
4948 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4949 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4950 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4952 start_reg
= reg
- 1;
4956 /* Just in case the last register checked also needs unstacking. */
4957 if (reg
!= start_reg
)
4958 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4959 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4960 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4963 if (TARGET_THUMB_INTERWORK
)
4965 live_regs_mask
|= 0x6800;
4966 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
, FALSE
);
4967 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
4971 live_regs_mask
|= 0xA800;
4972 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
4973 TARGET_APCS_32
? FALSE
: TRUE
);
4978 /* Restore stack pointer if necessary. */
4979 if (frame_size
+ current_function_outgoing_args_size
!= 0)
4981 operands
[0] = operands
[1] = stack_pointer_rtx
;
4982 operands
[2] = GEN_INT (frame_size
4983 + current_function_outgoing_args_size
);
4984 output_add_immediate (operands
);
4987 if (arm_fpu_arch
== FP_SOFT2
)
4989 for (reg
= 16; reg
< 24; reg
++)
4990 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4991 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
4992 reg_names
[reg
], REGISTER_PREFIX
);
4998 for (reg
= 16; reg
< 24; reg
++)
5000 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5002 if (reg
- start_reg
== 3)
5004 fprintf (f
, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
5005 REGISTER_PREFIX
, reg_names
[start_reg
],
5007 start_reg
= reg
+ 1;
5012 if (reg
!= start_reg
)
5013 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
5014 REGISTER_PREFIX
, reg_names
[start_reg
],
5015 reg
- start_reg
, REGISTER_PREFIX
);
5017 start_reg
= reg
+ 1;
5021 /* Just in case the last register checked also needs unstacking. */
5022 if (reg
!= start_reg
)
5023 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
5024 REGISTER_PREFIX
, reg_names
[start_reg
],
5025 reg
- start_reg
, REGISTER_PREFIX
);
5028 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
5030 if (TARGET_THUMB_INTERWORK
)
5032 if (! lr_save_eliminated
)
5033 print_multi_reg(f
, "ldmfd\t%ssp!", live_regs_mask
| 0x4000,
5036 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5038 else if (lr_save_eliminated
)
5039 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5040 : "\tmovs\t%spc, %slr\n"),
5041 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5043 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
5044 TARGET_APCS_32
? FALSE
: TRUE
);
5048 if (live_regs_mask
|| regs_ever_live
[14])
5050 /* Restore the integer regs, and the return address into lr */
5051 if (! lr_save_eliminated
)
5052 live_regs_mask
|= 0x4000;
5054 if (live_regs_mask
!= 0)
5055 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
5058 if (current_function_pretend_args_size
)
5060 /* Unwind the pre-pushed regs */
5061 operands
[0] = operands
[1] = stack_pointer_rtx
;
5062 operands
[2] = GEN_INT (current_function_pretend_args_size
);
5063 output_add_immediate (operands
);
5065 /* And finally, go home */
5066 if (TARGET_THUMB_INTERWORK
)
5067 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5069 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5070 : "\tmovs\t%spc, %slr\n"),
5071 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5077 current_function_anonymous_args
= 0;
5081 emit_multi_reg_push (mask
)
5088 for (i
= 0; i
< 16; i
++)
5089 if (mask
& (1 << i
))
5092 if (num_regs
== 0 || num_regs
> 16)
5095 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (num_regs
));
5097 for (i
= 0; i
< 16; i
++)
5099 if (mask
& (1 << i
))
5102 = gen_rtx (SET
, VOIDmode
, gen_rtx (MEM
, BLKmode
,
5103 gen_rtx (PRE_DEC
, BLKmode
,
5104 stack_pointer_rtx
)),
5105 gen_rtx (UNSPEC
, BLKmode
,
5106 gen_rtvec (1, gen_rtx (REG
, SImode
, i
)),
5112 for (j
= 1, i
++; j
< num_regs
; i
++)
5114 if (mask
& (1 << i
))
5117 = gen_rtx (USE
, VOIDmode
, gen_rtx (REG
, SImode
, i
));
5126 emit_sfm (base_reg
, count
)
5133 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (count
));
5135 XVECEXP (par
, 0, 0) = gen_rtx (SET
, VOIDmode
,
5136 gen_rtx (MEM
, BLKmode
,
5137 gen_rtx (PRE_DEC
, BLKmode
,
5138 stack_pointer_rtx
)),
5139 gen_rtx (UNSPEC
, BLKmode
,
5140 gen_rtvec (1, gen_rtx (REG
, XFmode
,
5143 for (i
= 1; i
< count
; i
++)
5144 XVECEXP (par
, 0, i
) = gen_rtx (USE
, VOIDmode
,
5145 gen_rtx (REG
, XFmode
, base_reg
++));
5151 arm_expand_prologue ()
5154 rtx amount
= GEN_INT (-(get_frame_size ()
5155 + current_function_outgoing_args_size
));
5158 int live_regs_mask
= 0;
5159 int store_arg_regs
= 0;
5160 int volatile_func
= (optimize
> 0
5161 && TREE_THIS_VOLATILE (current_function_decl
));
5163 /* Naked functions don't have prologues. */
5164 if (arm_naked_function_p (current_function_decl
))
5167 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
5170 if (! volatile_func
)
5171 for (reg
= 0; reg
<= 10; reg
++)
5172 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5173 live_regs_mask
|= 1 << reg
;
5175 if (! volatile_func
&& regs_ever_live
[14])
5176 live_regs_mask
|= 0x4000;
5178 if (frame_pointer_needed
)
5180 live_regs_mask
|= 0xD800;
5181 emit_insn (gen_movsi (gen_rtx (REG
, SImode
, 12),
5182 stack_pointer_rtx
));
5185 if (current_function_pretend_args_size
)
5188 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
5191 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5192 GEN_INT (-current_function_pretend_args_size
)));
5197 /* If we have to push any regs, then we must push lr as well, or
5198 we won't get a proper return. */
5199 live_regs_mask
|= 0x4000;
5200 emit_multi_reg_push (live_regs_mask
);
5203 /* For now the integer regs are still pushed in output_func_epilogue (). */
5205 if (! volatile_func
)
5207 if (arm_fpu_arch
== FP_SOFT2
)
5209 for (reg
= 23; reg
> 15; reg
--)
5210 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5211 emit_insn (gen_rtx (SET
, VOIDmode
,
5212 gen_rtx (MEM
, XFmode
,
5213 gen_rtx (PRE_DEC
, XFmode
,
5214 stack_pointer_rtx
)),
5215 gen_rtx (REG
, XFmode
, reg
)));
5221 for (reg
= 23; reg
> 15; reg
--)
5223 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5225 if (start_reg
- reg
== 3)
5228 start_reg
= reg
- 1;
5233 if (start_reg
!= reg
)
5234 emit_sfm (reg
+ 1, start_reg
- reg
);
5235 start_reg
= reg
- 1;
5239 if (start_reg
!= reg
)
5240 emit_sfm (reg
+ 1, start_reg
- reg
);
5244 if (frame_pointer_needed
)
5245 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx (REG
, SImode
, 12),
5247 (-(4 + current_function_pretend_args_size
)))));
5249 if (amount
!= const0_rtx
)
5251 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
5252 emit_insn (gen_rtx (CLOBBER
, VOIDmode
,
5253 gen_rtx (MEM
, BLKmode
, stack_pointer_rtx
)));
5256 /* If we are profiling, make sure no instructions are scheduled before
5257 the call to mcount. */
5258 if (profile_flag
|| profile_block_flag
)
5259 emit_insn (gen_blockage ());
5263 /* If CODE is 'd', then the X is a condition operand and the instruction
5264 should only be executed if the condition is true.
5265 if CODE is 'D', then the X is a condition operand and the instruction
5266 should only be executed if the condition is false: however, if the mode
5267 of the comparison is CCFPEmode, then always execute the instruction -- we
5268 do this because in these circumstances !GE does not necessarily imply LT;
5269 in these cases the instruction pattern will take care to make sure that
5270 an instruction containing %d will follow, thereby undoing the effects of
5271 doing this instruction unconditionally.
5272 If CODE is 'N' then X is a floating point operand that must be negated
5274 If CODE is 'B' then output a bitwise inverted value of X (a const int).
5275 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
5278 arm_print_operand (stream
, x
, code
)
5286 fputs (ASM_COMMENT_START
, stream
);
5290 fputs (REGISTER_PREFIX
, stream
);
5294 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
5295 fputs (arm_condition_codes
[arm_current_cc
], stream
);
5301 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
5302 r
= REAL_VALUE_NEGATE (r
);
5303 fprintf (stream
, "%s", fp_const_from_val (&r
));
5308 if (GET_CODE (x
) == CONST_INT
)
5310 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5315 ARM_SIGN_EXTEND (~ INTVAL (x
)));
5319 output_addr_const (stream
, x
);
5324 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
5328 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
5334 char *shift
= shift_op (x
, &val
);
5338 fprintf (stream
, ", %s ", shift_op (x
, &val
));
5340 arm_print_operand (stream
, XEXP (x
, 1), 0);
5343 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5356 fputs (REGISTER_PREFIX
, stream
);
5357 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
5363 fputs (REGISTER_PREFIX
, stream
);
5364 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
5368 fputs (REGISTER_PREFIX
, stream
);
5369 if (GET_CODE (XEXP (x
, 0)) == REG
)
5370 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
5372 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
5376 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
5377 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
5378 + ((GET_MODE_SIZE (GET_MODE (x
))
5379 + GET_MODE_SIZE (SImode
) - 1)
5380 / GET_MODE_SIZE (SImode
))]);
5385 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
5391 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
5392 (get_arm_condition_code (x
))],
5400 if (GET_CODE (x
) == REG
)
5402 fputs (REGISTER_PREFIX
, stream
);
5403 fputs (reg_names
[REGNO (x
)], stream
);
5405 else if (GET_CODE (x
) == MEM
)
5407 output_memory_reference_mode
= GET_MODE (x
);
5408 output_address (XEXP (x
, 0));
5410 else if (GET_CODE (x
) == CONST_DOUBLE
)
5411 fprintf (stream
, "#%s", fp_immediate_constant (x
));
5412 else if (GET_CODE (x
) == NEG
)
5413 abort (); /* This should never happen now. */
5416 fputc ('#', stream
);
5417 output_addr_const (stream
, x
);
5423 /* A finite state machine takes care of noticing whether or not instructions
5424 can be conditionally executed, and thus decrease execution time and code
5425 size by deleting branch instructions. The fsm is controlled by
5426 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
5428 /* The state of the fsm controlling condition codes are:
5429 0: normal, do nothing special
5430 1: make ASM_OUTPUT_OPCODE not output this instruction
5431 2: make ASM_OUTPUT_OPCODE not output this instruction
5432 3: make instructions conditional
5433 4: make instructions conditional
5435 State transitions (state->state by whom under condition):
5436 0 -> 1 final_prescan_insn if the `target' is a label
5437 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
5438 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
5439 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
5440 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
5441 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
5442 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
5443 (the target insn is arm_target_insn).
5445 If the jump clobbers the conditions then we use states 2 and 4.
5447 A similar thing can be done with conditional return insns.
5449 XXX In case the `target' is an unconditional branch, this conditionalising
5450 of the instructions always reduces code size, but not always execution
5451 time. But then, I want to reduce the code size to somewhere near what
5452 /bin/cc produces. */
5454 /* Returns the index of the ARM condition code string in
5455 `arm_condition_codes'. COMPARISON should be an rtx like
5456 `(eq (...) (...))'. */
5458 static enum arm_cond_code
5459 get_arm_condition_code (comparison
)
5462 enum machine_mode mode
= GET_MODE (XEXP (comparison
, 0));
5464 register enum rtx_code comp_code
= GET_CODE (comparison
);
5466 if (GET_MODE_CLASS (mode
) != MODE_CC
)
5467 mode
= SELECT_CC_MODE (comp_code
, XEXP (comparison
, 0),
5468 XEXP (comparison
, 1));
5472 case CC_DNEmode
: code
= ARM_NE
; goto dominance
;
5473 case CC_DEQmode
: code
= ARM_EQ
; goto dominance
;
5474 case CC_DGEmode
: code
= ARM_GE
; goto dominance
;
5475 case CC_DGTmode
: code
= ARM_GT
; goto dominance
;
5476 case CC_DLEmode
: code
= ARM_LE
; goto dominance
;
5477 case CC_DLTmode
: code
= ARM_LT
; goto dominance
;
5478 case CC_DGEUmode
: code
= ARM_CS
; goto dominance
;
5479 case CC_DGTUmode
: code
= ARM_HI
; goto dominance
;
5480 case CC_DLEUmode
: code
= ARM_LS
; goto dominance
;
5481 case CC_DLTUmode
: code
= ARM_CC
;
5484 if (comp_code
!= EQ
&& comp_code
!= NE
)
5487 if (comp_code
== EQ
)
5488 return ARM_INVERSE_CONDITION_CODE (code
);
5494 case NE
: return ARM_NE
;
5495 case EQ
: return ARM_EQ
;
5496 case GE
: return ARM_PL
;
5497 case LT
: return ARM_MI
;
5505 case NE
: return ARM_NE
;
5506 case EQ
: return ARM_EQ
;
5513 case GE
: return ARM_GE
;
5514 case GT
: return ARM_GT
;
5515 case LE
: return ARM_LS
;
5516 case LT
: return ARM_MI
;
5523 case NE
: return ARM_NE
;
5524 case EQ
: return ARM_EQ
;
5525 case GE
: return ARM_LE
;
5526 case GT
: return ARM_LT
;
5527 case LE
: return ARM_GE
;
5528 case LT
: return ARM_GT
;
5529 case GEU
: return ARM_LS
;
5530 case GTU
: return ARM_CC
;
5531 case LEU
: return ARM_CS
;
5532 case LTU
: return ARM_HI
;
5539 case LTU
: return ARM_CS
;
5540 case GEU
: return ARM_CC
;
5547 case NE
: return ARM_NE
;
5548 case EQ
: return ARM_EQ
;
5549 case GE
: return ARM_GE
;
5550 case GT
: return ARM_GT
;
5551 case LE
: return ARM_LE
;
5552 case LT
: return ARM_LT
;
5553 case GEU
: return ARM_CS
;
5554 case GTU
: return ARM_HI
;
5555 case LEU
: return ARM_LS
;
5556 case LTU
: return ARM_CC
;
5568 final_prescan_insn (insn
, opvec
, noperands
)
5573 /* BODY will hold the body of INSN. */
5574 register rtx body
= PATTERN (insn
);
5576 /* This will be 1 if trying to repeat the trick, and things need to be
5577 reversed if it appears to fail. */
5580 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
5581 taken are clobbered, even if the rtl suggests otherwise. It also
5582 means that we have to grub around within the jump expression to find
5583 out what the conditions are when the jump isn't taken. */
5584 int jump_clobbers
= 0;
5586 /* If we start with a return insn, we only succeed if we find another one. */
5587 int seeking_return
= 0;
5589 /* START_INSN will hold the insn from where we start looking. This is the
5590 first insn after the following code_label if REVERSE is true. */
5591 rtx start_insn
= insn
;
5593 /* If in state 4, check if the target branch is reached, in order to
5594 change back to state 0. */
5595 if (arm_ccfsm_state
== 4)
5597 if (insn
== arm_target_insn
)
5599 arm_target_insn
= NULL
;
5600 arm_ccfsm_state
= 0;
5605 /* If in state 3, it is possible to repeat the trick, if this insn is an
5606 unconditional branch to a label, and immediately following this branch
5607 is the previous target label which is only used once, and the label this
5608 branch jumps to is not too far off. */
5609 if (arm_ccfsm_state
== 3)
5611 if (simplejump_p (insn
))
5613 start_insn
= next_nonnote_insn (start_insn
);
5614 if (GET_CODE (start_insn
) == BARRIER
)
5616 /* XXX Isn't this always a barrier? */
5617 start_insn
= next_nonnote_insn (start_insn
);
5619 if (GET_CODE (start_insn
) == CODE_LABEL
5620 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5621 && LABEL_NUSES (start_insn
) == 1)
5626 else if (GET_CODE (body
) == RETURN
)
5628 start_insn
= next_nonnote_insn (start_insn
);
5629 if (GET_CODE (start_insn
) == BARRIER
)
5630 start_insn
= next_nonnote_insn (start_insn
);
5631 if (GET_CODE (start_insn
) == CODE_LABEL
5632 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5633 && LABEL_NUSES (start_insn
) == 1)
5645 if (arm_ccfsm_state
!= 0 && !reverse
)
5647 if (GET_CODE (insn
) != JUMP_INSN
)
5650 /* This jump might be paralleled with a clobber of the condition codes
5651 the jump should always come first */
5652 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5653 body
= XVECEXP (body
, 0, 0);
5656 /* If this is a conditional return then we don't want to know */
5657 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5658 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
5659 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
5660 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
5665 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5666 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
5669 int fail
= FALSE
, succeed
= FALSE
;
5670 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
5671 int then_not_else
= TRUE
;
5672 rtx this_insn
= start_insn
, label
= 0;
5674 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
5676 /* The code below is wrong for these, and I haven't time to
5677 fix it now. So we just do the safe thing and return. This
5678 whole function needs re-writing anyway. */
5683 /* Register the insn jumped to. */
5686 if (!seeking_return
)
5687 label
= XEXP (SET_SRC (body
), 0);
5689 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
5690 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
5691 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
5693 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
5694 then_not_else
= FALSE
;
5696 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
5698 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
5701 then_not_else
= FALSE
;
5706 /* See how many insns this branch skips, and what kind of insns. If all
5707 insns are okay, and the label or unconditional branch to the same
5708 label is not too far away, succeed. */
/* NOTE(review): this span is an extraction-damaged fragment of the
   conditional-execution state machine (ccfsm) scan loop in
   arm_final_prescan_insn.  Interior lines (braces, case labels, some
   statements) were lost, so the control structure below is incomplete.
   Only comments have been added; the code bytes are untouched.  */
/* Scan forward up to MAX_INSNS_SKIPPED insns, stopping as soon as the
   scan either fails or succeeds.  */
5709 for (insns_skipped
= 0;
5710 !fail
&& !succeed
&& insns_skipped
++ < MAX_INSNS_SKIPPED
;)
/* Advance past notes to the next real insn and grab its body.  */
5714 this_insn
= next_nonnote_insn (this_insn
);
5718 scanbody
= PATTERN (this_insn
);
/* Dispatch on the kind of insn encountered.  NOTE(review): the case
   labels were lost in extraction; from the surviving comments these
   appear to include label, barrier, call and jump cases -- confirm
   against the full source.  */
5720 switch (GET_CODE (this_insn
))
5723 /* Succeed if it is the target label, otherwise fail since
5724 control falls in from somewhere else. */
5725 if (this_insn
== label
)
/* NOTE(review): from the assignments below, state 2 appears to mean
   the branch insn itself becomes redundant, state 1 that only the
   skipped insns are conditionalised -- confirm against the full
   function.  */
5729 arm_ccfsm_state
= 2;
5730 this_insn
= next_nonnote_insn (this_insn
);
5733 arm_ccfsm_state
= 1;
5741 /* Succeed if the following insn is the target label.
5743 If return insns are used then the last insn in a function
5744 will be a barrier. */
5745 this_insn
= next_nonnote_insn (this_insn
);
5746 if (this_insn
&& this_insn
== label
)
5750 arm_ccfsm_state
= 2;
5751 this_insn
= next_nonnote_insn (this_insn
);
5754 arm_ccfsm_state
= 1;
5762 /* If using 32-bit addresses the cc is not preserved over
   ... (rest of this comment was lost in extraction)  */
5766 /* Succeed if the following insn is the target label,
5767 or if the following two insns are a barrier and
5768 the target label. */
5769 this_insn
= next_nonnote_insn (this_insn
);
/* Step over an intervening barrier, if any.  */
5770 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
5771 this_insn
= next_nonnote_insn (this_insn
);
5773 if (this_insn
&& this_insn
== label
5774 && insns_skipped
< MAX_INSNS_SKIPPED
)
5778 arm_ccfsm_state
= 2;
5779 this_insn
= next_nonnote_insn (this_insn
);
5782 arm_ccfsm_state
= 1;
5791 /* If this is an unconditional branch to the same label, succeed.
5792 If it is to another label, do nothing. If it is conditional,
   fail.  */
5794 /* XXX Probably, the test for the SET and the PC are unnecessary. */
5796 if (GET_CODE (scanbody
) == SET
5797 && GET_CODE (SET_DEST (scanbody
)) == PC
)
/* Direct, non-reversed branch to the target label: the branch itself
   can be removed, so go straight to state 2.  */
5799 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
5800 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
5802 arm_ccfsm_state
= 2;
/* A conditional jump inside the skipped region cannot be handled.  */
5805 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
5808 else if (GET_CODE (scanbody
) == RETURN
5811 arm_ccfsm_state
= 2;
5814 else if (GET_CODE (scanbody
) == PARALLEL
)
5816 switch (get_attr_conds (this_insn
))
5828 /* Instructions using or affecting the condition codes make it
   fail.  */
5830 if ((GET_CODE (scanbody
) == SET
5831 || GET_CODE (scanbody
) == PARALLEL
)
5832 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
/* Scan finished: record the target of the conditionalisation.  */
5842 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
5843 arm_target_label
= CODE_LABEL_NUMBER (label
);
5844 else if (seeking_return
|| arm_ccfsm_state
== 2)
/* Step over any USE insns that follow, looking for the insn past the
   jump target.  */
5846 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
5848 this_insn
= next_nonnote_insn (this_insn
);
5849 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
5850 || GET_CODE (this_insn
) == CODE_LABEL
))
5855 /* Oh, dear! we ran off the end.. give up */
/* Re-recognise the original insn to restore recog's operand array,
   then abandon the optimisation (state 0, no target insn).  */
5856 recog (PATTERN (insn
), insn
, NULL_PTR
);
5857 arm_ccfsm_state
= 0;
5858 arm_target_insn
= NULL
;
5861 arm_target_insn
= this_insn
;
/* Compute the condition under which the skipped insns execute.  For a
   combined (dominance) comparison the code is taken from the inner
   expression and inverted once for an AND combination and once more
   when the outer test is NE.  */
5870 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
5872 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
5873 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5874 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
5875 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5879 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
   what it was.  */
5882 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
5886 if (reverse
|| then_not_else
)
5887 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5889 /* restore recog_operand (getting the attributes of other insns can
5890 destroy this array, but final.c assumes that it remains intact
5891 across this call; since the insn has been recognized already we
5892 call recog direct). */
5893 recog (PATTERN (insn
), insn
, NULL_PTR
);
5897 #ifdef AOF_ASSEMBLER
5898 /* Special functions only needed when producing AOF syntax assembler. */
/* Label for the base of the PIC address-constant pool ("x$adcons");
   created lazily by aof_pic_entry below.  */
5900 rtx aof_pic_label
= NULL_RTX
;
/* Node in the singly-linked chain of symbols needing PIC address
   constants.  NOTE(review): the struct's opening line and its symname
   member were lost in extraction -- symname is referenced by the
   aof_pic_* functions below.  */
5903 struct pic_chain
*next
;
/* Head of the PIC constant chain; empty until aof_pic_entry adds to it.  */
5907 static struct pic_chain
*aof_pic_chain
= NULL
;
/* Fragment of aof_pic_entry: return an rtx addressing the PIC constant
   slot for symbol X, adding X to the chain on first use.  NOTE(review):
   the function header, braces and some declarations were lost in
   extraction; comments only added here.  */
5913 struct pic_chain
**chainp
;
5916 if (aof_pic_label
== NULL_RTX
)
5918 /* This needs to persist throughout the compilation. */
5919 end_temporary_allocation ();
5920 aof_pic_label
= gen_rtx (SYMBOL_REF
, Pmode
, "x$adcons");
5921 resume_temporary_allocation ();
/* Walk the chain looking for X; each entry occupies 4 bytes from the
   pool base.  Pointer comparison of the name strings is used --
   presumably identical symbol names share storage; TODO confirm.  */
5924 for (offset
= 0, chainp
= &aof_pic_chain
; *chainp
;
5925 offset
+= 4, chainp
= &(*chainp
)->next
)
5926 if ((*chainp
)->symname
== XSTR (x
, 0))
5927 return plus_constant (aof_pic_label
, offset
);
/* Not found: append a new entry at the tail (chainp now points at the
   terminating NULL link, and offset is the new slot's offset).  */
5929 *chainp
= (struct pic_chain
*) xmalloc (sizeof (struct pic_chain
));
5930 (*chainp
)->next
= NULL
;
5931 (*chainp
)->symname
= XSTR (x
, 0);
5932 return plus_constant (aof_pic_label
, offset
);
/* Fragment of aof_dump_pic_table: write the accumulated PIC address
   constants to assembler stream F.  NOTE(review): return type,
   parameter declaration and braces were lost in extraction.  */
5936 aof_dump_pic_table (f
)
5939 struct pic_chain
*chain
;
/* Nothing to emit when no PIC constants were requested.  */
5941 if (aof_pic_chain
== NULL
)
/* Emit the AREA directive, BASED on the PIC offset table register,
   followed by the pool label used by aof_pic_entry.  */
5944 fprintf (f
, "\tAREA |%s$$adcons|, BASED %s%s\n",
5945 reg_names
[PIC_OFFSET_TABLE_REGNUM
], REGISTER_PREFIX
,
5946 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
5947 fputs ("|x$adcons|\n", f
);
/* One DCD (word) per chained symbol, in chain order -- matching the
   4-byte-per-entry offsets handed out by aof_pic_entry.  */
5949 for (chain
= aof_pic_chain
; chain
; chain
= chain
->next
)
5951 fputs ("\tDCD\t", f
);
5952 assemble_name (f
, chain
->symname
);
/* Sequence number for AOF code AREAs; each text section gets a fresh
   C$$code<N> name.  */
5957 int arm_text_section_count
= 1;
/* Fragment of aof_text_section: build, in a static buffer, the AREA
   directive for the next code section.  NOTE(review): the function
   header and the condition guarding the PIC/REENTRANT attributes were
   lost in extraction.  */
5962 static char buf
[100];
5963 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
5964 arm_text_section_count
++);
5966 strcat (buf
, ", PIC, REENTRANT");
/* Fragment of aof_data_section: build, in a static buffer, the AREA
   directive for the next data section, C$$data<N>.  NOTE(review): the
   function header and return were lost in extraction.  */
5970 static int arm_data_section_count
= 1;
5975 static char buf
[100];
5976 sprintf (buf
, "\tAREA |C$$data%d|, DATA", arm_data_section_count
++);
5980 /* The AOF assembler is religiously strict about declarations of
5981 imported and exported symbols, so that it is impossible to declare
5982 a function as imported near the beginning of the file, and then to
5983 export it later on. It is, however, possible to delay the decision
5984 until all the functions in the file have been compiled. To get
5985 around this, we maintain a list of the imports and exports, and
5986 delete from it any that are subsequently defined. At the end of
5987 compilation we spit the remainder of the list out before the END
   directive.  */
/* Node in the pending-imports list.  NOTE(review): the struct's opening
   line and its name member were lost in extraction -- name is
   referenced by the aof_*_import functions below.  */
5992 struct import
*next
;
/* Head of the list of symbols still needing an IMPORT directive.  */
5996 static struct import
*imports_list
= NULL
;
/* Fragment of aof_add_import: record NAME as needing an IMPORT
   directive, unless it is already listed.  NOTE(review): the function
   header, parameter declaration and braces were lost in extraction.
   ('new' is an ordinary identifier in C.)  */
5999 aof_add_import (name
)
/* Pointer comparison of name strings is used -- presumably identical
   names share storage; TODO confirm.  */
6004 for (new = imports_list
; new; new = new->next
)
6005 if (new->name
== name
)
/* Not present: push a new node on the front of the list.  */
6008 new = (struct import
*) xmalloc (sizeof (struct import
));
6009 new->next
= imports_list
;
/* Fragment of aof_delete_import: remove NAME from the pending-imports
   list (used when the symbol turns out to be defined in this file).
   NOTE(review): the function header, braces and early return were lost
   in extraction.  */
6015 aof_delete_import (name
)
6018 struct import
**old
;
/* Walk via a pointer-to-link so the matching node can be unlinked
   without keeping a separate 'previous' pointer.  */
6020 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
6022 if ((*old
)->name
== name
)
6024 *old
= (*old
)->next
;
/* Nonzero when this file defines main(), so the startup code must be
   pulled in from the library.  */
6030 int arm_main_function
= 0;
/* Fragment of aof_dump_imports: emit IMPORT directives to stream F for
   everything still on imports_list, plus __main when needed.
   NOTE(review): the function header, braces and some statements were
   lost in extraction.  */
6033 aof_dump_imports (f
)
6036 /* The AOF assembler needs this to cause the startup code to be extracted
6037 from the library. Bringing in __main causes the whole thing to work
   automatically.  */
6039 if (arm_main_function
)
6042 fputs ("\tIMPORT __main\n", f
);
6043 fputs ("\tDCD __main\n", f
);
6046 /* Now dump the remaining imports. */
6047 while (imports_list
)
6049 fprintf (f
, "\tIMPORT\t");
6050 assemble_name (f
, imports_list
->name
);
/* Pop the node just emitted (nodes are not freed).  */
6052 imports_list
= imports_list
->next
;
6055 #endif /* AOF_ASSEMBLER */