1 /* Output routines for GCC for ARM/RISCiX.
2 Copyright (C) 1991, 93, 94, 95, 96, 1997 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "insn-flags.h"
36 #include "insn-attr.h"
42 /* The maximum number of insns skipped which will be conditionalised if
/* NOTE(review): the comment above is truncated — its continuation line is
   missing from this extraction.  */
44 #define MAX_INSNS_SKIPPED 5
46 /* Some function declarations. */
/* Forward declarations for the static helpers defined later in this file.
   PROTO is the old GCC prototype macro (expands to the argument list on
   ANSI compilers, to () on K&R compilers).  Several declarations below are
   visibly truncated (e.g. output_multi_immediate, select_dominance_cc_mode)
   because intervening source lines are missing from this extraction.  */
47 extern FILE *asm_out_file
;
49 static HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
50 static char *output_multi_immediate
PROTO ((rtx
*, char *, char *, int,
52 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
53 HOST_WIDE_INT
, rtx
, rtx
, int, int));
54 static int arm_naked_function_p
PROTO ((tree
));
55 static void init_fpa_table
PROTO ((void));
56 static enum machine_mode select_dominance_cc_mode
PROTO ((enum rtx_code
, rtx
,
58 static HOST_WIDE_INT add_constant
PROTO ((rtx
, enum machine_mode
));
59 static void dump_table
PROTO ((rtx
));
60 static int fixit
PROTO ((rtx
, enum machine_mode
, int));
61 static rtx find_barrier
PROTO ((rtx
, int));
62 static int broken_move
PROTO ((rtx
));
63 static char *fp_const_from_val
PROTO ((REAL_VALUE_TYPE
*));
64 static int eliminate_lr2ip
PROTO ((rtx
*));
65 static char *shift_op
PROTO ((rtx
, HOST_WIDE_INT
*));
66 static int pattern_really_clobbers_lr
PROTO ((rtx
));
67 static int function_really_clobbers_lr
PROTO ((rtx
));
68 static void emit_multi_reg_push
PROTO ((int));
69 static void emit_sfm
PROTO ((int, int));
70 static enum arm_cond_code get_arm_condition_code
PROTO ((rtx
));
72 /* Define the information needed to generate branch insns.  This is
73 stored from the compare operation. */
75 rtx arm_compare_op0
, arm_compare_op1
;
78 /* What type of cpu are we compiling for? */
79 enum processor_type arm_cpu
;
81 /* What type of floating point are we tuning for? */
82 enum floating_point_type arm_fpu
;
84 /* What type of floating point instructions are available? */
85 enum floating_point_type arm_fpu_arch
;
87 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
88 enum prog_mode_type arm_prgmode
;
90 /* Set by the -mfp=... option */
91 char *target_fp_name
= NULL
;
93 /* Nonzero if this is an "M" variant of the processor. */
94 int arm_fast_multiply
= 0;
96 /* Nonzero if this chip supports the ARM Architecture 4 extensions */
/* NOTE(review): the variable for the comment above (presumably arm_arch4)
   and the one for the comment below (presumably tune_flags) are on lines
   missing from this extraction.  */
99 /* Set to the features we should tune the code for (multiply speed etc). */
102 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
103 must report the mode of the memory reference from PRINT_OPERAND to
104 PRINT_OPERAND_ADDRESS. */
105 enum machine_mode output_memory_reference_mode
;
107 /* Nonzero if the prologue must setup `fp'. */
/* NOTE(review): the comment above does not match the variable below —
   intervening lines are missing, so the `fp' variable itself is not
   visible here.  */
108 int current_function_anonymous_args
;
110 /* The register number to be used for the PIC offset register. */
111 int arm_pic_register
= 9;
113 /* Location counter of .text segment. */
114 int arm_text_location
= 0;
116 /* Set to one if we think that lr is only saved because of subroutine calls,
117 but all of these can be `put after' return insns */
118 int lr_save_eliminated
;
120 /* Set to 1 when a return insn is output, this means that the epilogue
123 static int return_used_this_function
;
/* Maximum number of insns arm_split_constant will emit inline before
   falling back to loading the constant from memory.  */
125 static int arm_constant_limit
= 3;
127 /* For an explanation of these variables, see final_prescan_insn below. */
129 enum arm_cond_code arm_current_cc
;
131 int arm_target_label
;
133 /* The condition codes of the ARM, and the inverse function. */
/* Assembler mnemonics indexed by enum arm_cond_code; the surrounding
   initializer braces are on lines missing from this extraction.  */
134 char *arm_condition_codes
[] =
136 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
137 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
140 static enum arm_cond_code
get_arm_condition_code ();
143 /* Initialization code */
/* Table of command-line switches that select the CPU/architecture; scanned
   by arm_override_options below.  The initializer's opening brace is on a
   line missing from this extraction.  */
145 struct arm_cpu_select arm_select
[4] =
147 /* switch name, tune arch */
148 { (char *)0, "--with-cpu=", 1, 1 },
149 { (char *)0, "-mcpu=", 1, 1 },
150 { (char *)0, "-march=", 0, 1 },
151 { (char *)0, "-mtune=", 1, 0 },
/* Processor capability flag bits used in the all_procs table below and
   tested in arm_override_options.  */
154 #define FL_CO_PROC    0x01            /* Has external co-processor bus */
155 #define FL_FAST_MULT  0x02            /* Fast multiply */
156 #define FL_MODE26     0x04            /* 26-bit mode support */
157 #define FL_MODE32     0x08            /* 32-bit mode support */
158 #define FL_ARCH4      0x10            /* Architecture rel 4 */
159 #define FL_THUMB      0x20            /* Thumb aware */
/* Field of the (partially visible) struct processors declaration.  */
164 enum processor_type type
;
168 /* Not all of these give usefully different compilation alternatives,
169 but there is no simple way of generalizing them. */
/* Table mapping -mcpu=/-march= names to a processor type and FL_* capability
   flags.  Entries with PROCESSOR_NONE are architectures, not CPUs (see the
   matching logic in arm_override_options).  Several entries below are
   visibly truncated — their closing flags/braces are on lines missing from
   this extraction.  */
170 static struct processors all_procs
[] =
172 {"arm2", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
173 {"arm250", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
174 {"arm3", PROCESSOR_ARM2
, FL_CO_PROC
| FL_MODE26
},
175 {"arm6", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
176 {"arm600", PROCESSOR_ARM6
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
177 {"arm610", PROCESSOR_ARM6
, FL_MODE32
| FL_MODE26
},
178 {"arm7", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
179 /* arm7m doesn't exist on its own, only in conjunction with D, (and I), but
180 those don't alter the code, so it is sometimes known as the arm7m */
181 {"arm7m", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
183 {"arm7dm", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
185 {"arm7dmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
187 {"arm700", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
188 {"arm710", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
189 {"arm7100", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
190 {"arm7500", PROCESSOR_ARM7
, FL_MODE32
| FL_MODE26
},
191 /* Doesn't really have an external co-proc, but does have embedded fpu */
192 {"arm7500fe", PROCESSOR_ARM7
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
193 {"arm7tdmi", PROCESSOR_ARM7
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
194 | FL_ARCH4
| FL_THUMB
)},
195 {"arm8", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
197 {"arm810", PROCESSOR_ARM8
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
199 {"strongarm", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
201 {"strongarm110", PROCESSOR_STARM
, (FL_FAST_MULT
| FL_MODE32
| FL_MODE26
203 {"armv2", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
204 {"armv2a", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE26
},
205 {"armv3", PROCESSOR_NONE
, FL_CO_PROC
| FL_MODE32
| FL_MODE26
},
206 {"armv3m", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
208 {"armv4", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
209 | FL_MODE26
| FL_ARCH4
)},
210 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
211 implementations that support it, so we will leave it out for now. */
212 {"armv4t", PROCESSOR_NONE
, (FL_CO_PROC
| FL_FAST_MULT
| FL_MODE32
217 /* Fix up any incompatible options that the user has specified.
218 This has now turned into a maze. */
/* Called once after option parsing: picks the CPU from arm_select /
   cpu_defaults, diagnoses incompatible option combinations, and derives
   arm_fast_multiply, arm_arch4, arm_fpu_arch etc. from the selected
   processor's FL_* flags.  Many interior lines are missing from this
   extraction; the return type on the definition line is also not visible.  */
220 arm_override_options ()
222 int arm_thumb_aware
= 0;
225 struct arm_cpu_select
*ptr
;
/* Mapping from the configure-time TARGET_CPU_DEFAULT code to the all_procs
   name used when no -mcpu=/-march= switch was given.  */
226 static struct cpu_default
{
230 { TARGET_CPU_arm2
, "arm2" },
231 { TARGET_CPU_arm6
, "arm6" },
232 { TARGET_CPU_arm610
, "arm610" },
233 { TARGET_CPU_arm7dm
, "arm7dm" },
234 { TARGET_CPU_arm7500fe
, "arm7500fe" },
235 { TARGET_CPU_arm7tdmi
, "arm7tdmi" },
236 { TARGET_CPU_arm8
, "arm8" },
237 { TARGET_CPU_arm810
, "arm810" },
238 { TARGET_CPU_strongarm
, "strongarm" },
241 struct cpu_default
*def
;
243 /* Set the default. */
244 for (def
= &cpu_defaults
[0]; def
->name
; ++def
)
245 if (def
->cpu
== TARGET_CPU_DEFAULT
)
250 arm_select
[0].string
= def
->name
;
/* Walk the four possible CPU-selection switches, latest-priority handling
   is on missing lines.  */
252 for (i
= 0; i
< sizeof (arm_select
) / sizeof (arm_select
[0]); i
++)
254 ptr
= &arm_select
[i
];
255 if (ptr
->string
!= (char *)0 && ptr
->string
[0] != '\0')
257 struct processors
*sel
;
259 for (sel
= all_procs
; sel
->name
!= NULL
; sel
++)
260 if (! strcmp (ptr
->string
, sel
->name
))
262 /* -march= is the only flag that can take an architecture
263 type, so if we match when the tune bit is set, the
264 option was invalid. */
267 if (sel
->type
== PROCESSOR_NONE
)
268 continue; /* It's an architecture, not a cpu */
271 tune_flags
= sel
->flags
;
280 if (sel
->name
== NULL
)
281 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
285 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
286 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
288 if (TARGET_POKE_FUNCTION_NAME
)
289 target_flags
|= ARM_FLAG_APCS_FRAME
;
292 warning ("Option '-m6' deprecated. Use: '-mapcs-32' or -mcpu=<proc>");
295 warning ("Option '-m3' deprecated. Use: '-mapcs-26' or -mcpu=<proc>");
297 if (TARGET_APCS_REENT
&& flag_pic
)
298 fatal ("-fpic and -mapcs-reent are incompatible");
300 if (TARGET_APCS_REENT
)
301 warning ("APCS reentrant code not supported.");
303 /* If stack checking is disabled, we can use r10 as the PIC register,
304 which keeps r9 available. */
305 if (flag_pic
&& ! TARGET_APCS_STACK
)
306 arm_pic_register
= 10;
308 /* Well, I'm about to have a go, but pic is NOT going to be compatible
309 with APCS reentrancy, since that requires too much support in the
310 assembler and linker, and the ARMASM assembler seems to lack some
311 required directives. */
313 warning ("Position independent code not supported. Ignored");
315 if (TARGET_APCS_FLOAT
)
316 warning ("Passing floating point arguments in fp regs not yet supported");
318 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
320 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
321 target_flags
|= ARM_FLAG_APCS_FRAME
;
324 /* Default is to tune for an FPA */
327 /* Default value for floating point code... if no co-processor
328 bus, then schedule for emulated floating point. Otherwise,
329 assume the user has an FPA.
330 Note: this does not prevent use of floating point instructions,
331 -msoft-float does that. */
/* BUG(review): `&` binds looser than `==`, so the condition below parses
   as `tune_flags & (FL_CO_PROC == 0)`, i.e. `tune_flags & 0`, which is
   always false — the no-coprocessor branch can never be taken.  The intent
   is clearly `(tune_flags & FL_CO_PROC) == 0`.  Cannot be fixed in place
   here because the surrounding lines are missing from this extraction.  */
332 if (tune_flags
& FL_CO_PROC
== 0)
/* NOTE(review): `flags` below is set on lines missing from this
   extraction (presumably the selected CPU's capability flags).  */
335 arm_fast_multiply
= (flags
& FL_FAST_MULT
) != 0;
336 arm_arch4
= (flags
& FL_ARCH4
) != 0;
337 arm_thumb_aware
= (flags
& FL_THUMB
) != 0;
/* Map the -mfp=/-mfpe= argument onto the FP architecture to generate for.  */
341 if (strcmp (target_fp_name
, "2") == 0)
342 arm_fpu_arch
= FP_SOFT2
;
343 else if (strcmp (target_fp_name
, "3") == 0)
344 arm_fpu_arch
= FP_HARD
;
346 fatal ("Invalid floating point emulation option: -mfpe=%s",
350 arm_fpu_arch
= FP_DEFAULT
;
352 if (TARGET_THUMB_INTERWORK
&& ! arm_thumb_aware
)
354 warning ("This processor variant does not support Thumb interworking");
355 target_flags
&= ~ARM_FLAG_THUMB
;
358 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
361 /* For arm2/3 there is no need to do any scheduling if there is only
362 a floating point emulator, or we are doing software floating-point. */
363 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && arm_cpu
== PROCESSOR_ARM2
)
364 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
366 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
370 /* Return 1 if it is possible to return using a single instruction */
/* NOTE(review): the function header (presumably `int use_return_insn ()`)
   and its opening lines are missing from this extraction.  The visible
   logic rejects the single-instruction return when reload has not run,
   when there are pretend/anonymous args, when a frame is needed, when
   Thumb interworking requires saved core registers, or when any FPU
   register (regno 16-23) must be restored.  */
377 if (!reload_completed
||current_function_pretend_args_size
378 || current_function_anonymous_args
379 || ((get_frame_size () + current_function_outgoing_args_size
!= 0)
380 && !(TARGET_APCS
|| frame_pointer_needed
)))
383 /* Can't be done if interworking with Thumb, and any registers have been
385 if (TARGET_THUMB_INTERWORK
)
386 for (regno
= 0; regno
< 16; regno
++)
387 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
390 /* Can't be done if any of the FPU regs are pushed, since this also
392 for (regno
= 16; regno
< 24; regno
++)
393 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
396 /* If a function is naked, don't use the "return" insn. */
397 if (arm_naked_function_p (current_function_decl
))
403 /* Return TRUE if int I is a valid immediate ARM constant. */
/* An ARM data-processing immediate is an 8-bit value rotated right by an
   even amount.  The loop below tests I against ~mask for each even
   rotation of the 8-bit field (mask starts as ~0xFF and is rotated left
   by 2 each iteration until it returns to ~0xFF).  The function header is
   on lines missing from this extraction.  */
409 unsigned HOST_WIDE_INT mask
= ~0xFF;
411 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
412 be all zero, or all one. */
413 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff) != 0
414 && ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff)
415 != (((HOST_WIDE_INT
) -1) & ~(unsigned HOST_WIDE_INT
) 0xffffffff)))
418 /* Fast return for 0 and powers of 2 */
419 if ((i
& (i
- 1)) == 0)
424 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
/* Rotate the 32-bit mask left by two bits, keeping the high bits set so
   the >32-bit-host check above stays valid.  */
427 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
428 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
429 } while (mask
!= ~0xFF);
434 /* Return true if I is a valid constant for the operation CODE. */
/* Falls back to testing the negated (for PLUS-like codes, per the visible
   ARM_SIGN_EXTEND (-i) branch) or inverted constant; the switch structure
   around these returns is on lines missing from this extraction.  */
436 const_ok_for_op (i
, code
, mode
)
439 enum machine_mode mode
;
441 if (const_ok_for_arm (i
))
447 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
449 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
455 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
462 /* Emit a sequence of insns to handle a large constant.
463 CODE is the code of the operation required, it can be any of SET, PLUS,
464 IOR, AND, XOR, MINUS;
465 MODE is the mode in which the operation is being performed;
466 VAL is the integer to operate on;
467 SOURCE is the other operand (a register, or a null-pointer for SET);
468 SUBTARGETS means it is safe to create scratch registers if that will
469 either produce a simpler sequence, or we will want to cse the values.
470 Return value is the number of insns emitted. */
/* NOTE(review): the return type on the definition line and several interior
   lines are missing from this extraction.  */
473 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
475 enum machine_mode mode
;
/* If generating the constant inline would cost more than arm_constant_limit
   insns, load it from the constant pool instead (via a plain SET, then
   apply CODE against SOURCE for the diadic operations).  */
481 if (subtargets
|| code
== SET
482 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
483 && REGNO (target
) != REGNO (source
)))
487 if (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
488 > arm_constant_limit
+ (code
!= SET
))
492 /* Currently SET is the only monadic value for CODE, all
493 the rest are diadic. */
494 emit_insn (gen_rtx (SET
, VOIDmode
, target
, GEN_INT (val
)));
499 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
501 emit_insn (gen_rtx (SET
, VOIDmode
, temp
, GEN_INT (val
)));
502 /* For MINUS, the value is subtracted from, since we never
503 have subtraction of a constant. */
505 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
506 gen_rtx (code
, mode
, temp
, source
)));
508 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
509 gen_rtx (code
, mode
, source
, temp
)));
/* Cheap enough: emit the constant-building sequence for real.  */
515 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
518 /* As above, but extra parameter GENERATE which, if clear, suppresses
/* Worker for arm_split_constant: builds VAL with a sequence of ARM
   data-processing insns and returns the insn count; when GENERATE is
   clear it only counts.  Very many interior lines (branch structure,
   some operands) are missing from this extraction, so comments below
   only mark the sections that remain visible.  */
521 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
523 enum machine_mode mode
;
533 int can_negate_initial
= 0;
536 int num_bits_set
= 0;
537 int set_sign_bit_copies
= 0;
538 int clear_sign_bit_copies
= 0;
539 int clear_zero_bit_copies
= 0;
540 int set_zero_bit_copies
= 0;
543 unsigned HOST_WIDE_INT temp1
, temp2
;
544 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
546 /* find out which operations are safe for a given CODE.  Also do a quick
547 check for degenerate cases; these can occur when DImode operations
559 can_negate_initial
= 1;
/* Degenerate-case handling per CODE (the switch itself is on missing
   lines): all-ones, equal target/source, zero constants, etc.  */
563 if (remainder
== 0xffffffff)
566 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
567 GEN_INT (ARM_SIGN_EXTEND (val
))));
572 if (reload_completed
&& rtx_equal_p (target
, source
))
575 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
584 emit_insn (gen_rtx (SET
, VOIDmode
, target
, const0_rtx
));
587 if (remainder
== 0xffffffff)
589 if (reload_completed
&& rtx_equal_p (target
, source
))
592 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
))
;
601 if (reload_completed
&& rtx_equal_p (target
, source
))
604 emit_insn (gen_rtx (SET
, VOIDmode
, target
, source
));
607 if (remainder
== 0xffffffff)
610 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
611 gen_rtx (NOT
, mode
, source
)));
615 /* We don't know how to handle this yet below. */
619 /* We treat MINUS as (val - source), since (source - val) is always
620 passed as (source + (-val)). */
624 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
625 gen_rtx (NEG
, mode
, source
)));
628 if (const_ok_for_arm (val
))
631 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
632 gen_rtx (MINUS
, mode
, GEN_INT (val
), source
)));
643 /* If we can do it in one insn get out quickly */
644 if (const_ok_for_arm (val
)
645 || (can_negate_initial
&& const_ok_for_arm (-val
))
646 || (can_invert
&& const_ok_for_arm (~val
)))
649 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
650 (source
? gen_rtx (code
, mode
, source
,
657 /* Calculate a few attributes that may be useful for specific
/* Count leading/trailing runs of clear and set bits in REMAINDER; these
   drive the shift-based strategies below.  */
660 for (i
= 31; i
>= 0; i
--)
662 if ((remainder
& (1 << i
)) == 0)
663 clear_sign_bit_copies
++;
668 for (i
= 31; i
>= 0; i
--)
670 if ((remainder
& (1 << i
)) != 0)
671 set_sign_bit_copies
++;
676 for (i
= 0; i
<= 31; i
++)
678 if ((remainder
& (1 << i
)) == 0)
679 clear_zero_bit_copies
++;
684 for (i
= 0; i
<= 31; i
++)
686 if ((remainder
& (1 << i
)) != 0)
687 set_zero_bit_copies
++;
695 /* See if we can do this by sign_extending a constant that is known
696 to be negative.  This is a good, way of doing it, since the shift
697 may well merge into a subsequent insn. */
698 if (set_sign_bit_copies
> 1)
701 (temp1
= ARM_SIGN_EXTEND (remainder
702 << (set_sign_bit_copies
- 1))))
706 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
707 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
709 emit_insn (gen_ashrsi3 (target
, new_src
,
710 GEN_INT (set_sign_bit_copies
- 1)));
714 /* For an inverted constant, we will need to set the low bits,
715 these will be shifted out of harm's way. */
716 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
717 if (const_ok_for_arm (~temp1
))
721 new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
722 emit_insn (gen_rtx (SET
, VOIDmode
, new_src
,
724 emit_insn (gen_ashrsi3 (target
, new_src
,
725 GEN_INT (set_sign_bit_copies
- 1)));
731 /* See if we can generate this by setting the bottom (or the top)
732 16 bits, and then shifting these into the other half of the
733 word.  We only look for the simplest cases, to do more would cost
734 too much.  Be careful, however, not to generate this when the
735 alternative would take fewer insns. */
736 if (val
& 0xffff0000)
738 temp1
= remainder
& 0xffff0000;
739 temp2
= remainder
& 0x0000ffff;
741 /* Overlaps outside this range are best done using other methods. */
742 for (i
= 9; i
< 24; i
++)
744 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
745 && ! const_ok_for_arm (temp2
))
747 insns
= arm_gen_constant (code
, mode
, temp2
,
748 new_src
= (subtargets
751 source
, subtargets
, generate
);
754 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
756 gen_rtx (ASHIFT
, mode
, source
,
763 /* Don't duplicate cases already considered. */
764 for (i
= 17; i
< 24; i
++)
766 if (((temp1
| (temp1
>> i
)) == remainder
)
767 && ! const_ok_for_arm (temp1
))
769 insns
= arm_gen_constant (code
, mode
, temp1
,
770 new_src
= (subtargets
773 source
, subtargets
, generate
);
776 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
778 gen_rtx (LSHIFTRT
, mode
,
779 source
, GEN_INT (i
)),
789 /* If we have IOR or XOR, and the constant can be loaded in a
790 single instruction, and we can find a temporary to put it in,
791 then this can be done in two instructions instead of 3-4. */
793 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
795 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
799 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
801 emit_insn (gen_rtx (SET
, VOIDmode
, sub
, GEN_INT (val
)));
802 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
803 gen_rtx (code
, mode
, source
, sub
)));
/* IOR/XOR with a constant occupying only the top or bottom run of bits
   can be done as shift out, operate, shift back.  */
812 if (set_sign_bit_copies
> 8
813 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
817 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
818 rtx shift
= GEN_INT (set_sign_bit_copies
);
820 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
822 gen_rtx (ASHIFT
, mode
, source
,
824 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
826 gen_rtx (LSHIFTRT
, mode
, sub
,
832 if (set_zero_bit_copies
> 8
833 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
837 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
838 rtx shift
= GEN_INT (set_zero_bit_copies
);
840 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
842 gen_rtx (LSHIFTRT
, mode
, source
,
844 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
846 gen_rtx (ASHIFT
, mode
, sub
,
852 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
856 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
857 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
858 gen_rtx (NOT
, mode
, source
)));
861 sub
= gen_reg_rtx (mode
);
862 emit_insn (gen_rtx (SET
, VOIDmode
, sub
,
863 gen_rtx (AND
, mode
, source
,
865 emit_insn (gen_rtx (SET
, VOIDmode
, target
,
866 gen_rtx (NOT
, mode
, sub
)));
873 /* See if two shifts will do 2 or more insn's worth of work. */
874 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
876 HOST_WIDE_INT shift_mask
= ((0xffffffff
877 << (32 - clear_sign_bit_copies
))
882 if ((remainder
| shift_mask
) != 0xffffffff)
886 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
887 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
888 new_source
, source
, subtargets
, 1);
892 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
893 new_source
, source
, subtargets
, 0);
898 shift
= GEN_INT (clear_sign_bit_copies
);
899 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
900 emit_insn (gen_ashlsi3 (new_source
, source
, shift
));
901 emit_insn (gen_lshrsi3 (target
, new_source
, shift
));
907 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
909 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
913 if ((remainder
| shift_mask
) != 0xffffffff)
917 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
918 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
919 new_source
, source
, subtargets
, 1);
923 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
924 new_source
, source
, subtargets
, 0);
929 shift
= GEN_INT (clear_zero_bit_copies
);
930 new_source
= subtargets
? gen_reg_rtx (mode
) : target
;
931 emit_insn (gen_lshrsi3 (new_source
, source
, shift
));
932 emit_insn (gen_ashlsi3 (target
, new_source
, shift
));
/* Count the set bits; if more than half are set it is cheaper to work
   with the inverted (or, for PLUS, negated) constant.  */
944 for (i
= 0; i
< 32; i
++)
945 if (remainder
& (1 << i
))
948 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
949 remainder
= (~remainder
) & 0xffffffff;
950 else if (code
== PLUS
&& num_bits_set
> 16)
951 remainder
= (-remainder
) & 0xffffffff;
958 /* Now try and find a way of doing the job in either two or three
960 We start by looking for the largest block of zeros that are aligned on
961 a 2-bit boundary, we then fill up the temps, wrapping around to the
962 top of the word when we drop off the bottom.
963 In the worst case this code should produce no more than four insns. */
966 int best_consecutive_zeros
= 0;
968 for (i
= 0; i
< 32; i
+= 2)
970 int consecutive_zeros
= 0;
972 if (! (remainder
& (3 << i
)))
974 while ((i
< 32) && ! (remainder
& (3 << i
)))
976 consecutive_zeros
+= 2;
979 if (consecutive_zeros
> best_consecutive_zeros
)
981 best_consecutive_zeros
= consecutive_zeros
;
982 best_start
= i
- consecutive_zeros
;
988 /* Now start emitting the insns, starting with the one with the highest
989 bit set: we do this so that the smallest number will be emitted last;
990 this is more likely to be combinable with addressing insns. */
998 if (remainder
& (3 << (i
- 2)))
1003 temp1
= remainder
& ((0x0ff << end
)
1004 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
1005 remainder
&= ~temp1
;
1010 emit_insn (gen_rtx (SET
, VOIDmode
,
1011 new_src
= (subtargets
1012 ? gen_reg_rtx (mode
)
1014 GEN_INT (can_invert
? ~temp1
: temp1
)));
1018 else if (code
== MINUS
)
1021 emit_insn (gen_rtx (SET
, VOIDmode
,
1022 new_src
= (subtargets
1023 ? gen_reg_rtx (mode
)
1025 gen_rtx (code
, mode
, GEN_INT (temp1
),
1032 emit_insn (gen_rtx (SET
, VOIDmode
,
1033 new_src
= (remainder
1035 ? gen_reg_rtx (mode
)
1038 gen_rtx (code
, mode
, source
,
1039 GEN_INT (can_invert
? ~temp1
1050 } while (remainder
);
1055 /* Canonicalize a comparison so that we are more likely to recognize it.
1056 This can be done for a few constant compares, where we can make the
1057 immediate value easier to load. */
/* Converts GT/LE <-> GE/LT (and GTU/LEU <-> GEU/LTU) by adjusting the
   constant operand by one, when the adjusted constant is a valid ARM
   immediate and no overflow at the type boundary would occur.  The switch
   on CODE and the function's return type are on lines missing from this
   extraction.  */
1059 arm_canonicalize_comparison (code
, op1
)
1063 HOST_WIDE_INT i
= INTVAL (*op1
);
/* NOTE(review): `1 << (HOST_BITS_PER_WIDE_INT - 1) - 1` below parses as
   `1 << (HOST_BITS_PER_WIDE_INT - 2)` because `-` binds tighter than `<<`;
   the boundary check for the maximum value looks like it wants
   `((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) - 1` — confirm
   against the unmangled source.  Also, shifting 1 into the sign bit of a
   signed int (as in the second test) is undefined behaviour in ISO C.  */
1073 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1) - 1)
1074 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1076 *op1
= GEN_INT (i
+1);
1077 return code
== GT
? GE
: LT
;
1083 if (i
!= (1 << (HOST_BITS_PER_WIDE_INT
- 1))
1084 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1086 *op1
= GEN_INT (i
-1);
1087 return code
== GE
? GT
: LE
;
1094 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1096 *op1
= GEN_INT (i
+ 1);
1097 return code
== GTU
? GEU
: LTU
;
1104 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1106 *op1
= GEN_INT (i
- 1);
1107 return code
== GEU
? GTU
: LEU
;
1119 /* Handle aggregates that are not laid out in a BLKmode element.
1120 This is a sub-element of RETURN_IN_MEMORY. */
/* Decides whether a small RECORD_TYPE/UNION_TYPE value must be returned in
   memory: structs fail if any field is a bit-field-typed FIELD_DECL (per
   the visible test), unions fail if any member is an aggregate that itself
   returns in memory or is a float.  The return statements are on lines
   missing from this extraction.  */
1122 arm_return_in_memory (type
)
1125 if (TREE_CODE (type
) == RECORD_TYPE
)
1129 /* For a struct, we can return in a register if every element was a
1131 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1132 if (TREE_CODE (field
) != FIELD_DECL
1133 || ! DECL_BIT_FIELD_TYPE (field
))
1138 else if (TREE_CODE (type
) == UNION_TYPE
)
1142 /* Unions can be returned in registers if every element is
1143 integral, or can be returned in an integer register. */
1144 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
1146 if (TREE_CODE (field
) != FIELD_DECL
1147 || (AGGREGATE_TYPE_P (TREE_TYPE (field
))
1148 && RETURN_IN_MEMORY (TREE_TYPE (field
)))
1149 || FLOAT_TYPE_P (TREE_TYPE (field
)))
1154 /* XXX Not sure what should be done for other aggregates, so put them in
/* Return whether X is a constant that is NOT directly usable when
   generating PIC: a SYMBOL_REF, or a CONST of (PLUS symbol ...).  The
   surrounding return statements and the function header's return type are
   on lines missing from this extraction.  */
1160 legitimate_pic_operand_p (x
)
1163 if (CONSTANT_P (x
) && flag_pic
1164 && (GET_CODE (x
) == SYMBOL_REF
1165 || (GET_CODE (x
) == CONST
1166 && GET_CODE (XEXP (x
, 0)) == PLUS
1167 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
/* Convert ORIG (a symbolic address) into a PIC-legitimate address,
   loading it through the global offset table where necessary.  REG, if
   non-null, is a scratch register to use; a fresh pseudo is allocated
   when REG is null and reload has not started.  Handles SYMBOL_REF,
   CONST (PLUS ...) and LABEL_REF forms; several interior lines are
   missing from this extraction.  */
1174 legitimize_pic_address (orig
, mode
, reg
)
1176 enum machine_mode mode
;
1179 if (GET_CODE (orig
) == SYMBOL_REF
)
1181 rtx pic_ref
, address
;
1187 if (reload_in_progress
|| reload_completed
)
1190 reg
= gen_reg_rtx (Pmode
);
1195 #ifdef AOF_ASSEMBLER
1196 /* The AOF assembler can generate relocations for these directly, and
1197 understands that the PIC register has to be added into the offset.
1199 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
1202 address
= gen_reg_rtx (Pmode
);
1206 emit_insn (gen_pic_load_addr (address
, orig
));
/* Load the symbol's value through the GOT: *(PIC reg + GOT offset).  */
1208 pic_ref
= gen_rtx (MEM
, Pmode
,
1209 gen_rtx (PLUS
, Pmode
, pic_offset_table_rtx
, address
));
1210 RTX_UNCHANGING_P (pic_ref
) = 1;
1211 insn
= emit_move_insn (reg
, pic_ref
);
1213 current_function_uses_pic_offset_table
= 1;
1214 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1216 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
, orig
,
1220 else if (GET_CODE (orig
) == CONST
)
1224 if (GET_CODE (XEXP (orig
, 0)) == PLUS
1225 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
1230 if (reload_in_progress
|| reload_completed
)
1233 reg
= gen_reg_rtx (Pmode
);
/* Legitimize base and offset separately, reusing REG for the base and
   only for the offset when the base did not consume it.  */
1236 if (GET_CODE (XEXP (orig
, 0)) == PLUS
)
1238 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
1239 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
1240 base
== reg
? 0 : reg
);
1245 if (GET_CODE (offset
) == CONST_INT
)
1247 /* The base register doesn't really matter, we only want to
1248 test the index for the appropriate mode. */
1249 GO_IF_LEGITIMATE_INDEX (mode
, 0, offset
, win
);
1251 if (! reload_in_progress
&& ! reload_completed
)
1252 offset
= force_reg (Pmode
, offset
);
1257 if (GET_CODE (offset
) == CONST_INT
)
1258 return plus_constant_for_output (base
, INTVAL (offset
));
1261 if (GET_MODE_SIZE (mode
) > 4
1262 && (GET_MODE_CLASS (mode
) == MODE_INT
1263 || TARGET_SOFT_FLOAT
))
1265 emit_insn (gen_addsi3 (reg
, base
, offset
));
1269 return gen_rtx (PLUS
, Pmode
, base
, offset
);
1271 else if (GET_CODE (orig
) == LABEL_REF
)
1272 current_function_uses_pic_offset_table
= 1;
/* Emit the PIC-register setup sequence at the start of the function:
   loads pic_offset_table_rtx with &_GLOBAL_OFFSET_TABLE_ relative to the
   PC, computed as (GOT + pc-bias) - L1 and fixed up by the
   pic_add_dot_plus_eight insn.  The enclosing function header (presumably
   arm_finalize_pic — TODO confirm against the unmangled source) is on
   lines missing from this extraction.  Does nothing when the function
   never used the PIC offset table.  */
1291 #ifndef AOF_ASSEMBLER
1292 rtx l1
, pic_tmp
, pic_tmp2
, seq
;
1293 rtx global_offset_table
;
1295 if (current_function_uses_pic_offset_table
== 0)
1302 l1
= gen_label_rtx ();
1304 global_offset_table
= gen_rtx (SYMBOL_REF
, Pmode
, "_GLOBAL_OFFSET_TABLE_");
1305 /* The PC contains 'dot'+8, but the label L1 is on the next
1306 instruction, so the offset is only 'dot'+4. */
1307 pic_tmp
= gen_rtx (CONST
, VOIDmode
,
1308 gen_rtx (PLUS
, Pmode
,
1309 gen_rtx (LABEL_REF
, VOIDmode
, l1
),
1311 pic_tmp2
= gen_rtx (CONST
, VOIDmode
,
1312 gen_rtx (PLUS
, Pmode
,
1313 global_offset_table
,
1316 pic_rtx
= gen_rtx (CONST
, Pmode
,
1317 gen_rtx (MINUS
, Pmode
, pic_tmp2
, pic_tmp
));
1319 emit_insn (gen_pic_load_addr (pic_offset_table_rtx
, pic_rtx
));
1320 emit_jump_insn (gen_pic_add_dot_plus_eight(l1
, pic_offset_table_rtx
));
/* Package the whole sequence and place it at the very start of the insn
   chain, ahead of the function body.  */
1323 seq
= gen_sequence ();
1325 emit_insn_after (seq
, get_insns ());
1327 /* Need to emit this whether or not we obey regdecls,
1328 since setjmp/longjmp can cause life info to screw up. */
1329 emit_insn (gen_rtx (USE
, VOIDmode
, pic_offset_table_rtx
));
1330 #endif /* AOF_ASSEMBLER */
/* True if X is a hard/pseudo register, or a SUBREG of one.  */
1333 #define REG_OR_SUBREG_REG(X) \
1334 (GET_CODE (X) == REG \
1335 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
/* Strip a SUBREG wrapper, yielding the underlying REG (X itself if it is
   already a REG).  Only safe after REG_OR_SUBREG_REG has accepted X.  */
1337 #define REG_OR_SUBREG_RTX(X) \
1338 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
/* True if X is one of the frame-related registers (frame, stack or arg
   pointer) — used by arm_rtx_costs to avoid combining them.  */
1340 #define ARM_FRAME_RTX(X) \
1341 ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
1342 || (X) == arg_pointer_rtx)
1345 arm_rtx_costs (x
, code
, outer_code
)
1347 enum rtx_code code
, outer_code
;
1349 enum machine_mode mode
= GET_MODE (x
);
1350 enum rtx_code subcode
;
1356 /* Memory costs quite a lot for the first word, but subsequent words
1357 load at the equivalent of a single insn each. */
1358 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1359 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1366 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1373 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1375 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1376 + ((GET_CODE (XEXP (x
, 0)) == REG
1377 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1378 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1380 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1381 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1382 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1384 + ((GET_CODE (XEXP (x
, 1)) == REG
1385 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1386 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1387 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1392 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1393 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1394 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1395 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1398 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1399 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1400 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1401 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1403 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1404 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1405 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1408 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1409 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1410 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1411 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1412 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1413 || subcode
== ROTATE
|| subcode
== ROTATERT
1415 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1416 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1417 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1418 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1419 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1420 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1421 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1426 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1427 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1428 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1429 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1430 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1434 case AND
: case XOR
: case IOR
:
1437 /* Normally the frame registers will be spilt into reg+const during
1438 reload, so it is a bad idea to combine them with other instructions,
1439 since then they might not be moved outside of loops. As a compromise
1440 we allow integration with ops that have a constant as their second
1442 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1443 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1444 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1445 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1446 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1450 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1451 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1452 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1453 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1456 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1457 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1458 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1459 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1460 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
, mode
)))
1463 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1464 return (1 + extra_cost
1465 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1466 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1467 || subcode
== ROTATE
|| subcode
== ROTATERT
1469 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1470 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1471 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0))
1472 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1473 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1474 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
1480 /* There is no point basing this on the tuning, since it is always the
1481 fast variant if it exists at all */
1482 if (arm_fast_multiply
&& mode
== DImode
1483 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1484 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1485 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1488 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1492 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1494 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1495 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1496 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1498 /* Tune as appropriate */
1499 int booth_unit_size
= ((tune_flags
& FL_FAST_MULT
) ? 8 : 2);
1501 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1503 i
>>= booth_unit_size
;
1510 return (((tune_flags
& FL_FAST_MULT
) ? 8 : 30)
1511 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1512 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1515 if (arm_fast_multiply
&& mode
== SImode
1516 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
1517 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
1518 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
1519 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
1520 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
1521 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
1526 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1527 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1531 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1533 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1536 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1544 return 4 + (mode
== DImode
? 4 : 0);
1547 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1548 return (4 + (mode
== DImode
? 4 : 0)
1549 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1552 switch (GET_MODE (XEXP (x
, 0)))
1555 return (1 + (mode
== DImode
? 4 : 0)
1556 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1559 return (4 + (mode
== DImode
? 4 : 0)
1560 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1563 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1573 arm_adjust_cost (insn
, link
, dep
, cost
)
1581 if ((i_pat
= single_set (insn
)) != NULL
1582 && GET_CODE (SET_SRC (i_pat
)) == MEM
1583 && (d_pat
= single_set (dep
)) != NULL
1584 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
1586 /* This is a load after a store, there is no conflict if the load reads
1587 from a cached area. Assume that loads from the stack, and from the
1588 constant pool are cached, and that others will miss. This is a
1591 /* debug_rtx (insn);
1594 fprintf (stderr, "costs %d\n", cost); */
1596 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat
), 0))
1597 || reg_mentioned_p (stack_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1598 || reg_mentioned_p (frame_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1599 || reg_mentioned_p (hard_frame_pointer_rtx
,
1600 XEXP (SET_SRC (i_pat
), 0)))
1602 /* fprintf (stderr, "***** Now 1\n"); */
1610 /* This code has been fixed for cross compilation. */
1612 static int fpa_consts_inited
= 0;
1614 char *strings_fpa
[8] = {
1616 "4", "5", "0.5", "10"
1619 static REAL_VALUE_TYPE values_fpa
[8];
1627 for (i
= 0; i
< 8; i
++)
1629 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1633 fpa_consts_inited
= 1;
1636 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1639 const_double_rtx_ok_for_fpu (x
)
1645 if (!fpa_consts_inited
)
1648 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1649 if (REAL_VALUE_MINUS_ZERO (r
))
1652 for (i
= 0; i
< 8; i
++)
1653 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1659 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1662 neg_const_double_rtx_ok_for_fpu (x
)
1668 if (!fpa_consts_inited
)
1671 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1672 r
= REAL_VALUE_NEGATE (r
);
1673 if (REAL_VALUE_MINUS_ZERO (r
))
1676 for (i
= 0; i
< 8; i
++)
1677 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1683 /* Predicates for `match_operand' and `match_operator'. */
1685 /* s_register_operand is the same as register_operand, but it doesn't accept
1688 This function exists because at the time it was put in it led to better
1689 code. SUBREG(MEM) always needs a reload in the places where
1690 s_register_operand is used, and this seemed to lead to excessive
1694 s_register_operand (op
, mode
)
1696 enum machine_mode mode
;
1698 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1701 if (GET_CODE (op
) == SUBREG
)
1702 op
= SUBREG_REG (op
);
1704 /* We don't consider registers whose class is NO_REGS
1705 to be a register operand. */
1706 return (GET_CODE (op
) == REG
1707 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1708 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1711 /* Only accept reg, subreg(reg), const_int. */
1714 reg_or_int_operand (op
, mode
)
1716 enum machine_mode mode
;
1718 if (GET_CODE (op
) == CONST_INT
)
1721 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1724 if (GET_CODE (op
) == SUBREG
)
1725 op
= SUBREG_REG (op
);
1727 /* We don't consider registers whose class is NO_REGS
1728 to be a register operand. */
1729 return (GET_CODE (op
) == REG
1730 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1731 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1734 /* Return 1 if OP is an item in memory, given that we are in reload. */
1737 reload_memory_operand (op
, mode
)
1739 enum machine_mode mode
;
1741 int regno
= true_regnum (op
);
1743 return (! CONSTANT_P (op
)
1745 || (GET_CODE (op
) == REG
1746 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
1749 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
1752 arm_rhs_operand (op
, mode
)
1754 enum machine_mode mode
;
1756 return (s_register_operand (op
, mode
)
1757 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
1760 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
1764 arm_rhsm_operand (op
, mode
)
1766 enum machine_mode mode
;
1768 return (s_register_operand (op
, mode
)
1769 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
1770 || memory_operand (op
, mode
));
1773 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
1774 constant that is valid when negated. */
1777 arm_add_operand (op
, mode
)
1779 enum machine_mode mode
;
1781 return (s_register_operand (op
, mode
)
1782 || (GET_CODE (op
) == CONST_INT
1783 && (const_ok_for_arm (INTVAL (op
))
1784 || const_ok_for_arm (-INTVAL (op
)))));
1788 arm_not_operand (op
, mode
)
1790 enum machine_mode mode
;
1792 return (s_register_operand (op
, mode
)
1793 || (GET_CODE (op
) == CONST_INT
1794 && (const_ok_for_arm (INTVAL (op
))
1795 || const_ok_for_arm (~INTVAL (op
)))));
1798 /* Return TRUE if the operand is a memory reference which contains an
1799 offsettable address. */
1801 offsettable_memory_operand (op
, mode
)
1803 enum machine_mode mode
;
1805 if (mode
== VOIDmode
)
1806 mode
= GET_MODE (op
);
1808 return (mode
== GET_MODE (op
)
1809 && GET_CODE (op
) == MEM
1810 && offsettable_address_p (reload_completed
| reload_in_progress
,
1811 mode
, XEXP (op
, 0)));
1814 /* Return TRUE if the operand is a memory reference which is, or can be
1815 made word aligned by adjusting the offset. */
1817 alignable_memory_operand (op
, mode
)
1819 enum machine_mode mode
;
1823 if (mode
== VOIDmode
)
1824 mode
= GET_MODE (op
);
1826 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
1831 return ((GET_CODE (reg
= op
) == REG
1832 || (GET_CODE (op
) == SUBREG
1833 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
1834 || (GET_CODE (op
) == PLUS
1835 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1836 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
1837 || (GET_CODE (XEXP (op
, 0)) == SUBREG
1838 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
1839 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
1842 /* Similar to s_register_operand, but does not allow hard integer
1845 f_register_operand (op
, mode
)
1847 enum machine_mode mode
;
1849 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1852 if (GET_CODE (op
) == SUBREG
)
1853 op
= SUBREG_REG (op
);
1855 /* We don't consider registers whose class is NO_REGS
1856 to be a register operand. */
1857 return (GET_CODE (op
) == REG
1858 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1859 || REGNO_REG_CLASS (REGNO (op
)) == FPU_REGS
));
1862 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
1865 fpu_rhs_operand (op
, mode
)
1867 enum machine_mode mode
;
1869 if (s_register_operand (op
, mode
))
1871 else if (GET_CODE (op
) == CONST_DOUBLE
)
1872 return (const_double_rtx_ok_for_fpu (op
));
1878 fpu_add_operand (op
, mode
)
1880 enum machine_mode mode
;
1882 if (s_register_operand (op
, mode
))
1884 else if (GET_CODE (op
) == CONST_DOUBLE
)
1885 return (const_double_rtx_ok_for_fpu (op
)
1886 || neg_const_double_rtx_ok_for_fpu (op
));
1891 /* Return nonzero if OP is a constant power of two. */
1894 power_of_two_operand (op
, mode
)
1896 enum machine_mode mode
;
1898 if (GET_CODE (op
) == CONST_INT
)
1900 HOST_WIDE_INT value
= INTVAL(op
);
1901 return value
!= 0 && (value
& (value
- 1)) == 0;
1906 /* Return TRUE for a valid operand of a DImode operation.
1907 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1908 Note that this disallows MEM(REG+REG), but allows
1909 MEM(PRE/POST_INC/DEC(REG)). */
1912 di_operand (op
, mode
)
1914 enum machine_mode mode
;
1916 if (s_register_operand (op
, mode
))
1919 switch (GET_CODE (op
))
1926 return memory_address_p (DImode
, XEXP (op
, 0));
1933 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
1934 Either: REG, CONST_DOUBLE or MEM(DImode_address).
1935 Note that this disallows MEM(REG+REG), but allows
1936 MEM(PRE/POST_INC/DEC(REG)). */
1939 soft_df_operand (op
, mode
)
1941 enum machine_mode mode
;
1943 if (s_register_operand (op
, mode
))
1946 switch (GET_CODE (op
))
1952 return memory_address_p (DFmode
, XEXP (op
, 0));
1959 /* Return TRUE for valid index operands. */
1962 index_operand (op
, mode
)
1964 enum machine_mode mode
;
1966 return (s_register_operand(op
, mode
)
1967 || (immediate_operand (op
, mode
)
1968 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
1971 /* Return TRUE for valid shifts by a constant. This also accepts any
1972 power of two on the (somewhat overly relaxed) assumption that the
1973 shift operator in this case was a mult. */
1976 const_shift_operand (op
, mode
)
1978 enum machine_mode mode
;
1980 return (power_of_two_operand (op
, mode
)
1981 || (immediate_operand (op
, mode
)
1982 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
1985 /* Return TRUE for arithmetic operators which can be combined with a multiply
1989 shiftable_operator (x
, mode
)
1991 enum machine_mode mode
;
1993 if (GET_MODE (x
) != mode
)
1997 enum rtx_code code
= GET_CODE (x
);
1999 return (code
== PLUS
|| code
== MINUS
2000 || code
== IOR
|| code
== XOR
|| code
== AND
);
2004 /* Return TRUE for shift operators. */
2007 shift_operator (x
, mode
)
2009 enum machine_mode mode
;
2011 if (GET_MODE (x
) != mode
)
2015 enum rtx_code code
= GET_CODE (x
);
2018 return power_of_two_operand (XEXP (x
, 1));
2020 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
2021 || code
== ROTATERT
);
2025 int equality_operator (x
, mode
)
2027 enum machine_mode mode
;
2029 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
2032 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
2035 minmax_operator (x
, mode
)
2037 enum machine_mode mode
;
2039 enum rtx_code code
= GET_CODE (x
);
2041 if (GET_MODE (x
) != mode
)
2044 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
2047 /* return TRUE if x is EQ or NE */
2049 /* Return TRUE if this is the condition code register, if we aren't given
2050 a mode, accept any class CCmode register */
2053 cc_register (x
, mode
)
2055 enum machine_mode mode
;
2057 if (mode
== VOIDmode
)
2059 mode
= GET_MODE (x
);
2060 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2064 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2070 /* Return TRUE if this is the condition code register, if we aren't given
2071 a mode, accept any class CCmode register which indicates a dominance
2075 dominant_cc_register (x
, mode
)
2077 enum machine_mode mode
;
2079 if (mode
== VOIDmode
)
2081 mode
= GET_MODE (x
);
2082 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2086 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
2087 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
2088 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
2089 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
2090 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
2093 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2099 /* Return TRUE if X references a SYMBOL_REF. */
2101 symbol_mentioned_p (x
)
2107 if (GET_CODE (x
) == SYMBOL_REF
)
2110 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2111 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2117 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2118 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
2121 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
2128 /* Return TRUE if X references a LABEL_REF. */
2130 label_mentioned_p (x
)
2136 if (GET_CODE (x
) == LABEL_REF
)
2139 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2140 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2146 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2147 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
2150 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
2161 enum rtx_code code
= GET_CODE (x
);
2165 else if (code
== SMIN
)
2167 else if (code
== UMIN
)
2169 else if (code
== UMAX
)
2175 /* Return 1 if memory locations are adjacent */
2178 adjacent_mem_locations (a
, b
)
2181 int val0
= 0, val1
= 0;
2184 if ((GET_CODE (XEXP (a
, 0)) == REG
2185 || (GET_CODE (XEXP (a
, 0)) == PLUS
2186 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
2187 && (GET_CODE (XEXP (b
, 0)) == REG
2188 || (GET_CODE (XEXP (b
, 0)) == PLUS
2189 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
2191 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
2193 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
2194 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
2197 reg0
= REGNO (XEXP (a
, 0));
2198 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
2200 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
2201 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
2204 reg1
= REGNO (XEXP (b
, 0));
2205 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
2210 /* Return 1 if OP is a load multiple operation. It is known to be
2211 parallel and the first section will be tested. */
2214 load_multiple_operation (op
, mode
)
2216 enum machine_mode mode
;
2218 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2221 HOST_WIDE_INT i
= 1, base
= 0;
2225 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2228 /* Check to see if this might be a write-back */
2229 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2234 /* Now check it more carefully */
2235 if (GET_CODE (SET_DEST (elt
)) != REG
2236 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2237 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2238 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2239 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2240 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2241 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2242 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2243 != REGNO (SET_DEST (elt
)))
2249 /* Perform a quick check so we don't blow up below. */
2251 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2252 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
2253 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
2256 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
2257 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
2259 for (; i
< count
; i
++)
2261 rtx elt
= XVECEXP (op
, 0, i
);
2263 if (GET_CODE (elt
) != SET
2264 || GET_CODE (SET_DEST (elt
)) != REG
2265 || GET_MODE (SET_DEST (elt
)) != SImode
2266 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
2267 || GET_CODE (SET_SRC (elt
)) != MEM
2268 || GET_MODE (SET_SRC (elt
)) != SImode
2269 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
2270 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
2271 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
2272 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
2279 /* Return 1 if OP is a store multiple operation. It is known to be
2280 parallel and the first section will be tested. */
2283 store_multiple_operation (op
, mode
)
2285 enum machine_mode mode
;
2287 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2290 HOST_WIDE_INT i
= 1, base
= 0;
2294 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2297 /* Check to see if this might be a write-back */
2298 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2303 /* Now check it more carefully */
2304 if (GET_CODE (SET_DEST (elt
)) != REG
2305 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2306 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2307 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2308 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2309 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2310 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2311 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2312 != REGNO (SET_DEST (elt
)))
2318 /* Perform a quick check so we don't blow up below. */
2320 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2321 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
2322 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
2325 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
2326 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
2328 for (; i
< count
; i
++)
2330 elt
= XVECEXP (op
, 0, i
);
2332 if (GET_CODE (elt
) != SET
2333 || GET_CODE (SET_SRC (elt
)) != REG
2334 || GET_MODE (SET_SRC (elt
)) != SImode
2335 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2336 || GET_CODE (SET_DEST (elt
)) != MEM
2337 || GET_MODE (SET_DEST (elt
)) != SImode
2338 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2339 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2340 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2341 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2349 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2354 HOST_WIDE_INT
*load_offset
;
2356 int unsorted_regs
[4];
2357 HOST_WIDE_INT unsorted_offsets
[4];
2362 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2363 extended if required. */
2364 if (nops
< 2 || nops
> 4)
2367 /* Loop over the operands and check that the memory references are
2368 suitable (ie immediate offsets from the same base register). At
2369 the same time, extract the target register, and the memory
2371 for (i
= 0; i
< nops
; i
++)
2376 /* Convert a subreg of a mem into the mem itself. */
2377 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2378 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2380 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2383 /* Don't reorder volatile memory references; it doesn't seem worth
2384 looking for the case where the order is ok anyway. */
2385 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2388 offset
= const0_rtx
;
2390 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2391 || (GET_CODE (reg
) == SUBREG
2392 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2393 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2394 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2396 || (GET_CODE (reg
) == SUBREG
2397 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2398 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2403 base_reg
= REGNO(reg
);
2404 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2405 ? REGNO (operands
[i
])
2406 : REGNO (SUBREG_REG (operands
[i
])));
2411 if (base_reg
!= REGNO (reg
))
2412 /* Not addressed from the same base register. */
2415 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2416 ? REGNO (operands
[i
])
2417 : REGNO (SUBREG_REG (operands
[i
])));
2418 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2422 /* If it isn't an integer register, or if it overwrites the
2423 base register but isn't the last insn in the list, then
2424 we can't do this. */
2425 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2426 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2429 unsorted_offsets
[i
] = INTVAL (offset
);
2432 /* Not a suitable memory address. */
2436 /* All the useful information has now been extracted from the
2437 operands into unsorted_regs and unsorted_offsets; additionally,
2438 order[0] has been set to the lowest numbered register in the
2439 list. Sort the registers into order, and check that the memory
2440 offsets are ascending and adjacent. */
2442 for (i
= 1; i
< nops
; i
++)
2446 order
[i
] = order
[i
- 1];
2447 for (j
= 0; j
< nops
; j
++)
2448 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2449 && (order
[i
] == order
[i
- 1]
2450 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2453 /* Have we found a suitable register? if not, one must be used more
2455 if (order
[i
] == order
[i
- 1])
2458 /* Is the memory address adjacent and ascending? */
2459 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2467 for (i
= 0; i
< nops
; i
++)
2468 regs
[i
] = unsorted_regs
[order
[i
]];
2470 *load_offset
= unsorted_offsets
[order
[0]];
2473 if (unsorted_offsets
[order
[0]] == 0)
2474 return 1; /* ldmia */
2476 if (unsorted_offsets
[order
[0]] == 4)
2477 return 2; /* ldmib */
2479 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2480 return 3; /* ldmda */
2482 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2483 return 4; /* ldmdb */
2485 /* Can't do it without setting up the offset, only do this if it takes
2486 no more than one insn. */
2487 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2488 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2492 emit_ldm_seq (operands
, nops
)
2498 HOST_WIDE_INT offset
;
2502 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2505 strcpy (buf
, "ldm%?ia\t");
2509 strcpy (buf
, "ldm%?ib\t");
2513 strcpy (buf
, "ldm%?da\t");
2517 strcpy (buf
, "ldm%?db\t");
2522 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2523 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2526 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2527 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2529 output_asm_insn (buf
, operands
);
2531 strcpy (buf
, "ldm%?ia\t");
2538 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2539 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2541 for (i
= 1; i
< nops
; i
++)
2542 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2543 reg_names
[regs
[i
]]);
2545 strcat (buf
, "}\t%@ phole ldm");
2547 output_asm_insn (buf
, operands
);
2552 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2557 HOST_WIDE_INT
*load_offset
;
2559 int unsorted_regs
[4];
2560 HOST_WIDE_INT unsorted_offsets
[4];
2565 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2566 extended if required. */
2567 if (nops
< 2 || nops
> 4)
2570 /* Loop over the operands and check that the memory references are
2571 suitable (ie immediate offsets from the same base register). At
2572 the same time, extract the target register, and the memory
2574 for (i
= 0; i
< nops
; i
++)
2579 /* Convert a subreg of a mem into the mem itself. */
2580 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2581 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2583 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2586 /* Don't reorder volatile memory references; it doesn't seem worth
2587 looking for the case where the order is ok anyway. */
2588 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2591 offset
= const0_rtx
;
2593 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2594 || (GET_CODE (reg
) == SUBREG
2595 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2596 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2597 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2599 || (GET_CODE (reg
) == SUBREG
2600 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2601 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2606 base_reg
= REGNO(reg
);
2607 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2608 ? REGNO (operands
[i
])
2609 : REGNO (SUBREG_REG (operands
[i
])));
2614 if (base_reg
!= REGNO (reg
))
2615 /* Not addressed from the same base register. */
2618 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2619 ? REGNO (operands
[i
])
2620 : REGNO (SUBREG_REG (operands
[i
])));
2621 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2625 /* If it isn't an integer register, then we can't do this. */
2626 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2629 unsorted_offsets
[i
] = INTVAL (offset
);
2632 /* Not a suitable memory address. */
2636 /* All the useful information has now been extracted from the
2637 operands into unsorted_regs and unsorted_offsets; additionally,
2638 order[0] has been set to the lowest numbered register in the
2639 list. Sort the registers into order, and check that the memory
2640 offsets are ascending and adjacent. */
2642 for (i
= 1; i
< nops
; i
++)
2646 order
[i
] = order
[i
- 1];
2647 for (j
= 0; j
< nops
; j
++)
2648 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2649 && (order
[i
] == order
[i
- 1]
2650 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2653 /* Have we found a suitable register? if not, one must be used more
2655 if (order
[i
] == order
[i
- 1])
2658 /* Is the memory address adjacent and ascending? */
2659 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2667 for (i
= 0; i
< nops
; i
++)
2668 regs
[i
] = unsorted_regs
[order
[i
]];
2670 *load_offset
= unsorted_offsets
[order
[0]];
2673 if (unsorted_offsets
[order
[0]] == 0)
2674 return 1; /* stmia */
2676 if (unsorted_offsets
[order
[0]] == 4)
2677 return 2; /* stmib */
2679 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2680 return 3; /* stmda */
2682 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2683 return 4; /* stmdb */
2689 emit_stm_seq (operands
, nops
)
2695 HOST_WIDE_INT offset
;
2699 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2702 strcpy (buf
, "stm%?ia\t");
2706 strcpy (buf
, "stm%?ib\t");
2710 strcpy (buf
, "stm%?da\t");
2714 strcpy (buf
, "stm%?db\t");
2721 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2722 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2724 for (i
= 1; i
< nops
; i
++)
2725 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2726 reg_names
[regs
[i
]]);
2728 strcat (buf
, "}\t%@ phole stm");
2730 output_asm_insn (buf
, operands
);
2735 multi_register_push (op
, mode
)
2737 enum machine_mode mode
;
2739 if (GET_CODE (op
) != PARALLEL
2740 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2741 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
2742 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
2749 /* Routines for use with attributes */
2751 /* Return nonzero if ATTR is a valid attribute for DECL.
2752 ATTRIBUTES are any existing attributes and ARGS are the arguments
2755 Supported attributes:
2757 naked: don't output any prologue or epilogue code, the user is assumed
2758 to do the right thing. */
2761 arm_valid_machine_decl_attribute (decl
, attributes
, attr
, args
)
2767 if (args
!= NULL_TREE
)
2770 if (is_attribute_p ("naked", attr
))
2771 return TREE_CODE (decl
) == FUNCTION_DECL
;
2775 /* Return non-zero if FUNC is a naked function. */
2778 arm_naked_function_p (func
)
2783 if (TREE_CODE (func
) != FUNCTION_DECL
)
2786 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
2787 return a
!= NULL_TREE
;
2790 /* Routines for use in generating RTL */
2793 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
, unchanging_p
,
2805 int sign
= up
? 1 : -1;
2808 result
= gen_rtx (PARALLEL
, VOIDmode
,
2809 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2812 XVECEXP (result
, 0, 0)
2813 = gen_rtx (SET
, GET_MODE (from
), from
,
2814 plus_constant (from
, count
* 4 * sign
));
2819 for (j
= 0; i
< count
; i
++, j
++)
2821 mem
= gen_rtx (MEM
, SImode
, plus_constant (from
, j
* 4 * sign
));
2822 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2823 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2825 XVECEXP (result
, 0, i
) = gen_rtx (SET
, VOIDmode
,
2826 gen_rtx (REG
, SImode
, base_regno
+ j
),
2831 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, from
);
2837 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
, unchanging_p
,
2849 int sign
= up
? 1 : -1;
2852 result
= gen_rtx (PARALLEL
, VOIDmode
,
2853 rtvec_alloc (count
+ (write_back
? 2 : 0)));
2856 XVECEXP (result
, 0, 0)
2857 = gen_rtx (SET
, GET_MODE (to
), to
,
2858 plus_constant (to
, count
* 4 * sign
));
2863 for (j
= 0; i
< count
; i
++, j
++)
2865 mem
= gen_rtx (MEM
, SImode
, plus_constant (to
, j
* 4 * sign
));
2866 RTX_UNCHANGING_P (mem
) = unchanging_p
;
2867 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
2869 XVECEXP (result
, 0, i
) = gen_rtx (SET
, VOIDmode
, mem
,
2870 gen_rtx (REG
, SImode
, base_regno
+ j
));
2874 XVECEXP (result
, 0, i
) = gen_rtx (CLOBBER
, SImode
, to
);
2880 arm_gen_movstrqi (operands
)
2883 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
2886 rtx st_src
, st_dst
, end_src
, end_dst
, fin_src
, fin_dst
;
2887 rtx part_bytes_reg
= NULL
;
2889 int dst_unchanging_p
, dst_in_struct_p
, src_unchanging_p
, src_in_struct_p
;
2890 extern int optimize
;
2892 if (GET_CODE (operands
[2]) != CONST_INT
2893 || GET_CODE (operands
[3]) != CONST_INT
2894 || INTVAL (operands
[2]) > 64
2895 || INTVAL (operands
[3]) & 3)
2898 st_dst
= XEXP (operands
[0], 0);
2899 st_src
= XEXP (operands
[1], 0);
2901 dst_unchanging_p
= RTX_UNCHANGING_P (operands
[0]);
2902 dst_in_struct_p
= MEM_IN_STRUCT_P (operands
[0]);
2903 src_unchanging_p
= RTX_UNCHANGING_P (operands
[1]);
2904 src_in_struct_p
= MEM_IN_STRUCT_P (operands
[1]);
2906 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
2907 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
2909 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
2910 out_words_to_go
= INTVAL (operands
[2]) / 4;
2911 last_bytes
= INTVAL (operands
[2]) & 3;
2913 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
2914 part_bytes_reg
= gen_rtx (REG
, SImode
, (in_words_to_go
- 1) & 3);
2916 for (i
= 0; in_words_to_go
>= 2; i
+=4)
2918 if (in_words_to_go
> 4)
2919 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
2920 src_unchanging_p
, src_in_struct_p
));
2922 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
2923 FALSE
, src_unchanging_p
,
2926 if (out_words_to_go
)
2928 if (out_words_to_go
> 4)
2929 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
2932 else if (out_words_to_go
!= 1)
2933 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
2941 mem
= gen_rtx (MEM
, SImode
, dst
);
2942 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2943 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2944 emit_move_insn (mem
, gen_rtx (REG
, SImode
, 0));
2945 if (last_bytes
!= 0)
2946 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
2950 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
2951 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
2954 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
2955 if (out_words_to_go
)
2959 mem
= gen_rtx (MEM
, SImode
, src
);
2960 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2961 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2962 emit_move_insn (sreg
= gen_reg_rtx (SImode
), mem
);
2963 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
2965 mem
= gen_rtx (MEM
, SImode
, dst
);
2966 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
2967 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
2968 emit_move_insn (mem
, sreg
);
2969 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
2972 if (in_words_to_go
) /* Sanity check */
2978 if (in_words_to_go
< 0)
2981 mem
= gen_rtx (MEM
, SImode
, src
);
2982 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
2983 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
2984 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
2987 if (BYTES_BIG_ENDIAN
&& last_bytes
)
2989 rtx tmp
= gen_reg_rtx (SImode
);
2991 if (part_bytes_reg
== NULL
)
2994 /* The bytes we want are in the top end of the word */
2995 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
2996 GEN_INT (8 * (4 - last_bytes
))));
2997 part_bytes_reg
= tmp
;
3001 mem
= gen_rtx (MEM
, QImode
, plus_constant (dst
, last_bytes
- 1));
3002 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3003 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3004 emit_move_insn (mem
, gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
3007 tmp
= gen_reg_rtx (SImode
);
3008 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3009 part_bytes_reg
= tmp
;
3018 if (part_bytes_reg
== NULL
)
3021 mem
= gen_rtx (MEM
, QImode
, dst
);
3022 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3023 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3024 emit_move_insn (mem
, gen_rtx (SUBREG
, QImode
, part_bytes_reg
, 0));
3027 rtx tmp
= gen_reg_rtx (SImode
);
3029 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
3030 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3031 part_bytes_reg
= tmp
;
3039 /* Generate a memory reference for a half word, such that it will be loaded
3040 into the top 16 bits of the word. We can assume that the address is
3041 known to be alignable and of the form reg, or plus (reg, const). */
3043 gen_rotated_half_load (memref
)
3046 HOST_WIDE_INT offset
= 0;
3047 rtx base
= XEXP (memref
, 0);
3049 if (GET_CODE (base
) == PLUS
)
3051 offset
= INTVAL (XEXP (base
, 1));
3052 base
= XEXP (base
, 0);
3055 /* If we aren't allowed to generate unaligned addresses, then fail. */
3056 if (TARGET_SHORT_BY_BYTES
3057 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
3060 base
= gen_rtx (MEM
, SImode
, plus_constant (base
, offset
& ~2));
3062 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
3065 return gen_rtx (ROTATE
, SImode
, base
, GEN_INT (16));
3068 static enum machine_mode
3069 select_dominance_cc_mode (op
, x
, y
, cond_or
)
3073 HOST_WIDE_INT cond_or
;
3075 enum rtx_code cond1
, cond2
;
3078 /* Currently we will probably get the wrong result if the individual
3079 comparisons are not simple. This also ensures that it is safe to
3080 reverse a comparison if necessary. */
3081 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
3083 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
3088 cond1
= reverse_condition (cond1
);
3090 /* If the comparisons are not equal, and one doesn't dominate the other,
3091 then we can't do this. */
3093 && ! comparison_dominates_p (cond1
, cond2
)
3094 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
3099 enum rtx_code temp
= cond1
;
3107 if (cond2
== EQ
|| ! cond_or
)
3112 case LE
: return CC_DLEmode
;
3113 case LEU
: return CC_DLEUmode
;
3114 case GE
: return CC_DGEmode
;
3115 case GEU
: return CC_DGEUmode
;
3121 if (cond2
== LT
|| ! cond_or
)
3130 if (cond2
== GT
|| ! cond_or
)
3139 if (cond2
== LTU
|| ! cond_or
)
3148 if (cond2
== GTU
|| ! cond_or
)
3156 /* The remaining cases only occur when both comparisons are the
3178 arm_select_cc_mode (op
, x
, y
)
3183 /* All floating point compares return CCFP if it is an equality
3184 comparison, and CCFPE otherwise. */
3185 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3186 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
3188 /* A compare with a shifted operand. Because of canonicalization, the
3189 comparison will have to be swapped when we emit the assembler. */
3190 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
3191 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3192 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
3193 || GET_CODE (x
) == ROTATERT
))
3196 /* This is a special case that is used by combine to allow a
3197 comparison of a shifted byte load to be split into a zero-extend
3198 followed by a comparison of the shifted integer (only valid for
3199 equalities and unsigned inequalities). */
3200 if (GET_MODE (x
) == SImode
3201 && GET_CODE (x
) == ASHIFT
3202 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
3203 && GET_CODE (XEXP (x
, 0)) == SUBREG
3204 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
3205 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
3206 && (op
== EQ
|| op
== NE
3207 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
3208 && GET_CODE (y
) == CONST_INT
)
3211 /* An operation that sets the condition codes as a side-effect, the
3212 V flag is not set correctly, so we can only use comparisons where
3213 this doesn't matter. (For LT and GE we can use "mi" and "pl"
3215 if (GET_MODE (x
) == SImode
3217 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
3218 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
3219 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
3220 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
3221 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
3222 || GET_CODE (x
) == LSHIFTRT
3223 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3224 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
3227 /* A construct for a conditional compare, if the false arm contains
3228 0, then both conditions must be true, otherwise either condition
3229 must be true. Not all conditions are possible, so CCmode is
3230 returned if it can't be done. */
3231 if (GET_CODE (x
) == IF_THEN_ELSE
3232 && (XEXP (x
, 2) == const0_rtx
3233 || XEXP (x
, 2) == const1_rtx
)
3234 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
3235 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
3236 return select_dominance_cc_mode (op
, XEXP (x
, 0), XEXP (x
, 1),
3237 INTVAL (XEXP (x
, 2)));
3239 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
3242 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
3243 && GET_CODE (x
) == PLUS
3244 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
3250 /* X and Y are two things to compare using CODE. Emit the compare insn and
3251 return the rtx for register 0 in the proper mode. FP means this is a
3252 floating point compare: I don't think that it is needed on the arm. */
3255 gen_compare_reg (code
, x
, y
, fp
)
3259 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
3260 rtx cc_reg
= gen_rtx (REG
, mode
, 24);
3262 emit_insn (gen_rtx (SET
, VOIDmode
, cc_reg
,
3263 gen_rtx (COMPARE
, mode
, x
, y
)));
3269 arm_reload_in_hi (operands
)
3272 rtx base
= find_replacement (&XEXP (operands
[1], 0));
3274 emit_insn (gen_zero_extendqisi2 (operands
[2], gen_rtx (MEM
, QImode
, base
)));
3275 emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG
, SImode
, operands
[0], 0),
3276 gen_rtx (MEM
, QImode
,
3277 plus_constant (base
, 1))));
3278 if (BYTES_BIG_ENDIAN
)
3279 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3281 gen_rtx (IOR
, SImode
,
3282 gen_rtx (ASHIFT
, SImode
,
3283 gen_rtx (SUBREG
, SImode
,
3288 emit_insn (gen_rtx (SET
, VOIDmode
, gen_rtx (SUBREG
, SImode
,
3290 gen_rtx (IOR
, SImode
,
3291 gen_rtx (ASHIFT
, SImode
,
3294 gen_rtx (SUBREG
, SImode
, operands
[0], 0))));
3298 arm_reload_out_hi (operands
)
3301 rtx base
= find_replacement (&XEXP (operands
[0], 0));
3303 if (BYTES_BIG_ENDIAN
)
3305 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3306 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3307 emit_insn (gen_lshrsi3 (operands
[2],
3308 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3310 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3311 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3315 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, base
),
3316 gen_rtx (SUBREG
, QImode
, operands
[1], 0)));
3317 emit_insn (gen_lshrsi3 (operands
[2],
3318 gen_rtx (SUBREG
, SImode
, operands
[1], 0),
3320 emit_insn (gen_movqi (gen_rtx (MEM
, QImode
, plus_constant (base
, 1)),
3321 gen_rtx (SUBREG
, QImode
, operands
[2], 0)));
3325 /* Routines for manipulation of the constant pool. */
3326 /* This is unashamedly hacked from the version in sh.c, since the problem is
3327 extremely similar. */
3329 /* Arm instructions cannot load a large constant into a register,
3330 constants have to come from a pc relative load. The reference of a pc
3331 relative load instruction must be less than 1k in front of the instruction.
3332 This means that we often have to dump a constant inside a function, and
3333 generate code to branch around it.
3335 It is important to minimize this, since the branches will slow things
3336 down and make things bigger.
3338 Worst case code looks like:
3354 We fix this by performing a scan before scheduling, which notices which
3355 instructions need to have their operands fetched from the constant table
3356 and builds the table.
3361 scan, find an instruction which needs a pcrel move. Look forward, find the
3362 last barrier which is within MAX_COUNT bytes of the requirement.
3363 If there isn't one, make one. Process all the instructions between
3364 the find and the barrier.
3366 In the above example, we can tell that L3 is within 1k of L1, so
3367 the first move can be shrunk from the 2 insn+constant sequence into
3368 just 1 insn, and the constant moved to L3 to make:
3379 Then the second move becomes the target for the shortening process.
3385 rtx value
; /* Value in table */
3386 HOST_WIDE_INT next_offset
;
3387 enum machine_mode mode
; /* Mode of value */
3390 /* The maximum number of constants that can fit into one pool, since
3391 the pc relative range is 0...1020 bytes and constants are at least 4
3394 #define MAX_POOL_SIZE (1020/4)
3395 static pool_node pool_vector
[MAX_POOL_SIZE
];
3396 static int pool_size
;
3397 static rtx pool_vector_label
;
3399 /* Add a constant to the pool and return its label. */
3400 static HOST_WIDE_INT
3401 add_constant (x
, mode
)
3403 enum machine_mode mode
;
3407 HOST_WIDE_INT offset
;
3409 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3410 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3411 x
= get_pool_constant (XEXP (x
, 0));
3412 #ifndef AOF_ASSEMBLER
3413 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
3414 x
= XVECEXP (x
, 0, 0);
3417 #ifdef AOF_ASSEMBLER
3418 /* PIC Symbol references need to be converted into offsets into the
3420 if (flag_pic
&& GET_CODE (x
) == SYMBOL_REF
)
3421 x
= aof_pic_entry (x
);
3422 #endif /* AOF_ASSEMBLER */
3424 /* First see if we've already got it */
3425 for (i
= 0; i
< pool_size
; i
++)
3427 if (GET_CODE (x
) == pool_vector
[i
].value
->code
3428 && mode
== pool_vector
[i
].mode
)
3430 if (GET_CODE (x
) == CODE_LABEL
)
3432 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
3435 if (rtx_equal_p (x
, pool_vector
[i
].value
))
3436 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
3440 /* Need a new one */
3441 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
3444 pool_vector_label
= gen_label_rtx ();
3446 pool_vector
[pool_size
].next_offset
3447 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
3449 pool_vector
[pool_size
].value
= x
;
3450 pool_vector
[pool_size
].mode
= mode
;
3455 /* Output the literal table */
3462 scan
= emit_label_after (gen_label_rtx (), scan
);
3463 scan
= emit_insn_after (gen_align_4 (), scan
);
3464 scan
= emit_label_after (pool_vector_label
, scan
);
3466 for (i
= 0; i
< pool_size
; i
++)
3468 pool_node
*p
= pool_vector
+ i
;
3470 switch (GET_MODE_SIZE (p
->mode
))
3473 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
3477 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
3486 scan
= emit_insn_after (gen_consttable_end (), scan
);
3487 scan
= emit_barrier_after (scan
);
3491 /* Nonzero if the src operand needs to be fixed up. */
3493 fixit (src
, mode
, destreg
)
3495 enum machine_mode mode
;
3498 if (CONSTANT_P (src
))
3500 if (GET_CODE (src
) == CONST_INT
)
3501 return (! const_ok_for_arm (INTVAL (src
))
3502 && ! const_ok_for_arm (~INTVAL (src
)));
3503 if (GET_CODE (src
) == CONST_DOUBLE
)
3504 return (GET_MODE (src
) == VOIDmode
3506 || (! const_double_rtx_ok_for_fpu (src
)
3507 && ! neg_const_double_rtx_ok_for_fpu (src
)));
3508 return symbol_mentioned_p (src
);
3510 #ifndef AOF_ASSEMBLER
3511 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
3515 return (mode
== SImode
&& GET_CODE (src
) == MEM
3516 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
3517 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
3520 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
3522 find_barrier (from
, max_count
)
3527 rtx found_barrier
= 0;
3529 while (from
&& count
< max_count
)
3531 if (GET_CODE (from
) == BARRIER
)
3532 found_barrier
= from
;
3534 /* Count the length of this insn */
3535 if (GET_CODE (from
) == INSN
3536 && GET_CODE (PATTERN (from
)) == SET
3537 && CONSTANT_P (SET_SRC (PATTERN (from
)))
3538 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
3540 rtx src
= SET_SRC (PATTERN (from
));
3544 count
+= get_attr_length (from
);
3546 from
= NEXT_INSN (from
);
3551 /* We didn't find a barrier in time to
3552 dump our stuff, so we'll make one */
3553 rtx label
= gen_label_rtx ();
3556 from
= PREV_INSN (from
);
3558 from
= get_last_insn ();
3560 /* Walk back to be just before any jump */
3561 while (GET_CODE (from
) == JUMP_INSN
3562 || GET_CODE (from
) == NOTE
3563 || GET_CODE (from
) == CODE_LABEL
)
3564 from
= PREV_INSN (from
);
3566 from
= emit_jump_insn_after (gen_jump (label
), from
);
3567 JUMP_LABEL (from
) = label
;
3568 found_barrier
= emit_barrier_after (from
);
3569 emit_label_after (label
, found_barrier
);
3570 return found_barrier
;
3573 return found_barrier
;
3576 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3581 if (!INSN_DELETED_P (insn
)
3582 && GET_CODE (insn
) == INSN
3583 && GET_CODE (PATTERN (insn
)) == SET
)
3585 rtx pat
= PATTERN (insn
);
3586 rtx src
= SET_SRC (pat
);
3587 rtx dst
= SET_DEST (pat
);
3589 enum machine_mode mode
= GET_MODE (dst
);
3593 if (GET_CODE (dst
) == REG
)
3594 destreg
= REGNO (dst
);
3595 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
3596 destreg
= REGNO (SUBREG_REG (dst
));
3598 return fixit (src
, mode
, destreg
);
3612 /* The ldr instruction can work with up to a 4k offset, and most constants
3613 will be loaded with one of these instructions; however, the adr
3614 instruction and the ldf instructions only work with a 1k offset. This
3615 code needs to be rewritten to use the 4k offset when possible, and to
3616 adjust when a 1k offset is needed. For now we just use a 1k offset
3620 /* Floating point operands can't work further than 1024 bytes from the
3621 PC, so to make things simple we restrict all loads for such functions.
3623 if (TARGET_HARD_FLOAT
)
3624 for (regno
= 16; regno
< 24; regno
++)
3625 if (regs_ever_live
[regno
])
3634 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
3636 if (broken_move (insn
))
3638 /* This is a broken move instruction, scan ahead looking for
3639 a barrier to stick the constant table behind */
3641 rtx barrier
= find_barrier (insn
, count_size
);
3643 /* Now find all the moves between the points and modify them */
3644 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
3646 if (broken_move (scan
))
3648 /* This is a broken move instruction, add it to the pool */
3649 rtx pat
= PATTERN (scan
);
3650 rtx src
= SET_SRC (pat
);
3651 rtx dst
= SET_DEST (pat
);
3652 enum machine_mode mode
= GET_MODE (dst
);
3653 HOST_WIDE_INT offset
;
3659 /* If this is an HImode constant load, convert it into
3660 an SImode constant load. Since the register is always
3661 32 bits this is safe. We have to do this, since the
3662 load pc-relative instruction only does a 32-bit load. */
3666 if (GET_CODE (dst
) != REG
)
3668 PUT_MODE (dst
, SImode
);
3671 offset
= add_constant (src
, mode
);
3672 addr
= plus_constant (gen_rtx (LABEL_REF
, VOIDmode
,
3676 /* For wide moves to integer regs we need to split the
3677 address calculation off into a separate insn, so that
3678 the load can then be done with a load-multiple. This is
3679 safe, since we have already noted the length of such
3680 insns to be 8, and we are immediately over-writing the
3681 scratch we have grabbed with the final result. */
3682 if (GET_MODE_SIZE (mode
) > 4
3683 && (scratch
= REGNO (dst
)) < 16)
3685 rtx reg
= gen_rtx (REG
, SImode
, scratch
);
3686 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
3691 newsrc
= gen_rtx (MEM
, mode
, addr
);
3693 /* Build a jump insn wrapper around the move instead
3694 of an ordinary insn, because we want to have room for
3695 the target label rtx in fld[7], which an ordinary
3696 insn doesn't have. */
3697 newinsn
= emit_jump_insn_after (gen_rtx (SET
, VOIDmode
,
3700 JUMP_LABEL (newinsn
) = pool_vector_label
;
3702 /* But it's still an ordinary insn */
3703 PUT_CODE (newinsn
, INSN
);
3710 dump_table (barrier
);
3717 /* Routines to output assembly language. */
3719 /* If the rtx is the correct value then return the string of the number.
3720 In this way we can ensure that valid double constants are generated even
3721 when cross compiling. */
3723 fp_immediate_constant (x
)
3729 if (!fpa_consts_inited
)
3732 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
3733 for (i
= 0; i
< 8; i
++)
3734 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
3735 return strings_fpa
[i
];
3740 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
3742 fp_const_from_val (r
)
3747 if (! fpa_consts_inited
)
3750 for (i
= 0; i
< 8; i
++)
3751 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
3752 return strings_fpa
[i
];
3757 /* Output the operands of a LDM/STM instruction to STREAM.
3758 MASK is the ARM register set mask of which only bits 0-15 are important.
3759 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
3760 must follow the register list. */
3763 print_multi_reg (stream
, instr
, mask
, hat
)
3769 int not_first
= FALSE
;
3771 fputc ('\t', stream
);
3772 fprintf (stream
, instr
, REGISTER_PREFIX
);
3773 fputs (", {", stream
);
3774 for (i
= 0; i
< 16; i
++)
3775 if (mask
& (1 << i
))
3778 fprintf (stream
, ", ");
3779 fprintf (stream
, "%s%s", REGISTER_PREFIX
, reg_names
[i
]);
3783 fprintf (stream
, "}%s\n", hat
? "^" : "");
3786 /* Output a 'call' insn. */
3789 output_call (operands
)
3792 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
3794 if (REGNO (operands
[0]) == 14)
3796 operands
[0] = gen_rtx (REG
, SImode
, 12);
3797 output_asm_insn ("mov%?\t%0, %|lr", operands
);
3799 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3800 output_asm_insn ("mov%?\t%|pc, %0", operands
);
3808 int something_changed
= 0;
3810 int code
= GET_CODE (x0
);
3817 if (REGNO (x0
) == 14)
3819 *x
= gen_rtx (REG
, SImode
, 12);
3824 /* Scan through the sub-elements and change any references there */
3825 fmt
= GET_RTX_FORMAT (code
);
3826 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3828 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
3829 else if (fmt
[i
] == 'E')
3830 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
3831 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
3832 return something_changed
;
3836 /* Output a 'call' insn that is a reference in memory. */
3839 output_call_mem (operands
)
3842 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
3843 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
3845 if (eliminate_lr2ip (&operands
[0]))
3846 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
3848 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
3849 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
3854 /* Output a move from arm registers to an fpu registers.
3855 OPERANDS[0] is an fpu register.
3856 OPERANDS[1] is the first registers of an arm register pair. */
3859 output_mov_long_double_fpu_from_arm (operands
)
3862 int arm_reg0
= REGNO (operands
[1]);
3868 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3869 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3870 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3872 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3873 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
3877 /* Output a move from an fpu register to arm registers.
3878 OPERANDS[0] is the first registers of an arm register pair.
3879 OPERANDS[1] is an fpu register. */
3882 output_mov_long_double_arm_from_fpu (operands
)
3885 int arm_reg0
= REGNO (operands
[0]);
3891 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3892 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3893 ops
[2] = gen_rtx (REG
, SImode
, 2 + arm_reg0
);
3895 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
3896 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
3900 /* Output a move from arm registers to arm registers of a long double
3901 OPERANDS[0] is the destination.
3902 OPERANDS[1] is the source. */
3904 output_mov_long_double_arm_from_arm (operands
)
3907 /* We have to be careful here because the two might overlap */
3908 int dest_start
= REGNO (operands
[0]);
3909 int src_start
= REGNO (operands
[1]);
3913 if (dest_start
< src_start
)
3915 for (i
= 0; i
< 3; i
++)
3917 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3918 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3919 output_asm_insn ("mov%?\t%0, %1", ops
);
3924 for (i
= 2; i
>= 0; i
--)
3926 ops
[0] = gen_rtx (REG
, SImode
, dest_start
+ i
);
3927 ops
[1] = gen_rtx (REG
, SImode
, src_start
+ i
);
3928 output_asm_insn ("mov%?\t%0, %1", ops
);
3936 /* Output a move from arm registers to an fpu registers.
3937 OPERANDS[0] is an fpu register.
3938 OPERANDS[1] is the first registers of an arm register pair. */
3941 output_mov_double_fpu_from_arm (operands
)
3944 int arm_reg0
= REGNO (operands
[1]);
3949 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3950 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3951 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
3952 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
3956 /* Output a move from an fpu register to arm registers.
3957 OPERANDS[0] is the first registers of an arm register pair.
3958 OPERANDS[1] is an fpu register. */
3961 output_mov_double_arm_from_fpu (operands
)
3964 int arm_reg0
= REGNO (operands
[0]);
3970 ops
[0] = gen_rtx (REG
, SImode
, arm_reg0
);
3971 ops
[1] = gen_rtx (REG
, SImode
, 1 + arm_reg0
);
3972 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
3973 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
3977 /* Output a move between double words.
3978 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
3979 or MEM<-REG and all MEMs must be offsettable addresses. */
3982 output_move_double (operands
)
3985 enum rtx_code code0
= GET_CODE (operands
[0]);
3986 enum rtx_code code1
= GET_CODE (operands
[1]);
3991 int reg0
= REGNO (operands
[0]);
3993 otherops
[0] = gen_rtx (REG
, SImode
, 1 + reg0
);
3996 int reg1
= REGNO (operands
[1]);
4000 /* Ensure the second source is not overwritten */
4001 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
4002 output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
4004 output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
4006 else if (code1
== CONST_DOUBLE
)
4008 if (GET_MODE (operands
[1]) == DFmode
)
4011 union real_extract u
;
4013 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
4015 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
4016 otherops
[1] = GEN_INT(l
[1]);
4017 operands
[1] = GEN_INT(l
[0]);
4019 else if (GET_MODE (operands
[1]) != VOIDmode
)
4021 else if (WORDS_BIG_ENDIAN
)
4024 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4025 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4030 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4031 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4033 output_mov_immediate (operands
);
4034 output_mov_immediate (otherops
);
4036 else if (code1
== CONST_INT
)
4038 #if HOST_BITS_PER_WIDE_INT > 32
4039 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
4040 what the upper word is. */
4041 if (WORDS_BIG_ENDIAN
)
4043 otherops
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4044 operands
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4048 otherops
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4049 operands
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4052 /* Sign extend the intval into the high-order word */
4053 if (WORDS_BIG_ENDIAN
)
4055 otherops
[1] = operands
[1];
4056 operands
[1] = (INTVAL (operands
[1]) < 0
4057 ? constm1_rtx
: const0_rtx
);
4060 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
4062 output_mov_immediate (otherops
);
4063 output_mov_immediate (operands
);
4065 else if (code1
== MEM
)
4067 switch (GET_CODE (XEXP (operands
[1], 0)))
4070 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
4074 abort (); /* Should never happen now */
4078 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
4082 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
4086 abort (); /* Should never happen now */
4091 output_asm_insn ("adr%?\t%0, %1", operands
);
4092 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
4096 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1)))
4098 otherops
[0] = operands
[0];
4099 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
4100 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
4101 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
4103 if (GET_CODE (otherops
[2]) == CONST_INT
)
4105 switch (INTVAL (otherops
[2]))
4108 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
4111 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
4114 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
4117 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
4118 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
4120 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4123 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4126 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
4127 return "ldm%?ia\t%0, %M0";
4131 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
4132 /* Take care of overlapping base/data reg. */
4133 if (reg_mentioned_p (operands
[0], operands
[1]))
4135 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4136 output_asm_insn ("ldr%?\t%0, %1", operands
);
4140 output_asm_insn ("ldr%?\t%0, %1", operands
);
4141 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4147 abort(); /* Constraints should prevent this */
4149 else if (code0
== MEM
&& code1
== REG
)
4151 if (REGNO (operands
[1]) == 12)
4154 switch (GET_CODE (XEXP (operands
[0], 0)))
4157 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
4161 abort (); /* Should never happen now */
4165 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
4169 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
4173 abort (); /* Should never happen now */
4177 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
4179 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
4182 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
4186 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
4190 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
4197 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
4198 otherops
[1] = gen_rtx (REG
, SImode
, 1 + REGNO (operands
[1]));
4199 output_asm_insn ("str%?\t%1, %0", operands
);
4200 output_asm_insn ("str%?\t%1, %0", otherops
);
4204 abort(); /* Constraints should prevent this */
4210 /* Output an arbitrary MOV reg, #n.
4211 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4214 output_mov_immediate (operands
)
4217 HOST_WIDE_INT n
= INTVAL (operands
[1]);
4221 /* Try to use one MOV */
4222 if (const_ok_for_arm (n
))
4224 output_asm_insn ("mov%?\t%0, %1", operands
);
4228 /* Try to use one MVN */
4229 if (const_ok_for_arm (~n
))
4231 operands
[1] = GEN_INT (~n
);
4232 output_asm_insn ("mvn%?\t%0, %1", operands
);
4236 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4238 for (i
=0; i
< 32; i
++)
4242 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
4243 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4246 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4253 /* Output an ADD r, s, #n where n may be too big for one instruction. If
4254 adding zero to one register, output nothing. */
4257 output_add_immediate (operands
)
4260 HOST_WIDE_INT n
= INTVAL (operands
[2]);
4262 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
4265 output_multi_immediate (operands
,
4266 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4269 output_multi_immediate (operands
,
4270 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4277 /* Output a multiple immediate operation.
4278 OPERANDS is the vector of operands referred to in the output patterns.
4279 INSTR1 is the output pattern to use for the first constant.
4280 INSTR2 is the output pattern to use for subsequent constants.
4281 IMMED_OP is the index of the constant slot in OPERANDS.
4282 N is the constant value. */
4285 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
4287 char *instr1
, *instr2
;
4291 #if HOST_BITS_PER_WIDE_INT > 32
4297 operands
[immed_op
] = const0_rtx
;
4298 output_asm_insn (instr1
, operands
); /* Quick and easy output */
4303 char *instr
= instr1
;
4305 /* Note that n is never zero here (which would give no output) */
4306 for (i
= 0; i
< 32; i
+= 2)
4310 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
4311 output_asm_insn (instr
, operands
);
4321 /* Return the appropriate ARM instruction for the operation code.
4322 The returned result should not be overwritten. OP is the rtx of the
4323 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
4327 arithmetic_instr (op
, shift_first_arg
)
4329 int shift_first_arg
;
4331 switch (GET_CODE (op
))
4337 return shift_first_arg
? "rsb" : "sub";
4354 /* Ensure valid constant shifts and return the appropriate shift mnemonic
4355 for the operation code. The returned result should not be overwritten.
4356 OP is the rtx code of the shift.
4357 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
4361 shift_op (op
, amountp
)
4363 HOST_WIDE_INT
*amountp
;
4366 enum rtx_code code
= GET_CODE (op
);
4368 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
4370 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
4371 *amountp
= INTVAL (XEXP (op
, 1));
4394 /* We never have to worry about the amount being other than a
4395 power of 2, since this case can never be reloaded from a reg. */
4397 *amountp
= int_log2 (*amountp
);
4408 /* This is not 100% correct, but follows from the desire to merge
4409 multiplication by a power of 2 with the recognizer for a
4410 shift. >=32 is not a valid shift for "asl", so we must try and
4411 output a shift that produces the correct arithmetical result.
4412 Using lsr #32 is identical except for the fact that the carry bit
4413 is not set correctly if we set the flags; but we never use the
4414 carry bit from such an operation, so we can ignore that. */
4415 if (code
== ROTATERT
)
4416 *amountp
&= 31; /* Rotate is just modulo 32 */
4417 else if (*amountp
!= (*amountp
& 31))
4424 /* Shifts of 0 are no-ops. */
4433 /* Obtain the shift from the POWER of two. */
4435 static HOST_WIDE_INT
4437 HOST_WIDE_INT power
;
4439 HOST_WIDE_INT shift
= 0;
4441 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
4451 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
4452 /bin/as is horribly restrictive. */
4455 output_ascii_pseudo_op (stream
, p
, len
)
4461 int len_so_far
= 1000;
4462 int chars_so_far
= 0;
4464 for (i
= 0; i
< len
; i
++)
4466 register int c
= p
[i
];
4468 if (len_so_far
> 50)
4471 fputs ("\"\n", stream
);
4472 fputs ("\t.ascii\t\"", stream
);
4477 if (c
== '\"' || c
== '\\')
4483 if (c
>= ' ' && c
< 0177)
4490 fprintf (stream
, "\\%03o", c
);
4497 fputs ("\"\n", stream
);
4501 /* Try to determine whether a pattern really clobbers the link register.
4502 This information is useful when peepholing, so that lr need not be pushed
4503 if we combine a call followed by a return.
4504 NOTE: This code does not check for side-effect expressions in a SET_SRC:
4505 such a check should not be needed because these only update an existing
4506 value within a register; the register must still be set elsewhere within
4510 pattern_really_clobbers_lr (x
)
4515 switch (GET_CODE (x
))
4518 switch (GET_CODE (SET_DEST (x
)))
4521 return REGNO (SET_DEST (x
)) == 14;
4524 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
4525 return REGNO (XEXP (SET_DEST (x
), 0)) == 14;
4527 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
4536 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
4537 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
4542 switch (GET_CODE (XEXP (x
, 0)))
4545 return REGNO (XEXP (x
, 0)) == 14;
4548 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
4549 return REGNO (XEXP (XEXP (x
, 0), 0)) == 14;
4565 function_really_clobbers_lr (first
)
4570 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
4572 switch (GET_CODE (insn
))
4577 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
4582 if (pattern_really_clobbers_lr (PATTERN (insn
)))
4587 /* Don't yet know how to handle those calls that are not to a
4589 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
4592 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
4595 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
4601 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
4607 default: /* Don't recognize it, be safe */
4611 /* A call can be made (by peepholing) not to clobber lr iff it is
4612 followed by a return. There may, however, be a use insn iff
4613 we are returning the result of the call.
4614 If we run off the end of the insn chain, then that means the
4615 call was at the end of the function. Unfortunately we don't
4616 have a return insn for the peephole to recognize, so we
4617 must reject this. (Can this be fixed by adding our own insn?) */
4618 if ((next
= next_nonnote_insn (insn
)) == NULL
)
4621 /* No need to worry about lr if the call never returns */
4622 if (GET_CODE (next
) == BARRIER
)
4625 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
4626 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
4627 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
4628 == REGNO (XEXP (PATTERN (next
), 0))))
4629 if ((next
= next_nonnote_insn (next
)) == NULL
)
4632 if (GET_CODE (next
) == JUMP_INSN
4633 && GET_CODE (PATTERN (next
)) == RETURN
)
4642 /* We have reached the end of the chain so lr was _not_ clobbered */
4647 output_return_instruction (operand
, really_return
, reverse
)
4653 int reg
, live_regs
= 0;
4654 int volatile_func
= (optimize
> 0
4655 && TREE_THIS_VOLATILE (current_function_decl
));
4657 return_used_this_function
= 1;
4662 /* If this function was declared non-returning, and we have found a tail
4663 call, then we have to trust that the called function won't return. */
4664 if (! really_return
)
4667 /* Otherwise, trap an attempted return by aborting. */
4669 ops
[1] = gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4670 assemble_external_libcall (ops
[1]);
4671 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
4675 if (current_function_calls_alloca
&& ! really_return
)
4678 for (reg
= 0; reg
<= 10; reg
++)
4679 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4682 if (live_regs
|| (regs_ever_live
[14] && ! lr_save_eliminated
))
4685 if (frame_pointer_needed
)
4690 if (lr_save_eliminated
|| ! regs_ever_live
[14])
4693 if (frame_pointer_needed
)
4695 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
4698 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
4700 for (reg
= 0; reg
<= 10; reg
++)
4701 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4703 strcat (instr
, "%|");
4704 strcat (instr
, reg_names
[reg
]);
4706 strcat (instr
, ", ");
4709 if (frame_pointer_needed
)
4711 strcat (instr
, "%|");
4712 strcat (instr
, reg_names
[11]);
4713 strcat (instr
, ", ");
4714 strcat (instr
, "%|");
4715 strcat (instr
, reg_names
[13]);
4716 strcat (instr
, ", ");
4717 strcat (instr
, "%|");
4718 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4722 strcat (instr
, "%|");
4723 strcat (instr
, really_return
? reg_names
[15] : reg_names
[14]);
4725 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
4726 output_asm_insn (instr
, &operand
);
4728 else if (really_return
)
4730 if (TARGET_THUMB_INTERWORK
)
4731 sprintf (instr
, "bx%%?%%%s\t%%|lr", reverse
? "D" : "d");
4733 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
4734 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
4735 output_asm_insn (instr
, &operand
);
4741 /* Return nonzero if optimizing and the current function is volatile.
4742 Such functions never return, and many memory cycles can be saved
4743 by not storing register values that will never be needed again.
4744 This optimization was added to speed up context switching in a
4745 kernel application. */
4748 arm_volatile_func ()
4750 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
4753 /* The amount of stack adjustment that happens here, in output_return and in
4754 output_epilogue must be exactly the same as was calculated during reload,
4755 or things will point to the wrong place. The only time we can safely
4756 ignore this constraint is when a function has no arguments on the stack,
4757 no stack frame requirement and no live registers execpt for `lr'. If we
4758 can guarantee that by making all function calls into tail calls and that
4759 lr is not clobbered in any other way, then there is no need to push lr
4763 output_func_prologue (f
, frame_size
)
4767 int reg
, live_regs_mask
= 0;
4769 int volatile_func
= (optimize
> 0
4770 && TREE_THIS_VOLATILE (current_function_decl
));
4772 /* Nonzero if we must stuff some register arguments onto the stack as if
4773 they were passed there. */
4774 int store_arg_regs
= 0;
4776 if (arm_ccfsm_state
|| arm_target_insn
)
4777 abort (); /* Sanity check */
4779 if (arm_naked_function_p (current_function_decl
))
4782 return_used_this_function
= 0;
4783 lr_save_eliminated
= 0;
4785 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
4786 ASM_COMMENT_START
, current_function_args_size
,
4787 current_function_pretend_args_size
, frame_size
);
4788 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
4789 ASM_COMMENT_START
, frame_pointer_needed
,
4790 current_function_anonymous_args
);
4793 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
4795 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
4798 for (reg
= 0; reg
<= 10; reg
++)
4799 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4800 live_regs_mask
|= (1 << reg
);
4802 if (frame_pointer_needed
)
4803 live_regs_mask
|= 0xD800;
4804 else if (regs_ever_live
[14])
4806 if (! current_function_args_size
4807 && ! function_really_clobbers_lr (get_insns ()))
4808 lr_save_eliminated
= 1;
4810 live_regs_mask
|= 0x4000;
4815 /* if a di mode load/store multiple is used, and the base register
4816 is r3, then r4 can become an ever live register without lr
4817 doing so, in this case we need to push lr as well, or we
4818 will fail to get a proper return. */
4820 live_regs_mask
|= 0x4000;
4821 lr_save_eliminated
= 0;
4825 if (lr_save_eliminated
)
4826 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
4829 #ifdef AOF_ASSEMBLER
4831 fprintf (f
, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX
, REGISTER_PREFIX
,
4832 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
4838 output_func_epilogue (f
, frame_size
)
4842 int reg
, live_regs_mask
= 0;
4843 /* If we need this then it will always be at least this much */
4844 int floats_offset
= 12;
4846 int volatile_func
= (optimize
> 0
4847 && TREE_THIS_VOLATILE (current_function_decl
));
4849 if (use_return_insn() && return_used_this_function
)
4851 if ((frame_size
+ current_function_outgoing_args_size
) != 0
4852 && !(frame_pointer_needed
|| TARGET_APCS
))
4857 /* Naked functions don't have epilogues. */
4858 if (arm_naked_function_p (current_function_decl
))
4861 /* A volatile function should never return. Call abort. */
4864 rtx op
= gen_rtx (SYMBOL_REF
, Pmode
, "abort");
4865 assemble_external_libcall (op
);
4866 output_asm_insn ("bl\t%a0", &op
);
4870 for (reg
= 0; reg
<= 10; reg
++)
4871 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4873 live_regs_mask
|= (1 << reg
);
4877 if (frame_pointer_needed
)
4879 if (arm_fpu_arch
== FP_SOFT2
)
4881 for (reg
= 23; reg
> 15; reg
--)
4882 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4884 floats_offset
+= 12;
4885 fprintf (f
, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX
,
4886 reg_names
[reg
], REGISTER_PREFIX
, floats_offset
);
4893 for (reg
= 23; reg
> 15; reg
--)
4895 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4897 floats_offset
+= 12;
4898 /* We can't unstack more than four registers at once */
4899 if (start_reg
- reg
== 3)
4901 fprintf (f
, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
4902 REGISTER_PREFIX
, reg_names
[reg
],
4903 REGISTER_PREFIX
, floats_offset
);
4904 start_reg
= reg
- 1;
4909 if (reg
!= start_reg
)
4910 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4911 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4912 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4914 start_reg
= reg
- 1;
4918 /* Just in case the last register checked also needs unstacking. */
4919 if (reg
!= start_reg
)
4920 fprintf (f
, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
4921 REGISTER_PREFIX
, reg_names
[reg
+ 1],
4922 start_reg
- reg
, REGISTER_PREFIX
, floats_offset
);
4925 if (TARGET_THUMB_INTERWORK
)
4927 live_regs_mask
|= 0x6800;
4928 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
, FALSE
);
4929 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
4933 live_regs_mask
|= 0xA800;
4934 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
4935 TARGET_APCS_32
? FALSE
: TRUE
);
4940 /* Restore stack pointer if necessary. */
4941 if (frame_size
+ current_function_outgoing_args_size
!= 0)
4943 operands
[0] = operands
[1] = stack_pointer_rtx
;
4944 operands
[2] = GEN_INT (frame_size
4945 + current_function_outgoing_args_size
);
4946 output_add_immediate (operands
);
4949 if (arm_fpu_arch
== FP_SOFT2
)
4951 for (reg
= 16; reg
< 24; reg
++)
4952 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4953 fprintf (f
, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX
,
4954 reg_names
[reg
], REGISTER_PREFIX
);
4960 for (reg
= 16; reg
< 24; reg
++)
4962 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
4964 if (reg
- start_reg
== 3)
4966 fprintf (f
, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
4967 REGISTER_PREFIX
, reg_names
[start_reg
],
4969 start_reg
= reg
+ 1;
4974 if (reg
!= start_reg
)
4975 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
4976 REGISTER_PREFIX
, reg_names
[start_reg
],
4977 reg
- start_reg
, REGISTER_PREFIX
);
4979 start_reg
= reg
+ 1;
4983 /* Just in case the last register checked also needs unstacking. */
4984 if (reg
!= start_reg
)
4985 fprintf (f
, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
4986 REGISTER_PREFIX
, reg_names
[start_reg
],
4987 reg
- start_reg
, REGISTER_PREFIX
);
4990 if (current_function_pretend_args_size
== 0 && regs_ever_live
[14])
4992 if (TARGET_THUMB_INTERWORK
)
4994 if (! lr_save_eliminated
)
4995 print_multi_reg(f
, "ldmfd\t%ssp!", live_regs_mask
| 0x4000,
4998 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5000 else if (lr_save_eliminated
)
5001 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5002 : "\tmovs\t%spc, %slr\n"),
5003 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5005 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
5006 TARGET_APCS_32
? FALSE
: TRUE
);
5010 if (live_regs_mask
|| regs_ever_live
[14])
5012 /* Restore the integer regs, and the return address into lr */
5013 if (! lr_save_eliminated
)
5014 live_regs_mask
|= 0x4000;
5016 if (live_regs_mask
!= 0)
5017 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
5020 if (current_function_pretend_args_size
)
5022 /* Unwind the pre-pushed regs */
5023 operands
[0] = operands
[1] = stack_pointer_rtx
;
5024 operands
[2] = gen_rtx (CONST_INT
, VOIDmode
,
5025 current_function_pretend_args_size
);
5026 output_add_immediate (operands
);
5028 /* And finally, go home */
5029 if (TARGET_THUMB_INTERWORK
)
5030 fprintf (f
, "\tbx\t%slr\n", REGISTER_PREFIX
);
5032 fprintf (f
, (TARGET_APCS_32
? "\tmov\t%spc, %slr\n"
5033 : "\tmovs\t%spc, %slr\n"),
5034 REGISTER_PREFIX
, REGISTER_PREFIX
, f
);
5040 current_function_anonymous_args
= 0;
5044 emit_multi_reg_push (mask
)
5051 for (i
= 0; i
< 16; i
++)
5052 if (mask
& (1 << i
))
5055 if (num_regs
== 0 || num_regs
> 16)
5058 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (num_regs
));
5060 for (i
= 0; i
< 16; i
++)
5062 if (mask
& (1 << i
))
5065 = gen_rtx (SET
, VOIDmode
, gen_rtx (MEM
, BLKmode
,
5066 gen_rtx (PRE_DEC
, BLKmode
,
5067 stack_pointer_rtx
)),
5068 gen_rtx (UNSPEC
, BLKmode
,
5069 gen_rtvec (1, gen_rtx (REG
, SImode
, i
)),
5075 for (j
= 1, i
++; j
< num_regs
; i
++)
5077 if (mask
& (1 << i
))
5080 = gen_rtx (USE
, VOIDmode
, gen_rtx (REG
, SImode
, i
));
5089 emit_sfm (base_reg
, count
)
5096 par
= gen_rtx (PARALLEL
, VOIDmode
, rtvec_alloc (count
));
5098 XVECEXP (par
, 0, 0) = gen_rtx (SET
, VOIDmode
,
5099 gen_rtx (MEM
, BLKmode
,
5100 gen_rtx (PRE_DEC
, BLKmode
,
5101 stack_pointer_rtx
)),
5102 gen_rtx (UNSPEC
, BLKmode
,
5103 gen_rtvec (1, gen_rtx (REG
, XFmode
,
5106 for (i
= 1; i
< count
; i
++)
5107 XVECEXP (par
, 0, i
) = gen_rtx (USE
, VOIDmode
,
5108 gen_rtx (REG
, XFmode
, base_reg
++));
5114 arm_expand_prologue ()
5117 rtx amount
= GEN_INT (-(get_frame_size ()
5118 + current_function_outgoing_args_size
));
5121 int live_regs_mask
= 0;
5122 int store_arg_regs
= 0;
5123 int volatile_func
= (optimize
> 0
5124 && TREE_THIS_VOLATILE (current_function_decl
));
5126 /* Naked functions don't have prologues. */
5127 if (arm_naked_function_p (current_function_decl
))
5130 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
5133 if (! volatile_func
)
5134 for (reg
= 0; reg
<= 10; reg
++)
5135 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5136 live_regs_mask
|= 1 << reg
;
5138 if (! volatile_func
&& regs_ever_live
[14])
5139 live_regs_mask
|= 0x4000;
5141 if (frame_pointer_needed
)
5143 live_regs_mask
|= 0xD800;
5144 emit_insn (gen_movsi (gen_rtx (REG
, SImode
, 12),
5145 stack_pointer_rtx
));
5148 if (current_function_pretend_args_size
)
5151 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
5154 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5155 GEN_INT (-current_function_pretend_args_size
)));
5160 /* If we have to push any regs, then we must push lr as well, or
5161 we won't get a proper return. */
5162 live_regs_mask
|= 0x4000;
5163 emit_multi_reg_push (live_regs_mask
);
5166 /* For now the integer regs are still pushed in output_func_epilogue (). */
5168 if (! volatile_func
)
5170 if (arm_fpu_arch
== FP_SOFT2
)
5172 for (reg
= 23; reg
> 15; reg
--)
5173 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5174 emit_insn (gen_rtx (SET
, VOIDmode
,
5175 gen_rtx (MEM
, XFmode
,
5176 gen_rtx (PRE_DEC
, XFmode
,
5177 stack_pointer_rtx
)),
5178 gen_rtx (REG
, XFmode
, reg
)));
5184 for (reg
= 23; reg
> 15; reg
--)
5186 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5188 if (start_reg
- reg
== 3)
5191 start_reg
= reg
- 1;
5196 if (start_reg
!= reg
)
5197 emit_sfm (reg
+ 1, start_reg
- reg
);
5198 start_reg
= reg
- 1;
5202 if (start_reg
!= reg
)
5203 emit_sfm (reg
+ 1, start_reg
- reg
);
5207 if (frame_pointer_needed
)
5208 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx (REG
, SImode
, 12),
5210 (-(4 + current_function_pretend_args_size
)))));
5212 if (amount
!= const0_rtx
)
5214 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
5215 emit_insn (gen_rtx (CLOBBER
, VOIDmode
,
5216 gen_rtx (MEM
, BLKmode
, stack_pointer_rtx
)));
5219 /* If we are profiling, make sure no instructions are scheduled before
5220 the call to mcount. */
5221 if (profile_flag
|| profile_block_flag
)
5222 emit_insn (gen_blockage ());
5226 /* If CODE is 'd', then the X is a condition operand and the instruction
5227 should only be executed if the condition is true.
5228 if CODE is 'D', then the X is a condition operand and the instruction
5229 should only be executed if the condition is false: however, if the mode
5230 of the comparison is CCFPEmode, then always execute the instruction -- we
5231 do this because in these circumstances !GE does not necessarily imply LT;
5232 in these cases the instruction pattern will take care to make sure that
5233 an instruction containing %d will follow, thereby undoing the effects of
5234 doing this instruction unconditionally.
5235 If CODE is 'N' then X is a floating point operand that must be negated
5237 If CODE is 'B' then output a bitwise inverted value of X (a const int).
5238 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
5241 arm_print_operand (stream
, x
, code
)
5249 fputs (ASM_COMMENT_START
, stream
);
5253 fputs (REGISTER_PREFIX
, stream
);
5257 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
5258 fputs (arm_condition_codes
[arm_current_cc
], stream
);
5264 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
5265 r
= REAL_VALUE_NEGATE (r
);
5266 fprintf (stream
, "%s", fp_const_from_val (&r
));
5271 if (GET_CODE (x
) == CONST_INT
)
5273 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5278 ARM_SIGN_EXTEND (~ INTVAL (x
)));
5282 output_addr_const (stream
, x
);
5287 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
5291 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
5297 char *shift
= shift_op (x
, &val
);
5301 fprintf (stream
, ", %s ", shift_op (x
, &val
));
5303 arm_print_operand (stream
, XEXP (x
, 1), 0);
5306 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
5319 fputs (REGISTER_PREFIX
, stream
);
5320 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
5326 fputs (REGISTER_PREFIX
, stream
);
5327 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
5331 fputs (REGISTER_PREFIX
, stream
);
5332 if (GET_CODE (XEXP (x
, 0)) == REG
)
5333 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
5335 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
5339 fprintf (stream
, "{%s%s-%s%s}", REGISTER_PREFIX
, reg_names
[REGNO (x
)],
5340 REGISTER_PREFIX
, reg_names
[REGNO (x
) - 1
5341 + ((GET_MODE_SIZE (GET_MODE (x
))
5342 + GET_MODE_SIZE (SImode
) - 1)
5343 / GET_MODE_SIZE (SImode
))]);
5348 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
5354 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
5355 (get_arm_condition_code (x
))],
5363 if (GET_CODE (x
) == REG
)
5365 fputs (REGISTER_PREFIX
, stream
);
5366 fputs (reg_names
[REGNO (x
)], stream
);
5368 else if (GET_CODE (x
) == MEM
)
5370 output_memory_reference_mode
= GET_MODE (x
);
5371 output_address (XEXP (x
, 0));
5373 else if (GET_CODE (x
) == CONST_DOUBLE
)
5374 fprintf (stream
, "#%s", fp_immediate_constant (x
));
5375 else if (GET_CODE (x
) == NEG
)
5376 abort (); /* This should never happen now. */
5379 fputc ('#', stream
);
5380 output_addr_const (stream
, x
);
5386 /* A finite state machine takes care of noticing whether or not instructions
5387 can be conditionally executed, and thus decrease execution time and code
5388 size by deleting branch instructions. The fsm is controlled by
5389 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
5391 /* The state of the fsm controlling condition codes are:
5392 0: normal, do nothing special
5393 1: make ASM_OUTPUT_OPCODE not output this instruction
5394 2: make ASM_OUTPUT_OPCODE not output this instruction
5395 3: make instructions conditional
5396 4: make instructions conditional
5398 State transitions (state->state by whom under condition):
5399 0 -> 1 final_prescan_insn if the `target' is a label
5400 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
5401 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
5402 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
5403 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
5404 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
5405 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
5406 (the target insn is arm_target_insn).
5408 If the jump clobbers the conditions then we use states 2 and 4.
5410 A similar thing can be done with conditional return insns.
5412 XXX In case the `target' is an unconditional branch, this conditionalising
5413 of the instructions always reduces code size, but not always execution
5414 time. But then, I want to reduce the code size to somewhere near what
5415 /bin/cc produces. */
5417 /* Returns the index of the ARM condition code string in
5418 `arm_condition_codes'. COMPARISON should be an rtx like
5419 `(eq (...) (...))'. */
5421 static enum arm_cond_code
5422 get_arm_condition_code (comparison
)
5425 enum machine_mode mode
= GET_MODE (XEXP (comparison
, 0));
5427 register enum rtx_code comp_code
= GET_CODE (comparison
);
5429 if (GET_MODE_CLASS (mode
) != MODE_CC
)
5430 mode
= SELECT_CC_MODE (comp_code
, XEXP (comparison
, 0),
5431 XEXP (comparison
, 1));
5435 case CC_DNEmode
: code
= ARM_NE
; goto dominance
;
5436 case CC_DEQmode
: code
= ARM_EQ
; goto dominance
;
5437 case CC_DGEmode
: code
= ARM_GE
; goto dominance
;
5438 case CC_DGTmode
: code
= ARM_GT
; goto dominance
;
5439 case CC_DLEmode
: code
= ARM_LE
; goto dominance
;
5440 case CC_DLTmode
: code
= ARM_LT
; goto dominance
;
5441 case CC_DGEUmode
: code
= ARM_CS
; goto dominance
;
5442 case CC_DGTUmode
: code
= ARM_HI
; goto dominance
;
5443 case CC_DLEUmode
: code
= ARM_LS
; goto dominance
;
5444 case CC_DLTUmode
: code
= ARM_CC
;
5447 if (comp_code
!= EQ
&& comp_code
!= NE
)
5450 if (comp_code
== EQ
)
5451 return ARM_INVERSE_CONDITION_CODE (code
);
5457 case NE
: return ARM_NE
;
5458 case EQ
: return ARM_EQ
;
5459 case GE
: return ARM_PL
;
5460 case LT
: return ARM_MI
;
5468 case NE
: return ARM_NE
;
5469 case EQ
: return ARM_EQ
;
5476 case GE
: return ARM_GE
;
5477 case GT
: return ARM_GT
;
5478 case LE
: return ARM_LS
;
5479 case LT
: return ARM_MI
;
5486 case NE
: return ARM_NE
;
5487 case EQ
: return ARM_EQ
;
5488 case GE
: return ARM_LE
;
5489 case GT
: return ARM_LT
;
5490 case LE
: return ARM_GE
;
5491 case LT
: return ARM_GT
;
5492 case GEU
: return ARM_LS
;
5493 case GTU
: return ARM_CC
;
5494 case LEU
: return ARM_CS
;
5495 case LTU
: return ARM_HI
;
5502 case LTU
: return ARM_CS
;
5503 case GEU
: return ARM_CC
;
5510 case NE
: return ARM_NE
;
5511 case EQ
: return ARM_EQ
;
5512 case GE
: return ARM_GE
;
5513 case GT
: return ARM_GT
;
5514 case LE
: return ARM_LE
;
5515 case LT
: return ARM_LT
;
5516 case GEU
: return ARM_CS
;
5517 case GTU
: return ARM_HI
;
5518 case LEU
: return ARM_LS
;
5519 case LTU
: return ARM_CC
;
5531 final_prescan_insn (insn
, opvec
, noperands
)
5536 /* BODY will hold the body of INSN. */
5537 register rtx body
= PATTERN (insn
);
5539 /* This will be 1 if trying to repeat the trick, and things need to be
5540 reversed if it appears to fail. */
5543 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
5544 taken are clobbered, even if the rtl suggests otherwise. It also
5545 means that we have to grub around within the jump expression to find
5546 out what the conditions are when the jump isn't taken. */
5547 int jump_clobbers
= 0;
5549 /* If we start with a return insn, we only succeed if we find another one. */
5550 int seeking_return
= 0;
5552 /* START_INSN will hold the insn from where we start looking. This is the
5553 first insn after the following code_label if REVERSE is true. */
5554 rtx start_insn
= insn
;
5556 /* If in state 4, check if the target branch is reached, in order to
5557 change back to state 0. */
5558 if (arm_ccfsm_state
== 4)
5560 if (insn
== arm_target_insn
)
5562 arm_target_insn
= NULL
;
5563 arm_ccfsm_state
= 0;
5568 /* If in state 3, it is possible to repeat the trick, if this insn is an
5569 unconditional branch to a label, and immediately following this branch
5570 is the previous target label which is only used once, and the label this
5571 branch jumps to is not too far off. */
5572 if (arm_ccfsm_state
== 3)
5574 if (simplejump_p (insn
))
5576 start_insn
= next_nonnote_insn (start_insn
);
5577 if (GET_CODE (start_insn
) == BARRIER
)
5579 /* XXX Isn't this always a barrier? */
5580 start_insn
= next_nonnote_insn (start_insn
);
5582 if (GET_CODE (start_insn
) == CODE_LABEL
5583 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5584 && LABEL_NUSES (start_insn
) == 1)
5589 else if (GET_CODE (body
) == RETURN
)
5591 start_insn
= next_nonnote_insn (start_insn
);
5592 if (GET_CODE (start_insn
) == BARRIER
)
5593 start_insn
= next_nonnote_insn (start_insn
);
5594 if (GET_CODE (start_insn
) == CODE_LABEL
5595 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
5596 && LABEL_NUSES (start_insn
) == 1)
5608 if (arm_ccfsm_state
!= 0 && !reverse
)
5610 if (GET_CODE (insn
) != JUMP_INSN
)
5613 /* This jump might be paralleled with a clobber of the condition codes
5614 the jump should always come first */
5615 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5616 body
= XVECEXP (body
, 0, 0);
5619 /* If this is a conditional return then we don't want to know */
5620 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5621 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
5622 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
5623 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
5628 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
5629 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
5632 int fail
= FALSE
, succeed
= FALSE
;
5633 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
5634 int then_not_else
= TRUE
;
5635 rtx this_insn
= start_insn
, label
= 0;
5637 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
5639 /* The code below is wrong for these, and I haven't time to
5640 fix it now. So we just do the safe thing and return. This
5641 whole function needs re-writing anyway. */
5646 /* Register the insn jumped to. */
5649 if (!seeking_return
)
5650 label
= XEXP (SET_SRC (body
), 0);
5652 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
5653 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
5654 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
5656 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
5657 then_not_else
= FALSE
;
5659 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
5661 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
5664 then_not_else
= FALSE
;
5669 /* See how many insns this branch skips, and what kind of insns. If all
5670 insns are okay, and the label or unconditional branch to the same
5671 label is not too far away, succeed. */
5672 for (insns_skipped
= 0;
5673 !fail
&& !succeed
&& insns_skipped
++ < MAX_INSNS_SKIPPED
;)
5677 this_insn
= next_nonnote_insn (this_insn
);
5681 scanbody
= PATTERN (this_insn
);
5683 switch (GET_CODE (this_insn
))
5686 /* Succeed if it is the target label, otherwise fail since
5687 control falls in from somewhere else. */
5688 if (this_insn
== label
)
5692 arm_ccfsm_state
= 2;
5693 this_insn
= next_nonnote_insn (this_insn
);
5696 arm_ccfsm_state
= 1;
5704 /* Succeed if the following insn is the target label.
5706 If return insns are used then the last insn in a function
5707 will be a barrier. */
5708 this_insn
= next_nonnote_insn (this_insn
);
5709 if (this_insn
&& this_insn
== label
)
5713 arm_ccfsm_state
= 2;
5714 this_insn
= next_nonnote_insn (this_insn
);
5717 arm_ccfsm_state
= 1;
5725 /* If using 32-bit addresses the cc is not preserved over
5729 /* Succeed if the following insn is the target label,
5730 or if the following two insns are a barrier and
5731 the target label. */
5732 this_insn
= next_nonnote_insn (this_insn
);
5733 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
5734 this_insn
= next_nonnote_insn (this_insn
);
5736 if (this_insn
&& this_insn
== label
5737 && insns_skipped
< MAX_INSNS_SKIPPED
)
5741 arm_ccfsm_state
= 2;
5742 this_insn
= next_nonnote_insn (this_insn
);
5745 arm_ccfsm_state
= 1;
5754 /* If this is an unconditional branch to the same label, succeed.
5755 If it is to another label, do nothing. If it is conditional,
5757 /* XXX Probably, the test for the SET and the PC are unnecessary. */
5759 if (GET_CODE (scanbody
) == SET
5760 && GET_CODE (SET_DEST (scanbody
)) == PC
)
5762 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
5763 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
5765 arm_ccfsm_state
= 2;
5768 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
5771 else if (GET_CODE (scanbody
) == RETURN
5774 arm_ccfsm_state
= 2;
5777 else if (GET_CODE (scanbody
) == PARALLEL
)
5779 switch (get_attr_conds (this_insn
))
5791 /* Instructions using or affecting the condition codes make it
5793 if ((GET_CODE (scanbody
) == SET
5794 || GET_CODE (scanbody
) == PARALLEL
)
5795 && get_attr_conds (this_insn
) != CONDS_NOCOND
)
5805 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
5806 arm_target_label
= CODE_LABEL_NUMBER (label
);
5807 else if (seeking_return
|| arm_ccfsm_state
== 2)
5809 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
5811 this_insn
= next_nonnote_insn (this_insn
);
5812 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
5813 || GET_CODE (this_insn
) == CODE_LABEL
))
5818 /* Oh, dear! we ran off the end.. give up */
5819 recog (PATTERN (insn
), insn
, NULL_PTR
);
5820 arm_ccfsm_state
= 0;
5821 arm_target_insn
= NULL
;
5824 arm_target_insn
= this_insn
;
5833 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
5835 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
5836 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5837 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
5838 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5842 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
5845 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
5849 if (reverse
|| then_not_else
)
5850 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
5852 /* restore recog_operand (getting the attributes of other insns can
5853 destroy this array, but final.c assumes that it remains intact
5854 across this call; since the insn has been recognized already we
5855 call recog direct). */
5856 recog (PATTERN (insn
), insn
, NULL_PTR
);
5860 #ifdef AOF_ASSEMBLER
5861 /* Special functions only needed when producing AOF syntax assembler. */
5863 rtx aof_pic_label
= NULL_RTX
;
5866 struct pic_chain
*next
;
5870 static struct pic_chain
*aof_pic_chain
= NULL
;
5876 struct pic_chain
**chainp
;
5879 if (aof_pic_label
== NULL_RTX
)
5881 /* This needs to persist throughout the compilation. */
5882 end_temporary_allocation ();
5883 aof_pic_label
= gen_rtx (SYMBOL_REF
, Pmode
, "x$adcons");
5884 resume_temporary_allocation ();
5887 for (offset
= 0, chainp
= &aof_pic_chain
; *chainp
;
5888 offset
+= 4, chainp
= &(*chainp
)->next
)
5889 if ((*chainp
)->symname
== XSTR (x
, 0))
5890 return plus_constant (aof_pic_label
, offset
);
5892 *chainp
= (struct pic_chain
*) xmalloc (sizeof (struct pic_chain
));
5893 (*chainp
)->next
= NULL
;
5894 (*chainp
)->symname
= XSTR (x
, 0);
5895 return plus_constant (aof_pic_label
, offset
);
5899 aof_dump_pic_table (f
)
5902 struct pic_chain
*chain
;
5904 if (aof_pic_chain
== NULL
)
5907 fprintf (f
, "\tAREA |%s$$adcons|, BASED %s%s\n",
5908 reg_names
[PIC_OFFSET_TABLE_REGNUM
], REGISTER_PREFIX
,
5909 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
5910 fputs ("|x$adcons|\n", f
);
5912 for (chain
= aof_pic_chain
; chain
; chain
= chain
->next
)
5914 fputs ("\tDCD\t", f
);
5915 assemble_name (f
, chain
->symname
);
5920 int arm_text_section_count
= 1;
5925 static char buf
[100];
5926 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
5927 arm_text_section_count
++);
5929 strcat (buf
, ", PIC, REENTRANT");
/* Number of data AREAs emitted so far.  */
static int arm_data_section_count = 1;

/* Return the assembler directive that opens a fresh data AREA.  The
   string lives in a static buffer overwritten on each call.  */

char *
aof_data_section ()
{
  static char area[100];

  sprintf (area, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return area;
}
5943 /* The AOF assembler is religiously strict about declarations of
5944 imported and exported symbols, so that it is impossible to declare
5945 a function as imported near the beginning of the file, and then to
5946 export it later on. It is, however, possible to delay the decision
5947 until all the functions in the file have been compiled. To get
5948 around this, we maintain a list of the imports and exports, and
5949 delete from it any that are subsequently defined. At the end of
5950 compilation we spit the remainder of the list out before the END
5955 struct import
*next
;
5959 static struct import
*imports_list
= NULL
;
5962 aof_add_import (name
)
5967 for (new = imports_list
; new; new = new->next
)
5968 if (new->name
== name
)
5971 new = (struct import
*) xmalloc (sizeof (struct import
));
5972 new->next
= imports_list
;
5978 aof_delete_import (name
)
5981 struct import
**old
;
5983 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
5985 if ((*old
)->name
== name
)
5987 *old
= (*old
)->next
;
5993 int arm_main_function
= 0;
5996 aof_dump_imports (f
)
5999 /* The AOF assembler needs this to cause the startup code to be extracted
6000 from the library. Brining in __main causes the whole thing to work
6002 if (arm_main_function
)
6005 fputs ("\tIMPORT __main\n", f
);
6006 fputs ("\tDCD __main\n", f
);
6009 /* Now dump the remaining imports. */
6010 while (imports_list
)
6012 fprintf (f
, "\tIMPORT\t");
6013 assemble_name (f
, imports_list
->name
);
6015 imports_list
= imports_list
->next
;
6018 #endif /* AOF_ASSEMBLER */