1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7 This file is part of GNU CC.
9 GNU CC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GNU CC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GNU CC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
28 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
34 #include "insn-attr.h"
42 /* The maximum number of insns skipped which will be conditionalised if
44 static int max_insns_skipped
= 5;
46 extern FILE * asm_out_file
;
47 /* Some function declarations. */
49 static HOST_WIDE_INT int_log2
PROTO ((HOST_WIDE_INT
));
50 static char * output_multi_immediate
PROTO ((rtx
*, char *, char *, int,
52 static int arm_gen_constant
PROTO ((enum rtx_code
, enum machine_mode
,
53 HOST_WIDE_INT
, rtx
, rtx
, int, int));
54 static int arm_naked_function_p
PROTO ((tree
));
55 static void init_fpa_table
PROTO ((void));
56 static enum machine_mode select_dominance_cc_mode
PROTO ((rtx
, rtx
,
58 static HOST_WIDE_INT add_constant
PROTO ((rtx
, enum machine_mode
, int *));
59 static void dump_table
PROTO ((rtx
));
60 static int fixit
PROTO ((rtx
, enum machine_mode
, int));
61 static rtx find_barrier
PROTO ((rtx
, int));
62 static int broken_move
PROTO ((rtx
));
63 static char * fp_const_from_val
PROTO ((REAL_VALUE_TYPE
*));
64 static int eliminate_lr2ip
PROTO ((rtx
*));
65 static char * shift_op
PROTO ((rtx
, HOST_WIDE_INT
*));
66 static int pattern_really_clobbers_lr
PROTO ((rtx
));
67 static int function_really_clobbers_lr
PROTO ((rtx
));
68 static void emit_multi_reg_push
PROTO ((int));
69 static void emit_sfm
PROTO ((int, int));
70 static enum arm_cond_code get_arm_condition_code
PROTO ((rtx
));
71 static int const_ok_for_op
RTX_CODE_PROTO ((Hint
, Rcode
));
73 /* True if we are currently building a constant table. */
74 int making_const_table
;
76 /* Define the information needed to generate branch insns. This is
77 stored from the compare operation. */
78 rtx arm_compare_op0
, arm_compare_op1
;
80 /* What type of floating point are we tuning for? */
81 enum floating_point_type arm_fpu
;
83 /* What type of floating point instructions are available? */
84 enum floating_point_type arm_fpu_arch
;
86 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
87 enum prog_mode_type arm_prgmode
;
89 /* Set by the -mfp=... option */
90 const char * target_fp_name
= NULL
;
92 /* Used to parse -mstructure_size_boundary command line option. */
93 const char * structure_size_string
= NULL
;
94 int arm_structure_size_boundary
= 32; /* Used to be 8 */
96 /* Bit values used to identify processor capabilities. */
97 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
98 #define FL_FAST_MULT (1 << 1) /* Fast multiply */
99 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
100 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
101 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
102 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
103 #define FL_THUMB (1 << 6) /* Thumb aware */
104 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
105 #define FL_STRONG (1 << 8) /* StrongARM */
107 /* The bits in this mask specify which instructions we are allowed to generate. */
108 static int insn_flags
= 0;
109 /* The bits in this mask specify which instruction scheduling options should
110 be used. Note - there is an overlap with the FL_FAST_MULT. For some
111 hardware we want to be able to generate the multiply instructions, but to
112 tune as if they were not present in the architecture. */
113 static int tune_flags
= 0;
115 /* The following are used in the arm.md file as equivalents to bits
116 in the above two flag variables. */
118 /* Nonzero if this is an "M" variant of the processor. */
119 int arm_fast_multiply
= 0;
121 /* Nonzero if this chip supports the ARM Architecture 4 extensions */
124 /* Nonzero if this chip supports the ARM Architecture 5 extensions */
127 /* Nonzero if this chip can benefit from load scheduling. */
128 int arm_ld_sched
= 0;
130 /* Nonzero if this chip is a StrongARM. */
131 int arm_is_strong
= 0;
133 /* Nonzero if this chip is an ARM6 or an ARM7. */
134 int arm_is_6_or_7
= 0;
136 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
137 must report the mode of the memory reference from PRINT_OPERAND to
138 PRINT_OPERAND_ADDRESS. */
139 enum machine_mode output_memory_reference_mode
;
141 /* Nonzero if the prologue must setup `fp'. */
142 int current_function_anonymous_args
;
144 /* The register number to be used for the PIC offset register. */
145 int arm_pic_register
= 9;
147 /* Set to one if we think that lr is only saved because of subroutine calls,
148 but all of these can be `put after' return insns */
149 int lr_save_eliminated
;
151 /* Set to 1 when a return insn is output, this means that the epilogue
153 static int return_used_this_function
;
155 /* Set to 1 after arm_reorg has started. Reset to start at the start of
156 the next function. */
157 static int after_arm_reorg
= 0;
159 /* The maximum number of insns to be used when loading a constant. */
160 static int arm_constant_limit
= 3;
162 /* For an explanation of these variables, see final_prescan_insn below. */
164 enum arm_cond_code arm_current_cc
;
166 int arm_target_label
;
168 /* The condition codes of the ARM, and the inverse function. */
169 char * arm_condition_codes
[] =
171 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
172 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
175 static enum arm_cond_code
get_arm_condition_code ();
177 #define streq(string1, string2) (strcmp (string1, string2) == 0)
179 /* Initialization code */
187 /* Not all of these give usefully different compilation alternatives,
188 but there is no simple way of generalizing them. */
189 static struct processors all_cores
[] =
193 {"arm2", FL_CO_PROC
| FL_MODE26
},
194 {"arm250", FL_CO_PROC
| FL_MODE26
},
195 {"arm3", FL_CO_PROC
| FL_MODE26
},
196 {"arm6", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
197 {"arm60", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
198 {"arm600", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
199 {"arm610", FL_MODE26
| FL_MODE32
},
200 {"arm620", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
201 {"arm7", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
202 {"arm7m", FL_CO_PROC
| FL_MODE26
| FL_MODE32
| FL_FAST_MULT
}, /* arm7m doesn't exist on its own, */
203 {"arm7d", FL_CO_PROC
| FL_MODE26
| FL_MODE32
}, /* but only with D, (and I), */
204 {"arm7dm", FL_CO_PROC
| FL_MODE26
| FL_MODE32
| FL_FAST_MULT
}, /* but those don't alter the code, */
205 {"arm7di", FL_CO_PROC
| FL_MODE26
| FL_MODE32
}, /* so arm7m is sometimes used. */
206 {"arm7dmi", FL_CO_PROC
| FL_MODE26
| FL_MODE32
| FL_FAST_MULT
},
207 {"arm70", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
208 {"arm700", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
209 {"arm700i", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
210 {"arm710", FL_MODE26
| FL_MODE32
},
211 {"arm710c", FL_MODE26
| FL_MODE32
},
212 {"arm7100", FL_MODE26
| FL_MODE32
},
213 {"arm7500", FL_MODE26
| FL_MODE32
},
214 {"arm7500fe", FL_CO_PROC
| FL_MODE26
| FL_MODE32
}, /* Doesn't really have an external co-proc, but does have embedded fpu. */
215 {"arm7tdmi", FL_CO_PROC
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_THUMB
},
216 {"arm8", FL_MODE26
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_LDSCHED
},
217 {"arm810", FL_MODE26
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_LDSCHED
},
218 {"arm9", FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_THUMB
| FL_LDSCHED
},
219 {"arm920", FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_LDSCHED
},
220 {"arm920t", FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_THUMB
| FL_LDSCHED
},
221 {"arm9tdmi", FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_THUMB
| FL_LDSCHED
},
222 {"strongarm", FL_MODE26
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_LDSCHED
| FL_STRONG
},
223 {"strongarm110", FL_MODE26
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_LDSCHED
| FL_STRONG
},
224 {"strongarm1100", FL_MODE26
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_LDSCHED
| FL_STRONG
},
229 static struct processors all_architectures
[] =
231 /* ARM Architectures */
233 { "armv2", FL_CO_PROC
| FL_MODE26
},
234 { "armv2a", FL_CO_PROC
| FL_MODE26
},
235 { "armv3", FL_CO_PROC
| FL_MODE26
| FL_MODE32
},
236 { "armv3m", FL_CO_PROC
| FL_MODE26
| FL_MODE32
| FL_FAST_MULT
},
237 { "armv4", FL_CO_PROC
| FL_MODE26
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
},
238 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
239 implementations that support it, so we will leave it out for now. */
240 { "armv4t", FL_CO_PROC
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_THUMB
},
241 { "armv5", FL_CO_PROC
| FL_MODE32
| FL_FAST_MULT
| FL_ARCH4
| FL_THUMB
| FL_ARCH5
},
245 /* This is a magic structure.  The 'string' field is magically filled in
246 with a pointer to the value specified by the user on the command line
247 assuming that the user has specified such a value. */
249 struct arm_cpu_select arm_select
[] =
251 /* string name processors */
252 { NULL
, "-mcpu=", all_cores
},
253 { NULL
, "-march=", all_architectures
},
254 { NULL
, "-mtune=", all_cores
}
257 /* Return the number of bits set in `value'.  */
262 unsigned int count
= 0;
266 value
&= ~(value
& - value
);
273 /* Fix up any incompatible options that the user has specified.
274 This has now turned into a maze. */
276 arm_override_options ()
280 /* Set up the flags based on the cpu/architecture selected by the user. */
281 for (i
= sizeof (arm_select
) / sizeof (arm_select
[0]); i
--;)
283 struct arm_cpu_select
* ptr
= arm_select
+ i
;
285 if (ptr
->string
!= NULL
&& ptr
->string
[0] != '\0')
287 const struct processors
* sel
;
289 for (sel
= ptr
->processors
; sel
->name
!= NULL
; sel
++)
290 if (streq (ptr
->string
, sel
->name
))
293 tune_flags
= sel
->flags
;
296 /* If we have been given an architecture and a processor
297 make sure that they are compatible. We only generate
298 a warning though, and we prefer the CPU over the
300 if (insn_flags
!= 0 && (insn_flags
^ sel
->flags
))
301 warning ("switch -mcpu=%s conflicts with -march= switch",
304 insn_flags
= sel
->flags
;
310 if (sel
->name
== NULL
)
311 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
315 /* If the user did not specify a processor, choose one for them. */
318 struct processors
* sel
;
320 static struct cpu_default
327 { TARGET_CPU_arm2
, "arm2" },
328 { TARGET_CPU_arm6
, "arm6" },
329 { TARGET_CPU_arm610
, "arm610" },
330 { TARGET_CPU_arm710
, "arm710" },
331 { TARGET_CPU_arm7m
, "arm7m" },
332 { TARGET_CPU_arm7500fe
, "arm7500fe" },
333 { TARGET_CPU_arm7tdmi
, "arm7tdmi" },
334 { TARGET_CPU_arm8
, "arm8" },
335 { TARGET_CPU_arm810
, "arm810" },
336 { TARGET_CPU_arm9
, "arm9" },
337 { TARGET_CPU_strongarm
, "strongarm" },
338 { TARGET_CPU_generic
, "arm" },
341 struct cpu_default
* def
;
343 /* Find the default. */
344 for (def
= cpu_defaults
; def
->name
; def
++)
345 if (def
->cpu
== TARGET_CPU_DEFAULT
)
348 /* Make sure we found the default CPU. */
349 if (def
->name
== NULL
)
352 /* Find the default CPU's flags. */
353 for (sel
= all_cores
; sel
->name
!= NULL
; sel
++)
354 if (streq (def
->name
, sel
->name
))
357 if (sel
->name
== NULL
)
360 insn_flags
= sel
->flags
;
362 /* Now check to see if the user has specified some command line
363 switch that require certain abilities from the cpu. */
366 if (TARGET_INTERWORK
)
368 sought
|= (FL_THUMB
| FL_MODE32
);
370 /* Force apcs-32 to be used for interworking. */
371 target_flags
|= ARM_FLAG_APCS_32
;
373 /* There are no ARM processors that support both APCS-26 and
374 interworking. Therefore we force FL_MODE26 to be removed
375 from insn_flags here (if it was set), so that the search
376 below will always be able to find a compatible processor. */
377 insn_flags
&= ~ FL_MODE26
;
380 if (! TARGET_APCS_32
)
383 if (sought
!= 0 && ((sought
& insn_flags
) != sought
))
385 /* Try to locate a CPU type that supports all of the abilities
386 of the default CPU, plus the extra abilities requested by
388 for (sel
= all_cores
; sel
->name
!= NULL
; sel
++)
389 if ((sel
->flags
& sought
) == (sought
| insn_flags
))
392 if (sel
->name
== NULL
)
394 unsigned int current_bit_count
= 0;
395 struct processors
* best_fit
= NULL
;
397 /* Ideally we would like to issue an error message here
398 saying that it was not possible to find a CPU compatible
399 with the default CPU, but which also supports the command
400 line options specified by the programmer, and so they
401 ought to use the -mcpu=<name> command line option to
402 override the default CPU type.
404 Unfortunately this does not work with multilibing. We
405 need to be able to support multilibs for -mapcs-26 and for
406 -mthumb-interwork and there is no CPU that can support both
407 options. Instead if we cannot find a cpu that has both the
408 characteristics of the default cpu and the given command line
409 options we scan the array again looking for a best match. */
410 for (sel
= all_cores
; sel
->name
!= NULL
; sel
++)
411 if ((sel
->flags
& sought
) == sought
)
415 count
= bit_count (sel
->flags
& insn_flags
);
417 if (count
>= current_bit_count
)
420 current_bit_count
= count
;
424 if (best_fit
== NULL
)
430 insn_flags
= sel
->flags
;
434 /* If tuning has not been specified, tune for whichever processor or
435 architecture has been selected. */
437 tune_flags
= insn_flags
;
439 /* Make sure that the processor choice does not conflict with any of the
440 other command line choices. */
441 if (TARGET_APCS_32
&& !(insn_flags
& FL_MODE32
))
443 /* If APCS-32 was not the default then it must have been set by the
444 user, so issue a warning message. If the user has specified
445 "-mapcs-32 -mcpu=arm2" then we lose here. */
446 if ((TARGET_DEFAULT
& ARM_FLAG_APCS_32
) == 0)
447 warning ("target CPU does not support APCS-32" );
448 target_flags
&= ~ ARM_FLAG_APCS_32
;
450 else if (! TARGET_APCS_32
&& !(insn_flags
& FL_MODE26
))
452 warning ("target CPU does not support APCS-26" );
453 target_flags
|= ARM_FLAG_APCS_32
;
456 if (TARGET_INTERWORK
&& !(insn_flags
& FL_THUMB
))
458 warning ("target CPU does not support interworking" );
459 target_flags
&= ~ARM_FLAG_INTERWORK
;
462 /* If interworking is enabled then APCS-32 must be selected as well. */
463 if (TARGET_INTERWORK
)
465 if (! TARGET_APCS_32
)
466 warning ("interworking forces APCS-32 to be used" );
467 target_flags
|= ARM_FLAG_APCS_32
;
470 if (TARGET_APCS_STACK
&& ! TARGET_APCS
)
472 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
473 target_flags
|= ARM_FLAG_APCS_FRAME
;
476 if (TARGET_POKE_FUNCTION_NAME
)
477 target_flags
|= ARM_FLAG_APCS_FRAME
;
479 if (TARGET_APCS_REENT
&& flag_pic
)
480 fatal ("-fpic and -mapcs-reent are incompatible");
482 if (TARGET_APCS_REENT
)
483 warning ("APCS reentrant code not supported. Ignored");
485 if (write_symbols
!= NO_DEBUG
&& flag_omit_frame_pointer
)
486 warning ("-g with -fomit-frame-pointer may not give sensible debugging");
488 /* If stack checking is disabled, we can use r10 as the PIC register,
489 which keeps r9 available. */
490 if (flag_pic
&& ! TARGET_APCS_STACK
)
491 arm_pic_register
= 10;
493 if (TARGET_APCS_FLOAT
)
494 warning ("Passing floating point arguments in fp regs not yet supported");
496 /* Initialise boolean versions of the flags, for use in the arm.md file. */
497 arm_fast_multiply
= insn_flags
& FL_FAST_MULT
;
498 arm_arch4
= insn_flags
& FL_ARCH4
;
499 arm_arch5
= insn_flags
& FL_ARCH5
;
501 arm_ld_sched
= tune_flags
& FL_LDSCHED
;
502 arm_is_strong
= tune_flags
& FL_STRONG
;
503 arm_is_6_or_7
= ((tune_flags
& (FL_MODE26
| FL_MODE32
))
504 && !(tune_flags
& FL_ARCH4
));
506 /* Default value for floating point code... if no co-processor
507 bus, then schedule for emulated floating point. Otherwise,
508 assume the user has an FPA.
509 Note: this does not prevent use of floating point instructions,
510 -msoft-float does that. */
511 arm_fpu
= (tune_flags
& FL_CO_PROC
) ? FP_HARD
: FP_SOFT3
;
515 if (streq (target_fp_name
, "2"))
516 arm_fpu_arch
= FP_SOFT2
;
517 else if (streq (target_fp_name
, "3"))
518 arm_fpu_arch
= FP_SOFT3
;
520 fatal ("Invalid floating point emulation option: -mfpe-%s",
524 arm_fpu_arch
= FP_DEFAULT
;
526 if (TARGET_FPE
&& arm_fpu
!= FP_HARD
)
529 /* For arm2/3 there is no need to do any scheduling if there is only
530 a floating point emulator, or we are doing software floating-point. */
531 if ((TARGET_SOFT_FLOAT
|| arm_fpu
!= FP_HARD
) && (tune_flags
& FL_MODE32
) == 0)
532 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
534 arm_prog_mode
= TARGET_APCS_32
? PROG_MODE_PROG32
: PROG_MODE_PROG26
;
536 if (structure_size_string
!= NULL
)
538 int size
= strtol (structure_size_string
, NULL
, 0);
540 if (size
== 8 || size
== 32)
541 arm_structure_size_boundary
= size
;
543 warning ("Structure size boundary can only be set to 8 or 32");
546 /* If optimizing for space, don't synthesize constants.
547 For processors with load scheduling, it never costs more than 2 cycles
548 to load a constant, and the load scheduler may well reduce that to 1. */
549 if (optimize_size
|| (tune_flags
& FL_LDSCHED
))
550 arm_constant_limit
= 1;
552 /* If optimizing for size, bump the number of instructions that we
553 are prepared to conditionally execute (even on a StrongARM).
554 Otherwise for the StrongARM, which has early execution of branches,
555 a sequence that is worth skipping is shorter. */
557 max_insns_skipped
= 6;
558 else if (arm_is_strong
)
559 max_insns_skipped
= 3;
562 /* Return 1 if it is possible to return using a single instruction */
565 use_return_insn (iscond
)
570 if (!reload_completed
571 || current_function_pretend_args_size
572 || current_function_anonymous_args
573 || ((get_frame_size () + current_function_outgoing_args_size
!= 0)
574 && !(TARGET_APCS
&& frame_pointer_needed
)))
577 /* Can't be done if interworking with Thumb, and any registers have been
578 stacked. Similarly, on StrongARM, conditional returns are expensive
579 if they aren't taken and registers have been stacked. */
580 if (iscond
&& arm_is_strong
&& frame_pointer_needed
)
582 if ((iscond
&& arm_is_strong
)
585 for (regno
= 0; regno
< 16; regno
++)
586 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
589 if (flag_pic
&& regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
])
593 /* Can't be done if any of the FPU regs are pushed, since this also
595 for (regno
= 16; regno
< 24; regno
++)
596 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
599 /* If a function is naked, don't use the "return" insn. */
600 if (arm_naked_function_p (current_function_decl
))
606 /* Return TRUE if int I is a valid immediate ARM constant. */
612 unsigned HOST_WIDE_INT mask
= ~(unsigned HOST_WIDE_INT
)0xFF;
614 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
615 be all zero, or all one. */
616 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff) != 0
617 && ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff)
618 != ((~(unsigned HOST_WIDE_INT
) 0)
619 & ~(unsigned HOST_WIDE_INT
) 0xffffffff)))
622 /* Fast return for 0 and powers of 2 */
623 if ((i
& (i
- 1)) == 0)
628 if ((i
& mask
& (unsigned HOST_WIDE_INT
) 0xffffffff) == 0)
631 (mask
<< 2) | ((mask
& (unsigned HOST_WIDE_INT
) 0xffffffff)
632 >> (32 - 2)) | ~((unsigned HOST_WIDE_INT
) 0xffffffff);
633 } while (mask
!= ~(unsigned HOST_WIDE_INT
) 0xFF);
638 /* Return true if I is a valid constant for the operation CODE. */
640 const_ok_for_op (i
, code
)
644 if (const_ok_for_arm (i
))
650 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
652 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
658 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
665 /* Emit a sequence of insns to handle a large constant.
666 CODE is the code of the operation required, it can be any of SET, PLUS,
667 IOR, AND, XOR, MINUS;
668 MODE is the mode in which the operation is being performed;
669 VAL is the integer to operate on;
670 SOURCE is the other operand (a register, or a null-pointer for SET);
671 SUBTARGETS means it is safe to create scratch registers if that will
672 either produce a simpler sequence, or we will want to cse the values.
673 Return value is the number of insns emitted. */
676 arm_split_constant (code
, mode
, val
, target
, source
, subtargets
)
678 enum machine_mode mode
;
684 if (subtargets
|| code
== SET
685 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
686 && REGNO (target
) != REGNO (source
)))
688 /* After arm_reorg has been called, we can't fix up expensive
689 constants by pushing them into memory so we must synthesise
690 them in-line, regardless of the cost. This is only likely to
691 be more costly on chips that have load delay slots and we are
692 compiling without running the scheduler (so no splitting
693 occurred before the final instruction emission).
695 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
697 if (! after_arm_reorg
698 && (arm_gen_constant (code
, mode
, val
, target
, source
, 1, 0)
699 > arm_constant_limit
+ (code
!= SET
)))
703 /* Currently SET is the only monadic value for CODE, all
704 the rest are diadic. */
705 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (val
)));
710 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
712 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (val
)));
713 /* For MINUS, the value is subtracted from, since we never
714 have subtraction of a constant. */
716 emit_insn (gen_rtx_SET (VOIDmode
, target
,
717 gen_rtx (code
, mode
, temp
, source
)));
719 emit_insn (gen_rtx_SET (VOIDmode
, target
,
720 gen_rtx (code
, mode
, source
, temp
)));
726 return arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, 1);
729 /* As above, but extra parameter GENERATE which, if clear, suppresses
732 arm_gen_constant (code
, mode
, val
, target
, source
, subtargets
, generate
)
734 enum machine_mode mode
;
743 int can_negate_initial
= 0;
746 int num_bits_set
= 0;
747 int set_sign_bit_copies
= 0;
748 int clear_sign_bit_copies
= 0;
749 int clear_zero_bit_copies
= 0;
750 int set_zero_bit_copies
= 0;
752 unsigned HOST_WIDE_INT temp1
, temp2
;
753 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
755 /* find out which operations are safe for a given CODE. Also do a quick
756 check for degenerate cases; these can occur when DImode operations
768 can_negate_initial
= 1;
772 if (remainder
== 0xffffffff)
775 emit_insn (gen_rtx_SET (VOIDmode
, target
,
776 GEN_INT (ARM_SIGN_EXTEND (val
))));
781 if (reload_completed
&& rtx_equal_p (target
, source
))
784 emit_insn (gen_rtx_SET (VOIDmode
, target
, source
));
793 emit_insn (gen_rtx_SET (VOIDmode
, target
, const0_rtx
));
796 if (remainder
== 0xffffffff)
798 if (reload_completed
&& rtx_equal_p (target
, source
))
801 emit_insn (gen_rtx_SET (VOIDmode
, target
, source
));
810 if (reload_completed
&& rtx_equal_p (target
, source
))
813 emit_insn (gen_rtx_SET (VOIDmode
, target
, source
));
816 if (remainder
== 0xffffffff)
819 emit_insn (gen_rtx_SET (VOIDmode
, target
,
820 gen_rtx_NOT (mode
, source
)));
824 /* We don't know how to handle this yet below. */
828 /* We treat MINUS as (val - source), since (source - val) is always
829 passed as (source + (-val)). */
833 emit_insn (gen_rtx_SET (VOIDmode
, target
,
834 gen_rtx_NEG (mode
, source
)));
837 if (const_ok_for_arm (val
))
840 emit_insn (gen_rtx_SET (VOIDmode
, target
,
841 gen_rtx_MINUS (mode
, GEN_INT (val
),
853 /* If we can do it in one insn get out quickly */
854 if (const_ok_for_arm (val
)
855 || (can_negate_initial
&& const_ok_for_arm (-val
))
856 || (can_invert
&& const_ok_for_arm (~val
)))
859 emit_insn (gen_rtx_SET (VOIDmode
, target
,
860 (source
? gen_rtx (code
, mode
, source
,
867 /* Calculate a few attributes that may be useful for specific
870 for (i
= 31; i
>= 0; i
--)
872 if ((remainder
& (1 << i
)) == 0)
873 clear_sign_bit_copies
++;
878 for (i
= 31; i
>= 0; i
--)
880 if ((remainder
& (1 << i
)) != 0)
881 set_sign_bit_copies
++;
886 for (i
= 0; i
<= 31; i
++)
888 if ((remainder
& (1 << i
)) == 0)
889 clear_zero_bit_copies
++;
894 for (i
= 0; i
<= 31; i
++)
896 if ((remainder
& (1 << i
)) != 0)
897 set_zero_bit_copies
++;
905 /* See if we can do this by sign_extending a constant that is known
906 to be negative. This is a good way of doing it, since the shift
907 may well merge into a subsequent insn. */
908 if (set_sign_bit_copies
> 1)
911 (temp1
= ARM_SIGN_EXTEND (remainder
912 << (set_sign_bit_copies
- 1))))
916 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
917 emit_insn (gen_rtx_SET (VOIDmode
, new_src
,
919 emit_insn (gen_ashrsi3 (target
, new_src
,
920 GEN_INT (set_sign_bit_copies
- 1)));
924 /* For an inverted constant, we will need to set the low bits,
925 these will be shifted out of harm's way. */
926 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
927 if (const_ok_for_arm (~temp1
))
931 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
932 emit_insn (gen_rtx_SET (VOIDmode
, new_src
,
934 emit_insn (gen_ashrsi3 (target
, new_src
,
935 GEN_INT (set_sign_bit_copies
- 1)));
941 /* See if we can generate this by setting the bottom (or the top)
942 16 bits, and then shifting these into the other half of the
943 word. We only look for the simplest cases, to do more would cost
944 too much. Be careful, however, not to generate this when the
945 alternative would take fewer insns. */
946 if (val
& 0xffff0000)
948 temp1
= remainder
& 0xffff0000;
949 temp2
= remainder
& 0x0000ffff;
951 /* Overlaps outside this range are best done using other methods. */
952 for (i
= 9; i
< 24; i
++)
954 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
955 && ! const_ok_for_arm (temp2
))
957 rtx new_src
= (subtargets
958 ? (generate
? gen_reg_rtx (mode
) : NULL_RTX
)
960 insns
= arm_gen_constant (code
, mode
, temp2
, new_src
,
961 source
, subtargets
, generate
);
964 emit_insn (gen_rtx_SET
967 gen_rtx_ASHIFT (mode
, source
,
974 /* Don't duplicate cases already considered. */
975 for (i
= 17; i
< 24; i
++)
977 if (((temp1
| (temp1
>> i
)) == remainder
)
978 && ! const_ok_for_arm (temp1
))
980 rtx new_src
= (subtargets
981 ? (generate
? gen_reg_rtx (mode
) : NULL_RTX
)
983 insns
= arm_gen_constant (code
, mode
, temp1
, new_src
,
984 source
, subtargets
, generate
);
988 (gen_rtx_SET (VOIDmode
, target
,
991 gen_rtx_LSHIFTRT (mode
, source
,
1002 /* If we have IOR or XOR, and the constant can be loaded in a
1003 single instruction, and we can find a temporary to put it in,
1004 then this can be done in two instructions instead of 3-4. */
1006 /* TARGET can't be NULL if SUBTARGETS is 0 */
1007 || (reload_completed
&& ! reg_mentioned_p (target
, source
)))
1009 if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val
)))
1013 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
1015 emit_insn (gen_rtx_SET (VOIDmode
, sub
, GEN_INT (val
)));
1016 emit_insn (gen_rtx_SET (VOIDmode
, target
,
1017 gen_rtx (code
, mode
, source
, sub
)));
1026 if (set_sign_bit_copies
> 8
1027 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
1031 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
1032 rtx shift
= GEN_INT (set_sign_bit_copies
);
1034 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
1036 gen_rtx_ASHIFT (mode
,
1039 emit_insn (gen_rtx_SET (VOIDmode
, target
,
1041 gen_rtx_LSHIFTRT (mode
, sub
,
1047 if (set_zero_bit_copies
> 8
1048 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
1052 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
1053 rtx shift
= GEN_INT (set_zero_bit_copies
);
1055 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
1057 gen_rtx_LSHIFTRT (mode
,
1060 emit_insn (gen_rtx_SET (VOIDmode
, target
,
1062 gen_rtx_ASHIFT (mode
, sub
,
1068 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~ val
)))
1072 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
1073 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
1074 gen_rtx_NOT (mode
, source
)));
1077 sub
= gen_reg_rtx (mode
);
1078 emit_insn (gen_rtx_SET (VOIDmode
, sub
,
1079 gen_rtx_AND (mode
, source
,
1081 emit_insn (gen_rtx_SET (VOIDmode
, target
,
1082 gen_rtx_NOT (mode
, sub
)));
1089 /* See if two shifts will do 2 or more insn's worth of work. */
1090 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
1092 HOST_WIDE_INT shift_mask
= ((0xffffffff
1093 << (32 - clear_sign_bit_copies
))
1096 if ((remainder
| shift_mask
) != 0xffffffff)
1100 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1101 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
1102 new_src
, source
, subtargets
, 1);
1107 rtx targ
= subtargets
? NULL_RTX
: target
;
1108 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
1109 targ
, source
, subtargets
, 0);
1115 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1116 rtx shift
= GEN_INT (clear_sign_bit_copies
);
1118 emit_insn (gen_ashlsi3 (new_src
, source
, shift
));
1119 emit_insn (gen_lshrsi3 (target
, new_src
, shift
));
1125 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
1127 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
1129 if ((remainder
| shift_mask
) != 0xffffffff)
1133 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1135 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
1136 new_src
, source
, subtargets
, 1);
1141 rtx targ
= subtargets
? NULL_RTX
: target
;
1143 insns
= arm_gen_constant (AND
, mode
, remainder
| shift_mask
,
1144 targ
, source
, subtargets
, 0);
1150 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1151 rtx shift
= GEN_INT (clear_zero_bit_copies
);
1153 emit_insn (gen_lshrsi3 (new_src
, source
, shift
));
1154 emit_insn (gen_ashlsi3 (target
, new_src
, shift
));
1166 for (i
= 0; i
< 32; i
++)
1167 if (remainder
& (1 << i
))
1170 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
1171 remainder
= (~remainder
) & 0xffffffff;
1172 else if (code
== PLUS
&& num_bits_set
> 16)
1173 remainder
= (-remainder
) & 0xffffffff;
1180 /* Now try and find a way of doing the job in either two or three
1182 We start by looking for the largest block of zeros that are aligned on
1183 a 2-bit boundary, we then fill up the temps, wrapping around to the
1184 top of the word when we drop off the bottom.
1185 In the worst case this code should produce no more than four insns. */
1188 int best_consecutive_zeros
= 0;
1190 for (i
= 0; i
< 32; i
+= 2)
1192 int consecutive_zeros
= 0;
1194 if (! (remainder
& (3 << i
)))
1196 while ((i
< 32) && ! (remainder
& (3 << i
)))
1198 consecutive_zeros
+= 2;
1201 if (consecutive_zeros
> best_consecutive_zeros
)
1203 best_consecutive_zeros
= consecutive_zeros
;
1204 best_start
= i
- consecutive_zeros
;
1210 /* Now start emitting the insns, starting with the one with the highest
1211 bit set: we do this so that the smallest number will be emitted last;
1212 this is more likely to be combinable with addressing insns. */
1220 if (remainder
& (3 << (i
- 2)))
1225 temp1
= remainder
& ((0x0ff << end
)
1226 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
1227 remainder
&= ~temp1
;
1234 emit_insn (gen_rtx_SET (VOIDmode
,
1235 new_src
= (subtargets
1236 ? gen_reg_rtx (mode
)
1239 ? ~temp1
: temp1
)));
1240 else if (code
== MINUS
)
1241 emit_insn (gen_rtx_SET (VOIDmode
,
1242 new_src
= (subtargets
1243 ? gen_reg_rtx (mode
)
1245 gen_rtx (code
, mode
, GEN_INT (temp1
),
1248 emit_insn (gen_rtx_SET (VOIDmode
,
1249 new_src
= (remainder
1251 ? gen_reg_rtx (mode
)
1254 gen_rtx (code
, mode
, source
,
1255 GEN_INT (can_invert
? ~temp1
1267 else if (code
== MINUS
)
1274 } while (remainder
);
1279 /* Canonicalize a comparison so that we are more likely to recognize it.
1280 This can be done for a few constant compares, where we can make the
1281 immediate value easier to load. */
1283 arm_canonicalize_comparison (code
, op1
)
1287 unsigned HOST_WIDE_INT i
= INTVAL (*op1
);
1297 if (i
!= ((((unsigned HOST_WIDE_INT
) 1) << (HOST_BITS_PER_WIDE_INT
- 1))
1299 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1301 *op1
= GEN_INT (i
+1);
1302 return code
== GT
? GE
: LT
;
1308 if (i
!= (((unsigned HOST_WIDE_INT
) 1) << (HOST_BITS_PER_WIDE_INT
- 1))
1309 && (const_ok_for_arm (i
-1) || const_ok_for_arm (- (i
-1))))
1311 *op1
= GEN_INT (i
-1);
1312 return code
== GE
? GT
: LE
;
1318 if (i
!= ~((unsigned HOST_WIDE_INT
) 0)
1319 && (const_ok_for_arm (i
+1) || const_ok_for_arm (- (i
+1))))
1321 *op1
= GEN_INT (i
+ 1);
1322 return code
== GTU
? GEU
: LTU
;
1329 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (- (i
- 1))))
1331 *op1
= GEN_INT (i
- 1);
1332 return code
== GEU
? GTU
: LEU
;
1343 /* Decide whether a type should be returned in memory (true)
1344 or in a register (false). This is called by the macro
1345 RETURN_IN_MEMORY. */
1347 arm_return_in_memory (type
)
1350 if (! AGGREGATE_TYPE_P (type
))
1352 /* All simple types are returned in registers. */
1355 else if (int_size_in_bytes (type
) > 4)
1357 /* All structures/unions bigger than one word are returned in memory. */
1360 else if (TREE_CODE (type
) == RECORD_TYPE
)
1364 /* For a struct the APCS says that we must return in a register if
1365 every addressable element has an offset of zero. For practical
1366 purposes this means that the structure can have at most one non
1367 bit-field element and that this element must be the first one in
1370 /* Find the first field, ignoring non FIELD_DECL things which will
1371 have been created by C++. */
1372 for (field
= TYPE_FIELDS (type
);
1373 field
&& TREE_CODE (field
) != FIELD_DECL
;
1374 field
= TREE_CHAIN (field
))
1378 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1380 /* Now check the remaining fields, if any. */
1381 for (field
= TREE_CHAIN (field
);
1383 field
= TREE_CHAIN (field
))
1385 if (TREE_CODE (field
) != FIELD_DECL
)
1388 if (! DECL_BIT_FIELD_TYPE (field
))
1394 else if (TREE_CODE (type
) == UNION_TYPE
)
1398 /* Unions can be returned in registers if every element is
1399 integral, or can be returned in an integer register. */
1400 for (field
= TYPE_FIELDS (type
);
1402 field
= TREE_CHAIN (field
))
1404 if (TREE_CODE (field
) != FIELD_DECL
)
1407 if (FLOAT_TYPE_P (TREE_TYPE (field
)))
1410 if (RETURN_IN_MEMORY (TREE_TYPE (field
)))
1417 /* XXX Not sure what should be done for other aggregates, so put them in
1423 legitimate_pic_operand_p (x
)
1426 if (CONSTANT_P (x
) && flag_pic
1427 && (GET_CODE (x
) == SYMBOL_REF
1428 || (GET_CODE (x
) == CONST
1429 && GET_CODE (XEXP (x
, 0)) == PLUS
1430 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
1437 legitimize_pic_address (orig
, mode
, reg
)
1439 enum machine_mode mode
;
1442 if (GET_CODE (orig
) == SYMBOL_REF
)
1444 rtx pic_ref
, address
;
1450 if (reload_in_progress
|| reload_completed
)
1453 reg
= gen_reg_rtx (Pmode
);
1458 #ifdef AOF_ASSEMBLER
1459 /* The AOF assembler can generate relocations for these directly, and
1460 understands that the PIC register has to be added into the offset.
1462 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
1465 address
= gen_reg_rtx (Pmode
);
1469 emit_insn (gen_pic_load_addr (address
, orig
));
1471 pic_ref
= gen_rtx_MEM (Pmode
,
1472 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
1474 RTX_UNCHANGING_P (pic_ref
) = 1;
1475 insn
= emit_move_insn (reg
, pic_ref
);
1477 current_function_uses_pic_offset_table
= 1;
1478 /* Put a REG_EQUAL note on this insn, so that it can be optimized
1480 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EQUAL
, orig
,
1484 else if (GET_CODE (orig
) == CONST
)
1488 if (GET_CODE (XEXP (orig
, 0)) == PLUS
1489 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
1494 if (reload_in_progress
|| reload_completed
)
1497 reg
= gen_reg_rtx (Pmode
);
1500 if (GET_CODE (XEXP (orig
, 0)) == PLUS
)
1502 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
1503 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
1504 base
== reg
? 0 : reg
);
1509 if (GET_CODE (offset
) == CONST_INT
)
1511 /* The base register doesn't really matter, we only want to
1512 test the index for the appropriate mode. */
1513 GO_IF_LEGITIMATE_INDEX (mode
, 0, offset
, win
);
1515 if (! reload_in_progress
&& ! reload_completed
)
1516 offset
= force_reg (Pmode
, offset
);
1521 if (GET_CODE (offset
) == CONST_INT
)
1522 return plus_constant_for_output (base
, INTVAL (offset
));
1525 if (GET_MODE_SIZE (mode
) > 4
1526 && (GET_MODE_CLASS (mode
) == MODE_INT
1527 || TARGET_SOFT_FLOAT
))
1529 emit_insn (gen_addsi3 (reg
, base
, offset
));
1533 return gen_rtx_PLUS (Pmode
, base
, offset
);
1535 else if (GET_CODE (orig
) == LABEL_REF
)
1536 current_function_uses_pic_offset_table
= 1;
1555 #ifndef AOF_ASSEMBLER
1556 rtx l1
, pic_tmp
, pic_tmp2
, seq
;
1557 rtx global_offset_table
;
1559 if (current_function_uses_pic_offset_table
== 0)
1566 l1
= gen_label_rtx ();
1568 global_offset_table
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
1569 /* On the ARM the PC register contains 'dot + 8' at the time of the
1571 pic_tmp
= plus_constant (gen_rtx_LABEL_REF (Pmode
, l1
), 8);
1573 pic_tmp2
= gen_rtx_CONST (VOIDmode
,
1574 gen_rtx_PLUS (Pmode
, global_offset_table
, pc_rtx
));
1576 pic_tmp2
= gen_rtx_CONST (VOIDmode
, global_offset_table
);
1578 pic_rtx
= gen_rtx_CONST (Pmode
, gen_rtx_MINUS (Pmode
, pic_tmp2
, pic_tmp
));
1580 emit_insn (gen_pic_load_addr (pic_offset_table_rtx
, pic_rtx
));
1581 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx
, l1
));
1583 seq
= gen_sequence ();
1585 emit_insn_after (seq
, get_insns ());
1587 /* Need to emit this whether or not we obey regdecls,
1588 since setjmp/longjmp can cause life info to screw up. */
1589 emit_insn (gen_rtx_USE (VOIDmode
, pic_offset_table_rtx
));
1590 #endif /* AOF_ASSEMBLER */
/* Nonzero if X is a REG, or a SUBREG of a REG.  */
#define REG_OR_SUBREG_REG(X)						\
  (GET_CODE (X) == REG							\
   || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))

/* Strip a SUBREG wrapper, yielding the underlying REG.  */
#define REG_OR_SUBREG_RTX(X)			\
   (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))

/* Nonzero if X is one of the frame-related pointer registers.  */
#define ARM_FRAME_RTX(X)					\
  ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx		\
   || (X) == arg_pointer_rtx)
1605 arm_rtx_costs (x
, code
)
1609 enum machine_mode mode
= GET_MODE (x
);
1610 enum rtx_code subcode
;
1616 /* Memory costs quite a lot for the first word, but subsequent words
1617 load at the equivalent of a single insn each. */
1618 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
1619 + (CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
1626 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
1633 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
1635 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
1636 + ((GET_CODE (XEXP (x
, 0)) == REG
1637 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1638 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1640 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
1641 || (GET_CODE (XEXP (x
, 0)) == SUBREG
1642 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
1644 + ((GET_CODE (XEXP (x
, 1)) == REG
1645 || (GET_CODE (XEXP (x
, 1)) == SUBREG
1646 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
1647 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
1652 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
1653 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1654 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
1655 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
1658 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1659 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1660 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1661 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1663 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1664 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
1665 && const_double_rtx_ok_for_fpu (XEXP (x
, 0))))
1668 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
1669 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
1670 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
1671 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
1672 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
1673 || subcode
== ROTATE
|| subcode
== ROTATERT
1675 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
1676 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
1677 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
1678 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
1679 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
1680 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
1681 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
1686 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1687 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1688 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1689 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
1690 && const_double_rtx_ok_for_fpu (XEXP (x
, 1))))
1694 case AND
: case XOR
: case IOR
:
1697 /* Normally the frame registers will be spilt into reg+const during
1698 reload, so it is a bad idea to combine them with other instructions,
1699 since then they might not be moved outside of loops. As a compromise
1700 we allow integration with ops that have a constant as their second
1702 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
1703 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
1704 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
1705 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
1706 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
1710 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
1711 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1712 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1713 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
)))
1716 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
1717 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
1718 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
1719 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
1720 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
)))
1723 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
1724 return (1 + extra_cost
1725 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
1726 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
1727 || subcode
== ROTATE
|| subcode
== ROTATERT
1729 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
1730 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
1731 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0)))
1732 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
1733 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
1734 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
))
1740 /* There is no point basing this on the tuning, since it is always the
1741 fast variant if it exists at all */
1742 if (arm_fast_multiply
&& mode
== DImode
1743 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
1744 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
1745 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
1748 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1752 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
1754 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
1755 & (unsigned HOST_WIDE_INT
) 0xffffffff);
1756 int add_cost
= const_ok_for_arm (i
) ? 4 : 8;
1758 /* Tune as appropriate */
1759 int booth_unit_size
= ((tune_flags
& FL_FAST_MULT
) ? 8 : 2);
1761 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
1763 i
>>= booth_unit_size
;
1770 return (((tune_flags
& FL_FAST_MULT
) ? 8 : 30)
1771 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
1772 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4));
1775 if (arm_fast_multiply
&& mode
== SImode
1776 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
1777 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
1778 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
1779 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
1780 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
1781 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
1786 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1787 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
1791 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1793 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
1796 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
1804 return 4 + (mode
== DImode
? 4 : 0);
1807 if (GET_MODE (XEXP (x
, 0)) == QImode
)
1808 return (4 + (mode
== DImode
? 4 : 0)
1809 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1812 switch (GET_MODE (XEXP (x
, 0)))
1815 return (1 + (mode
== DImode
? 4 : 0)
1816 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1819 return (4 + (mode
== DImode
? 4 : 0)
1820 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1823 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
1836 arm_adjust_cost (insn
, link
, dep
, cost
)
1844 /* XXX This is not strictly true for the FPA. */
1845 if (REG_NOTE_KIND(link
) == REG_DEP_ANTI
1846 || REG_NOTE_KIND(link
) == REG_DEP_OUTPUT
)
1849 if ((i_pat
= single_set (insn
)) != NULL
1850 && GET_CODE (SET_SRC (i_pat
)) == MEM
1851 && (d_pat
= single_set (dep
)) != NULL
1852 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
1854 /* This is a load after a store, there is no conflict if the load reads
1855 from a cached area. Assume that loads from the stack, and from the
1856 constant pool are cached, and that others will miss. This is a
1859 /* debug_rtx (insn);
1862 fprintf (stderr, "costs %d\n", cost); */
1864 if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat
), 0))
1865 || reg_mentioned_p (stack_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1866 || reg_mentioned_p (frame_pointer_rtx
, XEXP (SET_SRC (i_pat
), 0))
1867 || reg_mentioned_p (hard_frame_pointer_rtx
,
1868 XEXP (SET_SRC (i_pat
), 0)))
1870 /* fprintf (stderr, "***** Now 1\n"); */
1878 /* This code has been fixed for cross compilation. */
1880 static int fpa_consts_inited
= 0;
1882 char * strings_fpa
[8] =
1885 "4", "5", "0.5", "10"
1888 static REAL_VALUE_TYPE values_fpa
[8];
1896 for (i
= 0; i
< 8; i
++)
1898 r
= REAL_VALUE_ATOF (strings_fpa
[i
], DFmode
);
1902 fpa_consts_inited
= 1;
1905 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1908 const_double_rtx_ok_for_fpu (x
)
1914 if (!fpa_consts_inited
)
1917 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1918 if (REAL_VALUE_MINUS_ZERO (r
))
1921 for (i
= 0; i
< 8; i
++)
1922 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1928 /* Return TRUE if rtx X is a valid immediate FPU constant. */
1931 neg_const_double_rtx_ok_for_fpu (x
)
1937 if (!fpa_consts_inited
)
1940 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
1941 r
= REAL_VALUE_NEGATE (r
);
1942 if (REAL_VALUE_MINUS_ZERO (r
))
1945 for (i
= 0; i
< 8; i
++)
1946 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
1952 /* Predicates for `match_operand' and `match_operator'. */
1954 /* s_register_operand is the same as register_operand, but it doesn't accept
1957 This function exists because at the time it was put in it led to better
1958 code. SUBREG(MEM) always needs a reload in the places where
1959 s_register_operand is used, and this seemed to lead to excessive
1963 s_register_operand (op
, mode
)
1965 enum machine_mode mode
;
1967 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1970 if (GET_CODE (op
) == SUBREG
)
1971 op
= SUBREG_REG (op
);
1973 /* We don't consider registers whose class is NO_REGS
1974 to be a register operand. */
1975 return (GET_CODE (op
) == REG
1976 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
1977 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
1980 /* Only accept reg, subreg(reg), const_int. */
1983 reg_or_int_operand (op
, mode
)
1985 enum machine_mode mode
;
1987 if (GET_CODE (op
) == CONST_INT
)
1990 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
1993 if (GET_CODE (op
) == SUBREG
)
1994 op
= SUBREG_REG (op
);
1996 /* We don't consider registers whose class is NO_REGS
1997 to be a register operand. */
1998 return (GET_CODE (op
) == REG
1999 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
2000 || REGNO_REG_CLASS (REGNO (op
)) != NO_REGS
));
2003 /* Return 1 if OP is an item in memory, given that we are in reload. */
2006 reload_memory_operand (op
, mode
)
2008 enum machine_mode mode ATTRIBUTE_UNUSED
;
2010 int regno
= true_regnum (op
);
2012 return (! CONSTANT_P (op
)
2014 || (GET_CODE (op
) == REG
2015 && REGNO (op
) >= FIRST_PSEUDO_REGISTER
)));
2018 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
2019 memory access (architecture V4) */
2021 bad_signed_byte_operand (op
, mode
)
2023 enum machine_mode mode
;
2025 if (! memory_operand (op
, mode
) || GET_CODE (op
) != MEM
)
2030 /* A sum of anything more complex than reg + reg or reg + const is bad */
2031 if ((GET_CODE (op
) == PLUS
|| GET_CODE (op
) == MINUS
)
2032 && (! s_register_operand (XEXP (op
, 0), VOIDmode
)
2033 || (! s_register_operand (XEXP (op
, 1), VOIDmode
)
2034 && GET_CODE (XEXP (op
, 1)) != CONST_INT
)))
2037 /* Big constants are also bad */
2038 if (GET_CODE (op
) == PLUS
&& GET_CODE (XEXP (op
, 1)) == CONST_INT
2039 && (INTVAL (XEXP (op
, 1)) > 0xff
2040 || -INTVAL (XEXP (op
, 1)) > 0xff))
2043 /* Everything else is good, or can will automatically be made so. */
2047 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
2050 arm_rhs_operand (op
, mode
)
2052 enum machine_mode mode
;
2054 return (s_register_operand (op
, mode
)
2055 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
))));
2058 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a load.
2062 arm_rhsm_operand (op
, mode
)
2064 enum machine_mode mode
;
2066 return (s_register_operand (op
, mode
)
2067 || (GET_CODE (op
) == CONST_INT
&& const_ok_for_arm (INTVAL (op
)))
2068 || memory_operand (op
, mode
));
2071 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
2072 constant that is valid when negated. */
2075 arm_add_operand (op
, mode
)
2077 enum machine_mode mode
;
2079 return (s_register_operand (op
, mode
)
2080 || (GET_CODE (op
) == CONST_INT
2081 && (const_ok_for_arm (INTVAL (op
))
2082 || const_ok_for_arm (-INTVAL (op
)))));
2086 arm_not_operand (op
, mode
)
2088 enum machine_mode mode
;
2090 return (s_register_operand (op
, mode
)
2091 || (GET_CODE (op
) == CONST_INT
2092 && (const_ok_for_arm (INTVAL (op
))
2093 || const_ok_for_arm (~INTVAL (op
)))));
2096 /* Return TRUE if the operand is a memory reference which contains an
2097 offsettable address. */
2099 offsettable_memory_operand (op
, mode
)
2101 enum machine_mode mode
;
2103 if (mode
== VOIDmode
)
2104 mode
= GET_MODE (op
);
2106 return (mode
== GET_MODE (op
)
2107 && GET_CODE (op
) == MEM
2108 && offsettable_address_p (reload_completed
| reload_in_progress
,
2109 mode
, XEXP (op
, 0)));
2112 /* Return TRUE if the operand is a memory reference which is, or can be
2113 made word aligned by adjusting the offset. */
2115 alignable_memory_operand (op
, mode
)
2117 enum machine_mode mode
;
2121 if (mode
== VOIDmode
)
2122 mode
= GET_MODE (op
);
2124 if (mode
!= GET_MODE (op
) || GET_CODE (op
) != MEM
)
2129 return ((GET_CODE (reg
= op
) == REG
2130 || (GET_CODE (op
) == SUBREG
2131 && GET_CODE (reg
= SUBREG_REG (op
)) == REG
)
2132 || (GET_CODE (op
) == PLUS
2133 && GET_CODE (XEXP (op
, 1)) == CONST_INT
2134 && (GET_CODE (reg
= XEXP (op
, 0)) == REG
2135 || (GET_CODE (XEXP (op
, 0)) == SUBREG
2136 && GET_CODE (reg
= SUBREG_REG (XEXP (op
, 0))) == REG
))))
2137 && REGNO_POINTER_ALIGN (REGNO (reg
)) >= 4);
2140 /* Similar to s_register_operand, but does not allow hard integer
2143 f_register_operand (op
, mode
)
2145 enum machine_mode mode
;
2147 if (GET_MODE (op
) != mode
&& mode
!= VOIDmode
)
2150 if (GET_CODE (op
) == SUBREG
)
2151 op
= SUBREG_REG (op
);
2153 /* We don't consider registers whose class is NO_REGS
2154 to be a register operand. */
2155 return (GET_CODE (op
) == REG
2156 && (REGNO (op
) >= FIRST_PSEUDO_REGISTER
2157 || REGNO_REG_CLASS (REGNO (op
)) == FPU_REGS
));
2160 /* Return TRUE for valid operands for the rhs of an FPU instruction. */
2163 fpu_rhs_operand (op
, mode
)
2165 enum machine_mode mode
;
2167 if (s_register_operand (op
, mode
))
2169 else if (GET_CODE (op
) == CONST_DOUBLE
)
2170 return (const_double_rtx_ok_for_fpu (op
));
2176 fpu_add_operand (op
, mode
)
2178 enum machine_mode mode
;
2180 if (s_register_operand (op
, mode
))
2182 else if (GET_CODE (op
) == CONST_DOUBLE
)
2183 return (const_double_rtx_ok_for_fpu (op
)
2184 || neg_const_double_rtx_ok_for_fpu (op
));
2189 /* Return nonzero if OP is a constant power of two. */
2192 power_of_two_operand (op
, mode
)
2194 enum machine_mode mode ATTRIBUTE_UNUSED
;
2196 if (GET_CODE (op
) == CONST_INT
)
2198 HOST_WIDE_INT value
= INTVAL(op
);
2199 return value
!= 0 && (value
& (value
- 1)) == 0;
2204 /* Return TRUE for a valid operand of a DImode operation.
2205 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
2206 Note that this disallows MEM(REG+REG), but allows
2207 MEM(PRE/POST_INC/DEC(REG)). */
2210 di_operand (op
, mode
)
2212 enum machine_mode mode
;
2214 if (s_register_operand (op
, mode
))
2217 if (GET_CODE (op
) == SUBREG
)
2218 op
= SUBREG_REG (op
);
2220 switch (GET_CODE (op
))
2227 return memory_address_p (DImode
, XEXP (op
, 0));
2234 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
2235 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
2236 Note that this disallows MEM(REG+REG), but allows
2237 MEM(PRE/POST_INC/DEC(REG)). */
2240 soft_df_operand (op
, mode
)
2242 enum machine_mode mode
;
2244 if (s_register_operand (op
, mode
))
2247 if (GET_CODE (op
) == SUBREG
)
2248 op
= SUBREG_REG (op
);
2250 switch (GET_CODE (op
))
2256 return memory_address_p (DFmode
, XEXP (op
, 0));
2263 /* Return TRUE for valid index operands. */
2266 index_operand (op
, mode
)
2268 enum machine_mode mode
;
2270 return (s_register_operand(op
, mode
)
2271 || (immediate_operand (op
, mode
)
2272 && INTVAL (op
) < 4096 && INTVAL (op
) > -4096));
2275 /* Return TRUE for valid shifts by a constant. This also accepts any
2276 power of two on the (somewhat overly relaxed) assumption that the
2277 shift operator in this case was a mult. */
2280 const_shift_operand (op
, mode
)
2282 enum machine_mode mode
;
2284 return (power_of_two_operand (op
, mode
)
2285 || (immediate_operand (op
, mode
)
2286 && (INTVAL (op
) < 32 && INTVAL (op
) > 0)));
2289 /* Return TRUE for arithmetic operators which can be combined with a multiply
2293 shiftable_operator (x
, mode
)
2295 enum machine_mode mode
;
2297 if (GET_MODE (x
) != mode
)
2301 enum rtx_code code
= GET_CODE (x
);
2303 return (code
== PLUS
|| code
== MINUS
2304 || code
== IOR
|| code
== XOR
|| code
== AND
);
2308 /* Return TRUE for shift operators. */
2311 shift_operator (x
, mode
)
2313 enum machine_mode mode
;
2315 if (GET_MODE (x
) != mode
)
2319 enum rtx_code code
= GET_CODE (x
);
2322 return power_of_two_operand (XEXP (x
, 1), mode
);
2324 return (code
== ASHIFT
|| code
== ASHIFTRT
|| code
== LSHIFTRT
2325 || code
== ROTATERT
);
2329 int equality_operator (x
, mode
)
2331 enum machine_mode mode ATTRIBUTE_UNUSED
;
2333 return GET_CODE (x
) == EQ
|| GET_CODE (x
) == NE
;
2336 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
2339 minmax_operator (x
, mode
)
2341 enum machine_mode mode
;
2343 enum rtx_code code
= GET_CODE (x
);
2345 if (GET_MODE (x
) != mode
)
2348 return code
== SMIN
|| code
== SMAX
|| code
== UMIN
|| code
== UMAX
;
2351 /* return TRUE if x is EQ or NE */
2353 /* Return TRUE if this is the condition code register, if we aren't given
2354 a mode, accept any class CCmode register */
2357 cc_register (x
, mode
)
2359 enum machine_mode mode
;
2361 if (mode
== VOIDmode
)
2363 mode
= GET_MODE (x
);
2364 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2368 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2374 /* Return TRUE if this is the condition code register, if we aren't given
2375 a mode, accept any class CCmode register which indicates a dominance
2379 dominant_cc_register (x
, mode
)
2381 enum machine_mode mode
;
2383 if (mode
== VOIDmode
)
2385 mode
= GET_MODE (x
);
2386 if (GET_MODE_CLASS (mode
) != MODE_CC
)
2390 if (mode
!= CC_DNEmode
&& mode
!= CC_DEQmode
2391 && mode
!= CC_DLEmode
&& mode
!= CC_DLTmode
2392 && mode
!= CC_DGEmode
&& mode
!= CC_DGTmode
2393 && mode
!= CC_DLEUmode
&& mode
!= CC_DLTUmode
2394 && mode
!= CC_DGEUmode
&& mode
!= CC_DGTUmode
)
2397 if (mode
== GET_MODE (x
) && GET_CODE (x
) == REG
&& REGNO (x
) == 24)
2403 /* Return TRUE if X references a SYMBOL_REF. */
2405 symbol_mentioned_p (x
)
2408 register char * fmt
;
2411 if (GET_CODE (x
) == SYMBOL_REF
)
2414 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2415 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2421 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2422 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
2425 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
2432 /* Return TRUE if X references a LABEL_REF. */
2434 label_mentioned_p (x
)
2437 register char * fmt
;
2440 if (GET_CODE (x
) == LABEL_REF
)
2443 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2444 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2450 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2451 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
2454 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
2465 enum rtx_code code
= GET_CODE (x
);
2469 else if (code
== SMIN
)
2471 else if (code
== UMIN
)
2473 else if (code
== UMAX
)
2479 /* Return 1 if memory locations are adjacent */
2482 adjacent_mem_locations (a
, b
)
2485 int val0
= 0, val1
= 0;
2488 if ((GET_CODE (XEXP (a
, 0)) == REG
2489 || (GET_CODE (XEXP (a
, 0)) == PLUS
2490 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
2491 && (GET_CODE (XEXP (b
, 0)) == REG
2492 || (GET_CODE (XEXP (b
, 0)) == PLUS
2493 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
2495 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
2497 reg0
= REGNO (XEXP (XEXP (a
, 0), 0));
2498 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
2501 reg0
= REGNO (XEXP (a
, 0));
2502 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
2504 reg1
= REGNO (XEXP (XEXP (b
, 0), 0));
2505 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
2508 reg1
= REGNO (XEXP (b
, 0));
2509 return (reg0
== reg1
) && ((val1
- val0
) == 4 || (val0
- val1
) == 4);
2514 /* Return 1 if OP is a load multiple operation. It is known to be
2515 parallel and the first section will be tested. */
2518 load_multiple_operation (op
, mode
)
2520 enum machine_mode mode ATTRIBUTE_UNUSED
;
2522 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2525 HOST_WIDE_INT i
= 1, base
= 0;
2529 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2532 /* Check to see if this might be a write-back */
2533 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2538 /* Now check it more carefully */
2539 if (GET_CODE (SET_DEST (elt
)) != REG
2540 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2541 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2542 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2543 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2544 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2545 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2546 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2547 != REGNO (SET_DEST (elt
)))
2553 /* Perform a quick check so we don't blow up below. */
2555 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2556 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != REG
2557 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != MEM
)
2560 dest_regno
= REGNO (SET_DEST (XVECEXP (op
, 0, i
- 1)));
2561 src_addr
= XEXP (SET_SRC (XVECEXP (op
, 0, i
- 1)), 0);
2563 for (; i
< count
; i
++)
2565 elt
= XVECEXP (op
, 0, i
);
2567 if (GET_CODE (elt
) != SET
2568 || GET_CODE (SET_DEST (elt
)) != REG
2569 || GET_MODE (SET_DEST (elt
)) != SImode
2570 || REGNO (SET_DEST (elt
)) != dest_regno
+ i
- base
2571 || GET_CODE (SET_SRC (elt
)) != MEM
2572 || GET_MODE (SET_SRC (elt
)) != SImode
2573 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != PLUS
2574 || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt
), 0), 0), src_addr
)
2575 || GET_CODE (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != CONST_INT
2576 || INTVAL (XEXP (XEXP (SET_SRC (elt
), 0), 1)) != (i
- base
) * 4)
2583 /* Return 1 if OP is a store multiple operation. It is known to be
2584 parallel and the first section will be tested. */
2587 store_multiple_operation (op
, mode
)
2589 enum machine_mode mode ATTRIBUTE_UNUSED
;
2591 HOST_WIDE_INT count
= XVECLEN (op
, 0);
2594 HOST_WIDE_INT i
= 1, base
= 0;
2598 || GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
2601 /* Check to see if this might be a write-back */
2602 if (GET_CODE (SET_SRC (elt
= XVECEXP (op
, 0, 0))) == PLUS
)
2607 /* Now check it more carefully */
2608 if (GET_CODE (SET_DEST (elt
)) != REG
2609 || GET_CODE (XEXP (SET_SRC (elt
), 0)) != REG
2610 || REGNO (XEXP (SET_SRC (elt
), 0)) != REGNO (SET_DEST (elt
))
2611 || GET_CODE (XEXP (SET_SRC (elt
), 1)) != CONST_INT
2612 || INTVAL (XEXP (SET_SRC (elt
), 1)) != (count
- 2) * 4
2613 || GET_CODE (XVECEXP (op
, 0, count
- 1)) != CLOBBER
2614 || GET_CODE (XEXP (XVECEXP (op
, 0, count
- 1), 0)) != REG
2615 || REGNO (XEXP (XVECEXP (op
, 0, count
- 1), 0))
2616 != REGNO (SET_DEST (elt
)))
2622 /* Perform a quick check so we don't blow up below. */
2624 || GET_CODE (XVECEXP (op
, 0, i
- 1)) != SET
2625 || GET_CODE (SET_DEST (XVECEXP (op
, 0, i
- 1))) != MEM
2626 || GET_CODE (SET_SRC (XVECEXP (op
, 0, i
- 1))) != REG
)
2629 src_regno
= REGNO (SET_SRC (XVECEXP (op
, 0, i
- 1)));
2630 dest_addr
= XEXP (SET_DEST (XVECEXP (op
, 0, i
- 1)), 0);
2632 for (; i
< count
; i
++)
2634 elt
= XVECEXP (op
, 0, i
);
2636 if (GET_CODE (elt
) != SET
2637 || GET_CODE (SET_SRC (elt
)) != REG
2638 || GET_MODE (SET_SRC (elt
)) != SImode
2639 || REGNO (SET_SRC (elt
)) != src_regno
+ i
- base
2640 || GET_CODE (SET_DEST (elt
)) != MEM
2641 || GET_MODE (SET_DEST (elt
)) != SImode
2642 || GET_CODE (XEXP (SET_DEST (elt
), 0)) != PLUS
2643 || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt
), 0), 0), dest_addr
)
2644 || GET_CODE (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != CONST_INT
2645 || INTVAL (XEXP (XEXP (SET_DEST (elt
), 0), 1)) != (i
- base
) * 4)
2653 load_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2658 HOST_WIDE_INT
* load_offset
;
2660 int unsorted_regs
[4];
2661 HOST_WIDE_INT unsorted_offsets
[4];
2666 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2667 extended if required. */
2668 if (nops
< 2 || nops
> 4)
2671 /* Loop over the operands and check that the memory references are
2672 suitable (ie immediate offsets from the same base register). At
2673 the same time, extract the target register, and the memory
2675 for (i
= 0; i
< nops
; i
++)
2680 /* Convert a subreg of a mem into the mem itself. */
2681 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2682 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2684 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2687 /* Don't reorder volatile memory references; it doesn't seem worth
2688 looking for the case where the order is ok anyway. */
2689 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2692 offset
= const0_rtx
;
2694 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2695 || (GET_CODE (reg
) == SUBREG
2696 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2697 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2698 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2700 || (GET_CODE (reg
) == SUBREG
2701 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2702 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2707 base_reg
= REGNO(reg
);
2708 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2709 ? REGNO (operands
[i
])
2710 : REGNO (SUBREG_REG (operands
[i
])));
2715 if (base_reg
!= REGNO (reg
))
2716 /* Not addressed from the same base register. */
2719 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2720 ? REGNO (operands
[i
])
2721 : REGNO (SUBREG_REG (operands
[i
])));
2722 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2726 /* If it isn't an integer register, or if it overwrites the
2727 base register but isn't the last insn in the list, then
2728 we can't do this. */
2729 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
2730 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
2733 unsorted_offsets
[i
] = INTVAL (offset
);
2736 /* Not a suitable memory address. */
2740 /* All the useful information has now been extracted from the
2741 operands into unsorted_regs and unsorted_offsets; additionally,
2742 order[0] has been set to the lowest numbered register in the
2743 list. Sort the registers into order, and check that the memory
2744 offsets are ascending and adjacent. */
2746 for (i
= 1; i
< nops
; i
++)
2750 order
[i
] = order
[i
- 1];
2751 for (j
= 0; j
< nops
; j
++)
2752 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2753 && (order
[i
] == order
[i
- 1]
2754 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2757 /* Have we found a suitable register? if not, one must be used more
2759 if (order
[i
] == order
[i
- 1])
2762 /* Is the memory address adjacent and ascending? */
2763 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
2771 for (i
= 0; i
< nops
; i
++)
2772 regs
[i
] = unsorted_regs
[order
[i
]];
2774 *load_offset
= unsorted_offsets
[order
[0]];
2777 if (unsorted_offsets
[order
[0]] == 0)
2778 return 1; /* ldmia */
2780 if (unsorted_offsets
[order
[0]] == 4)
2781 return 2; /* ldmib */
2783 if (unsorted_offsets
[order
[nops
- 1]] == 0)
2784 return 3; /* ldmda */
2786 if (unsorted_offsets
[order
[nops
- 1]] == -4)
2787 return 4; /* ldmdb */
2789 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
2790 the offset isn't small enough. The reason 2 ldrs are faster is because
2791 these ARMs are able to do more than one cache access in a single cycle.
2792 The ARM9 and StrongARM have Harvard caches, whilst the ARM8 has a double
2793 bandwidth cache. This means that these cores can do both an instruction
2794 fetch and a data fetch in a single cycle, so the trick of calculating the
2795 address into a scratch register (one of the result regs) and then doing a
2796 load multiple actually becomes slower (and no smaller in code size). That
2797 is the transformation
2799 ldr rd1, [rbase + offset]
2800 ldr rd2, [rbase + offset + 4]
2804 add rd1, rbase, offset
2805 ldmia rd1, {rd1, rd2}
2807 produces worse code -- '3 cycles + any stalls on rd2' instead of '2 cycles
2808 + any stalls on rd2'. On ARMs with only one cache access per cycle, the
2809 first sequence could never complete in less than 6 cycles, whereas the ldm
2810 sequence would only take 5 and would make better use of sequential accesses
2811 if not hitting the cache.
2813 We cheat here and test 'arm_ld_sched' which we currently know to only be
2814 true for the ARM8, ARM9 and StrongARM. If this ever changes, then the test
2815 below needs to be reworked. */
2816 if (nops
== 2 && arm_ld_sched
)
2819 /* Can't do it without setting up the offset, only do this if it takes
2820 no more than one insn. */
2821 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
2822 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
2826 emit_ldm_seq (operands
, nops
)
2832 HOST_WIDE_INT offset
;
2836 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
2839 strcpy (buf
, "ldm%?ia\t");
2843 strcpy (buf
, "ldm%?ib\t");
2847 strcpy (buf
, "ldm%?da\t");
2851 strcpy (buf
, "ldm%?db\t");
2856 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2857 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2860 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
2861 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
2863 output_asm_insn (buf
, operands
);
2865 strcpy (buf
, "ldm%?ia\t");
2872 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
2873 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
2875 for (i
= 1; i
< nops
; i
++)
2876 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
2877 reg_names
[regs
[i
]]);
2879 strcat (buf
, "}\t%@ phole ldm");
2881 output_asm_insn (buf
, operands
);
2886 store_multiple_sequence (operands
, nops
, regs
, base
, load_offset
)
2891 HOST_WIDE_INT
* load_offset
;
2893 int unsorted_regs
[4];
2894 HOST_WIDE_INT unsorted_offsets
[4];
2899 /* Can only handle 2, 3, or 4 insns at present, though could be easily
2900 extended if required. */
2901 if (nops
< 2 || nops
> 4)
2904 /* Loop over the operands and check that the memory references are
2905 suitable (ie immediate offsets from the same base register). At
2906 the same time, extract the target register, and the memory
2908 for (i
= 0; i
< nops
; i
++)
2913 /* Convert a subreg of a mem into the mem itself. */
2914 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
2915 operands
[nops
+ i
] = alter_subreg(operands
[nops
+ i
]);
2917 if (GET_CODE (operands
[nops
+ i
]) != MEM
)
2920 /* Don't reorder volatile memory references; it doesn't seem worth
2921 looking for the case where the order is ok anyway. */
2922 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
2925 offset
= const0_rtx
;
2927 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
2928 || (GET_CODE (reg
) == SUBREG
2929 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2930 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
2931 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
2933 || (GET_CODE (reg
) == SUBREG
2934 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
2935 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
2940 base_reg
= REGNO (reg
);
2941 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
2942 ? REGNO (operands
[i
])
2943 : REGNO (SUBREG_REG (operands
[i
])));
2948 if (base_reg
!= REGNO (reg
))
2949 /* Not addressed from the same base register. */
2952 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
2953 ? REGNO (operands
[i
])
2954 : REGNO (SUBREG_REG (operands
[i
])));
2955 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
2959 /* If it isn't an integer register, then we can't do this. */
2960 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
2963 unsorted_offsets
[i
] = INTVAL (offset
);
2966 /* Not a suitable memory address. */
2970 /* All the useful information has now been extracted from the
2971 operands into unsorted_regs and unsorted_offsets; additionally,
2972 order[0] has been set to the lowest numbered register in the
2973 list. Sort the registers into order, and check that the memory
2974 offsets are ascending and adjacent. */
2976 for (i
= 1; i
< nops
; i
++)
2980 order
[i
] = order
[i
- 1];
2981 for (j
= 0; j
< nops
; j
++)
2982 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
2983 && (order
[i
] == order
[i
- 1]
2984 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
2987 /* Have we found a suitable register? if not, one must be used more
2989 if (order
[i
] == order
[i
- 1])
2992 /* Is the memory address adjacent and ascending? */
2993 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
3001 for (i
= 0; i
< nops
; i
++)
3002 regs
[i
] = unsorted_regs
[order
[i
]];
3004 *load_offset
= unsorted_offsets
[order
[0]];
3007 if (unsorted_offsets
[order
[0]] == 0)
3008 return 1; /* stmia */
3010 if (unsorted_offsets
[order
[0]] == 4)
3011 return 2; /* stmib */
3013 if (unsorted_offsets
[order
[nops
- 1]] == 0)
3014 return 3; /* stmda */
3016 if (unsorted_offsets
[order
[nops
- 1]] == -4)
3017 return 4; /* stmdb */
3023 emit_stm_seq (operands
, nops
)
3029 HOST_WIDE_INT offset
;
3033 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
3036 strcpy (buf
, "stm%?ia\t");
3040 strcpy (buf
, "stm%?ib\t");
3044 strcpy (buf
, "stm%?da\t");
3048 strcpy (buf
, "stm%?db\t");
3055 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
3056 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
3058 for (i
= 1; i
< nops
; i
++)
3059 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
3060 reg_names
[regs
[i
]]);
3062 strcat (buf
, "}\t%@ phole stm");
3064 output_asm_insn (buf
, operands
);
3069 multi_register_push (op
, mode
)
3071 enum machine_mode mode ATTRIBUTE_UNUSED
;
3073 if (GET_CODE (op
) != PARALLEL
3074 || (GET_CODE (XVECEXP (op
, 0, 0)) != SET
)
3075 || (GET_CODE (SET_SRC (XVECEXP (op
, 0, 0))) != UNSPEC
)
3076 || (XINT (SET_SRC (XVECEXP (op
, 0, 0)), 1) != 2))
3083 /* Routines for use with attributes */
3085 /* Return nonzero if ATTR is a valid attribute for DECL.
3086 ATTRIBUTES are any existing attributes and ARGS are the arguments
3089 Supported attributes:
3091 naked: don't output any prologue or epilogue code, the user is assumed
3092 to do the right thing. */
3095 arm_valid_machine_decl_attribute (decl
, attr
, args
)
3100 if (args
!= NULL_TREE
)
3103 if (is_attribute_p ("naked", attr
))
3104 return TREE_CODE (decl
) == FUNCTION_DECL
;
3108 /* Return non-zero if FUNC is a naked function. */
3111 arm_naked_function_p (func
)
3116 if (TREE_CODE (func
) != FUNCTION_DECL
)
3119 a
= lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func
));
3120 return a
!= NULL_TREE
;
3123 /* Routines for use in generating RTL */
3126 arm_gen_load_multiple (base_regno
, count
, from
, up
, write_back
, unchanging_p
,
3127 in_struct_p
, scalar_p
)
3139 int sign
= up
? 1 : -1;
3142 result
= gen_rtx_PARALLEL (VOIDmode
,
3143 rtvec_alloc (count
+ (write_back
? 2 : 0)));
3146 XVECEXP (result
, 0, 0)
3147 = gen_rtx_SET (GET_MODE (from
), from
,
3148 plus_constant (from
, count
* 4 * sign
));
3153 for (j
= 0; i
< count
; i
++, j
++)
3155 mem
= gen_rtx_MEM (SImode
, plus_constant (from
, j
* 4 * sign
));
3156 RTX_UNCHANGING_P (mem
) = unchanging_p
;
3157 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
3158 MEM_SCALAR_P (mem
) = scalar_p
;
3159 XVECEXP (result
, 0, i
)
3160 = gen_rtx_SET (VOIDmode
, gen_rtx_REG (SImode
, base_regno
+ j
), mem
);
3164 XVECEXP (result
, 0, i
) = gen_rtx_CLOBBER (SImode
, from
);
3170 arm_gen_store_multiple (base_regno
, count
, to
, up
, write_back
, unchanging_p
,
3171 in_struct_p
, scalar_p
)
3183 int sign
= up
? 1 : -1;
3186 result
= gen_rtx_PARALLEL (VOIDmode
,
3187 rtvec_alloc (count
+ (write_back
? 2 : 0)));
3190 XVECEXP (result
, 0, 0)
3191 = gen_rtx_SET (GET_MODE (to
), to
,
3192 plus_constant (to
, count
* 4 * sign
));
3197 for (j
= 0; i
< count
; i
++, j
++)
3199 mem
= gen_rtx_MEM (SImode
, plus_constant (to
, j
* 4 * sign
));
3200 RTX_UNCHANGING_P (mem
) = unchanging_p
;
3201 MEM_IN_STRUCT_P (mem
) = in_struct_p
;
3202 MEM_SCALAR_P (mem
) = scalar_p
;
3204 XVECEXP (result
, 0, i
)
3205 = gen_rtx_SET (VOIDmode
, mem
, gen_rtx_REG (SImode
, base_regno
+ j
));
3209 XVECEXP (result
, 0, i
) = gen_rtx_CLOBBER (SImode
, to
);
3215 arm_gen_movstrqi (operands
)
3218 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
3221 rtx st_src
, st_dst
, fin_src
, fin_dst
;
3222 rtx part_bytes_reg
= NULL
;
3224 int dst_unchanging_p
, dst_in_struct_p
, src_unchanging_p
, src_in_struct_p
;
3225 int dst_scalar_p
, src_scalar_p
;
3227 if (GET_CODE (operands
[2]) != CONST_INT
3228 || GET_CODE (operands
[3]) != CONST_INT
3229 || INTVAL (operands
[2]) > 64
3230 || INTVAL (operands
[3]) & 3)
3233 st_dst
= XEXP (operands
[0], 0);
3234 st_src
= XEXP (operands
[1], 0);
3236 dst_unchanging_p
= RTX_UNCHANGING_P (operands
[0]);
3237 dst_in_struct_p
= MEM_IN_STRUCT_P (operands
[0]);
3238 dst_scalar_p
= MEM_SCALAR_P (operands
[0]);
3239 src_unchanging_p
= RTX_UNCHANGING_P (operands
[1]);
3240 src_in_struct_p
= MEM_IN_STRUCT_P (operands
[1]);
3241 src_scalar_p
= MEM_SCALAR_P (operands
[1]);
3243 fin_dst
= dst
= copy_to_mode_reg (SImode
, st_dst
);
3244 fin_src
= src
= copy_to_mode_reg (SImode
, st_src
);
3246 in_words_to_go
= (INTVAL (operands
[2]) + 3) / 4;
3247 out_words_to_go
= INTVAL (operands
[2]) / 4;
3248 last_bytes
= INTVAL (operands
[2]) & 3;
3250 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
3251 part_bytes_reg
= gen_rtx_REG (SImode
, (in_words_to_go
- 1) & 3);
3253 for (i
= 0; in_words_to_go
>= 2; i
+=4)
3255 if (in_words_to_go
> 4)
3256 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
3261 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
3262 FALSE
, src_unchanging_p
,
3263 src_in_struct_p
, src_scalar_p
));
3265 if (out_words_to_go
)
3267 if (out_words_to_go
> 4)
3268 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
3272 else if (out_words_to_go
!= 1)
3273 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
3282 mem
= gen_rtx_MEM (SImode
, dst
);
3283 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3284 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3285 MEM_SCALAR_P (mem
) = dst_scalar_p
;
3286 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
3287 if (last_bytes
!= 0)
3288 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
3292 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
3293 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
3296 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
3297 if (out_words_to_go
)
3301 mem
= gen_rtx_MEM (SImode
, src
);
3302 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
3303 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
3304 MEM_SCALAR_P (mem
) = src_scalar_p
;
3305 emit_move_insn (sreg
= gen_reg_rtx (SImode
), mem
);
3306 emit_move_insn (fin_src
= gen_reg_rtx (SImode
), plus_constant (src
, 4));
3308 mem
= gen_rtx_MEM (SImode
, dst
);
3309 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3310 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3311 MEM_SCALAR_P (mem
) = dst_scalar_p
;
3312 emit_move_insn (mem
, sreg
);
3313 emit_move_insn (fin_dst
= gen_reg_rtx (SImode
), plus_constant (dst
, 4));
3316 if (in_words_to_go
) /* Sanity check */
3322 if (in_words_to_go
< 0)
3325 mem
= gen_rtx_MEM (SImode
, src
);
3326 RTX_UNCHANGING_P (mem
) = src_unchanging_p
;
3327 MEM_IN_STRUCT_P (mem
) = src_in_struct_p
;
3328 MEM_SCALAR_P (mem
) = src_scalar_p
;
3329 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
3332 if (BYTES_BIG_ENDIAN
&& last_bytes
)
3334 rtx tmp
= gen_reg_rtx (SImode
);
3336 if (part_bytes_reg
== NULL
)
3339 /* The bytes we want are in the top end of the word */
3340 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
3341 GEN_INT (8 * (4 - last_bytes
))));
3342 part_bytes_reg
= tmp
;
3346 mem
= gen_rtx_MEM (QImode
, plus_constant (dst
, last_bytes
- 1));
3347 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3348 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3349 MEM_SCALAR_P (mem
) = dst_scalar_p
;
3350 emit_move_insn (mem
, gen_rtx_SUBREG (QImode
, part_bytes_reg
, 0));
3354 tmp
= gen_reg_rtx (SImode
);
3355 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3356 part_bytes_reg
= tmp
;
3365 if (part_bytes_reg
== NULL
)
3368 mem
= gen_rtx_MEM (QImode
, dst
);
3369 RTX_UNCHANGING_P (mem
) = dst_unchanging_p
;
3370 MEM_IN_STRUCT_P (mem
) = dst_in_struct_p
;
3371 MEM_SCALAR_P (mem
) = dst_scalar_p
;
3372 emit_move_insn (mem
, gen_rtx_SUBREG (QImode
, part_bytes_reg
, 0));
3376 rtx tmp
= gen_reg_rtx (SImode
);
3378 emit_insn (gen_addsi3 (dst
, dst
, const1_rtx
));
3379 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
3380 part_bytes_reg
= tmp
;
3388 /* Generate a memory reference for a half word, such that it will be loaded
3389 into the top 16 bits of the word. We can assume that the address is
3390 known to be alignable and of the form reg, or plus (reg, const). */
3392 gen_rotated_half_load (memref
)
3395 HOST_WIDE_INT offset
= 0;
3396 rtx base
= XEXP (memref
, 0);
3398 if (GET_CODE (base
) == PLUS
)
3400 offset
= INTVAL (XEXP (base
, 1));
3401 base
= XEXP (base
, 0);
3404 /* If we aren't allowed to generate unaligned addresses, then fail. */
3405 if (TARGET_SHORT_BY_BYTES
3406 && ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0)))
3409 base
= gen_rtx_MEM (SImode
, plus_constant (base
, offset
& ~2));
3411 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
3414 return gen_rtx_ROTATE (SImode
, base
, GEN_INT (16));
3417 static enum machine_mode
3418 select_dominance_cc_mode (x
, y
, cond_or
)
3421 HOST_WIDE_INT cond_or
;
3423 enum rtx_code cond1
, cond2
;
3426 /* Currently we will probably get the wrong result if the individual
3427 comparisons are not simple. This also ensures that it is safe to
3428 reverse a comparison if necessary. */
3429 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
3431 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
3436 cond1
= reverse_condition (cond1
);
3438 /* If the comparisons are not equal, and one doesn't dominate the other,
3439 then we can't do this. */
3441 && ! comparison_dominates_p (cond1
, cond2
)
3442 && (swapped
= 1, ! comparison_dominates_p (cond2
, cond1
)))
3447 enum rtx_code temp
= cond1
;
3455 if (cond2
== EQ
|| ! cond_or
)
3460 case LE
: return CC_DLEmode
;
3461 case LEU
: return CC_DLEUmode
;
3462 case GE
: return CC_DGEmode
;
3463 case GEU
: return CC_DGEUmode
;
3470 if (cond2
== LT
|| ! cond_or
)
3479 if (cond2
== GT
|| ! cond_or
)
3488 if (cond2
== LTU
|| ! cond_or
)
3497 if (cond2
== GTU
|| ! cond_or
)
3505 /* The remaining cases only occur when both comparisons are the
3530 arm_select_cc_mode (op
, x
, y
)
3535 /* All floating point compares return CCFP if it is an equality
3536 comparison, and CCFPE otherwise. */
3537 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
3538 return (op
== EQ
|| op
== NE
) ? CCFPmode
: CCFPEmode
;
3540 /* A compare with a shifted operand. Because of canonicalization, the
3541 comparison will have to be swapped when we emit the assembler. */
3542 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
3543 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3544 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
3545 || GET_CODE (x
) == ROTATERT
))
3548 /* This is a special case that is used by combine to allow a
3549 comparison of a shifted byte load to be split into a zero-extend
3550 followed by a comparison of the shifted integer (only valid for
3551 equalities and unsigned inequalities). */
3552 if (GET_MODE (x
) == SImode
3553 && GET_CODE (x
) == ASHIFT
3554 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
3555 && GET_CODE (XEXP (x
, 0)) == SUBREG
3556 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
3557 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
3558 && (op
== EQ
|| op
== NE
3559 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
3560 && GET_CODE (y
) == CONST_INT
)
3563 /* An operation that sets the condition codes as a side-effect, the
3564 V flag is not set correctly, so we can only use comparisons where
3565 this doesn't matter. (For LT and GE we can use "mi" and "pl"
3567 if (GET_MODE (x
) == SImode
3569 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
3570 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
3571 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
3572 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
3573 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
3574 || GET_CODE (x
) == LSHIFTRT
3575 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
3576 || GET_CODE (x
) == ROTATERT
|| GET_CODE (x
) == ZERO_EXTRACT
))
3579 /* A construct for a conditional compare, if the false arm contains
3580 0, then both conditions must be true, otherwise either condition
3581 must be true. Not all conditions are possible, so CCmode is
3582 returned if it can't be done. */
3583 if (GET_CODE (x
) == IF_THEN_ELSE
3584 && (XEXP (x
, 2) == const0_rtx
3585 || XEXP (x
, 2) == const1_rtx
)
3586 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 0))) == '<'
3587 && GET_RTX_CLASS (GET_CODE (XEXP (x
, 1))) == '<')
3588 return select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
3589 INTVAL (XEXP (x
, 2)));
3591 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
3594 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
3595 && GET_CODE (x
) == PLUS
3596 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
3602 /* X and Y are two things to compare using CODE. Emit the compare insn and
3603 return the rtx for register 0 in the proper mode. FP means this is a
3604 floating point compare: I don't think that it is needed on the arm. */
3607 gen_compare_reg (code
, x
, y
)
3611 enum machine_mode mode
= SELECT_CC_MODE (code
, x
, y
);
3612 rtx cc_reg
= gen_rtx_REG (mode
, 24);
3614 emit_insn (gen_rtx_SET (VOIDmode
, cc_reg
,
3615 gen_rtx_COMPARE (mode
, x
, y
)));
3621 arm_reload_in_hi (operands
)
3624 rtx ref
= operands
[1];
3626 HOST_WIDE_INT offset
= 0;
3628 if (GET_CODE (ref
) == SUBREG
)
3630 offset
= SUBREG_WORD (ref
) * UNITS_PER_WORD
;
3631 if (BYTES_BIG_ENDIAN
)
3632 offset
-= (MIN (UNITS_PER_WORD
, GET_MODE_SIZE (GET_MODE (ref
)))
3633 - MIN (UNITS_PER_WORD
,
3634 GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref
)))));
3635 ref
= SUBREG_REG (ref
);
3638 if (GET_CODE (ref
) == REG
)
3640 /* We have a pseudo which has been spilt onto the stack; there
3641 are two cases here: the first where there is a simple
3642 stack-slot replacement and a second where the stack-slot is
3643 out of range, or is used as a subreg. */
3644 if (reg_equiv_mem
[REGNO (ref
)])
3646 ref
= reg_equiv_mem
[REGNO (ref
)];
3647 base
= find_replacement (&XEXP (ref
, 0));
3650 /* The slot is out of range, or was dressed up in a SUBREG */
3651 base
= reg_equiv_address
[REGNO (ref
)];
3654 base
= find_replacement (&XEXP (ref
, 0));
3656 /* Handle the case where the address is too complex to be offset by 1. */
3657 if (GET_CODE (base
) == MINUS
3658 || (GET_CODE (base
) == PLUS
&& GET_CODE (XEXP (base
, 1)) != CONST_INT
))
3660 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
3662 emit_insn (gen_rtx_SET (VOIDmode
, base_plus
, base
));
3665 else if (GET_CODE (base
) == PLUS
)
3667 /* The addend must be CONST_INT, or we would have dealt with it above */
3668 HOST_WIDE_INT hi
, lo
;
3670 offset
+= INTVAL (XEXP (base
, 1));
3671 base
= XEXP (base
, 0);
3673 /* Rework the address into a legal sequence of insns */
3674 /* Valid range for lo is -4095 -> 4095 */
3677 : -((-offset
) & 0xfff));
3679 /* Corner case, if lo is the max offset then we would be out of range
3680 once we have added the additional 1 below, so bump the msb into the
3681 pre-loading insn(s). */
3685 hi
= ((((offset
- lo
) & (HOST_WIDE_INT
) 0xFFFFFFFF)
3686 ^ (HOST_WIDE_INT
) 0x80000000)
3687 - (HOST_WIDE_INT
) 0x80000000);
3689 if (hi
+ lo
!= offset
)
3694 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
3696 /* Get the base address; addsi3 knows how to handle constants
3697 that require more than one insn */
3698 emit_insn (gen_addsi3 (base_plus
, base
, GEN_INT (hi
)));
3704 scratch
= gen_rtx_REG (SImode
, REGNO (operands
[2]));
3705 emit_insn (gen_zero_extendqisi2 (scratch
,
3706 gen_rtx_MEM (QImode
,
3707 plus_constant (base
,
3709 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode
, operands
[0], 0),
3710 gen_rtx_MEM (QImode
,
3711 plus_constant (base
,
3713 if (! BYTES_BIG_ENDIAN
)
3714 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_SUBREG (SImode
, operands
[0], 0),
3715 gen_rtx_IOR (SImode
,
3718 gen_rtx_SUBREG (SImode
, operands
[0], 0),
3722 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_SUBREG (SImode
, operands
[0], 0),
3723 gen_rtx_IOR (SImode
,
3724 gen_rtx_ASHIFT (SImode
, scratch
,
3726 gen_rtx_SUBREG (SImode
, operands
[0],
3730 /* Handle storing a half-word to memory during reload by synthesising as two
3731 byte stores. Take care not to clobber the input values until after we
3732 have moved them somewhere safe. This code assumes that if the DImode
3733 scratch in operands[2] overlaps either the input value or output address
3734 in some way, then that value must die in this insn (we absolutely need
3735 two scratch registers for some corner cases). */
3737 arm_reload_out_hi (operands
)
3740 rtx ref
= operands
[0];
3741 rtx outval
= operands
[1];
3743 HOST_WIDE_INT offset
= 0;
3745 if (GET_CODE (ref
) == SUBREG
)
3747 offset
= SUBREG_WORD (ref
) * UNITS_PER_WORD
;
3748 if (BYTES_BIG_ENDIAN
)
3749 offset
-= (MIN (UNITS_PER_WORD
, GET_MODE_SIZE (GET_MODE (ref
)))
3750 - MIN (UNITS_PER_WORD
,
3751 GET_MODE_SIZE (GET_MODE (SUBREG_REG (ref
)))));
3752 ref
= SUBREG_REG (ref
);
3756 if (GET_CODE (ref
) == REG
)
3758 /* We have a pseudo which has been spilt onto the stack; there
3759 are two cases here: the first where there is a simple
3760 stack-slot replacement and a second where the stack-slot is
3761 out of range, or is used as a subreg. */
3762 if (reg_equiv_mem
[REGNO (ref
)])
3764 ref
= reg_equiv_mem
[REGNO (ref
)];
3765 base
= find_replacement (&XEXP (ref
, 0));
3768 /* The slot is out of range, or was dressed up in a SUBREG */
3769 base
= reg_equiv_address
[REGNO (ref
)];
3772 base
= find_replacement (&XEXP (ref
, 0));
3774 scratch
= gen_rtx_REG (SImode
, REGNO (operands
[2]));
3776 /* Handle the case where the address is too complex to be offset by 1. */
3777 if (GET_CODE (base
) == MINUS
3778 || (GET_CODE (base
) == PLUS
&& GET_CODE (XEXP (base
, 1)) != CONST_INT
))
3780 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
3782 /* Be careful not to destroy OUTVAL. */
3783 if (reg_overlap_mentioned_p (base_plus
, outval
))
3785 /* Updating base_plus might destroy outval, see if we can
3786 swap the scratch and base_plus. */
3787 if (! reg_overlap_mentioned_p (scratch
, outval
))
3790 scratch
= base_plus
;
3795 rtx scratch_hi
= gen_rtx_REG (HImode
, REGNO (operands
[2]));
3797 /* Be conservative and copy OUTVAL into the scratch now,
3798 this should only be necessary if outval is a subreg
3799 of something larger than a word. */
3800 /* XXX Might this clobber base? I can't see how it can,
3801 since scratch is known to overlap with OUTVAL, and
3802 must be wider than a word. */
3803 emit_insn (gen_movhi (scratch_hi
, outval
));
3804 outval
= scratch_hi
;
3808 emit_insn (gen_rtx_SET (VOIDmode
, base_plus
, base
));
3811 else if (GET_CODE (base
) == PLUS
)
3813 /* The addend must be CONST_INT, or we would have dealt with it above */
3814 HOST_WIDE_INT hi
, lo
;
3816 offset
+= INTVAL (XEXP (base
, 1));
3817 base
= XEXP (base
, 0);
3819 /* Rework the address into a legal sequence of insns */
3820 /* Valid range for lo is -4095 -> 4095 */
3823 : -((-offset
) & 0xfff));
3825 /* Corner case, if lo is the max offset then we would be out of range
3826 once we have added the additional 1 below, so bump the msb into the
3827 pre-loading insn(s). */
3831 hi
= ((((offset
- lo
) & (HOST_WIDE_INT
) 0xFFFFFFFF)
3832 ^ (HOST_WIDE_INT
) 0x80000000)
3833 - (HOST_WIDE_INT
) 0x80000000);
3835 if (hi
+ lo
!= offset
)
3840 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
3842 /* Be careful not to destroy OUTVAL. */
3843 if (reg_overlap_mentioned_p (base_plus
, outval
))
3845 /* Updating base_plus might destroy outval, see if we
3846 can swap the scratch and base_plus. */
3847 if (! reg_overlap_mentioned_p (scratch
, outval
))
3850 scratch
= base_plus
;
3855 rtx scratch_hi
= gen_rtx_REG (HImode
, REGNO (operands
[2]));
3857 /* Be conservative and copy outval into scratch now,
3858 this should only be necessary if outval is a
3859 subreg of something larger than a word. */
3860 /* XXX Might this clobber base? I can't see how it
3861 can, since scratch is known to overlap with
3863 emit_insn (gen_movhi (scratch_hi
, outval
));
3864 outval
= scratch_hi
;
3868 /* Get the base address; addsi3 knows how to handle constants
3869 that require more than one insn */
3870 emit_insn (gen_addsi3 (base_plus
, base
, GEN_INT (hi
)));
3876 if (BYTES_BIG_ENDIAN
)
3878 emit_insn (gen_movqi (gen_rtx_MEM (QImode
,
3879 plus_constant (base
, offset
+ 1)),
3880 gen_rtx_SUBREG (QImode
, outval
, 0)));
3881 emit_insn (gen_lshrsi3 (scratch
,
3882 gen_rtx_SUBREG (SImode
, outval
, 0),
3884 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (base
, offset
)),
3885 gen_rtx_SUBREG (QImode
, scratch
, 0)));
3889 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (base
, offset
)),
3890 gen_rtx_SUBREG (QImode
, outval
, 0)));
3891 emit_insn (gen_lshrsi3 (scratch
,
3892 gen_rtx_SUBREG (SImode
, outval
, 0),
3894 emit_insn (gen_movqi (gen_rtx_MEM (QImode
,
3895 plus_constant (base
, offset
+ 1)),
3896 gen_rtx_SUBREG (QImode
, scratch
, 0)));
3900 /* Routines for manipulation of the constant pool. */
3901 /* This is unashamedly hacked from the version in sh.c, since the problem is
3902 extremely similar. */
3904 /* Arm instructions cannot load a large constant into a register,
3905 constants have to come from a pc relative load. The reference of a pc
3906 relative load instruction must be less than 1k infront of the instruction.
3907 This means that we often have to dump a constant inside a function, and
3908 generate code to branch around it.
3910 It is important to minimize this, since the branches will slow things
3911 down and make things bigger.
3913 Worst case code looks like:
3929 We fix this by performing a scan before scheduling, which notices which
3930 instructions need to have their operands fetched from the constant table
3931 and builds the table.
3936 scan, find an instruction which needs a pcrel move. Look forward, find th
3937 last barrier which is within MAX_COUNT bytes of the requirement.
3938 If there isn't one, make one. Process all the instructions between
3939 the find and the barrier.
3941 In the above example, we can tell that L3 is within 1k of L1, so
3942 the first move can be shrunk from the 2 insn+constant sequence into
3943 just 1 insn, and the constant moved to L3 to make:
3954 Then the second move becomes the target for the shortening process.
3960 rtx value
; /* Value in table */
3961 HOST_WIDE_INT next_offset
;
3962 enum machine_mode mode
; /* Mode of value */
3965 /* The maximum number of constants that can fit into one pool, since
3966 the pc relative range is 0...1020 bytes and constants are at least 4
3969 #define MAX_POOL_SIZE (1020/4)
3970 static pool_node pool_vector
[MAX_POOL_SIZE
];
3971 static int pool_size
;
3972 static rtx pool_vector_label
;
3974 /* Add a constant to the pool and return its offset within the current
3977 X is the rtx we want to replace. MODE is its mode. On return,
3978 ADDRESS_ONLY will be non-zero if we really want the address of such
3979 a constant, not the constant itself. */
3980 static HOST_WIDE_INT
3981 add_constant (x
, mode
, address_only
)
3983 enum machine_mode mode
;
3987 HOST_WIDE_INT offset
;
3991 if (mode
== SImode
&& GET_CODE (x
) == MEM
&& CONSTANT_P (XEXP (x
, 0))
3992 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)))
3993 x
= get_pool_constant (XEXP (x
, 0));
3994 else if (GET_CODE (x
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P(x
))
3997 mode
= get_pool_mode (x
);
3998 x
= get_pool_constant (x
);
4000 #ifndef AOF_ASSEMBLER
4001 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == 3)
4002 x
= XVECEXP (x
, 0, 0);
4005 #ifdef AOF_ASSEMBLER
4006 /* PIC Symbol references need to be converted into offsets into the
4008 if (flag_pic
&& GET_CODE (x
) == SYMBOL_REF
)
4009 x
= aof_pic_entry (x
);
4010 #endif /* AOF_ASSEMBLER */
4012 /* First see if we've already got it */
4013 for (i
= 0; i
< pool_size
; i
++)
4015 if (GET_CODE (x
) == pool_vector
[i
].value
->code
4016 && mode
== pool_vector
[i
].mode
)
4018 if (GET_CODE (x
) == CODE_LABEL
)
4020 if (XINT (x
, 3) != XINT (pool_vector
[i
].value
, 3))
4023 if (rtx_equal_p (x
, pool_vector
[i
].value
))
4024 return pool_vector
[i
].next_offset
- GET_MODE_SIZE (mode
);
4028 /* Need a new one */
4029 pool_vector
[pool_size
].next_offset
= GET_MODE_SIZE (mode
);
4032 pool_vector_label
= gen_label_rtx ();
4034 pool_vector
[pool_size
].next_offset
4035 += (offset
= pool_vector
[pool_size
- 1].next_offset
);
4037 pool_vector
[pool_size
].value
= x
;
4038 pool_vector
[pool_size
].mode
= mode
;
4043 /* Output the literal table */
4050 scan
= emit_label_after (gen_label_rtx (), scan
);
4051 scan
= emit_insn_after (gen_align_4 (), scan
);
4052 scan
= emit_label_after (pool_vector_label
, scan
);
4054 for (i
= 0; i
< pool_size
; i
++)
4056 pool_node
* p
= pool_vector
+ i
;
4058 switch (GET_MODE_SIZE (p
->mode
))
4061 scan
= emit_insn_after (gen_consttable_4 (p
->value
), scan
);
4065 scan
= emit_insn_after (gen_consttable_8 (p
->value
), scan
);
4074 scan
= emit_insn_after (gen_consttable_end (), scan
);
4075 scan
= emit_barrier_after (scan
);
4079 /* Non zero if the src operand needs to be fixed up */
4081 fixit (src
, mode
, destreg
)
4083 enum machine_mode mode
;
4086 if (CONSTANT_P (src
))
4088 if (GET_CODE (src
) == CONST_INT
)
4089 return (! const_ok_for_arm (INTVAL (src
))
4090 && ! const_ok_for_arm (~INTVAL (src
)));
4091 if (GET_CODE (src
) == CONST_DOUBLE
)
4092 return (GET_MODE (src
) == VOIDmode
4094 || (! const_double_rtx_ok_for_fpu (src
)
4095 && ! neg_const_double_rtx_ok_for_fpu (src
)));
4096 return symbol_mentioned_p (src
);
4098 #ifndef AOF_ASSEMBLER
4099 else if (GET_CODE (src
) == UNSPEC
&& XINT (src
, 1) == 3)
4103 return (mode
== SImode
&& GET_CODE (src
) == MEM
4104 && GET_CODE (XEXP (src
, 0)) == SYMBOL_REF
4105 && CONSTANT_POOL_ADDRESS_P (XEXP (src
, 0)));
4108 /* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
4110 find_barrier (from
, max_count
)
4115 rtx found_barrier
= 0;
4118 while (from
&& count
< max_count
)
4122 if (GET_CODE (from
) == BARRIER
)
4123 found_barrier
= from
;
4125 /* Count the length of this insn */
4126 if (GET_CODE (from
) == INSN
4127 && GET_CODE (PATTERN (from
)) == SET
4128 && CONSTANT_P (SET_SRC (PATTERN (from
)))
4129 && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from
))))
4131 /* Handle table jumps as a single entity. */
4132 else if (GET_CODE (from
) == JUMP_INSN
4133 && JUMP_LABEL (from
) != 0
4134 && ((tmp
= next_real_insn (JUMP_LABEL (from
)))
4135 == next_real_insn (from
))
4137 && GET_CODE (tmp
) == JUMP_INSN
4138 && (GET_CODE (PATTERN (tmp
)) == ADDR_VEC
4139 || GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
))
4141 int elt
= GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
? 1 : 0;
4142 count
+= (get_attr_length (from
)
4143 + GET_MODE_SIZE (SImode
) * XVECLEN (PATTERN (tmp
), elt
));
4144 /* Continue after the dispatch table. */
4146 from
= NEXT_INSN (tmp
);
4150 count
+= get_attr_length (from
);
4153 from
= NEXT_INSN (from
);
4156 if (! found_barrier
)
4158 /* We didn't find a barrier in time to
4159 dump our stuff, so we'll make one. */
4160 rtx label
= gen_label_rtx ();
4163 from
= PREV_INSN (last
);
4165 from
= get_last_insn ();
4167 /* Walk back to be just before any jump. */
4168 while (GET_CODE (from
) == JUMP_INSN
4169 || GET_CODE (from
) == NOTE
4170 || GET_CODE (from
) == CODE_LABEL
)
4171 from
= PREV_INSN (from
);
4173 from
= emit_jump_insn_after (gen_jump (label
), from
);
4174 JUMP_LABEL (from
) = label
;
4175 found_barrier
= emit_barrier_after (from
);
4176 emit_label_after (label
, found_barrier
);
4179 return found_barrier
;
4182 /* Non zero if the insn is a move instruction which needs to be fixed. */
4187 if (!INSN_DELETED_P (insn
)
4188 && GET_CODE (insn
) == INSN
4189 && GET_CODE (PATTERN (insn
)) == SET
)
4191 rtx pat
= PATTERN (insn
);
4192 rtx src
= SET_SRC (pat
);
4193 rtx dst
= SET_DEST (pat
);
4195 enum machine_mode mode
= GET_MODE (dst
);
4200 if (GET_CODE (dst
) == REG
)
4201 destreg
= REGNO (dst
);
4202 else if (GET_CODE (dst
) == SUBREG
&& GET_CODE (SUBREG_REG (dst
)) == REG
)
4203 destreg
= REGNO (SUBREG_REG (dst
));
4207 return fixit (src
, mode
, destreg
);
4220 /* The ldr instruction can work with up to a 4k offset, and most constants
4221 will be loaded with one of these instructions; however, the adr
4222 instruction and the ldf instructions only work with a 1k offset. This
4223 code needs to be rewritten to use the 4k offset when possible, and to
4224 adjust when a 1k offset is needed. For now we just use a 1k offset
4228 /* Floating point operands can't work further than 1024 bytes from the
4229 PC, so to make things simple we restrict all loads for such functions.
4231 if (TARGET_HARD_FLOAT
)
4235 for (regno
= 16; regno
< 24; regno
++)
4236 if (regs_ever_live
[regno
])
4246 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
4248 if (broken_move (insn
))
4250 /* This is a broken move instruction, scan ahead looking for
4251 a barrier to stick the constant table behind */
4253 rtx barrier
= find_barrier (insn
, count_size
);
4255 /* Now find all the moves between the points and modify them */
4256 for (scan
= insn
; scan
!= barrier
; scan
= NEXT_INSN (scan
))
4258 if (broken_move (scan
))
4260 /* This is a broken move instruction, add it to the pool */
4261 rtx pat
= PATTERN (scan
);
4262 rtx src
= SET_SRC (pat
);
4263 rtx dst
= SET_DEST (pat
);
4264 enum machine_mode mode
= GET_MODE (dst
);
4265 HOST_WIDE_INT offset
;
4272 /* If this is an HImode constant load, convert it into
4273 an SImode constant load. Since the register is always
4274 32 bits this is safe. We have to do this, since the
4275 load pc-relative instruction only does a 32-bit load. */
4279 if (GET_CODE (dst
) != REG
)
4281 PUT_MODE (dst
, SImode
);
4284 offset
= add_constant (src
, mode
, &address_only
);
4285 addr
= plus_constant (gen_rtx_LABEL_REF (VOIDmode
,
4289 /* If we only want the address of the pool entry, or
4290 for wide moves to integer regs we need to split
4291 the address calculation off into a separate insn.
4292 If necessary, the load can then be done with a
4293 load-multiple. This is safe, since we have
4294 already noted the length of such insns to be 8,
4295 and we are immediately over-writing the scratch
4296 we have grabbed with the final result. */
4297 if ((address_only
|| GET_MODE_SIZE (mode
) > 4)
4298 && (scratch
= REGNO (dst
)) < 16)
4305 reg
= gen_rtx_REG (SImode
, scratch
);
4307 newinsn
= emit_insn_after (gen_movaddr (reg
, addr
),
4314 newsrc
= gen_rtx_MEM (mode
, addr
);
4316 /* XXX Fixme -- I think the following is bogus. */
4317 /* Build a jump insn wrapper around the move instead
4318 of an ordinary insn, because we want to have room for
4319 the target label rtx in fld[7], which an ordinary
4320 insn doesn't have. */
4322 = emit_jump_insn_after (gen_rtx_SET (VOIDmode
, dst
,
4325 JUMP_LABEL (newinsn
) = pool_vector_label
;
4327 /* But it's still an ordinary insn */
4328 PUT_CODE (newinsn
, INSN
);
4336 dump_table (barrier
);
4341 after_arm_reorg
= 1;
4345 /* Routines to output assembly language. */
4347 /* If the rtx is the correct value then return the string of the number.
4348 In this way we can ensure that valid double constants are generated even
4349 when cross compiling. */
4351 fp_immediate_constant (x
)
4357 if (!fpa_consts_inited
)
4360 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4361 for (i
= 0; i
< 8; i
++)
4362 if (REAL_VALUES_EQUAL (r
, values_fpa
[i
]))
4363 return strings_fpa
[i
];
4368 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
4370 fp_const_from_val (r
)
4371 REAL_VALUE_TYPE
* r
;
4375 if (! fpa_consts_inited
)
4378 for (i
= 0; i
< 8; i
++)
4379 if (REAL_VALUES_EQUAL (*r
, values_fpa
[i
]))
4380 return strings_fpa
[i
];
4385 /* Output the operands of a LDM/STM instruction to STREAM.
4386 MASK is the ARM register set mask of which only bits 0-15 are important.
4387 INSTR is the possibly suffixed base register. HAT unequals zero if a hat
4388 must follow the register list. */
4391 print_multi_reg (stream
, instr
, mask
, hat
)
4397 int not_first
= FALSE
;
4399 fputc ('\t', stream
);
4400 fprintf (stream
, instr
, REGISTER_PREFIX
);
4401 fputs (", {", stream
);
4403 for (i
= 0; i
< 16; i
++)
4404 if (mask
& (1 << i
))
4407 fprintf (stream
, ", ");
4409 asm_fprintf (stream
, "%R%s", reg_names
[i
]);
4413 fprintf (stream
, "}%s\n", hat
? "^" : "");
4416 /* Output a 'call' insn. */
4419 output_call (operands
)
4422 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
4424 if (REGNO (operands
[0]) == LR_REGNUM
)
4426 operands
[0] = gen_rtx_REG (SImode
, IP_REGNUM
);
4427 output_asm_insn ("mov%?\t%0, %|lr", operands
);
4430 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
4432 if (TARGET_INTERWORK
)
4433 output_asm_insn ("bx%?\t%0", operands
);
4435 output_asm_insn ("mov%?\t%|pc, %0", operands
);
4444 int something_changed
= 0;
4446 int code
= GET_CODE (x0
);
4448 register char * fmt
;
4453 if (REGNO (x0
) == LR_REGNUM
)
4455 *x
= gen_rtx_REG (SImode
, IP_REGNUM
);
4460 /* Scan through the sub-elements and change any references there */
4461 fmt
= GET_RTX_FORMAT (code
);
4463 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
4465 something_changed
|= eliminate_lr2ip (&XEXP (x0
, i
));
4466 else if (fmt
[i
] == 'E')
4467 for (j
= 0; j
< XVECLEN (x0
, i
); j
++)
4468 something_changed
|= eliminate_lr2ip (&XVECEXP (x0
, i
, j
));
4470 return something_changed
;
4474 /* Output a 'call' insn that is a reference in memory. */
4477 output_call_mem (operands
)
4480 operands
[0] = copy_rtx (operands
[0]); /* Be ultra careful */
4481 /* Handle calls using lr by using ip (which may be clobbered in subr anyway).
4483 if (eliminate_lr2ip (&operands
[0]))
4484 output_asm_insn ("mov%?\t%|ip, %|lr", operands
);
4486 if (TARGET_INTERWORK
)
4488 output_asm_insn ("ldr%?\t%|ip, %0", operands
);
4489 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
4490 output_asm_insn ("bx%?\t%|ip", operands
);
4494 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
4495 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
4502 /* Output a move from arm registers to an fpu registers.
4503 OPERANDS[0] is an fpu register.
4504 OPERANDS[1] is the first registers of an arm register pair. */
4507 output_mov_long_double_fpu_from_arm (operands
)
4510 int arm_reg0
= REGNO (operands
[1]);
4513 if (arm_reg0
== IP_REGNUM
)
4516 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
4517 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
4518 ops
[2] = gen_rtx_REG (SImode
, 2 + arm_reg0
);
4520 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
4521 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
4526 /* Output a move from an fpu register to arm registers.
4527 OPERANDS[0] is the first registers of an arm register pair.
4528 OPERANDS[1] is an fpu register. */
4531 output_mov_long_double_arm_from_fpu (operands
)
4534 int arm_reg0
= REGNO (operands
[0]);
4537 if (arm_reg0
== IP_REGNUM
)
4540 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
4541 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
4542 ops
[2] = gen_rtx_REG (SImode
, 2 + arm_reg0
);
4544 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
4545 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
4549 /* Output a move from arm registers to arm registers of a long double
4550 OPERANDS[0] is the destination.
4551 OPERANDS[1] is the source. */
4553 output_mov_long_double_arm_from_arm (operands
)
4556 /* We have to be careful here because the two might overlap */
4557 int dest_start
= REGNO (operands
[0]);
4558 int src_start
= REGNO (operands
[1]);
4562 if (dest_start
< src_start
)
4564 for (i
= 0; i
< 3; i
++)
4566 ops
[0] = gen_rtx_REG (SImode
, dest_start
+ i
);
4567 ops
[1] = gen_rtx_REG (SImode
, src_start
+ i
);
4568 output_asm_insn ("mov%?\t%0, %1", ops
);
4573 for (i
= 2; i
>= 0; i
--)
4575 ops
[0] = gen_rtx_REG (SImode
, dest_start
+ i
);
4576 ops
[1] = gen_rtx_REG (SImode
, src_start
+ i
);
4577 output_asm_insn ("mov%?\t%0, %1", ops
);
4585 /* Output a move from arm registers to an fpu registers.
4586 OPERANDS[0] is an fpu register.
4587 OPERANDS[1] is the first registers of an arm register pair. */
4590 output_mov_double_fpu_from_arm (operands
)
4593 int arm_reg0
= REGNO (operands
[1]);
4596 if (arm_reg0
== IP_REGNUM
)
4599 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
4600 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
4601 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
4602 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
4606 /* Output a move from an fpu register to arm registers.
4607 OPERANDS[0] is the first registers of an arm register pair.
4608 OPERANDS[1] is an fpu register. */
4611 output_mov_double_arm_from_fpu (operands
)
4614 int arm_reg0
= REGNO (operands
[0]);
4617 if (arm_reg0
== IP_REGNUM
)
4620 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
4621 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
4622 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
4623 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
4627 /* Output a move between double words.
4628 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
4629 or MEM<-REG and all MEMs must be offsettable addresses. */
4632 output_move_double (operands
)
4635 enum rtx_code code0
= GET_CODE (operands
[0]);
4636 enum rtx_code code1
= GET_CODE (operands
[1]);
4641 int reg0
= REGNO (operands
[0]);
4643 otherops
[0] = gen_rtx_REG (SImode
, 1 + reg0
);
4647 int reg1
= REGNO (operands
[1]);
4648 if (reg1
== IP_REGNUM
)
4651 /* Ensure the second source is not overwritten */
4652 if (reg1
== reg0
+ (WORDS_BIG_ENDIAN
? -1 : 1))
4653 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands
);
4655 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands
);
4657 else if (code1
== CONST_DOUBLE
)
4659 if (GET_MODE (operands
[1]) == DFmode
)
4662 union real_extract u
;
4664 bcopy ((char *) &CONST_DOUBLE_LOW (operands
[1]), (char *) &u
,
4666 REAL_VALUE_TO_TARGET_DOUBLE (u
.d
, l
);
4667 otherops
[1] = GEN_INT(l
[1]);
4668 operands
[1] = GEN_INT(l
[0]);
4670 else if (GET_MODE (operands
[1]) != VOIDmode
)
4672 else if (WORDS_BIG_ENDIAN
)
4675 otherops
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4676 operands
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4681 otherops
[1] = GEN_INT (CONST_DOUBLE_HIGH (operands
[1]));
4682 operands
[1] = GEN_INT (CONST_DOUBLE_LOW (operands
[1]));
4685 output_mov_immediate (operands
);
4686 output_mov_immediate (otherops
);
4688 else if (code1
== CONST_INT
)
4690 #if HOST_BITS_PER_WIDE_INT > 32
4691 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
4692 what the upper word is. */
4693 if (WORDS_BIG_ENDIAN
)
4695 otherops
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4696 operands
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4700 otherops
[1] = GEN_INT (INTVAL (operands
[1]) >> 32);
4701 operands
[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands
[1])));
4704 /* Sign extend the intval into the high-order word */
4705 if (WORDS_BIG_ENDIAN
)
4707 otherops
[1] = operands
[1];
4708 operands
[1] = (INTVAL (operands
[1]) < 0
4709 ? constm1_rtx
: const0_rtx
);
4712 otherops
[1] = INTVAL (operands
[1]) < 0 ? constm1_rtx
: const0_rtx
;
4714 output_mov_immediate (otherops
);
4715 output_mov_immediate (operands
);
4717 else if (code1
== MEM
)
4719 switch (GET_CODE (XEXP (operands
[1], 0)))
4722 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
4726 abort (); /* Should never happen now */
4730 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
4734 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
4738 abort (); /* Should never happen now */
4743 output_asm_insn ("adr%?\t%0, %1", operands
);
4744 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
4748 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1),
4749 GET_MODE (XEXP (XEXP (operands
[1], 0), 1))))
4751 otherops
[0] = operands
[0];
4752 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
4753 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
4754 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
4756 if (GET_CODE (otherops
[2]) == CONST_INT
)
4758 switch (INTVAL (otherops
[2]))
4761 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
4764 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
4767 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
4770 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
4771 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
4773 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4776 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
4779 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
4781 return "ldm%?ia\t%0, %M0";
4785 otherops
[1] = adj_offsettable_operand (operands
[1], 4);
4786 /* Take care of overlapping base/data reg. */
4787 if (reg_mentioned_p (operands
[0], operands
[1]))
4789 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4790 output_asm_insn ("ldr%?\t%0, %1", operands
);
4794 output_asm_insn ("ldr%?\t%0, %1", operands
);
4795 output_asm_insn ("ldr%?\t%0, %1", otherops
);
4801 abort (); /* Constraints should prevent this */
4803 else if (code0
== MEM
&& code1
== REG
)
4805 if (REGNO (operands
[1]) == IP_REGNUM
)
4808 switch (GET_CODE (XEXP (operands
[0], 0)))
4811 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
4815 abort (); /* Should never happen now */
4819 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
4823 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
4827 abort (); /* Should never happen now */
4831 if (GET_CODE (XEXP (XEXP (operands
[0], 0), 1)) == CONST_INT
)
4833 switch (INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
4836 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
4840 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
4844 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
4851 otherops
[0] = adj_offsettable_operand (operands
[0], 4);
4852 otherops
[1] = gen_rtx_REG (SImode
, 1 + REGNO (operands
[1]));
4853 output_asm_insn ("str%?\t%1, %0", operands
);
4854 output_asm_insn ("str%?\t%1, %0", otherops
);
4858 abort (); /* Constraints should prevent this */
4864 /* Output an arbitrary MOV reg, #n.
4865 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
4868 output_mov_immediate (operands
)
4871 HOST_WIDE_INT n
= INTVAL (operands
[1]);
4875 /* Try to use one MOV */
4876 if (const_ok_for_arm (n
))
4878 output_asm_insn ("mov%?\t%0, %1", operands
);
4882 /* Try to use one MVN */
4883 if (const_ok_for_arm (~n
))
4885 operands
[1] = GEN_INT (~n
);
4886 output_asm_insn ("mvn%?\t%0, %1", operands
);
4890 /* If all else fails, make it out of ORRs or BICs as appropriate. */
4892 for (i
=0; i
< 32; i
++)
4896 if (n_ones
> 16) /* Shorter to use MVN with BIC in this case. */
4897 output_multi_immediate(operands
, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
4900 output_multi_immediate(operands
, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
4907 /* Output an ADD r, s, #n where n may be too big for one instruction. If
4908 adding zero to one register, output nothing. */
4911 output_add_immediate (operands
)
4914 HOST_WIDE_INT n
= INTVAL (operands
[2]);
4916 if (n
!= 0 || REGNO (operands
[0]) != REGNO (operands
[1]))
4919 output_multi_immediate (operands
,
4920 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
4923 output_multi_immediate (operands
,
4924 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
4931 /* Output a multiple immediate operation.
4932 OPERANDS is the vector of operands referred to in the output patterns.
4933 INSTR1 is the output pattern to use for the first constant.
4934 INSTR2 is the output pattern to use for subsequent constants.
4935 IMMED_OP is the index of the constant slot in OPERANDS.
4936 N is the constant value. */
4939 output_multi_immediate (operands
, instr1
, instr2
, immed_op
, n
)
4941 char * instr1
, * instr2
;
4945 #if HOST_BITS_PER_WIDE_INT > 32
4951 operands
[immed_op
] = const0_rtx
;
4952 output_asm_insn (instr1
, operands
); /* Quick and easy output */
4957 char *instr
= instr1
;
4959 /* Note that n is never zero here (which would give no output) */
4960 for (i
= 0; i
< 32; i
+= 2)
4964 operands
[immed_op
] = GEN_INT (n
& (255 << i
));
4965 output_asm_insn (instr
, operands
);
4975 /* Return the appropriate ARM instruction for the operation code.
4976 The returned result should not be overwritten. OP is the rtx of the
4977 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
4981 arithmetic_instr (op
, shift_first_arg
)
4983 int shift_first_arg
;
4985 switch (GET_CODE (op
))
4991 return shift_first_arg
? "rsb" : "sub";
5008 /* Ensure valid constant shifts and return the appropriate shift mnemonic
5009 for the operation code. The returned result should not be overwritten.
5010 OP is the rtx code of the shift.
5011 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
5015 shift_op (op
, amountp
)
5017 HOST_WIDE_INT
*amountp
;
5020 enum rtx_code code
= GET_CODE (op
);
5022 if (GET_CODE (XEXP (op
, 1)) == REG
|| GET_CODE (XEXP (op
, 1)) == SUBREG
)
5024 else if (GET_CODE (XEXP (op
, 1)) == CONST_INT
)
5025 *amountp
= INTVAL (XEXP (op
, 1));
5048 /* We never have to worry about the amount being other than a
5049 power of 2, since this case can never be reloaded from a reg. */
5051 *amountp
= int_log2 (*amountp
);
5062 /* This is not 100% correct, but follows from the desire to merge
5063 multiplication by a power of 2 with the recognizer for a
5064 shift. >=32 is not a valid shift for "asl", so we must try and
5065 output a shift that produces the correct arithmetical result.
5066 Using lsr #32 is identical except for the fact that the carry bit
5067 is not set correctly if we set the flags; but we never use the
5068 carry bit from such an operation, so we can ignore that. */
5069 if (code
== ROTATERT
)
5070 *amountp
&= 31; /* Rotate is just modulo 32 */
5071 else if (*amountp
!= (*amountp
& 31))
5078 /* Shifts of 0 are no-ops. */
5087 /* Obtain the shift from the POWER of two. */
5089 static HOST_WIDE_INT
5091 HOST_WIDE_INT power
;
5093 HOST_WIDE_INT shift
= 0;
5095 while (((((HOST_WIDE_INT
) 1) << shift
) & power
) == 0)
5105 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
5106 /bin/as is horribly restrictive. */
5107 #define MAX_ASCII_LEN 51
5110 output_ascii_pseudo_op (stream
, p
, len
)
5118 fputs ("\t.ascii\t\"", stream
);
5120 for (i
= 0; i
< len
; i
++)
5122 register int c
= p
[i
];
5124 if (len_so_far
>= MAX_ASCII_LEN
)
5126 fputs ("\"\n\t.ascii\t\"", stream
);
5133 fputs ("\\t", stream
);
5138 fputs ("\\f", stream
);
5143 fputs ("\\b", stream
);
5148 fputs ("\\r", stream
);
5152 case TARGET_NEWLINE
:
5153 fputs ("\\n", stream
);
5155 if ((c
>= ' ' && c
<= '~')
5157 /* This is a good place for a line break. */
5158 len_so_far
= MAX_ASCII_LEN
;
5165 putc ('\\', stream
);
5170 if (c
>= ' ' && c
<= '~')
5177 fprintf (stream
, "\\%03o", c
);
5184 fputs ("\"\n", stream
);
5188 /* Try to determine whether a pattern really clobbers the link register.
5189 This information is useful when peepholing, so that lr need not be pushed
5190 if we combine a call followed by a return.
5191 NOTE: This code does not check for side-effect expressions in a SET_SRC:
5192 such a check should not be needed because these only update an existing
5193 value within a register; the register must still be set elsewhere within
5197 pattern_really_clobbers_lr (x
)
5202 switch (GET_CODE (x
))
5205 switch (GET_CODE (SET_DEST (x
)))
5208 return REGNO (SET_DEST (x
)) == LR_REGNUM
;
5211 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == REG
)
5212 return REGNO (XEXP (SET_DEST (x
), 0)) == LR_REGNUM
;
5214 if (GET_CODE (XEXP (SET_DEST (x
), 0)) == MEM
)
5223 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
5224 if (pattern_really_clobbers_lr (XVECEXP (x
, 0, i
)))
5229 switch (GET_CODE (XEXP (x
, 0)))
5232 return REGNO (XEXP (x
, 0)) == LR_REGNUM
;
5235 if (GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
)
5236 return REGNO (XEXP (XEXP (x
, 0), 0)) == LR_REGNUM
;
5252 function_really_clobbers_lr (first
)
5257 for (insn
= first
; insn
; insn
= next_nonnote_insn (insn
))
5259 switch (GET_CODE (insn
))
5264 case JUMP_INSN
: /* Jump insns only change the PC (and conds) */
5269 if (pattern_really_clobbers_lr (PATTERN (insn
)))
5274 /* Don't yet know how to handle those calls that are not to a
5276 if (GET_CODE (PATTERN (insn
)) != PARALLEL
)
5279 switch (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)))
5282 if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn
), 0, 0), 0), 0))
5288 if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn
),
5294 default: /* Don't recognize it, be safe */
5298 /* A call can be made (by peepholing) not to clobber lr iff it is
5299 followed by a return. There may, however, be a use insn iff
5300 we are returning the result of the call.
5301 If we run off the end of the insn chain, then that means the
5302 call was at the end of the function. Unfortunately we don't
5303 have a return insn for the peephole to recognize, so we
5304 must reject this. (Can this be fixed by adding our own insn?) */
5305 if ((next
= next_nonnote_insn (insn
)) == NULL
)
5308 /* No need to worry about lr if the call never returns */
5309 if (GET_CODE (next
) == BARRIER
)
5312 if (GET_CODE (next
) == INSN
&& GET_CODE (PATTERN (next
)) == USE
5313 && (GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
5314 && (REGNO (SET_DEST (XVECEXP (PATTERN (insn
), 0, 0)))
5315 == REGNO (XEXP (PATTERN (next
), 0))))
5316 if ((next
= next_nonnote_insn (next
)) == NULL
)
5319 if (GET_CODE (next
) == JUMP_INSN
5320 && GET_CODE (PATTERN (next
)) == RETURN
)
5329 /* We have reached the end of the chain so lr was _not_ clobbered */
5334 output_return_instruction (operand
, really_return
, reverse
)
5340 int reg
, live_regs
= 0;
5341 int volatile_func
= (optimize
> 0
5342 && TREE_THIS_VOLATILE (current_function_decl
));
5344 return_used_this_function
= 1;
5346 if (TARGET_ABORT_NORETURN
&& volatile_func
)
5349 /* If this function was declared non-returning, and we have found a tail
5350 call, then we have to trust that the called function won't return. */
5351 if (! really_return
)
5354 /* Otherwise, trap an attempted return by aborting. */
5356 ops
[1] = gen_rtx_SYMBOL_REF (Pmode
, NEED_PLT_GOT
? "abort(PLT)"
5358 assemble_external_libcall (ops
[1]);
5359 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
5363 if (current_function_calls_alloca
&& ! really_return
)
5366 for (reg
= 0; reg
<= 10; reg
++)
5367 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5370 if (flag_pic
&& regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
])
5373 if (live_regs
|| (regs_ever_live
[LR_REGNUM
] && ! lr_save_eliminated
))
5376 if (frame_pointer_needed
)
5381 if (lr_save_eliminated
|| ! regs_ever_live
[LR_REGNUM
])
5384 if (frame_pointer_needed
)
5386 reverse
? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
5389 reverse
? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
5391 for (reg
= 0; reg
<= 10; reg
++)
5392 if (regs_ever_live
[reg
]
5393 && (! call_used_regs
[reg
]
5394 || (flag_pic
&& reg
== PIC_OFFSET_TABLE_REGNUM
)))
5396 strcat (instr
, "%|");
5397 strcat (instr
, reg_names
[reg
]);
5399 strcat (instr
, ", ");
5402 if (frame_pointer_needed
)
5404 strcat (instr
, "%|");
5405 strcat (instr
, reg_names
[11]);
5406 strcat (instr
, ", ");
5407 strcat (instr
, "%|");
5408 strcat (instr
, reg_names
[13]);
5409 strcat (instr
, ", ");
5410 strcat (instr
, "%|");
5411 strcat (instr
, TARGET_INTERWORK
|| (! really_return
)
5412 ? reg_names
[LR_REGNUM
] : reg_names
[PC_REGNUM
] );
5416 strcat (instr
, "%|");
5417 if (TARGET_INTERWORK
&& really_return
)
5418 strcat (instr
, reg_names
[IP_REGNUM
]);
5420 strcat (instr
, really_return
? reg_names
[PC_REGNUM
] : reg_names
[LR_REGNUM
]);
5422 strcat (instr
, (TARGET_APCS_32
|| !really_return
) ? "}" : "}^");
5423 output_asm_insn (instr
, &operand
);
5425 if (TARGET_INTERWORK
&& really_return
)
5427 strcpy (instr
, "bx%?");
5428 strcat (instr
, reverse
? "%D0" : "%d0");
5429 strcat (instr
, "\t%|");
5430 strcat (instr
, frame_pointer_needed
? "lr" : "ip");
5432 output_asm_insn (instr
, & operand
);
5435 else if (really_return
)
5437 if (TARGET_INTERWORK
)
5438 sprintf (instr
, "bx%%?%%%s0\t%%|lr", reverse
? "D" : "d");
5440 sprintf (instr
, "mov%%?%%%s0%s\t%%|pc, %%|lr",
5441 reverse
? "D" : "d", TARGET_APCS_32
? "" : "s");
5443 output_asm_insn (instr
, & operand
);
5449 /* Return nonzero if optimizing and the current function is volatile.
5450 Such functions never return, and many memory cycles can be saved
5451 by not storing register values that will never be needed again.
5452 This optimization was added to speed up context switching in a
5453 kernel application. */
5456 arm_volatile_func ()
5458 return (optimize
> 0 && TREE_THIS_VOLATILE (current_function_decl
));
5461 /* Write the function name into the code section, directly preceding
5462 the function prologue.
5464 Code will be output similar to this:
5466 .ascii "arm_poke_function_name", 0
5469 .word 0xff000000 + (t1 - t0)
5470 arm_poke_function_name
5472 stmfd sp!, {fp, ip, lr, pc}
5475 When performing a stack backtrace, code can inspect the value
5476 of 'pc' stored at 'fp' + 0. If the trace function then looks
5477 at location pc - 12 and the top 8 bits are set, then we know
5478 that there is a function name embedded immediately preceding this
5479 location and has length ((pc[-3]) & 0xff000000).
5481 We assume that pc is declared as a pointer to an unsigned long.
5483 It is of no benefit to output the function name if we are assembling
5484 a leaf function. These function types will not contain a stack
5485 backtrace structure, therefore it is not possible to determine the
5489 arm_poke_function_name (stream
, name
)
5493 unsigned long alignlength
;
5494 unsigned long length
;
5497 length
= strlen (name
);
5498 alignlength
= NUM_INTS (length
+ 1);
5500 ASM_OUTPUT_ASCII (stream
, name
, length
+ 1);
5501 ASM_OUTPUT_ALIGN (stream
, 2);
5502 x
= GEN_INT (0xff000000UL
+ alignlength
);
5503 ASM_OUTPUT_INT (stream
, x
);
5506 /* The amount of stack adjustment that happens here, in output_return and in
5507 output_epilogue must be exactly the same as was calculated during reload,
5508 or things will point to the wrong place. The only time we can safely
5509 ignore this constraint is when a function has no arguments on the stack,
5510 no stack frame requirement and no live registers execpt for `lr'. If we
5511 can guarantee that by making all function calls into tail calls and that
5512 lr is not clobbered in any other way, then there is no need to push lr
5516 output_func_prologue (f
, frame_size
)
5520 int reg
, live_regs_mask
= 0;
5521 int volatile_func
= (optimize
> 0
5522 && TREE_THIS_VOLATILE (current_function_decl
));
5524 /* Nonzero if we must stuff some register arguments onto the stack as if
5525 they were passed there. */
5526 int store_arg_regs
= 0;
5528 if (arm_ccfsm_state
|| arm_target_insn
)
5529 abort (); /* Sanity check */
5531 if (arm_naked_function_p (current_function_decl
))
5534 return_used_this_function
= 0;
5535 lr_save_eliminated
= 0;
5537 fprintf (f
, "\t%s args = %d, pretend = %d, frame = %d\n",
5538 ASM_COMMENT_START
, current_function_args_size
,
5539 current_function_pretend_args_size
, frame_size
);
5540 fprintf (f
, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
5541 ASM_COMMENT_START
, frame_pointer_needed
,
5542 current_function_anonymous_args
);
5545 fprintf (f
, "\t%s Volatile function.\n", ASM_COMMENT_START
);
5547 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
5550 for (reg
= 0; reg
<= 10; reg
++)
5551 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5552 live_regs_mask
|= (1 << reg
);
5554 if (flag_pic
&& regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
])
5555 live_regs_mask
|= (1 << PIC_OFFSET_TABLE_REGNUM
);
5557 if (frame_pointer_needed
)
5558 live_regs_mask
|= 0xD800;
5559 else if (regs_ever_live
[LR_REGNUM
])
5561 if (! current_function_args_size
5562 && ! function_really_clobbers_lr (get_insns ()))
5563 lr_save_eliminated
= 1;
5565 live_regs_mask
|= 1 << LR_REGNUM
;
5570 /* if a di mode load/store multiple is used, and the base register
5571 is r3, then r4 can become an ever live register without lr
5572 doing so, in this case we need to push lr as well, or we
5573 will fail to get a proper return. */
5575 live_regs_mask
|= 1 << LR_REGNUM
;
5576 lr_save_eliminated
= 0;
5580 if (lr_save_eliminated
)
5581 fprintf (f
,"\t%s I don't think this function clobbers lr\n",
5584 #ifdef AOF_ASSEMBLER
5586 asm_fprintf (f
, "\tmov\t%R%s, %R%s\n", reg_names
[IP_REGNUM
],
5587 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
5593 output_func_epilogue (f
, frame_size
)
5597 int reg
, live_regs_mask
= 0;
5598 /* If we need this then it will always be at least this much */
5599 int floats_offset
= 12;
5601 int volatile_func
= (optimize
> 0
5602 && TREE_THIS_VOLATILE (current_function_decl
));
5604 if (use_return_insn (FALSE
) && return_used_this_function
)
5606 if ((frame_size
+ current_function_outgoing_args_size
) != 0
5607 && !(frame_pointer_needed
&& TARGET_APCS
))
5612 /* Naked functions don't have epilogues. */
5613 if (arm_naked_function_p (current_function_decl
))
5616 /* A volatile function should never return. Call abort. */
5617 if (TARGET_ABORT_NORETURN
&& volatile_func
)
5620 op
= gen_rtx_SYMBOL_REF (Pmode
, NEED_PLT_GOT
? "abort(PLT)" : "abort");
5621 assemble_external_libcall (op
);
5622 output_asm_insn ("bl\t%a0", &op
);
5626 for (reg
= 0; reg
<= 10; reg
++)
5627 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5629 live_regs_mask
|= (1 << reg
);
5633 if (flag_pic
&& regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
])
5635 live_regs_mask
|= (1 << PIC_OFFSET_TABLE_REGNUM
);
5639 if (frame_pointer_needed
)
5641 if (arm_fpu_arch
== FP_SOFT2
)
5643 for (reg
= 23; reg
> 15; reg
--)
5644 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5646 floats_offset
+= 12;
5647 asm_fprintf (f
, "\tldfe\t%R%s, [%R%s, #-%d]\n",
5648 reg_names
[reg
], reg_names
[FP_REGNUM
], floats_offset
);
5655 for (reg
= 23; reg
> 15; reg
--)
5657 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5659 floats_offset
+= 12;
5661 /* We can't unstack more than four registers at once */
5662 if (start_reg
- reg
== 3)
5664 asm_fprintf (f
, "\tlfm\t%R%s, 4, [%R%s, #-%d]\n",
5665 reg_names
[reg
], reg_names
[FP_REGNUM
],
5667 start_reg
= reg
- 1;
5672 if (reg
!= start_reg
)
5673 asm_fprintf (f
, "\tlfm\t%R%s, %d, [%R%s, #-%d]\n",
5674 reg_names
[reg
+ 1], start_reg
- reg
,
5675 reg_names
[FP_REGNUM
], floats_offset
);
5676 start_reg
= reg
- 1;
5680 /* Just in case the last register checked also needs unstacking. */
5681 if (reg
!= start_reg
)
5682 asm_fprintf (f
, "\tlfm\t%R%s, %d, [%R%s, #-%d]\n",
5683 reg_names
[reg
+ 1], start_reg
- reg
,
5684 reg_names
[FP_REGNUM
], floats_offset
);
5687 if (TARGET_INTERWORK
)
5689 live_regs_mask
|= 0x6800;
5690 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
, FALSE
);
5691 asm_fprintf (f
, "\tbx\t%R%s\n", reg_names
[LR_REGNUM
]);
5695 live_regs_mask
|= 0xA800;
5696 print_multi_reg (f
, "ldmea\t%sfp", live_regs_mask
,
5697 TARGET_APCS_32
? FALSE
: TRUE
);
5702 /* Restore stack pointer if necessary. */
5703 if (frame_size
+ current_function_outgoing_args_size
!= 0)
5705 operands
[0] = operands
[1] = stack_pointer_rtx
;
5706 operands
[2] = GEN_INT (frame_size
5707 + current_function_outgoing_args_size
);
5708 output_add_immediate (operands
);
5711 if (arm_fpu_arch
== FP_SOFT2
)
5713 for (reg
= 16; reg
< 24; reg
++)
5714 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5715 asm_fprintf (f
, "\tldfe\t%R%s, [%R%s], #12\n",
5716 reg_names
[reg
], reg_names
[SP_REGNUM
]);
5722 for (reg
= 16; reg
< 24; reg
++)
5724 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5726 if (reg
- start_reg
== 3)
5728 asm_fprintf (f
, "\tlfmfd\t%R%s, 4, [%R%s]!\n",
5729 reg_names
[start_reg
], reg_names
[SP_REGNUM
]);
5730 start_reg
= reg
+ 1;
5735 if (reg
!= start_reg
)
5736 asm_fprintf (f
, "\tlfmfd\t%R%s, %d, [%R%s]!\n",
5737 reg_names
[start_reg
], reg
- start_reg
,
5738 reg_names
[SP_REGNUM
]);
5740 start_reg
= reg
+ 1;
5744 /* Just in case the last register checked also needs unstacking. */
5745 if (reg
!= start_reg
)
5746 asm_fprintf (f
, "\tlfmfd\t%R%s, %d, [%R%s]!\n",
5747 reg_names
[start_reg
], reg
- start_reg
,
5748 reg_names
[SP_REGNUM
]);
5751 if (current_function_pretend_args_size
== 0 && regs_ever_live
[LR_REGNUM
])
5753 if (TARGET_INTERWORK
)
5755 if (! lr_save_eliminated
)
5756 live_regs_mask
|= 1 << LR_REGNUM
;
5758 if (live_regs_mask
!= 0)
5759 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
5761 asm_fprintf (f
, "\tbx\t%R%s\n", reg_names
[LR_REGNUM
]);
5763 else if (lr_save_eliminated
)
5764 asm_fprintf (f
, "\tmov%c\t%r, %r\n",
5765 TARGET_APCS_32
? ' ' : 's',
5766 reg_names
[PC_REGNUM
], reg_names
[LR_REGNUM
]);
5768 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
| 0x8000,
5769 TARGET_APCS_32
? FALSE
: TRUE
);
5773 if (live_regs_mask
|| regs_ever_live
[LR_REGNUM
])
5775 /* Restore the integer regs, and the return address into lr */
5776 if (! lr_save_eliminated
)
5777 live_regs_mask
|= 1 << LR_REGNUM
;
5779 if (live_regs_mask
!= 0)
5780 print_multi_reg (f
, "ldmfd\t%ssp!", live_regs_mask
, FALSE
);
5783 if (current_function_pretend_args_size
)
5785 /* Unwind the pre-pushed regs */
5786 operands
[0] = operands
[1] = stack_pointer_rtx
;
5787 operands
[2] = GEN_INT (current_function_pretend_args_size
);
5788 output_add_immediate (operands
);
5790 /* And finally, go home */
5791 if (TARGET_INTERWORK
)
5792 asm_fprintf (f
, "\tbx\t%R%s\n", reg_names
[LR_REGNUM
]);
5793 else if (TARGET_APCS_32
)
5794 asm_fprintf (f
, "\tmov\t%R%s, %R%s\n", reg_names
[PC_REGNUM
], reg_names
[LR_REGNUM
]);
5796 asm_fprintf (f
, "\tmovs\t%R%s, %R%s\n", reg_names
[PC_REGNUM
], reg_names
[LR_REGNUM
]);
5802 /* Reset the ARM-specific per-function variables. */
5803 current_function_anonymous_args
= 0;
5804 after_arm_reorg
= 0;
5808 emit_multi_reg_push (mask
)
5815 for (i
= 0; i
< 16; i
++)
5816 if (mask
& (1 << i
))
5819 if (num_regs
== 0 || num_regs
> 16)
5822 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (num_regs
));
5824 for (i
= 0; i
< 16; i
++)
5826 if (mask
& (1 << i
))
5829 = gen_rtx_SET (VOIDmode
,
5830 gen_rtx_MEM (BLKmode
,
5831 gen_rtx_PRE_DEC (BLKmode
,
5832 stack_pointer_rtx
)),
5833 gen_rtx_UNSPEC (BLKmode
,
5835 gen_rtx_REG (SImode
, i
)),
5841 for (j
= 1, i
++; j
< num_regs
; i
++)
5843 if (mask
& (1 << i
))
5846 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, i
));
5855 emit_sfm (base_reg
, count
)
5862 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (count
));
5865 = gen_rtx_SET (VOIDmode
,
5866 gen_rtx_MEM (BLKmode
,
5867 gen_rtx_PRE_DEC (BLKmode
, stack_pointer_rtx
)),
5868 gen_rtx_UNSPEC (BLKmode
,
5869 gen_rtvec (1, gen_rtx_REG (XFmode
,
5872 for (i
= 1; i
< count
; i
++)
5873 XVECEXP (par
, 0, i
) = gen_rtx_USE (VOIDmode
,
5874 gen_rtx_REG (XFmode
, base_reg
++));
5880 arm_expand_prologue ()
5883 rtx amount
= GEN_INT (-(get_frame_size ()
5884 + current_function_outgoing_args_size
));
5885 int live_regs_mask
= 0;
5886 int store_arg_regs
= 0;
5887 int volatile_func
= (optimize
> 0
5888 && TREE_THIS_VOLATILE (current_function_decl
));
5890 /* Naked functions don't have prologues. */
5891 if (arm_naked_function_p (current_function_decl
))
5894 if (current_function_anonymous_args
&& current_function_pretend_args_size
)
5897 if (! volatile_func
)
5899 for (reg
= 0; reg
<= 10; reg
++)
5900 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5901 live_regs_mask
|= 1 << reg
;
5903 if (flag_pic
&& regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
])
5904 live_regs_mask
|= 1 << PIC_OFFSET_TABLE_REGNUM
;
5906 if (regs_ever_live
[LR_REGNUM
])
5907 live_regs_mask
|= 1 << LR_REGNUM
;
5910 if (frame_pointer_needed
)
5912 live_regs_mask
|= 0xD800;
5913 emit_insn (gen_movsi (gen_rtx_REG (SImode
, IP_REGNUM
),
5914 stack_pointer_rtx
));
5917 if (current_function_pretend_args_size
)
5920 emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size
/ 4))
5923 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
5924 GEN_INT (-current_function_pretend_args_size
)));
5929 /* If we have to push any regs, then we must push lr as well, or
5930 we won't get a proper return. */
5931 live_regs_mask
|= 1 << LR_REGNUM
;
5932 emit_multi_reg_push (live_regs_mask
);
5935 /* For now the integer regs are still pushed in output_func_epilogue (). */
5937 if (! volatile_func
)
5939 if (arm_fpu_arch
== FP_SOFT2
)
5941 for (reg
= 23; reg
> 15; reg
--)
5942 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5943 emit_insn (gen_rtx_SET
5945 gen_rtx_MEM (XFmode
,
5946 gen_rtx_PRE_DEC (XFmode
,
5947 stack_pointer_rtx
)),
5948 gen_rtx_REG (XFmode
, reg
)));
5954 for (reg
= 23; reg
> 15; reg
--)
5956 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
5958 if (start_reg
- reg
== 3)
5961 start_reg
= reg
- 1;
5966 if (start_reg
!= reg
)
5967 emit_sfm (reg
+ 1, start_reg
- reg
);
5968 start_reg
= reg
- 1;
5972 if (start_reg
!= reg
)
5973 emit_sfm (reg
+ 1, start_reg
- reg
);
5977 if (frame_pointer_needed
)
5978 emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, gen_rtx_REG (SImode
, IP_REGNUM
),
5980 (-(4 + current_function_pretend_args_size
)))));
5982 if (amount
!= const0_rtx
)
5984 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, amount
));
5985 emit_insn (gen_rtx_CLOBBER (VOIDmode
,
5986 gen_rtx_MEM (BLKmode
, stack_pointer_rtx
)));
5989 /* If we are profiling, make sure no instructions are scheduled before
5990 the call to mcount. Similarly if the user has requested no
5991 scheduling in the prolog. */
5992 if (profile_flag
|| profile_block_flag
|| TARGET_NO_SCHED_PRO
)
5993 emit_insn (gen_blockage ());
5997 /* If CODE is 'd', then the X is a condition operand and the instruction
5998 should only be executed if the condition is true.
5999 if CODE is 'D', then the X is a condition operand and the instruction
6000 should only be executed if the condition is false: however, if the mode
6001 of the comparison is CCFPEmode, then always execute the instruction -- we
6002 do this because in these circumstances !GE does not necessarily imply LT;
6003 in these cases the instruction pattern will take care to make sure that
6004 an instruction containing %d will follow, thereby undoing the effects of
6005 doing this instruction unconditionally.
6006 If CODE is 'N' then X is a floating point operand that must be negated
6008 If CODE is 'B' then output a bitwise inverted value of X (a const int).
6009 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
6012 arm_print_operand (stream
, x
, code
)
6020 fputs (ASM_COMMENT_START
, stream
);
6024 fputs (REGISTER_PREFIX
, stream
);
6028 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
6029 fputs (arm_condition_codes
[arm_current_cc
], stream
);
6035 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
6036 r
= REAL_VALUE_NEGATE (r
);
6037 fprintf (stream
, "%s", fp_const_from_val (&r
));
6042 if (GET_CODE (x
) == CONST_INT
)
6045 val
= ARM_SIGN_EXTEND (~ INTVAL (x
));
6046 fprintf (stream
, HOST_WIDE_INT_PRINT_DEC
, val
);
6051 output_addr_const (stream
, x
);
6056 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
6060 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
6066 char * shift
= shift_op (x
, & val
);
6070 fprintf (stream
, ", %s ", shift_op (x
, & val
));
6072 arm_print_operand (stream
, XEXP (x
, 1), 0);
6075 fputc ('#', stream
);
6076 fprintf (stream
, HOST_WIDE_INT_PRINT_DEC
, val
);
6085 fputs (REGISTER_PREFIX
, stream
);
6086 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0)], stream
);
6092 fputs (REGISTER_PREFIX
, stream
);
6093 fputs (reg_names
[REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1)], stream
);
6097 fputs (REGISTER_PREFIX
, stream
);
6098 if (GET_CODE (XEXP (x
, 0)) == REG
)
6099 fputs (reg_names
[REGNO (XEXP (x
, 0))], stream
);
6101 fputs (reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))], stream
);
6105 asm_fprintf (stream
, "{%R%s-%R%s}",
6106 reg_names
[REGNO (x
)],
6107 reg_names
[REGNO (x
) + NUM_INTS (GET_MODE (x
)) - 1]);
6112 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
6118 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
6119 (get_arm_condition_code (x
))],
6127 if (GET_CODE (x
) == REG
)
6129 fputs (REGISTER_PREFIX
, stream
);
6130 fputs (reg_names
[REGNO (x
)], stream
);
6132 else if (GET_CODE (x
) == MEM
)
6134 output_memory_reference_mode
= GET_MODE (x
);
6135 output_address (XEXP (x
, 0));
6137 else if (GET_CODE (x
) == CONST_DOUBLE
)
6138 fprintf (stream
, "#%s", fp_immediate_constant (x
));
6139 else if (GET_CODE (x
) == NEG
)
6140 abort (); /* This should never happen now. */
6143 fputc ('#', stream
);
6144 output_addr_const (stream
, x
);
6149 /* A finite state machine takes care of noticing whether or not instructions
6150 can be conditionally executed, and thus decrease execution time and code
6151 size by deleting branch instructions. The fsm is controlled by
6152 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
6154 /* The state of the fsm controlling condition codes are:
6155 0: normal, do nothing special
6156 1: make ASM_OUTPUT_OPCODE not output this instruction
6157 2: make ASM_OUTPUT_OPCODE not output this instruction
6158 3: make instructions conditional
6159 4: make instructions conditional
6161 State transitions (state->state by whom under condition):
6162 0 -> 1 final_prescan_insn if the `target' is a label
6163 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
6164 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
6165 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
6166 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
6167 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
6168 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
6169 (the target insn is arm_target_insn).
6171 If the jump clobbers the conditions then we use states 2 and 4.
6173 A similar thing can be done with conditional return insns.
6175 XXX In case the `target' is an unconditional branch, this conditionalising
6176 of the instructions always reduces code size, but not always execution
6177 time. But then, I want to reduce the code size to somewhere near what
6178 /bin/cc produces. */
6180 /* Returns the index of the ARM condition code string in
6181 `arm_condition_codes'. COMPARISON should be an rtx like
6182 `(eq (...) (...))'. */
6184 static enum arm_cond_code
6185 get_arm_condition_code (comparison
)
6188 enum machine_mode mode
= GET_MODE (XEXP (comparison
, 0));
6190 register enum rtx_code comp_code
= GET_CODE (comparison
);
6192 if (GET_MODE_CLASS (mode
) != MODE_CC
)
6193 mode
= SELECT_CC_MODE (comp_code
, XEXP (comparison
, 0),
6194 XEXP (comparison
, 1));
6198 case CC_DNEmode
: code
= ARM_NE
; goto dominance
;
6199 case CC_DEQmode
: code
= ARM_EQ
; goto dominance
;
6200 case CC_DGEmode
: code
= ARM_GE
; goto dominance
;
6201 case CC_DGTmode
: code
= ARM_GT
; goto dominance
;
6202 case CC_DLEmode
: code
= ARM_LE
; goto dominance
;
6203 case CC_DLTmode
: code
= ARM_LT
; goto dominance
;
6204 case CC_DGEUmode
: code
= ARM_CS
; goto dominance
;
6205 case CC_DGTUmode
: code
= ARM_HI
; goto dominance
;
6206 case CC_DLEUmode
: code
= ARM_LS
; goto dominance
;
6207 case CC_DLTUmode
: code
= ARM_CC
;
6210 if (comp_code
!= EQ
&& comp_code
!= NE
)
6213 if (comp_code
== EQ
)
6214 return ARM_INVERSE_CONDITION_CODE (code
);
6220 case NE
: return ARM_NE
;
6221 case EQ
: return ARM_EQ
;
6222 case GE
: return ARM_PL
;
6223 case LT
: return ARM_MI
;
6231 case NE
: return ARM_NE
;
6232 case EQ
: return ARM_EQ
;
6239 case GE
: return ARM_GE
;
6240 case GT
: return ARM_GT
;
6241 case LE
: return ARM_LS
;
6242 case LT
: return ARM_MI
;
6249 case NE
: return ARM_NE
;
6250 case EQ
: return ARM_EQ
;
6251 case GE
: return ARM_LE
;
6252 case GT
: return ARM_LT
;
6253 case LE
: return ARM_GE
;
6254 case LT
: return ARM_GT
;
6255 case GEU
: return ARM_LS
;
6256 case GTU
: return ARM_CC
;
6257 case LEU
: return ARM_CS
;
6258 case LTU
: return ARM_HI
;
6265 case LTU
: return ARM_CS
;
6266 case GEU
: return ARM_CC
;
6273 case NE
: return ARM_NE
;
6274 case EQ
: return ARM_EQ
;
6275 case GE
: return ARM_GE
;
6276 case GT
: return ARM_GT
;
6277 case LE
: return ARM_LE
;
6278 case LT
: return ARM_LT
;
6279 case GEU
: return ARM_CS
;
6280 case GTU
: return ARM_HI
;
6281 case LEU
: return ARM_LS
;
6282 case LTU
: return ARM_CC
;
6294 arm_final_prescan_insn (insn
)
6297 /* BODY will hold the body of INSN. */
6298 register rtx body
= PATTERN (insn
);
6300 /* This will be 1 if trying to repeat the trick, and things need to be
6301 reversed if it appears to fail. */
6304 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
6305 taken are clobbered, even if the rtl suggests otherwise. It also
6306 means that we have to grub around within the jump expression to find
6307 out what the conditions are when the jump isn't taken. */
6308 int jump_clobbers
= 0;
6310 /* If we start with a return insn, we only succeed if we find another one. */
6311 int seeking_return
= 0;
6313 /* START_INSN will hold the insn from where we start looking. This is the
6314 first insn after the following code_label if REVERSE is true. */
6315 rtx start_insn
= insn
;
6317 /* If in state 4, check if the target branch is reached, in order to
6318 change back to state 0. */
6319 if (arm_ccfsm_state
== 4)
6321 if (insn
== arm_target_insn
)
6323 arm_target_insn
= NULL
;
6324 arm_ccfsm_state
= 0;
6329 /* If in state 3, it is possible to repeat the trick, if this insn is an
6330 unconditional branch to a label, and immediately following this branch
6331 is the previous target label which is only used once, and the label this
6332 branch jumps to is not too far off. */
6333 if (arm_ccfsm_state
== 3)
6335 if (simplejump_p (insn
))
6337 start_insn
= next_nonnote_insn (start_insn
);
6338 if (GET_CODE (start_insn
) == BARRIER
)
6340 /* XXX Isn't this always a barrier? */
6341 start_insn
= next_nonnote_insn (start_insn
);
6343 if (GET_CODE (start_insn
) == CODE_LABEL
6344 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
6345 && LABEL_NUSES (start_insn
) == 1)
6350 else if (GET_CODE (body
) == RETURN
)
6352 start_insn
= next_nonnote_insn (start_insn
);
6353 if (GET_CODE (start_insn
) == BARRIER
)
6354 start_insn
= next_nonnote_insn (start_insn
);
6355 if (GET_CODE (start_insn
) == CODE_LABEL
6356 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
6357 && LABEL_NUSES (start_insn
) == 1)
6369 if (arm_ccfsm_state
!= 0 && !reverse
)
6371 if (GET_CODE (insn
) != JUMP_INSN
)
6374 /* This jump might be paralleled with a clobber of the condition codes
6375 the jump should always come first */
6376 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
6377 body
= XVECEXP (body
, 0, 0);
6380 /* If this is a conditional return then we don't want to know */
6381 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
6382 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
6383 && (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
6384 || GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
))
6389 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
6390 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
6393 int fail
= FALSE
, succeed
= FALSE
;
6394 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
6395 int then_not_else
= TRUE
;
6396 rtx this_insn
= start_insn
, label
= 0;
6398 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
6400 /* The code below is wrong for these, and I haven't time to
6401 fix it now. So we just do the safe thing and return. This
6402 whole function needs re-writing anyway. */
6407 /* Register the insn jumped to. */
6410 if (!seeking_return
)
6411 label
= XEXP (SET_SRC (body
), 0);
6413 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
6414 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
6415 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
6417 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
6418 then_not_else
= FALSE
;
6420 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
6422 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
6425 then_not_else
= FALSE
;
6430 /* See how many insns this branch skips, and what kind of insns. If all
6431 insns are okay, and the label or unconditional branch to the same
6432 label is not too far away, succeed. */
6433 for (insns_skipped
= 0;
6434 !fail
&& !succeed
&& insns_skipped
++ < max_insns_skipped
;)
6438 this_insn
= next_nonnote_insn (this_insn
);
6442 switch (GET_CODE (this_insn
))
6445 /* Succeed if it is the target label, otherwise fail since
6446 control falls in from somewhere else. */
6447 if (this_insn
== label
)
6451 arm_ccfsm_state
= 2;
6452 this_insn
= next_nonnote_insn (this_insn
);
6455 arm_ccfsm_state
= 1;
6463 /* Succeed if the following insn is the target label.
6465 If return insns are used then the last insn in a function
6466 will be a barrier. */
6467 this_insn
= next_nonnote_insn (this_insn
);
6468 if (this_insn
&& this_insn
== label
)
6472 arm_ccfsm_state
= 2;
6473 this_insn
= next_nonnote_insn (this_insn
);
6476 arm_ccfsm_state
= 1;
6484 /* If using 32-bit addresses the cc is not preserved over
6488 /* Succeed if the following insn is the target label,
6489 or if the following two insns are a barrier and
6490 the target label. */
6491 this_insn
= next_nonnote_insn (this_insn
);
6492 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
6493 this_insn
= next_nonnote_insn (this_insn
);
6495 if (this_insn
&& this_insn
== label
6496 && insns_skipped
< max_insns_skipped
)
6500 arm_ccfsm_state
= 2;
6501 this_insn
= next_nonnote_insn (this_insn
);
6504 arm_ccfsm_state
= 1;
6513 /* If this is an unconditional branch to the same label, succeed.
6514 If it is to another label, do nothing. If it is conditional,
6516 /* XXX Probably, the tests for SET and the PC are unnecessary. */
6518 scanbody
= PATTERN (this_insn
);
6519 if (GET_CODE (scanbody
) == SET
6520 && GET_CODE (SET_DEST (scanbody
)) == PC
)
6522 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
6523 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
6525 arm_ccfsm_state
= 2;
6528 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
6531 /* Fail if a conditional return is undesirable (eg on a
6532 StrongARM), but still allow this if optimizing for size. */
6533 else if (GET_CODE (scanbody
) == RETURN
6534 && ! use_return_insn (TRUE
)
6537 else if (GET_CODE (scanbody
) == RETURN
6540 arm_ccfsm_state
= 2;
6543 else if (GET_CODE (scanbody
) == PARALLEL
)
6545 switch (get_attr_conds (this_insn
))
6557 /* Instructions using or affecting the condition codes make it
6559 scanbody
= PATTERN (this_insn
);
6560 if (! (GET_CODE (scanbody
) == SET
6561 || GET_CODE (scanbody
) == PARALLEL
)
6562 || get_attr_conds (this_insn
) != CONDS_NOCOND
)
6572 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
6573 arm_target_label
= CODE_LABEL_NUMBER (label
);
6574 else if (seeking_return
|| arm_ccfsm_state
== 2)
6576 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
6578 this_insn
= next_nonnote_insn (this_insn
);
6579 if (this_insn
&& (GET_CODE (this_insn
) == BARRIER
6580 || GET_CODE (this_insn
) == CODE_LABEL
))
6585 /* Oh, dear! we ran off the end.. give up */
6586 recog (PATTERN (insn
), insn
, NULL_PTR
);
6587 arm_ccfsm_state
= 0;
6588 arm_target_insn
= NULL
;
6591 arm_target_insn
= this_insn
;
6600 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
6602 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
6603 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
6604 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
6605 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
6609 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
6612 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
6616 if (reverse
|| then_not_else
)
6617 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
6619 /* restore recog_operand (getting the attributes of other insns can
6620 destroy this array, but final.c assumes that it remains intact
6621 across this call; since the insn has been recognized already we
6622 call recog direct). */
6623 recog (PATTERN (insn
), insn
, NULL_PTR
);
6627 #ifdef AOF_ASSEMBLER
6628 /* Special functions only needed when producing AOF syntax assembler. */
6630 rtx aof_pic_label
= NULL_RTX
;
6633 struct pic_chain
* next
;
6637 static struct pic_chain
* aof_pic_chain
= NULL
;
6643 struct pic_chain
** chainp
;
6646 if (aof_pic_label
== NULL_RTX
)
6648 /* This needs to persist throughout the compilation. */
6649 end_temporary_allocation ();
6650 aof_pic_label
= gen_rtx_SYMBOL_REF (Pmode
, "x$adcons");
6651 resume_temporary_allocation ();
6654 for (offset
= 0, chainp
= &aof_pic_chain
; *chainp
;
6655 offset
+= 4, chainp
= &(*chainp
)->next
)
6656 if ((*chainp
)->symname
== XSTR (x
, 0))
6657 return plus_constant (aof_pic_label
, offset
);
6659 *chainp
= (struct pic_chain
*) xmalloc (sizeof (struct pic_chain
));
6660 (*chainp
)->next
= NULL
;
6661 (*chainp
)->symname
= XSTR (x
, 0);
6662 return plus_constant (aof_pic_label
, offset
);
6666 aof_dump_pic_table (f
)
6669 struct pic_chain
* chain
;
6671 if (aof_pic_chain
== NULL
)
6674 asm_fprintf (f
, "\tAREA |%R%s$$adcons|, BASED %R%s\n",
6675 reg_names
[PIC_OFFSET_TABLE_REGNUM
],
6676 reg_names
[PIC_OFFSET_TABLE_REGNUM
]);
6677 fputs ("|x$adcons|\n", f
);
6679 for (chain
= aof_pic_chain
; chain
; chain
= chain
->next
)
6681 fputs ("\tDCD\t", f
);
6682 assemble_name (f
, chain
->symname
);
6687 int arm_text_section_count
= 1;
6692 static char buf
[100];
6693 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
6694 arm_text_section_count
++);
6696 strcat (buf
, ", PIC, REENTRANT");
static int arm_data_section_count = 1;

/* Return the AOF AREA directive introducing a fresh data section.
   NOTE(review): the function header was missing from the garbled source;
   the conventional `char * aof_data_section ()' signature is assumed —
   confirm against the original arm.c and aof.h.  */
char *
aof_data_section ()
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
6710 /* The AOF assembler is religiously strict about declarations of
6711 imported and exported symbols, so that it is impossible to declare
6712 a function as imported near the beginning of the file, and then to
6713 export it later on. It is, however, possible to delay the decision
6714 until all the functions in the file have been compiled. To get
6715 around this, we maintain a list of the imports and exports, and
6716 delete from it any that are subsequently defined. At the end of
6717 compilation we spit the remainder of the list out before the END
6722 struct import
* next
;
6726 static struct import
* imports_list
= NULL
;
6729 aof_add_import (name
)
6732 struct import
* new;
6734 for (new = imports_list
; new; new = new->next
)
6735 if (new->name
== name
)
6738 new = (struct import
*) xmalloc (sizeof (struct import
));
6739 new->next
= imports_list
;
6745 aof_delete_import (name
)
6748 struct import
** old
;
6750 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
6752 if ((*old
)->name
== name
)
6754 *old
= (*old
)->next
;
6760 int arm_main_function
= 0;
6763 aof_dump_imports (f
)
6766 /* The AOF assembler needs this to cause the startup code to be extracted
6767 from the library. Brining in __main causes the whole thing to work
6769 if (arm_main_function
)
6772 fputs ("\tIMPORT __main\n", f
);
6773 fputs ("\tDCD __main\n", f
);
6776 /* Now dump the remaining imports. */
6777 while (imports_list
)
6779 fprintf (f
, "\tIMPORT\t");
6780 assemble_name (f
, imports_list
->name
);
6782 imports_list
= imports_list
->next
;
6785 #endif /* AOF_ASSEMBLER */