/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "coretypes.h"
28 #include "hard-reg-set.h"
29 #include "insn-config.h"
30 #include "conditions.h"
32 #include "insn-attr.h"
34 #include "diagnostic-core.h"
39 #include "target-def.h"
43 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
44 #include "sched-int.h"
45 #include "insn-codes.h"
50 enum reg_class regno_reg_class
[] =
52 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
53 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
54 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
55 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
56 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
57 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
62 /* The minimum number of integer registers that we want to save with the
63 movem instruction. Using two movel instructions instead of a single
64 moveml is about 15% faster for the 68020 and 68030 at no expense in
66 #define MIN_MOVEM_REGS 3
68 /* The minimum number of floating point registers that we want to save
69 with the fmovem instruction. */
70 #define MIN_FMOVEM_REGS 1
72 /* Structure describing stack frame layout. */
75 /* Stack pointer to frame pointer offset. */
78 /* Offset of FPU registers. */
79 HOST_WIDE_INT foffset
;
81 /* Frame size in bytes (rounded up). */
84 /* Data and address register. */
86 unsigned int reg_mask
;
90 unsigned int fpu_mask
;
92 /* Offsets relative to ARG_POINTER. */
93 HOST_WIDE_INT frame_pointer_offset
;
94 HOST_WIDE_INT stack_pointer_offset
;
96 /* Function which the above information refers to. */
100 /* Current frame information calculated by m68k_compute_frame_layout(). */
101 static struct m68k_frame current_frame
;
103 /* Structure describing an m68k address.
105 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
106 with null fields evaluating to 0. Here:
108 - BASE satisfies m68k_legitimate_base_reg_p
109 - INDEX satisfies m68k_legitimate_index_reg_p
110 - OFFSET satisfies m68k_legitimate_constant_address_p
112 INDEX is either HImode or SImode. The other fields are SImode.
114 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
115 the address is (BASE)+. */
116 struct m68k_address
{
124 static int m68k_sched_adjust_cost (rtx
, rtx
, rtx
, int);
125 static int m68k_sched_issue_rate (void);
126 static int m68k_sched_variable_issue (FILE *, int, rtx
, int);
127 static void m68k_sched_md_init_global (FILE *, int, int);
128 static void m68k_sched_md_finish_global (FILE *, int);
129 static void m68k_sched_md_init (FILE *, int, int);
130 static void m68k_sched_dfa_pre_advance_cycle (void);
131 static void m68k_sched_dfa_post_advance_cycle (void);
132 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
134 static bool m68k_can_eliminate (const int, const int);
135 static void m68k_conditional_register_usage (void);
136 static bool m68k_legitimate_address_p (enum machine_mode
, rtx
, bool);
137 static void m68k_option_override (void);
138 static void m68k_override_options_after_change (void);
139 static rtx
find_addr_reg (rtx
);
140 static const char *singlemove_string (rtx
*);
141 static void m68k_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
142 HOST_WIDE_INT
, tree
);
143 static rtx
m68k_struct_value_rtx (tree
, int);
144 static tree
m68k_handle_fndecl_attribute (tree
*node
, tree name
,
145 tree args
, int flags
,
147 static void m68k_compute_frame_layout (void);
148 static bool m68k_save_reg (unsigned int regno
, bool interrupt_handler
);
149 static bool m68k_ok_for_sibcall_p (tree
, tree
);
150 static bool m68k_tls_symbol_p (rtx
);
151 static rtx
m68k_legitimize_address (rtx
, rtx
, enum machine_mode
);
152 static bool m68k_rtx_costs (rtx
, int, int, int, int *, bool);
153 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
154 static bool m68k_return_in_memory (const_tree
, const_tree
);
156 static void m68k_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
157 static void m68k_trampoline_init (rtx
, tree
, rtx
);
158 static int m68k_return_pops_args (tree
, tree
, int);
159 static rtx
m68k_delegitimize_address (rtx
);
160 static void m68k_function_arg_advance (cumulative_args_t
, enum machine_mode
,
162 static rtx
m68k_function_arg (cumulative_args_t
, enum machine_mode
,
164 static bool m68k_cannot_force_const_mem (enum machine_mode mode
, rtx x
);
165 static bool m68k_output_addr_const_extra (FILE *, rtx
);
166 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED
;
168 /* Initialize the GCC target structure. */
170 #if INT_OP_GROUP == INT_OP_DOT_WORD
171 #undef TARGET_ASM_ALIGNED_HI_OP
172 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
175 #if INT_OP_GROUP == INT_OP_NO_DOT
176 #undef TARGET_ASM_BYTE_OP
177 #define TARGET_ASM_BYTE_OP "\tbyte\t"
178 #undef TARGET_ASM_ALIGNED_HI_OP
179 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
180 #undef TARGET_ASM_ALIGNED_SI_OP
181 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
184 #if INT_OP_GROUP == INT_OP_DC
185 #undef TARGET_ASM_BYTE_OP
186 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
187 #undef TARGET_ASM_ALIGNED_HI_OP
188 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
189 #undef TARGET_ASM_ALIGNED_SI_OP
190 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
193 #undef TARGET_ASM_UNALIGNED_HI_OP
194 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
195 #undef TARGET_ASM_UNALIGNED_SI_OP
196 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
198 #undef TARGET_ASM_OUTPUT_MI_THUNK
199 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
200 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
201 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
203 #undef TARGET_ASM_FILE_START_APP_OFF
204 #define TARGET_ASM_FILE_START_APP_OFF true
206 #undef TARGET_LEGITIMIZE_ADDRESS
207 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
209 #undef TARGET_SCHED_ADJUST_COST
210 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
212 #undef TARGET_SCHED_ISSUE_RATE
213 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
215 #undef TARGET_SCHED_VARIABLE_ISSUE
216 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
218 #undef TARGET_SCHED_INIT_GLOBAL
219 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
221 #undef TARGET_SCHED_FINISH_GLOBAL
222 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
224 #undef TARGET_SCHED_INIT
225 #define TARGET_SCHED_INIT m68k_sched_md_init
227 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
228 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
230 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
231 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
233 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
234 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
235 m68k_sched_first_cycle_multipass_dfa_lookahead
237 #undef TARGET_OPTION_OVERRIDE
238 #define TARGET_OPTION_OVERRIDE m68k_option_override
240 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
241 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
243 #undef TARGET_RTX_COSTS
244 #define TARGET_RTX_COSTS m68k_rtx_costs
246 #undef TARGET_ATTRIBUTE_TABLE
247 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
249 #undef TARGET_PROMOTE_PROTOTYPES
250 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
252 #undef TARGET_STRUCT_VALUE_RTX
253 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
255 #undef TARGET_CANNOT_FORCE_CONST_MEM
256 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
258 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
259 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
261 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
262 #undef TARGET_RETURN_IN_MEMORY
263 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
267 #undef TARGET_HAVE_TLS
268 #define TARGET_HAVE_TLS (true)
270 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
271 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
274 #undef TARGET_LEGITIMATE_ADDRESS_P
275 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
277 #undef TARGET_CAN_ELIMINATE
278 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
280 #undef TARGET_CONDITIONAL_REGISTER_USAGE
281 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
283 #undef TARGET_TRAMPOLINE_INIT
284 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
286 #undef TARGET_RETURN_POPS_ARGS
287 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
289 #undef TARGET_DELEGITIMIZE_ADDRESS
290 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
292 #undef TARGET_FUNCTION_ARG
293 #define TARGET_FUNCTION_ARG m68k_function_arg
295 #undef TARGET_FUNCTION_ARG_ADVANCE
296 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
298 #undef TARGET_LEGITIMATE_CONSTANT_P
299 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
301 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
302 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
304 /* The value stored by TAS. */
305 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
306 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
308 static const struct attribute_spec m68k_attribute_table
[] =
310 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
311 affects_type_identity } */
312 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute
,
314 { "interrupt_handler", 0, 0, true, false, false,
315 m68k_handle_fndecl_attribute
, false },
316 { "interrupt_thread", 0, 0, true, false, false,
317 m68k_handle_fndecl_attribute
, false },
318 { NULL
, 0, 0, false, false, false, NULL
, false }
321 struct gcc_target targetm
= TARGET_INITIALIZER
;
323 /* Base flags for 68k ISAs. */
324 #define FL_FOR_isa_00 FL_ISA_68000
325 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
326 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
327 generated 68881 code for 68020 and 68030 targets unless explicitly told
329 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
330 | FL_BITFIELD | FL_68881 | FL_CAS)
331 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
332 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
334 /* Base flags for ColdFire ISAs. */
335 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
336 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
337 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
338 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
339 /* ISA_C is not upwardly compatible with ISA_B. */
340 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
344 /* Traditional 68000 instruction sets. */
350 /* ColdFire instruction set variants. */
358 /* Information about one of the -march, -mcpu or -mtune arguments. */
359 struct m68k_target_selection
361 /* The argument being described. */
364 /* For -mcpu, this is the device selected by the option.
365 For -mtune and -march, it is a representative device
366 for the microarchitecture or ISA respectively. */
367 enum target_device device
;
369 /* The M68K_DEVICE fields associated with DEVICE. See the comment
370 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
372 enum uarch_type microarch
;
377 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
378 static const struct m68k_target_selection all_devices
[] =
380 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
381 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
382 #include "m68k-devices.def"
384 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
387 /* A list of all ISAs, mapping each one to a representative device.
388 Used for -march selection. */
389 static const struct m68k_target_selection all_isas
[] =
391 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
392 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
393 #include "m68k-isas.def"
395 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
398 /* A list of all microarchitectures, mapping each one to a representative
399 device. Used for -mtune selection. */
400 static const struct m68k_target_selection all_microarchs
[] =
402 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
403 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
404 #include "m68k-microarchs.def"
405 #undef M68K_MICROARCH
406 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
409 /* The entries associated with the -mcpu, -march and -mtune settings,
410 or null for options that have not been used. */
411 const struct m68k_target_selection
*m68k_cpu_entry
;
412 const struct m68k_target_selection
*m68k_arch_entry
;
413 const struct m68k_target_selection
*m68k_tune_entry
;
415 /* Which CPU we are generating code for. */
416 enum target_device m68k_cpu
;
418 /* Which microarchitecture to tune for. */
419 enum uarch_type m68k_tune
;
421 /* Which FPU to use. */
422 enum fpu_type m68k_fpu
;
424 /* The set of FL_* flags that apply to the target processor. */
425 unsigned int m68k_cpu_flags
;
427 /* The set of FL_* flags that apply to the processor to be tuned for. */
428 unsigned int m68k_tune_flags
;
430 /* Asm templates for calling or jumping to an arbitrary symbolic address,
431 or NULL if such calls or jumps are not supported. The address is held
433 const char *m68k_symbolic_call
;
434 const char *m68k_symbolic_jump
;
436 /* Enum variable that corresponds to m68k_symbolic_call values. */
437 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var
;
440 /* Implement TARGET_OPTION_OVERRIDE. */
443 m68k_option_override (void)
445 const struct m68k_target_selection
*entry
;
446 unsigned long target_mask
;
448 if (global_options_set
.x_m68k_arch_option
)
449 m68k_arch_entry
= &all_isas
[m68k_arch_option
];
451 if (global_options_set
.x_m68k_cpu_option
)
452 m68k_cpu_entry
= &all_devices
[(int) m68k_cpu_option
];
454 if (global_options_set
.x_m68k_tune_option
)
455 m68k_tune_entry
= &all_microarchs
[(int) m68k_tune_option
];
463 -march=ARCH should generate code that runs any processor
464 implementing architecture ARCH. -mcpu=CPU should override -march
465 and should generate code that runs on processor CPU, making free
466 use of any instructions that CPU understands. -mtune=UARCH applies
467 on top of -mcpu or -march and optimizes the code for UARCH. It does
468 not change the target architecture. */
471 /* Complain if the -march setting is for a different microarchitecture,
472 or includes flags that the -mcpu setting doesn't. */
474 && (m68k_arch_entry
->microarch
!= m68k_cpu_entry
->microarch
475 || (m68k_arch_entry
->flags
& ~m68k_cpu_entry
->flags
) != 0))
476 warning (0, "-mcpu=%s conflicts with -march=%s",
477 m68k_cpu_entry
->name
, m68k_arch_entry
->name
);
479 entry
= m68k_cpu_entry
;
482 entry
= m68k_arch_entry
;
485 entry
= all_devices
+ TARGET_CPU_DEFAULT
;
487 m68k_cpu_flags
= entry
->flags
;
489 /* Use the architecture setting to derive default values for
493 /* ColdFire is lenient about alignment. */
494 if (!TARGET_COLDFIRE
)
495 target_mask
|= MASK_STRICT_ALIGNMENT
;
497 if ((m68k_cpu_flags
& FL_BITFIELD
) != 0)
498 target_mask
|= MASK_BITFIELD
;
499 if ((m68k_cpu_flags
& FL_CF_HWDIV
) != 0)
500 target_mask
|= MASK_CF_HWDIV
;
501 if ((m68k_cpu_flags
& (FL_68881
| FL_CF_FPU
)) != 0)
502 target_mask
|= MASK_HARD_FLOAT
;
503 target_flags
|= target_mask
& ~target_flags_explicit
;
505 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
506 m68k_cpu
= entry
->device
;
509 m68k_tune
= m68k_tune_entry
->microarch
;
510 m68k_tune_flags
= m68k_tune_entry
->flags
;
512 #ifdef M68K_DEFAULT_TUNE
513 else if (!m68k_cpu_entry
&& !m68k_arch_entry
)
515 enum target_device dev
;
516 dev
= all_microarchs
[M68K_DEFAULT_TUNE
].device
;
517 m68k_tune_flags
= all_devices
[dev
]->flags
;
522 m68k_tune
= entry
->microarch
;
523 m68k_tune_flags
= entry
->flags
;
526 /* Set the type of FPU. */
527 m68k_fpu
= (!TARGET_HARD_FLOAT
? FPUTYPE_NONE
528 : (m68k_cpu_flags
& FL_COLDFIRE
) != 0 ? FPUTYPE_COLDFIRE
531 /* Sanity check to ensure that msep-data and mid-sahred-library are not
532 * both specified together. Doing so simply doesn't make sense.
534 if (TARGET_SEP_DATA
&& TARGET_ID_SHARED_LIBRARY
)
535 error ("cannot specify both -msep-data and -mid-shared-library");
537 /* If we're generating code for a separate A5 relative data segment,
538 * we've got to enable -fPIC as well. This might be relaxable to
539 * -fpic but it hasn't been tested properly.
541 if (TARGET_SEP_DATA
|| TARGET_ID_SHARED_LIBRARY
)
544 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
545 error if the target does not support them. */
546 if (TARGET_PCREL
&& !TARGET_68020
&& flag_pic
== 2)
547 error ("-mpcrel -fPIC is not currently supported on selected cpu");
549 /* ??? A historic way of turning on pic, or is this intended to
550 be an embedded thing that doesn't have the same name binding
551 significance that it does on hosted ELF systems? */
552 if (TARGET_PCREL
&& flag_pic
== 0)
557 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_JSR
;
559 m68k_symbolic_jump
= "jra %a0";
561 else if (TARGET_ID_SHARED_LIBRARY
)
562 /* All addresses must be loaded from the GOT. */
564 else if (TARGET_68020
|| TARGET_ISAB
|| TARGET_ISAC
)
567 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_C
;
569 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_P
;
572 /* No unconditional long branch */;
573 else if (TARGET_PCREL
)
574 m68k_symbolic_jump
= "bra%.l %c0";
576 m68k_symbolic_jump
= "bra%.l %p0";
577 /* Turn off function cse if we are doing PIC. We always want
578 function call to be done as `bsr foo@PLTPC'. */
579 /* ??? It's traditional to do this for -mpcrel too, but it isn't
580 clear how intentional that is. */
581 flag_no_function_cse
= 1;
584 switch (m68k_symbolic_call_var
)
586 case M68K_SYMBOLIC_CALL_JSR
:
587 m68k_symbolic_call
= "jsr %a0";
590 case M68K_SYMBOLIC_CALL_BSR_C
:
591 m68k_symbolic_call
= "bsr%.l %c0";
594 case M68K_SYMBOLIC_CALL_BSR_P
:
595 m68k_symbolic_call
= "bsr%.l %p0";
598 case M68K_SYMBOLIC_CALL_NONE
:
599 gcc_assert (m68k_symbolic_call
== NULL
);
606 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
607 if (align_labels
> 2)
609 warning (0, "-falign-labels=%d is not supported", align_labels
);
614 warning (0, "-falign-loops=%d is not supported", align_loops
);
619 if (stack_limit_rtx
!= NULL_RTX
&& !TARGET_68020
)
621 warning (0, "-fstack-limit- options are not supported on this cpu");
622 stack_limit_rtx
= NULL_RTX
;
625 SUBTARGET_OVERRIDE_OPTIONS
;
627 /* Setup scheduling options. */
629 m68k_sched_cpu
= CPU_CFV1
;
631 m68k_sched_cpu
= CPU_CFV2
;
633 m68k_sched_cpu
= CPU_CFV3
;
635 m68k_sched_cpu
= CPU_CFV4
;
638 m68k_sched_cpu
= CPU_UNKNOWN
;
639 flag_schedule_insns
= 0;
640 flag_schedule_insns_after_reload
= 0;
641 flag_modulo_sched
= 0;
644 if (m68k_sched_cpu
!= CPU_UNKNOWN
)
646 if ((m68k_cpu_flags
& (FL_CF_EMAC
| FL_CF_EMAC_B
)) != 0)
647 m68k_sched_mac
= MAC_CF_EMAC
;
648 else if ((m68k_cpu_flags
& FL_CF_MAC
) != 0)
649 m68k_sched_mac
= MAC_CF_MAC
;
651 m68k_sched_mac
= MAC_NO
;
655 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
658 m68k_override_options_after_change (void)
660 if (m68k_sched_cpu
== CPU_UNKNOWN
)
662 flag_schedule_insns
= 0;
663 flag_schedule_insns_after_reload
= 0;
664 flag_modulo_sched
= 0;
668 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
669 given argument and NAME is the argument passed to -mcpu. Return NULL
670 if -mcpu was not passed. */
673 m68k_cpp_cpu_ident (const char *prefix
)
677 return concat ("__m", prefix
, "_cpu_", m68k_cpu_entry
->name
, NULL
);
680 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
681 given argument and NAME is the name of the representative device for
682 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
685 m68k_cpp_cpu_family (const char *prefix
)
689 return concat ("__m", prefix
, "_family_", m68k_cpu_entry
->family
, NULL
);
692 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
693 "interrupt_handler" attribute and interrupt_thread if FUNC has an
694 "interrupt_thread" attribute. Otherwise, return
695 m68k_fk_normal_function. */
697 enum m68k_function_kind
698 m68k_get_function_kind (tree func
)
702 gcc_assert (TREE_CODE (func
) == FUNCTION_DECL
);
704 a
= lookup_attribute ("interrupt", DECL_ATTRIBUTES (func
));
706 return m68k_fk_interrupt_handler
;
708 a
= lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func
));
710 return m68k_fk_interrupt_handler
;
712 a
= lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func
));
714 return m68k_fk_interrupt_thread
;
716 return m68k_fk_normal_function
;
719 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
720 struct attribute_spec.handler. */
722 m68k_handle_fndecl_attribute (tree
*node
, tree name
,
723 tree args ATTRIBUTE_UNUSED
,
724 int flags ATTRIBUTE_UNUSED
,
727 if (TREE_CODE (*node
) != FUNCTION_DECL
)
729 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
731 *no_add_attrs
= true;
734 if (m68k_get_function_kind (*node
) != m68k_fk_normal_function
)
736 error ("multiple interrupt attributes not allowed");
737 *no_add_attrs
= true;
741 && !strcmp (IDENTIFIER_POINTER (name
), "interrupt_thread"))
743 error ("interrupt_thread is available only on fido");
744 *no_add_attrs
= true;
751 m68k_compute_frame_layout (void)
755 enum m68k_function_kind func_kind
=
756 m68k_get_function_kind (current_function_decl
);
757 bool interrupt_handler
= func_kind
== m68k_fk_interrupt_handler
;
758 bool interrupt_thread
= func_kind
== m68k_fk_interrupt_thread
;
760 /* Only compute the frame once per function.
761 Don't cache information until reload has been completed. */
762 if (current_frame
.funcdef_no
== current_function_funcdef_no
766 current_frame
.size
= (get_frame_size () + 3) & -4;
770 /* Interrupt thread does not need to save any register. */
771 if (!interrupt_thread
)
772 for (regno
= 0; regno
< 16; regno
++)
773 if (m68k_save_reg (regno
, interrupt_handler
))
775 mask
|= 1 << (regno
- D0_REG
);
778 current_frame
.offset
= saved
* 4;
779 current_frame
.reg_no
= saved
;
780 current_frame
.reg_mask
= mask
;
782 current_frame
.foffset
= 0;
784 if (TARGET_HARD_FLOAT
)
786 /* Interrupt thread does not need to save any register. */
787 if (!interrupt_thread
)
788 for (regno
= 16; regno
< 24; regno
++)
789 if (m68k_save_reg (regno
, interrupt_handler
))
791 mask
|= 1 << (regno
- FP0_REG
);
794 current_frame
.foffset
= saved
* TARGET_FP_REG_SIZE
;
795 current_frame
.offset
+= current_frame
.foffset
;
797 current_frame
.fpu_no
= saved
;
798 current_frame
.fpu_mask
= mask
;
800 /* Remember what function this frame refers to. */
801 current_frame
.funcdef_no
= current_function_funcdef_no
;
804 /* Worker function for TARGET_CAN_ELIMINATE. */
807 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
809 return (to
== STACK_POINTER_REGNUM
? ! frame_pointer_needed
: true);
813 m68k_initial_elimination_offset (int from
, int to
)
816 /* The arg pointer points 8 bytes before the start of the arguments,
817 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
818 frame pointer in most frames. */
819 argptr_offset
= frame_pointer_needed
? 0 : UNITS_PER_WORD
;
820 if (from
== ARG_POINTER_REGNUM
&& to
== FRAME_POINTER_REGNUM
)
821 return argptr_offset
;
823 m68k_compute_frame_layout ();
825 gcc_assert (to
== STACK_POINTER_REGNUM
);
828 case ARG_POINTER_REGNUM
:
829 return current_frame
.offset
+ current_frame
.size
- argptr_offset
;
830 case FRAME_POINTER_REGNUM
:
831 return current_frame
.offset
+ current_frame
.size
;
837 /* Refer to the array `regs_ever_live' to determine which registers
838 to save; `regs_ever_live[I]' is nonzero if register number I
839 is ever used in the function. This function is responsible for
840 knowing which registers should not be saved even if used.
841 Return true if we need to save REGNO. */
844 m68k_save_reg (unsigned int regno
, bool interrupt_handler
)
846 if (flag_pic
&& regno
== PIC_REG
)
848 if (crtl
->saves_all_registers
)
850 if (crtl
->uses_pic_offset_table
)
852 /* Reload may introduce constant pool references into a function
853 that thitherto didn't need a PIC register. Note that the test
854 above will not catch that case because we will only set
855 crtl->uses_pic_offset_table when emitting
856 the address reloads. */
857 if (crtl
->uses_const_pool
)
861 if (crtl
->calls_eh_return
)
866 unsigned int test
= EH_RETURN_DATA_REGNO (i
);
867 if (test
== INVALID_REGNUM
)
874 /* Fixed regs we never touch. */
875 if (fixed_regs
[regno
])
878 /* The frame pointer (if it is such) is handled specially. */
879 if (regno
== FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
882 /* Interrupt handlers must also save call_used_regs
883 if they are live or when calling nested functions. */
884 if (interrupt_handler
)
886 if (df_regs_ever_live_p (regno
))
889 if (!crtl
->is_leaf
&& call_used_regs
[regno
])
893 /* Never need to save registers that aren't touched. */
894 if (!df_regs_ever_live_p (regno
))
897 /* Otherwise save everything that isn't call-clobbered. */
898 return !call_used_regs
[regno
];
901 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
902 the lowest memory address. COUNT is the number of registers to be
903 moved, with register REGNO + I being moved if bit I of MASK is set.
904 STORE_P specifies the direction of the move and ADJUST_STACK_P says
905 whether or not this is pre-decrement (if STORE_P) or post-increment
906 (if !STORE_P) operation. */
909 m68k_emit_movem (rtx base
, HOST_WIDE_INT offset
,
910 unsigned int count
, unsigned int regno
,
911 unsigned int mask
, bool store_p
, bool adjust_stack_p
)
914 rtx body
, addr
, src
, operands
[2];
915 enum machine_mode mode
;
917 body
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (adjust_stack_p
+ count
));
918 mode
= reg_raw_mode
[regno
];
923 src
= plus_constant (Pmode
, base
,
925 * GET_MODE_SIZE (mode
)
926 * (HOST_WIDE_INT
) (store_p
? -1 : 1)));
927 XVECEXP (body
, 0, i
++) = gen_rtx_SET (VOIDmode
, base
, src
);
930 for (; mask
!= 0; mask
>>= 1, regno
++)
933 addr
= plus_constant (Pmode
, base
, offset
);
934 operands
[!store_p
] = gen_frame_mem (mode
, addr
);
935 operands
[store_p
] = gen_rtx_REG (mode
, regno
);
936 XVECEXP (body
, 0, i
++)
937 = gen_rtx_SET (VOIDmode
, operands
[0], operands
[1]);
938 offset
+= GET_MODE_SIZE (mode
);
940 gcc_assert (i
== XVECLEN (body
, 0));
942 return emit_insn (body
);
945 /* Make INSN a frame-related instruction. */
948 m68k_set_frame_related (rtx insn
)
953 RTX_FRAME_RELATED_P (insn
) = 1;
954 body
= PATTERN (insn
);
955 if (GET_CODE (body
) == PARALLEL
)
956 for (i
= 0; i
< XVECLEN (body
, 0); i
++)
957 RTX_FRAME_RELATED_P (XVECEXP (body
, 0, i
)) = 1;
960 /* Emit RTL for the "prologue" define_expand. */
963 m68k_expand_prologue (void)
965 HOST_WIDE_INT fsize_with_regs
;
966 rtx limit
, src
, dest
;
968 m68k_compute_frame_layout ();
970 if (flag_stack_usage_info
)
971 current_function_static_stack_size
972 = current_frame
.size
+ current_frame
.offset
;
974 /* If the stack limit is a symbol, we can check it here,
975 before actually allocating the space. */
976 if (crtl
->limit_stack
977 && GET_CODE (stack_limit_rtx
) == SYMBOL_REF
)
979 limit
= plus_constant (Pmode
, stack_limit_rtx
, current_frame
.size
+ 4);
980 if (!m68k_legitimate_constant_p (Pmode
, limit
))
982 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), limit
);
983 limit
= gen_rtx_REG (Pmode
, D0_REG
);
985 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
,
986 stack_pointer_rtx
, limit
),
987 stack_pointer_rtx
, limit
,
991 fsize_with_regs
= current_frame
.size
;
994 /* ColdFire's move multiple instructions do not allow pre-decrement
995 addressing. Add the size of movem saves to the initial stack
996 allocation instead. */
997 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
998 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
999 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1000 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1003 if (frame_pointer_needed
)
1005 if (fsize_with_regs
== 0 && TUNE_68040
)
1007 /* On the 68040, two separate moves are faster than link.w 0. */
1008 dest
= gen_frame_mem (Pmode
,
1009 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1010 m68k_set_frame_related (emit_move_insn (dest
, frame_pointer_rtx
));
1011 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx
,
1012 stack_pointer_rtx
));
1014 else if (fsize_with_regs
< 0x8000 || TARGET_68020
)
1015 m68k_set_frame_related
1016 (emit_insn (gen_link (frame_pointer_rtx
,
1017 GEN_INT (-4 - fsize_with_regs
))));
1020 m68k_set_frame_related
1021 (emit_insn (gen_link (frame_pointer_rtx
, GEN_INT (-4))));
1022 m68k_set_frame_related
1023 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1025 GEN_INT (-fsize_with_regs
))));
1028 /* If the frame pointer is needed, emit a special barrier that
1029 will prevent the scheduler from moving stores to the frame
1030 before the stack adjustment. */
1031 emit_insn (gen_stack_tie (stack_pointer_rtx
, frame_pointer_rtx
));
1033 else if (fsize_with_regs
!= 0)
1034 m68k_set_frame_related
1035 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1037 GEN_INT (-fsize_with_regs
))));
1039 if (current_frame
.fpu_mask
)
1041 gcc_assert (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
);
1043 m68k_set_frame_related
1044 (m68k_emit_movem (stack_pointer_rtx
,
1045 current_frame
.fpu_no
* -GET_MODE_SIZE (XFmode
),
1046 current_frame
.fpu_no
, FP0_REG
,
1047 current_frame
.fpu_mask
, true, true));
1052 /* If we're using moveml to save the integer registers,
1053 the stack pointer will point to the bottom of the moveml
1054 save area. Find the stack offset of the first FP register. */
1055 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1058 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1059 m68k_set_frame_related
1060 (m68k_emit_movem (stack_pointer_rtx
, offset
,
1061 current_frame
.fpu_no
, FP0_REG
,
1062 current_frame
.fpu_mask
, true, false));
1066 /* If the stack limit is not a symbol, check it here.
1067 This has the disadvantage that it may be too late... */
1068 if (crtl
->limit_stack
)
1070 if (REG_P (stack_limit_rtx
))
1071 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
, stack_pointer_rtx
,
1073 stack_pointer_rtx
, stack_limit_rtx
,
1076 else if (GET_CODE (stack_limit_rtx
) != SYMBOL_REF
)
1077 warning (0, "stack limit expression is not supported");
1080 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1082 /* Store each register separately in the same order moveml does. */
1085 for (i
= 16; i
-- > 0; )
1086 if (current_frame
.reg_mask
& (1 << i
))
1088 src
= gen_rtx_REG (SImode
, D0_REG
+ i
);
1089 dest
= gen_frame_mem (SImode
,
1090 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1091 m68k_set_frame_related (emit_insn (gen_movsi (dest
, src
)));
1096 if (TARGET_COLDFIRE
)
1097 /* The required register save space has already been allocated.
1098 The first register should be stored at (%sp). */
1099 m68k_set_frame_related
1100 (m68k_emit_movem (stack_pointer_rtx
, 0,
1101 current_frame
.reg_no
, D0_REG
,
1102 current_frame
.reg_mask
, true, false));
1104 m68k_set_frame_related
1105 (m68k_emit_movem (stack_pointer_rtx
,
1106 current_frame
.reg_no
* -GET_MODE_SIZE (SImode
),
1107 current_frame
.reg_no
, D0_REG
,
1108 current_frame
.reg_mask
, true, true));
1111 if (!TARGET_SEP_DATA
1112 && crtl
->uses_pic_offset_table
)
1113 emit_insn (gen_load_got (pic_offset_table_rtx
));
1116 /* Return true if a simple (return) instruction is sufficient for this
1117 instruction (i.e. if no epilogue is needed). */
1120 m68k_use_return_insn (void)
1122 if (!reload_completed
|| frame_pointer_needed
|| get_frame_size () != 0)
1125 m68k_compute_frame_layout ();
1126 return current_frame
.offset
== 0;
1129 /* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1130 SIBCALL_P says which.
1132 The function epilogue should not depend on the current stack pointer!
1133 It should use the frame pointer only, if there is a frame pointer.
1134 This is mandatory because of alloca; we also take advantage of it to
1135 omit stack adjustments before returning. */
1138 m68k_expand_epilogue (bool sibcall_p
)
1140 HOST_WIDE_INT fsize
, fsize_with_regs
;
1141 bool big
, restore_from_sp
;
1143 m68k_compute_frame_layout ();
1145 fsize
= current_frame
.size
;
1147 restore_from_sp
= false;
1149 /* FIXME : crtl->is_leaf below is too strong.
1150 What we really need to know there is if there could be pending
1151 stack adjustment needed at that point. */
1152 restore_from_sp
= (!frame_pointer_needed
1153 || (!cfun
->calls_alloca
&& crtl
->is_leaf
));
1155 /* fsize_with_regs is the size we need to adjust the sp when
1156 popping the frame. */
1157 fsize_with_regs
= fsize
;
1158 if (TARGET_COLDFIRE
&& restore_from_sp
)
1160 /* ColdFire's move multiple instructions do not allow post-increment
1161 addressing. Add the size of movem loads to the final deallocation
1163 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
1164 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1165 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1166 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1169 if (current_frame
.offset
+ fsize
>= 0x8000
1171 && (current_frame
.reg_mask
|| current_frame
.fpu_mask
))
1174 && (current_frame
.reg_no
>= MIN_MOVEM_REGS
1175 || current_frame
.fpu_no
>= MIN_FMOVEM_REGS
))
1177 /* ColdFire's move multiple instructions do not support the
1178 (d8,Ax,Xi) addressing mode, so we're as well using a normal
1179 stack-based restore. */
1180 emit_move_insn (gen_rtx_REG (Pmode
, A1_REG
),
1181 GEN_INT (-(current_frame
.offset
+ fsize
)));
1182 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1183 gen_rtx_REG (Pmode
, A1_REG
),
1184 frame_pointer_rtx
));
1185 restore_from_sp
= true;
1189 emit_move_insn (gen_rtx_REG (Pmode
, A1_REG
), GEN_INT (-fsize
));
1195 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1197 /* Restore each register separately in the same order moveml does. */
1199 HOST_WIDE_INT offset
;
1201 offset
= current_frame
.offset
+ fsize
;
1202 for (i
= 0; i
< 16; i
++)
1203 if (current_frame
.reg_mask
& (1 << i
))
1209 /* Generate the address -OFFSET(%fp,%a1.l). */
1210 addr
= gen_rtx_REG (Pmode
, A1_REG
);
1211 addr
= gen_rtx_PLUS (Pmode
, addr
, frame_pointer_rtx
);
1212 addr
= plus_constant (Pmode
, addr
, -offset
);
1214 else if (restore_from_sp
)
1215 addr
= gen_rtx_POST_INC (Pmode
, stack_pointer_rtx
);
1217 addr
= plus_constant (Pmode
, frame_pointer_rtx
, -offset
);
1218 emit_move_insn (gen_rtx_REG (SImode
, D0_REG
+ i
),
1219 gen_frame_mem (SImode
, addr
));
1220 offset
-= GET_MODE_SIZE (SImode
);
1223 else if (current_frame
.reg_mask
)
1226 m68k_emit_movem (gen_rtx_PLUS (Pmode
,
1227 gen_rtx_REG (Pmode
, A1_REG
),
1229 -(current_frame
.offset
+ fsize
),
1230 current_frame
.reg_no
, D0_REG
,
1231 current_frame
.reg_mask
, false, false);
1232 else if (restore_from_sp
)
1233 m68k_emit_movem (stack_pointer_rtx
, 0,
1234 current_frame
.reg_no
, D0_REG
,
1235 current_frame
.reg_mask
, false,
1238 m68k_emit_movem (frame_pointer_rtx
,
1239 -(current_frame
.offset
+ fsize
),
1240 current_frame
.reg_no
, D0_REG
,
1241 current_frame
.reg_mask
, false, false);
1244 if (current_frame
.fpu_no
> 0)
1247 m68k_emit_movem (gen_rtx_PLUS (Pmode
,
1248 gen_rtx_REG (Pmode
, A1_REG
),
1250 -(current_frame
.foffset
+ fsize
),
1251 current_frame
.fpu_no
, FP0_REG
,
1252 current_frame
.fpu_mask
, false, false);
1253 else if (restore_from_sp
)
1255 if (TARGET_COLDFIRE
)
1259 /* If we used moveml to restore the integer registers, the
1260 stack pointer will still point to the bottom of the moveml
1261 save area. Find the stack offset of the first FP
1263 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1266 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1267 m68k_emit_movem (stack_pointer_rtx
, offset
,
1268 current_frame
.fpu_no
, FP0_REG
,
1269 current_frame
.fpu_mask
, false, false);
1272 m68k_emit_movem (stack_pointer_rtx
, 0,
1273 current_frame
.fpu_no
, FP0_REG
,
1274 current_frame
.fpu_mask
, false, true);
1277 m68k_emit_movem (frame_pointer_rtx
,
1278 -(current_frame
.foffset
+ fsize
),
1279 current_frame
.fpu_no
, FP0_REG
,
1280 current_frame
.fpu_mask
, false, false);
1283 if (frame_pointer_needed
)
1284 emit_insn (gen_unlink (frame_pointer_rtx
));
1285 else if (fsize_with_regs
)
1286 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1288 GEN_INT (fsize_with_regs
)));
1290 if (crtl
->calls_eh_return
)
1291 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1293 EH_RETURN_STACKADJ_RTX
));
1296 emit_jump_insn (ret_rtx
);
1299 /* Return true if X is a valid comparison operator for the dbcc
1302 Note it rejects floating point comparison operators.
1303 (In the future we could use Fdbcc).
1305 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1308 valid_dbcc_comparison_p_2 (rtx x
, enum machine_mode mode ATTRIBUTE_UNUSED
)
1310 switch (GET_CODE (x
))
1312 case EQ
: case NE
: case GTU
: case LTU
:
1316 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1318 case GT
: case LT
: case GE
: case LE
:
1319 return ! (cc_prev_status
.flags
& CC_NO_OVERFLOW
);
1325 /* Return nonzero if flags are currently in the 68881 flag register. */
1327 flags_in_68881 (void)
1329 /* We could add support for these in the future */
1330 return cc_status
.flags
& CC_IN_68881
;
1333 /* Return true if PARALLEL contains register REGNO. */
1335 m68k_reg_present_p (const_rtx parallel
, unsigned int regno
)
1339 if (REG_P (parallel
) && REGNO (parallel
) == regno
)
1342 if (GET_CODE (parallel
) != PARALLEL
)
1345 for (i
= 0; i
< XVECLEN (parallel
, 0); ++i
)
1349 x
= XEXP (XVECEXP (parallel
, 0, i
), 0);
1350 if (REG_P (x
) && REGNO (x
) == regno
)
1357 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
1360 m68k_ok_for_sibcall_p (tree decl
, tree exp
)
1362 enum m68k_function_kind kind
;
1364 /* We cannot use sibcalls for nested functions because we use the
1365 static chain register for indirect calls. */
1366 if (CALL_EXPR_STATIC_CHAIN (exp
))
1369 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun
->decl
))))
1371 /* Check that the return value locations are the same. For
1372 example that we aren't returning a value from the sibling in
1373 a D0 register but then need to transfer it to a A0 register. */
1377 cfun_value
= FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun
->decl
)),
1379 call_value
= FUNCTION_VALUE (TREE_TYPE (exp
), decl
);
1381 /* Check that the values are equal or that the result the callee
1382 function returns is superset of what the current function returns. */
1383 if (!(rtx_equal_p (cfun_value
, call_value
)
1384 || (REG_P (cfun_value
)
1385 && m68k_reg_present_p (call_value
, REGNO (cfun_value
)))))
1389 kind
= m68k_get_function_kind (current_function_decl
);
1390 if (kind
== m68k_fk_normal_function
)
1391 /* We can always sibcall from a normal function, because it's
1392 undefined if it is calling an interrupt function. */
1395 /* Otherwise we can only sibcall if the function kind is known to be
1397 if (decl
&& m68k_get_function_kind (decl
) == kind
)
1403 /* On the m68k all args are always pushed. */
1406 m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED
,
1407 enum machine_mode mode ATTRIBUTE_UNUSED
,
1408 const_tree type ATTRIBUTE_UNUSED
,
1409 bool named ATTRIBUTE_UNUSED
)
1415 m68k_function_arg_advance (cumulative_args_t cum_v
, enum machine_mode mode
,
1416 const_tree type
, bool named ATTRIBUTE_UNUSED
)
1418 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1420 *cum
+= (mode
!= BLKmode
1421 ? (GET_MODE_SIZE (mode
) + 3) & ~3
1422 : (int_size_in_bytes (type
) + 3) & ~3);
1425 /* Convert X to a legitimate function call memory reference and return the
1429 m68k_legitimize_call_address (rtx x
)
1431 gcc_assert (MEM_P (x
));
1432 if (call_operand (XEXP (x
, 0), VOIDmode
))
1434 return replace_equiv_address (x
, force_reg (Pmode
, XEXP (x
, 0)));
1437 /* Likewise for sibling calls. */
1440 m68k_legitimize_sibcall_address (rtx x
)
1442 gcc_assert (MEM_P (x
));
1443 if (sibcall_operand (XEXP (x
, 0), VOIDmode
))
1446 emit_move_insn (gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
), XEXP (x
, 0));
1447 return replace_equiv_address (x
, gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
));
1450 /* Convert X to a legitimate address and return it if successful. Otherwise
1453 For the 68000, we handle X+REG by loading X into a register R and
1454 using R+REG. R will go in an address reg and indexing will be used.
1455 However, if REG is a broken-out memory address or multiplication,
1456 nothing needs to be done because REG can certainly go in an address reg. */
1459 m68k_legitimize_address (rtx x
, rtx oldx
, enum machine_mode mode
)
1461 if (m68k_tls_symbol_p (x
))
1462 return m68k_legitimize_tls_address (x
);
1464 if (GET_CODE (x
) == PLUS
)
1466 int ch
= (x
) != (oldx
);
1469 #define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
1471 if (GET_CODE (XEXP (x
, 0)) == MULT
)
1474 XEXP (x
, 0) = force_operand (XEXP (x
, 0), 0);
1476 if (GET_CODE (XEXP (x
, 1)) == MULT
)
1479 XEXP (x
, 1) = force_operand (XEXP (x
, 1), 0);
1483 if (GET_CODE (XEXP (x
, 1)) == REG
1484 && GET_CODE (XEXP (x
, 0)) == REG
)
1486 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1489 x
= force_operand (x
, 0);
1493 if (memory_address_p (mode
, x
))
1496 if (GET_CODE (XEXP (x
, 0)) == REG
1497 || (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
1498 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
1499 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == HImode
))
1501 rtx temp
= gen_reg_rtx (Pmode
);
1502 rtx val
= force_operand (XEXP (x
, 1), 0);
1503 emit_move_insn (temp
, val
);
1506 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
1507 && GET_CODE (XEXP (x
, 0)) == REG
)
1508 x
= force_operand (x
, 0);
1510 else if (GET_CODE (XEXP (x
, 1)) == REG
1511 || (GET_CODE (XEXP (x
, 1)) == SIGN_EXTEND
1512 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == REG
1513 && GET_MODE (XEXP (XEXP (x
, 1), 0)) == HImode
))
1515 rtx temp
= gen_reg_rtx (Pmode
);
1516 rtx val
= force_operand (XEXP (x
, 0), 0);
1517 emit_move_insn (temp
, val
);
1520 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
1521 && GET_CODE (XEXP (x
, 1)) == REG
)
1522 x
= force_operand (x
, 0);
1530 /* Output a dbCC; jCC sequence. Note we do not handle the
1531 floating point version of this sequence (Fdbcc). We also
1532 do not handle alternative conditions when CC_NO_OVERFLOW is
1533 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1534 kick those out before we get here. */
1537 output_dbcc_and_branch (rtx
*operands
)
1539 switch (GET_CODE (operands
[3]))
1542 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands
);
1546 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands
);
1550 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands
);
1554 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands
);
1558 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands
);
1562 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands
);
1566 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands
);
1570 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands
);
1574 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands
);
1578 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands
);
1585 /* If the decrement is to be done in SImode, then we have
1586 to compensate for the fact that dbcc decrements in HImode. */
1587 switch (GET_MODE (operands
[0]))
1590 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands
);
1602 output_scc_di (rtx op
, rtx operand1
, rtx operand2
, rtx dest
)
1605 enum rtx_code op_code
= GET_CODE (op
);
1607 /* This does not produce a useful cc. */
1610 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1611 below. Swap the operands and change the op if these requirements
1612 are not fulfilled. */
1613 if (GET_CODE (operand2
) == REG
&& GET_CODE (operand1
) != REG
)
1617 operand1
= operand2
;
1619 op_code
= swap_condition (op_code
);
1621 loperands
[0] = operand1
;
1622 if (GET_CODE (operand1
) == REG
)
1623 loperands
[1] = gen_rtx_REG (SImode
, REGNO (operand1
) + 1);
1625 loperands
[1] = adjust_address (operand1
, SImode
, 4);
1626 if (operand2
!= const0_rtx
)
1628 loperands
[2] = operand2
;
1629 if (GET_CODE (operand2
) == REG
)
1630 loperands
[3] = gen_rtx_REG (SImode
, REGNO (operand2
) + 1);
1632 loperands
[3] = adjust_address (operand2
, SImode
, 4);
1634 loperands
[4] = gen_label_rtx ();
1635 if (operand2
!= const0_rtx
)
1636 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands
);
1639 if (TARGET_68020
|| TARGET_COLDFIRE
|| ! ADDRESS_REG_P (loperands
[0]))
1640 output_asm_insn ("tst%.l %0", loperands
);
1642 output_asm_insn ("cmp%.w #0,%0", loperands
);
1644 output_asm_insn ("jne %l4", loperands
);
1646 if (TARGET_68020
|| TARGET_COLDFIRE
|| ! ADDRESS_REG_P (loperands
[1]))
1647 output_asm_insn ("tst%.l %1", loperands
);
1649 output_asm_insn ("cmp%.w #0,%1", loperands
);
1652 loperands
[5] = dest
;
1657 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1658 CODE_LABEL_NUMBER (loperands
[4]));
1659 output_asm_insn ("seq %5", loperands
);
1663 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1664 CODE_LABEL_NUMBER (loperands
[4]));
1665 output_asm_insn ("sne %5", loperands
);
1669 loperands
[6] = gen_label_rtx ();
1670 output_asm_insn ("shi %5\n\tjra %l6", loperands
);
1671 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1672 CODE_LABEL_NUMBER (loperands
[4]));
1673 output_asm_insn ("sgt %5", loperands
);
1674 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1675 CODE_LABEL_NUMBER (loperands
[6]));
1679 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1680 CODE_LABEL_NUMBER (loperands
[4]));
1681 output_asm_insn ("shi %5", loperands
);
1685 loperands
[6] = gen_label_rtx ();
1686 output_asm_insn ("scs %5\n\tjra %l6", loperands
);
1687 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1688 CODE_LABEL_NUMBER (loperands
[4]));
1689 output_asm_insn ("slt %5", loperands
);
1690 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1691 CODE_LABEL_NUMBER (loperands
[6]));
1695 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1696 CODE_LABEL_NUMBER (loperands
[4]));
1697 output_asm_insn ("scs %5", loperands
);
1701 loperands
[6] = gen_label_rtx ();
1702 output_asm_insn ("scc %5\n\tjra %l6", loperands
);
1703 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1704 CODE_LABEL_NUMBER (loperands
[4]));
1705 output_asm_insn ("sge %5", loperands
);
1706 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1707 CODE_LABEL_NUMBER (loperands
[6]));
1711 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1712 CODE_LABEL_NUMBER (loperands
[4]));
1713 output_asm_insn ("scc %5", loperands
);
1717 loperands
[6] = gen_label_rtx ();
1718 output_asm_insn ("sls %5\n\tjra %l6", loperands
);
1719 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1720 CODE_LABEL_NUMBER (loperands
[4]));
1721 output_asm_insn ("sle %5", loperands
);
1722 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1723 CODE_LABEL_NUMBER (loperands
[6]));
1727 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1728 CODE_LABEL_NUMBER (loperands
[4]));
1729 output_asm_insn ("sls %5", loperands
);
1739 output_btst (rtx
*operands
, rtx countop
, rtx dataop
, rtx insn
, int signpos
)
1741 operands
[0] = countop
;
1742 operands
[1] = dataop
;
1744 if (GET_CODE (countop
) == CONST_INT
)
1746 register int count
= INTVAL (countop
);
1747 /* If COUNT is bigger than size of storage unit in use,
1748 advance to the containing unit of same size. */
1749 if (count
> signpos
)
1751 int offset
= (count
& ~signpos
) / 8;
1752 count
= count
& signpos
;
1753 operands
[1] = dataop
= adjust_address (dataop
, QImode
, offset
);
1755 if (count
== signpos
)
1756 cc_status
.flags
= CC_NOT_POSITIVE
| CC_Z_IN_NOT_N
;
1758 cc_status
.flags
= CC_NOT_NEGATIVE
| CC_Z_IN_NOT_N
;
1760 /* These three statements used to use next_insns_test_no...
1761 but it appears that this should do the same job. */
1763 && next_insn_tests_no_inequality (insn
))
1766 && next_insn_tests_no_inequality (insn
))
1769 && next_insn_tests_no_inequality (insn
))
1771 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1772 On some m68k variants unfortunately that's slower than btst.
1773 On 68000 and higher, that should also work for all HImode operands. */
1774 if (TUNE_CPU32
|| TARGET_COLDFIRE
|| optimize_size
)
1776 if (count
== 3 && DATA_REG_P (operands
[1])
1777 && next_insn_tests_no_inequality (insn
))
1779 cc_status
.flags
= CC_NOT_NEGATIVE
| CC_Z_IN_NOT_N
| CC_NO_OVERFLOW
;
1780 return "move%.w %1,%%ccr";
1782 if (count
== 2 && DATA_REG_P (operands
[1])
1783 && next_insn_tests_no_inequality (insn
))
1785 cc_status
.flags
= CC_NOT_NEGATIVE
| CC_INVERTED
| CC_NO_OVERFLOW
;
1786 return "move%.w %1,%%ccr";
1788 /* count == 1 followed by bvc/bvs and
1789 count == 0 followed by bcc/bcs are also possible, but need
1790 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1793 cc_status
.flags
= CC_NOT_NEGATIVE
;
1795 return "btst %0,%1";
1798 /* Return true if X is a legitimate base register. STRICT_P says
1799 whether we need strict checking. */
1802 m68k_legitimate_base_reg_p (rtx x
, bool strict_p
)
1804 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1805 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
1810 ? REGNO_OK_FOR_BASE_P (REGNO (x
))
1811 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x
))));
1814 /* Return true if X is a legitimate index register. STRICT_P says
1815 whether we need strict checking. */
1818 m68k_legitimate_index_reg_p (rtx x
, bool strict_p
)
1820 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
1825 ? REGNO_OK_FOR_INDEX_P (REGNO (x
))
1826 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x
))));
1829 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1830 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1831 ADDRESS if so. STRICT_P says whether we need strict checking. */
1834 m68k_decompose_index (rtx x
, bool strict_p
, struct m68k_address
*address
)
1838 /* Check for a scale factor. */
1840 if ((TARGET_68020
|| TARGET_COLDFIRE
)
1841 && GET_CODE (x
) == MULT
1842 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1843 && (INTVAL (XEXP (x
, 1)) == 2
1844 || INTVAL (XEXP (x
, 1)) == 4
1845 || (INTVAL (XEXP (x
, 1)) == 8
1846 && (TARGET_COLDFIRE_FPU
|| !TARGET_COLDFIRE
))))
1848 scale
= INTVAL (XEXP (x
, 1));
1852 /* Check for a word extension. */
1853 if (!TARGET_COLDFIRE
1854 && GET_CODE (x
) == SIGN_EXTEND
1855 && GET_MODE (XEXP (x
, 0)) == HImode
)
1858 if (m68k_legitimate_index_reg_p (x
, strict_p
))
1860 address
->scale
= scale
;
1868 /* Return true if X is an illegitimate symbolic constant. */
1871 m68k_illegitimate_symbolic_constant_p (rtx x
)
1875 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
)
1877 split_const (x
, &base
, &offset
);
1878 if (GET_CODE (base
) == SYMBOL_REF
1879 && !offset_within_block_p (base
, INTVAL (offset
)))
1882 return m68k_tls_reference_p (x
, false);
1885 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1888 m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
1890 return m68k_illegitimate_symbolic_constant_p (x
);
1893 /* Return true if X is a legitimate constant address that can reach
1894 bytes in the range [X, X + REACH). STRICT_P says whether we need
1898 m68k_legitimate_constant_address_p (rtx x
, unsigned int reach
, bool strict_p
)
1902 if (!CONSTANT_ADDRESS_P (x
))
1906 && !(strict_p
&& TARGET_PCREL
)
1907 && symbolic_operand (x
, VOIDmode
))
1910 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
&& reach
> 1)
1912 split_const (x
, &base
, &offset
);
1913 if (GET_CODE (base
) == SYMBOL_REF
1914 && !offset_within_block_p (base
, INTVAL (offset
) + reach
- 1))
1918 return !m68k_tls_reference_p (x
, false);
1921 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1922 labels will become jump tables. */
1925 m68k_jump_table_ref_p (rtx x
)
1927 if (GET_CODE (x
) != LABEL_REF
)
1931 if (!NEXT_INSN (x
) && !PREV_INSN (x
))
1934 x
= next_nonnote_insn (x
);
1935 return x
&& JUMP_TABLE_DATA_P (x
);
1938 /* Return true if X is a legitimate address for values of mode MODE.
1939 STRICT_P says whether strict checking is needed. If the address
1940 is valid, describe its components in *ADDRESS. */
1943 m68k_decompose_address (enum machine_mode mode
, rtx x
,
1944 bool strict_p
, struct m68k_address
*address
)
1948 memset (address
, 0, sizeof (*address
));
1950 if (mode
== BLKmode
)
1953 reach
= GET_MODE_SIZE (mode
);
1955 /* Check for (An) (mode 2). */
1956 if (m68k_legitimate_base_reg_p (x
, strict_p
))
1962 /* Check for -(An) and (An)+ (modes 3 and 4). */
1963 if ((GET_CODE (x
) == PRE_DEC
|| GET_CODE (x
) == POST_INC
)
1964 && m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
))
1966 address
->code
= GET_CODE (x
);
1967 address
->base
= XEXP (x
, 0);
1971 /* Check for (d16,An) (mode 5). */
1972 if (GET_CODE (x
) == PLUS
1973 && GET_CODE (XEXP (x
, 1)) == CONST_INT
1974 && IN_RANGE (INTVAL (XEXP (x
, 1)), -0x8000, 0x8000 - reach
)
1975 && m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
))
1977 address
->base
= XEXP (x
, 0);
1978 address
->offset
= XEXP (x
, 1);
1982 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1983 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1985 if (GET_CODE (x
) == PLUS
1986 && XEXP (x
, 0) == pic_offset_table_rtx
)
1988 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
1989 they are invalid in this context. */
1990 if (m68k_unwrap_symbol (XEXP (x
, 1), false) != XEXP (x
, 1))
1992 address
->base
= XEXP (x
, 0);
1993 address
->offset
= XEXP (x
, 1);
1998 /* The ColdFire FPU only accepts addressing modes 2-5. */
1999 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2002 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2003 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2004 All these modes are variations of mode 7. */
2005 if (m68k_legitimate_constant_address_p (x
, reach
, strict_p
))
2007 address
->offset
= x
;
2011 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2014 ??? do_tablejump creates these addresses before placing the target
2015 label, so we have to assume that unplaced labels are jump table
2016 references. It seems unlikely that we would ever generate indexed
2017 accesses to unplaced labels in other cases. */
2018 if (GET_CODE (x
) == PLUS
2019 && m68k_jump_table_ref_p (XEXP (x
, 1))
2020 && m68k_decompose_index (XEXP (x
, 0), strict_p
, address
))
2022 address
->offset
= XEXP (x
, 1);
2026 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2027 (bd,An,Xn.SIZE*SCALE) addresses. */
2031 /* Check for a nonzero base displacement. */
2032 if (GET_CODE (x
) == PLUS
2033 && m68k_legitimate_constant_address_p (XEXP (x
, 1), reach
, strict_p
))
2035 address
->offset
= XEXP (x
, 1);
2039 /* Check for a suppressed index register. */
2040 if (m68k_legitimate_base_reg_p (x
, strict_p
))
2046 /* Check for a suppressed base register. Do not allow this case
2047 for non-symbolic offsets as it effectively gives gcc freedom
2048 to treat data registers as base registers, which can generate
2051 && symbolic_operand (address
->offset
, VOIDmode
)
2052 && m68k_decompose_index (x
, strict_p
, address
))
2057 /* Check for a nonzero base displacement. */
2058 if (GET_CODE (x
) == PLUS
2059 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2060 && IN_RANGE (INTVAL (XEXP (x
, 1)), -0x80, 0x80 - reach
))
2062 address
->offset
= XEXP (x
, 1);
2067 /* We now expect the sum of a base and an index. */
2068 if (GET_CODE (x
) == PLUS
)
2070 if (m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
)
2071 && m68k_decompose_index (XEXP (x
, 1), strict_p
, address
))
2073 address
->base
= XEXP (x
, 0);
2077 if (m68k_legitimate_base_reg_p (XEXP (x
, 1), strict_p
)
2078 && m68k_decompose_index (XEXP (x
, 0), strict_p
, address
))
2080 address
->base
= XEXP (x
, 1);
2087 /* Return true if X is a legitimate address for values of mode MODE.
2088 STRICT_P says whether strict checking is needed. */
2091 m68k_legitimate_address_p (enum machine_mode mode
, rtx x
, bool strict_p
)
2093 struct m68k_address address
;
2095 return m68k_decompose_address (mode
, x
, strict_p
, &address
);
2098 /* Return true if X is a memory, describing its address in ADDRESS if so.
2099 Apply strict checking if called during or after reload. */
2102 m68k_legitimate_mem_p (rtx x
, struct m68k_address
*address
)
2105 && m68k_decompose_address (GET_MODE (x
), XEXP (x
, 0),
2106 reload_in_progress
|| reload_completed
,
2110 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2113 m68k_legitimate_constant_p (enum machine_mode mode
, rtx x
)
2115 return mode
!= XFmode
&& !m68k_illegitimate_symbolic_constant_p (x
);
2118 /* Return true if X matches the 'Q' constraint. It must be a memory
2119 with a base address and no constant offset or index. */
2122 m68k_matches_q_p (rtx x
)
2124 struct m68k_address address
;
2126 return (m68k_legitimate_mem_p (x
, &address
)
2127 && address
.code
== UNKNOWN
2133 /* Return true if X matches the 'U' constraint. It must be a base address
2134 with a constant offset and no index. */
2137 m68k_matches_u_p (rtx x
)
2139 struct m68k_address address
;
2141 return (m68k_legitimate_mem_p (x
, &address
)
2142 && address
.code
== UNKNOWN
2148 /* Return GOT pointer. */
2153 if (pic_offset_table_rtx
== NULL_RTX
)
2154 pic_offset_table_rtx
= gen_rtx_REG (Pmode
, PIC_REG
);
2156 crtl
->uses_pic_offset_table
= 1;
2158 return pic_offset_table_rtx
;
2161 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2163 enum m68k_reloc
{ RELOC_GOT
, RELOC_TLSGD
, RELOC_TLSLDM
, RELOC_TLSLDO
,
2164 RELOC_TLSIE
, RELOC_TLSLE
};
2166 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2168 /* Wrap symbol X into unspec representing relocation RELOC.
2169 BASE_REG - register that should be added to the result.
2170 TEMP_REG - if non-null, temporary register. */
2173 m68k_wrap_symbol (rtx x
, enum m68k_reloc reloc
, rtx base_reg
, rtx temp_reg
)
2177 use_x_p
= (base_reg
== pic_offset_table_rtx
) ? TARGET_XGOT
: TARGET_XTLS
;
2179 if (TARGET_COLDFIRE
&& use_x_p
)
2180 /* When compiling with -mx{got, tls} switch the code will look like this:
2182 move.l <X>@<RELOC>,<TEMP_REG>
2183 add.l <BASE_REG>,<TEMP_REG> */
2185 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2186 to put @RELOC after reference. */
2187 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2189 x
= gen_rtx_CONST (Pmode
, x
);
2191 if (temp_reg
== NULL
)
2193 gcc_assert (can_create_pseudo_p ());
2194 temp_reg
= gen_reg_rtx (Pmode
);
2197 emit_move_insn (temp_reg
, x
);
2198 emit_insn (gen_addsi3 (temp_reg
, temp_reg
, base_reg
));
2203 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2205 x
= gen_rtx_CONST (Pmode
, x
);
2207 x
= gen_rtx_PLUS (Pmode
, base_reg
, x
);
2213 /* Helper for m68k_unwrap_symbol.
2214 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2215 sets *RELOC_PTR to relocation type for the symbol. */
2218 m68k_unwrap_symbol_1 (rtx orig
, bool unwrap_reloc32_p
,
2219 enum m68k_reloc
*reloc_ptr
)
2221 if (GET_CODE (orig
) == CONST
)
2224 enum m68k_reloc dummy
;
2228 if (reloc_ptr
== NULL
)
2231 /* Handle an addend. */
2232 if ((GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
)
2233 && CONST_INT_P (XEXP (x
, 1)))
2236 if (GET_CODE (x
) == UNSPEC
)
2238 switch (XINT (x
, 1))
2240 case UNSPEC_RELOC16
:
2241 orig
= XVECEXP (x
, 0, 0);
2242 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2245 case UNSPEC_RELOC32
:
2246 if (unwrap_reloc32_p
)
2248 orig
= XVECEXP (x
, 0, 0);
2249 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2262 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2263 UNSPEC_RELOC32 wrappers. */
2266 m68k_unwrap_symbol (rtx orig
, bool unwrap_reloc32_p
)
2268 return m68k_unwrap_symbol_1 (orig
, unwrap_reloc32_p
, NULL
);
2271 /* Helper for m68k_final_prescan_insn. */
2274 m68k_final_prescan_insn_1 (rtx
*x_ptr
, void *data ATTRIBUTE_UNUSED
)
2278 if (m68k_unwrap_symbol (x
, true) != x
)
2279 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2283 gcc_assert (GET_CODE (x
) == CONST
);
2286 if (GET_CODE (plus
) == PLUS
|| GET_CODE (plus
) == MINUS
)
2291 unspec
= XEXP (plus
, 0);
2292 gcc_assert (GET_CODE (unspec
) == UNSPEC
);
2293 addend
= XEXP (plus
, 1);
2294 gcc_assert (CONST_INT_P (addend
));
2296 /* We now have all the pieces, rearrange them. */
2298 /* Move symbol to plus. */
2299 XEXP (plus
, 0) = XVECEXP (unspec
, 0, 0);
2301 /* Move plus inside unspec. */
2302 XVECEXP (unspec
, 0, 0) = plus
;
2304 /* Move unspec to top level of const. */
2305 XEXP (x
, 0) = unspec
;
2314 /* Prescan insn before outputing assembler for it. */
2317 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED
,
2318 rtx
*operands
, int n_operands
)
2322 /* Combine and, possibly, other optimizations may do good job
2324 (const (unspec [(symbol)]))
2326 (const (plus (unspec [(symbol)])
2328 The problem with this is emitting @TLS or @GOT decorations.
2329 The decoration is emitted when processing (unspec), so the
2330 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2332 It seems that the easiest solution to this is to convert such
2334 (const (unspec [(plus (symbol)
2336 Note, that the top level of operand remains intact, so we don't have
2337 to patch up anything outside of the operand. */
2339 for (i
= 0; i
< n_operands
; ++i
)
2345 for_each_rtx (&op
, m68k_final_prescan_insn_1
, NULL
);
2349 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2350 If REG is non-null, use it; generate new pseudo otherwise. */
2353 m68k_move_to_reg (rtx x
, rtx orig
, rtx reg
)
2357 if (reg
== NULL_RTX
)
2359 gcc_assert (can_create_pseudo_p ());
2360 reg
= gen_reg_rtx (Pmode
);
2363 insn
= emit_move_insn (reg
, x
);
2364 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2366 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
2371 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2375 m68k_wrap_symbol_into_got_ref (rtx x
, enum m68k_reloc reloc
, rtx temp_reg
)
2377 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), temp_reg
);
2379 x
= gen_rtx_MEM (Pmode
, x
);
2380 MEM_READONLY_P (x
) = 1;
2385 /* Legitimize PIC addresses. If the address is already
2386 position-independent, we return ORIG. Newly generated
2387 position-independent addresses go to REG. If we need more
2388 than one register, we lose.
2390 An address is legitimized by making an indirect reference
2391 through the Global Offset Table with the name of the symbol
2394 The assembler and linker are responsible for placing the
2395 address of the symbol in the GOT. The function prologue
2396 is responsible for initializing a5 to the starting address
2399 The assembler is also responsible for translating a symbol name
2400 into a constant displacement from the start of the GOT.
2402 A quick example may make things a little clearer:
2404 When not generating PIC code to store the value 12345 into _foo
2405 we would generate the following code:
2409 When generating PIC two transformations are made. First, the compiler
2410 loads the address of foo into a register. So the first transformation makes:
2415 The code in movsi will intercept the lea instruction and call this
2416 routine which will transform the instructions into:
2418 movel a5@(_foo:w), a0
2422 That (in a nutshell) is how *all* symbol and label references are
2426 legitimize_pic_address (rtx orig
, enum machine_mode mode ATTRIBUTE_UNUSED
,
2431 /* First handle a simple SYMBOL_REF or LABEL_REF */
2432 if (GET_CODE (orig
) == SYMBOL_REF
|| GET_CODE (orig
) == LABEL_REF
)
2436 pic_ref
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_GOT
, reg
);
2437 pic_ref
= m68k_move_to_reg (pic_ref
, orig
, reg
);
2439 else if (GET_CODE (orig
) == CONST
)
2443 /* Make sure this has not already been legitimized. */
2444 if (m68k_unwrap_symbol (orig
, true) != orig
)
2449 /* legitimize both operands of the PLUS */
2450 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
2452 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
2453 orig
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
2454 base
== reg
? 0 : reg
);
2456 if (GET_CODE (orig
) == CONST_INT
)
2457 pic_ref
= plus_constant (Pmode
, base
, INTVAL (orig
));
2459 pic_ref
= gen_rtx_PLUS (Pmode
, base
, orig
);
2465 /* The __tls_get_addr symbol. */
2466 static GTY(()) rtx m68k_tls_get_addr
;
2468 /* Return SYMBOL_REF for __tls_get_addr. */
2471 m68k_get_tls_get_addr (void)
2473 if (m68k_tls_get_addr
== NULL_RTX
)
2474 m68k_tls_get_addr
= init_one_libfunc ("__tls_get_addr");
2476 return m68k_tls_get_addr
;
2479 /* Return libcall result in A0 instead of usual D0. */
2480 static bool m68k_libcall_value_in_a0_p
= false;
2482 /* Emit instruction sequence that calls __tls_get_addr. X is
2483 the TLS symbol we are referencing and RELOC is the symbol type to use
2484 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2485 emitted. A pseudo register with result of __tls_get_addr call is
2489 m68k_call_tls_get_addr (rtx x
, rtx eqv
, enum m68k_reloc reloc
)
2495 /* Emit the call sequence. */
2498 /* FIXME: Unfortunately, emit_library_call_value does not
2499 consider (plus (%a5) (const (unspec))) to be a good enough
2500 operand for push, so it forces it into a register. The bad
2501 thing about this is that combiner, due to copy propagation and other
2502 optimizations, sometimes can not later fix this. As a consequence,
2503 additional register may be allocated resulting in a spill.
2504 For reference, see args processing loops in
2505 calls.c:emit_library_call_value_1.
2506 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2507 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), NULL_RTX
);
2509 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2510 is the simpliest way of generating a call. The difference between
2511 __tls_get_addr() and libcall is that the result is returned in D0
2512 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2513 which temporarily switches returning the result to A0. */
2515 m68k_libcall_value_in_a0_p
= true;
2516 a0
= emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX
, LCT_PURE
,
2517 Pmode
, 1, x
, Pmode
);
2518 m68k_libcall_value_in_a0_p
= false;
2520 insns
= get_insns ();
2523 gcc_assert (can_create_pseudo_p ());
2524 dest
= gen_reg_rtx (Pmode
);
2525 emit_libcall_block (insns
, dest
, a0
, eqv
);
2530 /* The __tls_get_addr symbol. */
2531 static GTY(()) rtx m68k_read_tp
;
2533 /* Return SYMBOL_REF for __m68k_read_tp. */
2536 m68k_get_m68k_read_tp (void)
2538 if (m68k_read_tp
== NULL_RTX
)
2539 m68k_read_tp
= init_one_libfunc ("__m68k_read_tp");
2541 return m68k_read_tp
;
2544 /* Emit instruction sequence that calls __m68k_read_tp.
2545 A pseudo register with result of __m68k_read_tp call is returned. */
2548 m68k_call_m68k_read_tp (void)
2557 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2558 is the simpliest way of generating a call. The difference between
2559 __m68k_read_tp() and libcall is that the result is returned in D0
2560 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2561 which temporarily switches returning the result to A0. */
2563 /* Emit the call sequence. */
2564 m68k_libcall_value_in_a0_p
= true;
2565 a0
= emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX
, LCT_PURE
,
2567 m68k_libcall_value_in_a0_p
= false;
2568 insns
= get_insns ();
2571 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2572 share the m68k_read_tp result with other IE/LE model accesses. */
2573 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const1_rtx
), UNSPEC_RELOC32
);
2575 gcc_assert (can_create_pseudo_p ());
2576 dest
= gen_reg_rtx (Pmode
);
2577 emit_libcall_block (insns
, dest
, a0
, eqv
);
2582 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2583 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2587 m68k_legitimize_tls_address (rtx orig
)
2589 switch (SYMBOL_REF_TLS_MODEL (orig
))
2591 case TLS_MODEL_GLOBAL_DYNAMIC
:
2592 orig
= m68k_call_tls_get_addr (orig
, orig
, RELOC_TLSGD
);
2595 case TLS_MODEL_LOCAL_DYNAMIC
:
2601 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2602 share the LDM result with other LD model accesses. */
2603 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2606 a0
= m68k_call_tls_get_addr (orig
, eqv
, RELOC_TLSLDM
);
2608 x
= m68k_wrap_symbol (orig
, RELOC_TLSLDO
, a0
, NULL_RTX
);
2610 if (can_create_pseudo_p ())
2611 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2617 case TLS_MODEL_INITIAL_EXEC
:
2622 a0
= m68k_call_m68k_read_tp ();
2624 x
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_TLSIE
, NULL_RTX
);
2625 x
= gen_rtx_PLUS (Pmode
, x
, a0
);
2627 if (can_create_pseudo_p ())
2628 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2634 case TLS_MODEL_LOCAL_EXEC
:
2639 a0
= m68k_call_m68k_read_tp ();
2641 x
= m68k_wrap_symbol (orig
, RELOC_TLSLE
, a0
, NULL_RTX
);
2643 if (can_create_pseudo_p ())
2644 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2657 /* Return true if X is a TLS symbol. */
2660 m68k_tls_symbol_p (rtx x
)
2662 if (!TARGET_HAVE_TLS
)
2665 if (GET_CODE (x
) != SYMBOL_REF
)
2668 return SYMBOL_REF_TLS_MODEL (x
) != 0;
2671 /* Helper for m68k_tls_referenced_p. */
2674 m68k_tls_reference_p_1 (rtx
*x_ptr
, void *data ATTRIBUTE_UNUSED
)
2676 /* Note: this is not the same as m68k_tls_symbol_p. */
2677 if (GET_CODE (*x_ptr
) == SYMBOL_REF
)
2678 return SYMBOL_REF_TLS_MODEL (*x_ptr
) != 0 ? 1 : 0;
2680 /* Don't recurse into legitimate TLS references. */
2681 if (m68k_tls_reference_p (*x_ptr
, true))
2687 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2688 though illegitimate one.
2689 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2692 m68k_tls_reference_p (rtx x
, bool legitimate_p
)
2694 if (!TARGET_HAVE_TLS
)
2698 return for_each_rtx (&x
, m68k_tls_reference_p_1
, NULL
) == 1 ? true : false;
2701 enum m68k_reloc reloc
= RELOC_GOT
;
2703 return (m68k_unwrap_symbol_1 (x
, true, &reloc
) != x
2704 && TLS_RELOC_P (reloc
));
2710 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2712 /* Return the type of move that should be used for integer I. */
2715 m68k_const_method (HOST_WIDE_INT i
)
2722 /* The ColdFire doesn't have byte or word operations. */
2723 /* FIXME: This may not be useful for the m68060 either. */
2724 if (!TARGET_COLDFIRE
)
2726 /* if -256 < N < 256 but N is not in range for a moveq
2727 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2728 if (USE_MOVQ (i
^ 0xff))
2730 /* Likewise, try with not.w */
2731 if (USE_MOVQ (i
^ 0xffff))
2733 /* This is the only value where neg.w is useful */
2738 /* Try also with swap. */
2740 if (USE_MOVQ ((u
>> 16) | (u
<< 16)))
2745 /* Try using MVZ/MVS with an immediate value to load constants. */
2746 if (i
>= 0 && i
<= 65535)
2748 if (i
>= -32768 && i
<= 32767)
2752 /* Otherwise, use move.l */
2756 /* Return the cost of moving constant I into a data register. */
/* NOTE(review): extraction dropped this function's return type, braces,
   case labels and return values; only the dispatch on m68k_const_method
   and two of the cost comments survive.  Recover the full switch from the
   upstream m68k.c before editing.  */
2759 const_int_cost (HOST_WIDE_INT i
)
2761 switch (m68k_const_method (i
))
2764 /* Constants between -128 and 127 are cheap due to moveq. */
2772 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
/* NOTE(review): TARGET_RTX_COSTS hook -- estimates insn costs for rtx X
   into *TOTAL.  The extraction garbled this function badly: the return
   type, the switch's case labels (CONST_INT, PLUS, ASHIFT, MULT, DIV,
   etc. -- TODO confirm against upstream), braces and several cost-macro
   definitions (MULW_COST/MULL_COST/DIVW_COST heads) are missing, and
   statements are split across lines with upstream line numbers embedded.
   Only comments were added here; restore the body from upstream m68k.c
   before making changes.  */
2782 m68k_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2783 int *total
, bool speed ATTRIBUTE_UNUSED
)
2788 /* Constant zero is super cheap due to clr instruction. */
2789 if (x
== const0_rtx
)
2792 *total
= const_int_cost (INTVAL (x
));
2802 /* Make 0.0 cheaper than other floating constants to
2803 encourage creating tstsf and tstdf insns. */
2804 if (outer_code
== COMPARE
2805 && (x
== CONST0_RTX (SFmode
) || x
== CONST0_RTX (DFmode
)))
2811 /* These are vaguely right for a 68020. */
2812 /* The costs for long multiply have been adjusted to work properly
2813 in synth_mult on the 68020, relative to an average of the time
2814 for add and the time for shift, taking away a little more because
2815 sometimes move insns are needed. */
2816 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2821 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2822 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2824 : TARGET_COLDFIRE ? 3 : 13)
2829 : TUNE_68000_10 ? 5 \
2830 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2831 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2833 : TARGET_COLDFIRE ? 2 : 8)
2836 (TARGET_CF_HWDIV ? 11 \
2837 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2840 /* An lea costs about three times as much as a simple add. */
2841 if (GET_MODE (x
) == SImode
2842 && GET_CODE (XEXP (x
, 1)) == REG
2843 && GET_CODE (XEXP (x
, 0)) == MULT
2844 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
2845 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
2846 && (INTVAL (XEXP (XEXP (x
, 0), 1)) == 2
2847 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 4
2848 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 8))
2850 /* lea an@(dx:l:i),am */
2851 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 2 : 3);
2861 *total
= COSTS_N_INSNS(1);
2866 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
2868 if (INTVAL (XEXP (x
, 1)) < 16)
2869 *total
= COSTS_N_INSNS (2) + INTVAL (XEXP (x
, 1)) / 2;
2871 /* We're using clrw + swap for these cases. */
2872 *total
= COSTS_N_INSNS (4) + (INTVAL (XEXP (x
, 1)) - 16) / 2;
2875 *total
= COSTS_N_INSNS (10); /* Worst case. */
2878 /* A shift by a big integer takes an extra instruction. */
2879 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
2880 && (INTVAL (XEXP (x
, 1)) == 16))
2882 *total
= COSTS_N_INSNS (2); /* clrw;swap */
2885 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
2886 && !(INTVAL (XEXP (x
, 1)) > 0
2887 && INTVAL (XEXP (x
, 1)) <= 8))
2889 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 1 : 3); /* lsr #i,dn */
2895 if ((GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
2896 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
2897 && GET_MODE (x
) == SImode
)
2898 *total
= COSTS_N_INSNS (MULW_COST
);
2899 else if (GET_MODE (x
) == QImode
|| GET_MODE (x
) == HImode
)
2900 *total
= COSTS_N_INSNS (MULW_COST
);
2902 *total
= COSTS_N_INSNS (MULL_COST
);
2909 if (GET_MODE (x
) == QImode
|| GET_MODE (x
) == HImode
)
2910 *total
= COSTS_N_INSNS (DIVW_COST
); /* div.w */
2911 else if (TARGET_CF_HWDIV
)
2912 *total
= COSTS_N_INSNS (18);
2914 *total
= COSTS_N_INSNS (43); /* div.l */
2918 if (outer_code
== COMPARE
)
2927 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2931 output_move_const_into_data_reg (rtx
*operands
)
2935 i
= INTVAL (operands
[1]);
2936 switch (m68k_const_method (i
))
2939 return "mvzw %1,%0";
2941 return "mvsw %1,%0";
2943 return "moveq %1,%0";
2946 operands
[1] = GEN_INT (i
^ 0xff);
2947 return "moveq %1,%0\n\tnot%.b %0";
2950 operands
[1] = GEN_INT (i
^ 0xffff);
2951 return "moveq %1,%0\n\tnot%.w %0";
2954 return "moveq #-128,%0\n\tneg%.w %0";
2959 operands
[1] = GEN_INT ((u
<< 16) | (u
>> 16));
2960 return "moveq %1,%0\n\tswap %0";
2963 return "move%.l %1,%0";
2969 /* Return true if I can be handled by ISA B's mov3q instruction. */
2972 valid_mov3q_const (HOST_WIDE_INT i
)
2974 return TARGET_ISAB
&& (i
== -1 || IN_RANGE (i
, 1, 7));
2977 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2978 I is the value of OPERANDS[1]. */
2981 output_move_simode_const (rtx
*operands
)
2987 src
= INTVAL (operands
[1]);
2989 && (DATA_REG_P (dest
) || MEM_P (dest
))
2990 /* clr insns on 68000 read before writing. */
2991 && ((TARGET_68010
|| TARGET_COLDFIRE
)
2992 || !(MEM_P (dest
) && MEM_VOLATILE_P (dest
))))
2994 else if (GET_MODE (dest
) == SImode
&& valid_mov3q_const (src
))
2995 return "mov3q%.l %1,%0";
2996 else if (src
== 0 && ADDRESS_REG_P (dest
))
2997 return "sub%.l %0,%0";
2998 else if (DATA_REG_P (dest
))
2999 return output_move_const_into_data_reg (operands
);
3000 else if (ADDRESS_REG_P (dest
) && IN_RANGE (src
, -0x8000, 0x7fff))
3002 if (valid_mov3q_const (src
))
3003 return "mov3q%.l %1,%0";
3004 return "move%.w %1,%0";
3006 else if (MEM_P (dest
)
3007 && GET_CODE (XEXP (dest
, 0)) == PRE_DEC
3008 && REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
3009 && IN_RANGE (src
, -0x8000, 0x7fff))
3011 if (valid_mov3q_const (src
))
3012 return "mov3q%.l %1,%-";
3015 return "move%.l %1,%0";
3019 output_move_simode (rtx
*operands
)
3021 if (GET_CODE (operands
[1]) == CONST_INT
)
3022 return output_move_simode_const (operands
);
3023 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3024 || GET_CODE (operands
[1]) == CONST
)
3025 && push_operand (operands
[0], SImode
))
3027 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3028 || GET_CODE (operands
[1]) == CONST
)
3029 && ADDRESS_REG_P (operands
[0]))
3030 return "lea %a1,%0";
3031 return "move%.l %1,%0";
3035 output_move_himode (rtx
*operands
)
3037 if (GET_CODE (operands
[1]) == CONST_INT
)
3039 if (operands
[1] == const0_rtx
3040 && (DATA_REG_P (operands
[0])
3041 || GET_CODE (operands
[0]) == MEM
)
3042 /* clr insns on 68000 read before writing. */
3043 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3044 || !(GET_CODE (operands
[0]) == MEM
3045 && MEM_VOLATILE_P (operands
[0]))))
3047 else if (operands
[1] == const0_rtx
3048 && ADDRESS_REG_P (operands
[0]))
3049 return "sub%.l %0,%0";
3050 else if (DATA_REG_P (operands
[0])
3051 && INTVAL (operands
[1]) < 128
3052 && INTVAL (operands
[1]) >= -128)
3053 return "moveq %1,%0";
3054 else if (INTVAL (operands
[1]) < 0x8000
3055 && INTVAL (operands
[1]) >= -0x8000)
3056 return "move%.w %1,%0";
3058 else if (CONSTANT_P (operands
[1]))
3059 return "move%.l %1,%0";
3060 return "move%.w %1,%0";
3064 output_move_qimode (rtx
*operands
)
3066 /* 68k family always modifies the stack pointer by at least 2, even for
3067 byte pushes. The 5200 (ColdFire) does not do this. */
3069 /* This case is generated by pushqi1 pattern now. */
3070 gcc_assert (!(GET_CODE (operands
[0]) == MEM
3071 && GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
3072 && XEXP (XEXP (operands
[0], 0), 0) == stack_pointer_rtx
3073 && ! ADDRESS_REG_P (operands
[1])
3074 && ! TARGET_COLDFIRE
));
3076 /* clr and st insns on 68000 read before writing. */
3077 if (!ADDRESS_REG_P (operands
[0])
3078 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3079 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3081 if (operands
[1] == const0_rtx
)
3083 if ((!TARGET_COLDFIRE
|| DATA_REG_P (operands
[0]))
3084 && GET_CODE (operands
[1]) == CONST_INT
3085 && (INTVAL (operands
[1]) & 255) == 255)
3091 if (GET_CODE (operands
[1]) == CONST_INT
3092 && DATA_REG_P (operands
[0])
3093 && INTVAL (operands
[1]) < 128
3094 && INTVAL (operands
[1]) >= -128)
3095 return "moveq %1,%0";
3096 if (operands
[1] == const0_rtx
&& ADDRESS_REG_P (operands
[0]))
3097 return "sub%.l %0,%0";
3098 if (GET_CODE (operands
[1]) != CONST_INT
&& CONSTANT_P (operands
[1]))
3099 return "move%.l %1,%0";
3100 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3101 from address registers. */
3102 if (ADDRESS_REG_P (operands
[0]) || ADDRESS_REG_P (operands
[1]))
3103 return "move%.w %1,%0";
3104 return "move%.b %1,%0";
3108 output_move_stricthi (rtx
*operands
)
3110 if (operands
[1] == const0_rtx
3111 /* clr insns on 68000 read before writing. */
3112 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3113 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3115 return "move%.w %1,%0";
3119 output_move_strictqi (rtx
*operands
)
3121 if (operands
[1] == const0_rtx
3122 /* clr insns on 68000 read before writing. */
3123 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3124 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3126 return "move%.b %1,%0";
3129 /* Return the best assembler insn template
3130 for moving operands[1] into operands[0] as a fullword. */
3133 singlemove_string (rtx
*operands
)
3135 if (GET_CODE (operands
[1]) == CONST_INT
)
3136 return output_move_simode_const (operands
);
3137 return "move%.l %1,%0";
3141 /* Output assembler or rtl code to perform a doubleword move insn
3142 with operands OPERANDS.
3143 Pointers to 3 helper functions should be specified:
3144 HANDLE_REG_ADJUST to adjust a register by a small value,
3145 HANDLE_COMPADR to compute an address and
3146 HANDLE_MOVSI to move 4 bytes. */
/* NOTE(review): this function's text was garbled by extraction -- the
   return type, braces, the operand-type enum declaration, local
   declarations (optype0/optype1, latehalf/middlehalf/xops arrays) and a
   number of statements are missing, and the surviving statements are
   split across lines with the upstream line numbers embedded in the
   text.  Only comments were added here; restore the body from the
   upstream m68k.c before attempting any change -- the statement order
   (classify operands, rewrite push/pop pairs, build the word halves,
   then emit in overlap-safe order) is load-bearing.  */
3149 handle_move_double (rtx operands
[2],
3150 void (*handle_reg_adjust
) (rtx
, int),
3151 void (*handle_compadr
) (rtx
[2]),
3152 void (*handle_movsi
) (rtx
[2]))
3156 REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
3161 rtx addreg0
= 0, addreg1
= 0;
3162 int dest_overlapped_low
= 0;
3163 int size
= GET_MODE_SIZE (GET_MODE (operands
[0]));
3168 /* First classify both operands. */
3170 if (REG_P (operands
[0]))
3172 else if (offsettable_memref_p (operands
[0]))
3174 else if (GET_CODE (XEXP (operands
[0], 0)) == POST_INC
)
3176 else if (GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
)
3178 else if (GET_CODE (operands
[0]) == MEM
)
3183 if (REG_P (operands
[1]))
3185 else if (CONSTANT_P (operands
[1]))
3187 else if (offsettable_memref_p (operands
[1]))
3189 else if (GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3191 else if (GET_CODE (XEXP (operands
[1], 0)) == PRE_DEC
)
3193 else if (GET_CODE (operands
[1]) == MEM
)
3198 /* Check for the cases that the operand constraints are not supposed
3199 to allow to happen. Generating code for these cases is
3201 gcc_assert (optype0
!= RNDOP
&& optype1
!= RNDOP
);
3203 /* If one operand is decrementing and one is incrementing
3204 decrement the former register explicitly
3205 and change that operand into ordinary indexing. */
3207 if (optype0
== PUSHOP
&& optype1
== POPOP
)
3209 operands
[0] = XEXP (XEXP (operands
[0], 0), 0);
3211 handle_reg_adjust (operands
[0], -size
);
3213 if (GET_MODE (operands
[1]) == XFmode
)
3214 operands
[0] = gen_rtx_MEM (XFmode
, operands
[0]);
3215 else if (GET_MODE (operands
[0]) == DFmode
)
3216 operands
[0] = gen_rtx_MEM (DFmode
, operands
[0]);
3218 operands
[0] = gen_rtx_MEM (DImode
, operands
[0]);
3221 if (optype0
== POPOP
&& optype1
== PUSHOP
)
3223 operands
[1] = XEXP (XEXP (operands
[1], 0), 0);
3225 handle_reg_adjust (operands
[1], -size
);
3227 if (GET_MODE (operands
[1]) == XFmode
)
3228 operands
[1] = gen_rtx_MEM (XFmode
, operands
[1]);
3229 else if (GET_MODE (operands
[1]) == DFmode
)
3230 operands
[1] = gen_rtx_MEM (DFmode
, operands
[1]);
3232 operands
[1] = gen_rtx_MEM (DImode
, operands
[1]);
3236 /* If an operand is an unoffsettable memory ref, find a register
3237 we can increment temporarily to make it refer to the second word. */
3239 if (optype0
== MEMOP
)
3240 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
3242 if (optype1
== MEMOP
)
3243 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
3245 /* Ok, we can do one word at a time.
3246 Normally we do the low-numbered word first,
3247 but if either operand is autodecrementing then we
3248 do the high-numbered word first.
3250 In either case, set up in LATEHALF the operands to use
3251 for the high-numbered word and in some cases alter the
3252 operands in OPERANDS to be suitable for the low-numbered word. */
3256 if (optype0
== REGOP
)
3258 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 2);
3259 middlehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3261 else if (optype0
== OFFSOP
)
3263 middlehalf
[0] = adjust_address (operands
[0], SImode
, 4);
3264 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3268 middlehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3269 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3272 if (optype1
== REGOP
)
3274 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 2);
3275 middlehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3277 else if (optype1
== OFFSOP
)
3279 middlehalf
[1] = adjust_address (operands
[1], SImode
, 4);
3280 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3282 else if (optype1
== CNSTOP
)
3284 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
3289 REAL_VALUE_FROM_CONST_DOUBLE (r
, operands
[1]);
3290 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r
, l
);
3291 operands
[1] = GEN_INT (l
[0]);
3292 middlehalf
[1] = GEN_INT (l
[1]);
3293 latehalf
[1] = GEN_INT (l
[2]);
3297 /* No non-CONST_DOUBLE constant should ever appear
3299 gcc_assert (!CONSTANT_P (operands
[1]));
3304 middlehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3305 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3309 /* size is not 12: */
3311 if (optype0
== REGOP
)
3312 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3313 else if (optype0
== OFFSOP
)
3314 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3316 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3318 if (optype1
== REGOP
)
3319 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3320 else if (optype1
== OFFSOP
)
3321 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3322 else if (optype1
== CNSTOP
)
3323 split_double (operands
[1], &operands
[1], &latehalf
[1]);
3325 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3328 /* If insn is effectively movd N(sp),-(sp) then we will do the
3329 high word first. We should use the adjusted operand 1 (which is N+4(sp))
3330 for the low word as well, to compensate for the first decrement of sp. */
3331 if (optype0
== PUSHOP
3332 && REGNO (XEXP (XEXP (operands
[0], 0), 0)) == STACK_POINTER_REGNUM
3333 && reg_overlap_mentioned_p (stack_pointer_rtx
, operands
[1]))
3334 operands
[1] = middlehalf
[1] = latehalf
[1];
3336 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3337 if the upper part of reg N does not appear in the MEM, arrange to
3338 emit the move late-half first. Otherwise, compute the MEM address
3339 into the upper part of N and use that as a pointer to the memory
3341 if (optype0
== REGOP
3342 && (optype1
== OFFSOP
|| optype1
== MEMOP
))
3344 rtx testlow
= gen_rtx_REG (SImode
, REGNO (operands
[0]));
3346 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3347 && reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3349 /* If both halves of dest are used in the src memory address,
3350 compute the address into latehalf of dest.
3351 Note that this can't happen if the dest is two data regs. */
3353 xops
[0] = latehalf
[0];
3354 xops
[1] = XEXP (operands
[1], 0);
3356 handle_compadr (xops
);
3357 if (GET_MODE (operands
[1]) == XFmode
)
3359 operands
[1] = gen_rtx_MEM (XFmode
, latehalf
[0]);
3360 middlehalf
[1] = adjust_address (operands
[1], DImode
, size
- 8);
3361 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3365 operands
[1] = gen_rtx_MEM (DImode
, latehalf
[0]);
3366 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3370 && reg_overlap_mentioned_p (middlehalf
[0],
3371 XEXP (operands
[1], 0)))
3373 /* Check for two regs used by both source and dest.
3374 Note that this can't happen if the dest is all data regs.
3375 It can happen if the dest is d6, d7, a0.
3376 But in that case, latehalf is an addr reg, so
3377 the code at compadr does ok. */
3379 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3380 || reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3383 /* JRV says this can't happen: */
3384 gcc_assert (!addreg0
&& !addreg1
);
3386 /* Only the middle reg conflicts; simply put it last. */
3387 handle_movsi (operands
);
3388 handle_movsi (latehalf
);
3389 handle_movsi (middlehalf
);
3393 else if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0)))
3394 /* If the low half of dest is mentioned in the source memory
3395 address, the arrange to emit the move late half first. */
3396 dest_overlapped_low
= 1;
3399 /* If one or both operands autodecrementing,
3400 do the two words, high-numbered first. */
3402 /* Likewise, the first move would clobber the source of the second one,
3403 do them in the other order. This happens only for registers;
3404 such overlap can't happen in memory unless the user explicitly
3405 sets it up, and that is an undefined circumstance. */
3407 if (optype0
== PUSHOP
|| optype1
== PUSHOP
3408 || (optype0
== REGOP
&& optype1
== REGOP
3409 && ((middlehalf
[1] && REGNO (operands
[0]) == REGNO (middlehalf
[1]))
3410 || REGNO (operands
[0]) == REGNO (latehalf
[1])))
3411 || dest_overlapped_low
)
3413 /* Make any unoffsettable addresses point at high-numbered word. */
3415 handle_reg_adjust (addreg0
, size
- 4);
3417 handle_reg_adjust (addreg1
, size
- 4);
3420 handle_movsi (latehalf
);
3422 /* Undo the adds we just did. */
3424 handle_reg_adjust (addreg0
, -4);
3426 handle_reg_adjust (addreg1
, -4);
3430 handle_movsi (middlehalf
);
3433 handle_reg_adjust (addreg0
, -4);
3435 handle_reg_adjust (addreg1
, -4);
3438 /* Do low-numbered word. */
3440 handle_movsi (operands
);
3444 /* Normal case: do the two words, low-numbered first. */
3446 m68k_final_prescan_insn (NULL
, operands
, 2);
3447 handle_movsi (operands
);
3449 /* Do the middle one of the three words for long double */
3453 handle_reg_adjust (addreg0
, 4);
3455 handle_reg_adjust (addreg1
, 4);
3457 m68k_final_prescan_insn (NULL
, middlehalf
, 2);
3458 handle_movsi (middlehalf
);
3461 /* Make any unoffsettable addresses point at high-numbered word. */
3463 handle_reg_adjust (addreg0
, 4);
3465 handle_reg_adjust (addreg1
, 4);
3468 m68k_final_prescan_insn (NULL
, latehalf
, 2);
3469 handle_movsi (latehalf
);
3471 /* Undo the adds we just did. */
3473 handle_reg_adjust (addreg0
, -(size
- 4));
3475 handle_reg_adjust (addreg1
, -(size
- 4));
3480 /* Output assembler code to adjust REG by N. */
3482 output_reg_adjust (rtx reg
, int n
)
3486 gcc_assert (GET_MODE (reg
) == SImode
3487 && -12 <= n
&& n
!= 0 && n
<= 12);
3492 s
= "add%.l #12,%0";
3496 s
= "addq%.l #8,%0";
3500 s
= "addq%.l #4,%0";
3504 s
= "sub%.l #12,%0";
3508 s
= "subq%.l #8,%0";
3512 s
= "subq%.l #4,%0";
3520 output_asm_insn (s
, ®
);
3523 /* Emit rtl code to adjust REG by N. */
3525 emit_reg_adjust (rtx reg1
, int n
)
3529 gcc_assert (GET_MODE (reg1
) == SImode
3530 && -12 <= n
&& n
!= 0 && n
<= 12);
3532 reg1
= copy_rtx (reg1
);
3533 reg2
= copy_rtx (reg1
);
3536 emit_insn (gen_subsi3 (reg1
, reg2
, GEN_INT (-n
)));
3538 emit_insn (gen_addsi3 (reg1
, reg2
, GEN_INT (n
)));
3543 /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3545 output_compadr (rtx operands
[2])
3547 output_asm_insn ("lea %a1,%0", operands
);
3550 /* Output the best assembler insn for moving operands[1] into operands[0]
3553 output_movsi (rtx operands
[2])
3555 output_asm_insn (singlemove_string (operands
), operands
);
3558 /* Copy OP and change its mode to MODE. */
3560 copy_operand (rtx op
, enum machine_mode mode
)
3562 /* ??? This looks really ugly. There must be a better way
3563 to change a mode on the operand. */
3564 if (GET_MODE (op
) != VOIDmode
)
3567 op
= gen_rtx_REG (mode
, REGNO (op
));
3571 PUT_MODE (op
, mode
);
3578 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3580 emit_movsi (rtx operands
[2])
3582 operands
[0] = copy_operand (operands
[0], SImode
);
3583 operands
[1] = copy_operand (operands
[1], SImode
);
3585 emit_insn (gen_movsi (operands
[0], operands
[1]));
3588 /* Output assembler code to perform a doubleword move insn
3589 with operands OPERANDS. */
3591 output_move_double (rtx
*operands
)
3593 handle_move_double (operands
,
3594 output_reg_adjust
, output_compadr
, output_movsi
);
3599 /* Output rtl code to perform a doubleword move insn
3600 with operands OPERANDS. */
3602 m68k_emit_move_double (rtx operands
[2])
3604 handle_move_double (operands
, emit_reg_adjust
, emit_movsi
, emit_movsi
);
3607 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3608 new rtx with the correct mode. */
3611 force_mode (enum machine_mode mode
, rtx orig
)
3613 if (mode
== GET_MODE (orig
))
3616 if (REGNO (orig
) >= FIRST_PSEUDO_REGISTER
)
3619 return gen_rtx_REG (mode
, REGNO (orig
));
3623 fp_reg_operand (rtx op
, enum machine_mode mode ATTRIBUTE_UNUSED
)
3625 return reg_renumber
&& FP_REG_P (op
);
3628 /* Emit insns to move operands[1] into operands[0].
3630 Return 1 if we have written out everything that needs to be done to
3631 do the move. Otherwise, return 0 and the caller will emit the move
3634 Note SCRATCH_REG may not be in the proper mode depending on how it
3635 will be used. This routine is responsible for creating a new copy
3636 of SCRATCH_REG in the proper mode. */
3639 emit_move_sequence (rtx
*operands
, enum machine_mode mode
, rtx scratch_reg
)
3641 register rtx operand0
= operands
[0];
3642 register rtx operand1
= operands
[1];
3646 && reload_in_progress
&& GET_CODE (operand0
) == REG
3647 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
3648 operand0
= reg_equiv_mem (REGNO (operand0
));
3649 else if (scratch_reg
3650 && reload_in_progress
&& GET_CODE (operand0
) == SUBREG
3651 && GET_CODE (SUBREG_REG (operand0
)) == REG
3652 && REGNO (SUBREG_REG (operand0
)) >= FIRST_PSEUDO_REGISTER
)
3654 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3655 the code which tracks sets/uses for delete_output_reload. */
3656 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand0
),
3657 reg_equiv_mem (REGNO (SUBREG_REG (operand0
))),
3658 SUBREG_BYTE (operand0
));
3659 operand0
= alter_subreg (&temp
, true);
3663 && reload_in_progress
&& GET_CODE (operand1
) == REG
3664 && REGNO (operand1
) >= FIRST_PSEUDO_REGISTER
)
3665 operand1
= reg_equiv_mem (REGNO (operand1
));
3666 else if (scratch_reg
3667 && reload_in_progress
&& GET_CODE (operand1
) == SUBREG
3668 && GET_CODE (SUBREG_REG (operand1
)) == REG
3669 && REGNO (SUBREG_REG (operand1
)) >= FIRST_PSEUDO_REGISTER
)
3671 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3672 the code which tracks sets/uses for delete_output_reload. */
3673 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand1
),
3674 reg_equiv_mem (REGNO (SUBREG_REG (operand1
))),
3675 SUBREG_BYTE (operand1
));
3676 operand1
= alter_subreg (&temp
, true);
3679 if (scratch_reg
&& reload_in_progress
&& GET_CODE (operand0
) == MEM
3680 && ((tem
= find_replacement (&XEXP (operand0
, 0)))
3681 != XEXP (operand0
, 0)))
3682 operand0
= gen_rtx_MEM (GET_MODE (operand0
), tem
);
3683 if (scratch_reg
&& reload_in_progress
&& GET_CODE (operand1
) == MEM
3684 && ((tem
= find_replacement (&XEXP (operand1
, 0)))
3685 != XEXP (operand1
, 0)))
3686 operand1
= gen_rtx_MEM (GET_MODE (operand1
), tem
);
3688 /* Handle secondary reloads for loads/stores of FP registers where
3689 the address is symbolic by using the scratch register */
3690 if (fp_reg_operand (operand0
, mode
)
3691 && ((GET_CODE (operand1
) == MEM
3692 && ! memory_address_p (DFmode
, XEXP (operand1
, 0)))
3693 || ((GET_CODE (operand1
) == SUBREG
3694 && GET_CODE (XEXP (operand1
, 0)) == MEM
3695 && !memory_address_p (DFmode
, XEXP (XEXP (operand1
, 0), 0)))))
3698 if (GET_CODE (operand1
) == SUBREG
)
3699 operand1
= XEXP (operand1
, 0);
3701 /* SCRATCH_REG will hold an address. We want
3702 it in SImode regardless of what mode it was originally given
3704 scratch_reg
= force_mode (SImode
, scratch_reg
);
3706 /* D might not fit in 14 bits either; for such cases load D into
3708 if (!memory_address_p (Pmode
, XEXP (operand1
, 0)))
3710 emit_move_insn (scratch_reg
, XEXP (XEXP (operand1
, 0), 1));
3711 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1
, 0)),
3713 XEXP (XEXP (operand1
, 0), 0),
3717 emit_move_insn (scratch_reg
, XEXP (operand1
, 0));
3718 emit_insn (gen_rtx_SET (VOIDmode
, operand0
,
3719 gen_rtx_MEM (mode
, scratch_reg
)));
3722 else if (fp_reg_operand (operand1
, mode
)
3723 && ((GET_CODE (operand0
) == MEM
3724 && ! memory_address_p (DFmode
, XEXP (operand0
, 0)))
3725 || ((GET_CODE (operand0
) == SUBREG
)
3726 && GET_CODE (XEXP (operand0
, 0)) == MEM
3727 && !memory_address_p (DFmode
, XEXP (XEXP (operand0
, 0), 0))))
3730 if (GET_CODE (operand0
) == SUBREG
)
3731 operand0
= XEXP (operand0
, 0);
3733 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3734 it in SIMODE regardless of what mode it was originally given
3736 scratch_reg
= force_mode (SImode
, scratch_reg
);
3738 /* D might not fit in 14 bits either; for such cases load D into
3740 if (!memory_address_p (Pmode
, XEXP (operand0
, 0)))
3742 emit_move_insn (scratch_reg
, XEXP (XEXP (operand0
, 0), 1));
3743 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0
,
3746 XEXP (XEXP (operand0
, 0),
3751 emit_move_insn (scratch_reg
, XEXP (operand0
, 0));
3752 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_MEM (mode
, scratch_reg
),
3756 /* Handle secondary reloads for loads of FP registers from constant
3757 expressions by forcing the constant into memory.
3759 use scratch_reg to hold the address of the memory location.
3761 The proper fix is to change PREFERRED_RELOAD_CLASS to return
3762 NO_REGS when presented with a const_int and an register class
3763 containing only FP registers. Doing so unfortunately creates
3764 more problems than it solves. Fix this for 2.5. */
3765 else if (fp_reg_operand (operand0
, mode
)
3766 && CONSTANT_P (operand1
)
3771 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3772 it in SIMODE regardless of what mode it was originally given
3774 scratch_reg
= force_mode (SImode
, scratch_reg
);
3776 /* Force the constant into memory and put the address of the
3777 memory location into scratch_reg. */
3778 xoperands
[0] = scratch_reg
;
3779 xoperands
[1] = XEXP (force_const_mem (mode
, operand1
), 0);
3780 emit_insn (gen_rtx_SET (mode
, scratch_reg
, xoperands
[1]));
3782 /* Now load the destination register. */
3783 emit_insn (gen_rtx_SET (mode
, operand0
,
3784 gen_rtx_MEM (mode
, scratch_reg
)));
3788 /* Now have insn-emit do whatever it normally does. */
3792 /* Split one or more DImode RTL references into pairs of SImode
3793 references. The RTL can be REG, offsettable MEM, integer constant, or
3794 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3795 split and "num" is its length. lo_half and hi_half are output arrays
3796 that parallel "operands". */
3799 split_di (rtx operands
[], int num
, rtx lo_half
[], rtx hi_half
[])
3803 rtx op
= operands
[num
];
3805 /* simplify_subreg refuses to split volatile memory addresses,
3806 but we still have to handle it. */
3807 if (GET_CODE (op
) == MEM
)
3809 lo_half
[num
] = adjust_address (op
, SImode
, 4);
3810 hi_half
[num
] = adjust_address (op
, SImode
, 0);
3814 lo_half
[num
] = simplify_gen_subreg (SImode
, op
,
3815 GET_MODE (op
) == VOIDmode
3816 ? DImode
: GET_MODE (op
), 4);
3817 hi_half
[num
] = simplify_gen_subreg (SImode
, op
,
3818 GET_MODE (op
) == VOIDmode
3819 ? DImode
: GET_MODE (op
), 0);
3824 /* Split X into a base and a constant offset, storing them in *BASE
3825 and *OFFSET respectively. */
3828 m68k_split_offset (rtx x
, rtx
*base
, HOST_WIDE_INT
*offset
)
3831 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
3833 *offset
+= INTVAL (XEXP (x
, 1));
3839 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3840 instruction. STORE_P says whether the move is a load or store.
3842 If the instruction uses post-increment or pre-decrement addressing,
3843 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3844 adjustment. This adjustment will be made by the first element of
3845 PARALLEL, with the loads or stores starting at element 1. If the
3846 instruction does not use post-increment or pre-decrement addressing,
3847 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3848 start at element 0. */
3851 m68k_movem_pattern_p (rtx pattern
, rtx automod_base
,
3852 HOST_WIDE_INT automod_offset
, bool store_p
)
3854 rtx base
, mem_base
, set
, mem
, reg
, last_reg
;
3855 HOST_WIDE_INT offset
, mem_offset
;
3857 enum reg_class rclass
;
3859 len
= XVECLEN (pattern
, 0);
3860 first
= (automod_base
!= NULL
);
3864 /* Stores must be pre-decrement and loads must be post-increment. */
3865 if (store_p
!= (automod_offset
< 0))
3868 /* Work out the base and offset for lowest memory location. */
3869 base
= automod_base
;
3870 offset
= (automod_offset
< 0 ? automod_offset
: 0);
3874 /* Allow any valid base and offset in the first access. */
3881 for (i
= first
; i
< len
; i
++)
3883 /* We need a plain SET. */
3884 set
= XVECEXP (pattern
, 0, i
);
3885 if (GET_CODE (set
) != SET
)
3888 /* Check that we have a memory location... */
3889 mem
= XEXP (set
, !store_p
);
3890 if (!MEM_P (mem
) || !memory_operand (mem
, VOIDmode
))
3893 /* ...with the right address. */
3896 m68k_split_offset (XEXP (mem
, 0), &base
, &offset
);
3897 /* The ColdFire instruction only allows (An) and (d16,An) modes.
3898 There are no mode restrictions for 680x0 besides the
3899 automodification rules enforced above. */
3901 && !m68k_legitimate_base_reg_p (base
, reload_completed
))
3906 m68k_split_offset (XEXP (mem
, 0), &mem_base
, &mem_offset
);
3907 if (!rtx_equal_p (base
, mem_base
) || offset
!= mem_offset
)
3911 /* Check that we have a register of the required mode and class. */
3912 reg
= XEXP (set
, store_p
);
3914 || !HARD_REGISTER_P (reg
)
3915 || GET_MODE (reg
) != reg_raw_mode
[REGNO (reg
)])
3920 /* The register must belong to RCLASS and have a higher number
3921 than the register in the previous SET. */
3922 if (!TEST_HARD_REG_BIT (reg_class_contents
[rclass
], REGNO (reg
))
3923 || REGNO (last_reg
) >= REGNO (reg
))
3928 /* Work out which register class we need. */
3929 if (INT_REGNO_P (REGNO (reg
)))
3930 rclass
= GENERAL_REGS
;
3931 else if (FP_REGNO_P (REGNO (reg
)))
3938 offset
+= GET_MODE_SIZE (GET_MODE (reg
));
3941 /* If we have an automodification, check whether the final offset is OK. */
3942 if (automod_base
&& offset
!= (automod_offset
< 0 ? 0 : automod_offset
))
3945 /* Reject unprofitable cases. */
3946 if (len
< first
+ (rclass
== FP_REGS
? MIN_FMOVEM_REGS
: MIN_MOVEM_REGS
))
3952 /* Return the assembly code template for a movem or fmovem instruction
3953 whose pattern is given by PATTERN. Store the template's operands
3956 If the instruction uses post-increment or pre-decrement addressing,
3957 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
3958 is true if this is a store instruction. */
3961 m68k_output_movem (rtx
*operands
, rtx pattern
,
3962 HOST_WIDE_INT automod_offset
, bool store_p
)
3967 gcc_assert (GET_CODE (pattern
) == PARALLEL
);
3969 first
= (automod_offset
!= 0);
3970 for (i
= first
; i
< XVECLEN (pattern
, 0); i
++)
3972 /* When using movem with pre-decrement addressing, register X + D0_REG
3973 is controlled by bit 15 - X. For all other addressing modes,
3974 register X + D0_REG is controlled by bit X. Confusingly, the
3975 register mask for fmovem is in the opposite order to that for
3979 gcc_assert (MEM_P (XEXP (XVECEXP (pattern
, 0, i
), !store_p
)));
3980 gcc_assert (REG_P (XEXP (XVECEXP (pattern
, 0, i
), store_p
)));
3981 regno
= REGNO (XEXP (XVECEXP (pattern
, 0, i
), store_p
));
3982 if (automod_offset
< 0)
3984 if (FP_REGNO_P (regno
))
3985 mask
|= 1 << (regno
- FP0_REG
);
3987 mask
|= 1 << (15 - (regno
- D0_REG
));
3991 if (FP_REGNO_P (regno
))
3992 mask
|= 1 << (7 - (regno
- FP0_REG
));
3994 mask
|= 1 << (regno
- D0_REG
);
3999 if (automod_offset
== 0)
4000 operands
[0] = XEXP (XEXP (XVECEXP (pattern
, 0, first
), !store_p
), 0);
4001 else if (automod_offset
< 0)
4002 operands
[0] = gen_rtx_PRE_DEC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4004 operands
[0] = gen_rtx_POST_INC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4005 operands
[1] = GEN_INT (mask
);
4006 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern
, 0, first
), store_p
))))
4009 return "fmovem %1,%a0";
4011 return "fmovem %a0,%1";
4016 return "movem%.l %1,%a0";
4018 return "movem%.l %a0,%1";
4022 /* Return a REG that occurs in ADDR with coefficient 1.
4023 ADDR can be effectively incremented by incrementing REG. */
4026 find_addr_reg (rtx addr
)
4028 while (GET_CODE (addr
) == PLUS
)
4030 if (GET_CODE (XEXP (addr
, 0)) == REG
)
4031 addr
= XEXP (addr
, 0);
4032 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
4033 addr
= XEXP (addr
, 1);
4034 else if (CONSTANT_P (XEXP (addr
, 0)))
4035 addr
= XEXP (addr
, 1);
4036 else if (CONSTANT_P (XEXP (addr
, 1)))
4037 addr
= XEXP (addr
, 0);
4041 gcc_assert (GET_CODE (addr
) == REG
);
4045 /* Output assembler code to perform a 32-bit 3-operand add. */
4048 output_addsi3 (rtx
*operands
)
4050 if (! operands_match_p (operands
[0], operands
[1]))
4052 if (!ADDRESS_REG_P (operands
[1]))
4054 rtx tmp
= operands
[1];
4056 operands
[1] = operands
[2];
4060 /* These insns can result from reloads to access
4061 stack slots over 64k from the frame pointer. */
4062 if (GET_CODE (operands
[2]) == CONST_INT
4063 && (INTVAL (operands
[2]) < -32768 || INTVAL (operands
[2]) > 32767))
4064 return "move%.l %2,%0\n\tadd%.l %1,%0";
4065 if (GET_CODE (operands
[2]) == REG
)
4066 return MOTOROLA
? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4067 return MOTOROLA
? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4069 if (GET_CODE (operands
[2]) == CONST_INT
)
4071 if (INTVAL (operands
[2]) > 0
4072 && INTVAL (operands
[2]) <= 8)
4073 return "addq%.l %2,%0";
4074 if (INTVAL (operands
[2]) < 0
4075 && INTVAL (operands
[2]) >= -8)
4077 operands
[2] = GEN_INT (- INTVAL (operands
[2]));
4078 return "subq%.l %2,%0";
4080 /* On the CPU32 it is faster to use two addql instructions to
4081 add a small integer (8 < N <= 16) to a register.
4082 Likewise for subql. */
4083 if (TUNE_CPU32
&& REG_P (operands
[0]))
4085 if (INTVAL (operands
[2]) > 8
4086 && INTVAL (operands
[2]) <= 16)
4088 operands
[2] = GEN_INT (INTVAL (operands
[2]) - 8);
4089 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4091 if (INTVAL (operands
[2]) < -8
4092 && INTVAL (operands
[2]) >= -16)
4094 operands
[2] = GEN_INT (- INTVAL (operands
[2]) - 8);
4095 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4098 if (ADDRESS_REG_P (operands
[0])
4099 && INTVAL (operands
[2]) >= -0x8000
4100 && INTVAL (operands
[2]) < 0x8000)
4103 return "add%.w %2,%0";
4105 return MOTOROLA
? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4108 return "add%.l %2,%0";
4111 /* Store in cc_status the expressions that the condition codes will
4112 describe after execution of an instruction whose pattern is EXP.
4113 Do not alter them if the instruction would not alter the cc's. */
4115 /* On the 68000, all the insns to store in an address register fail to
4116 set the cc's. However, in some cases these instructions can make it
4117 possibly invalid to use the saved cc's. In those cases we clear out
4118 some or all of the saved cc's so they won't be used. */
4121 notice_update_cc (rtx exp
, rtx insn
)
4123 if (GET_CODE (exp
) == SET
)
4125 if (GET_CODE (SET_SRC (exp
)) == CALL
)
4127 else if (ADDRESS_REG_P (SET_DEST (exp
)))
4129 if (cc_status
.value1
&& modified_in_p (cc_status
.value1
, insn
))
4130 cc_status
.value1
= 0;
4131 if (cc_status
.value2
&& modified_in_p (cc_status
.value2
, insn
))
4132 cc_status
.value2
= 0;
4134 /* fmoves to memory or data registers do not set the condition
4135 codes. Normal moves _do_ set the condition codes, but not in
4136 a way that is appropriate for comparison with 0, because -0.0
4137 would be treated as a negative nonzero number. Note that it
4138 isn't appropriate to conditionalize this restriction on
4139 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4140 we care about the difference between -0.0 and +0.0. */
4141 else if (!FP_REG_P (SET_DEST (exp
))
4142 && SET_DEST (exp
) != cc0_rtx
4143 && (FP_REG_P (SET_SRC (exp
))
4144 || GET_CODE (SET_SRC (exp
)) == FIX
4145 || FLOAT_MODE_P (GET_MODE (SET_DEST (exp
)))))
4147 /* A pair of move insns doesn't produce a useful overall cc. */
4148 else if (!FP_REG_P (SET_DEST (exp
))
4149 && !FP_REG_P (SET_SRC (exp
))
4150 && GET_MODE_SIZE (GET_MODE (SET_SRC (exp
))) > 4
4151 && (GET_CODE (SET_SRC (exp
)) == REG
4152 || GET_CODE (SET_SRC (exp
)) == MEM
4153 || GET_CODE (SET_SRC (exp
)) == CONST_DOUBLE
))
4155 else if (SET_DEST (exp
) != pc_rtx
)
4157 cc_status
.flags
= 0;
4158 cc_status
.value1
= SET_DEST (exp
);
4159 cc_status
.value2
= SET_SRC (exp
);
4162 else if (GET_CODE (exp
) == PARALLEL
4163 && GET_CODE (XVECEXP (exp
, 0, 0)) == SET
)
4165 rtx dest
= SET_DEST (XVECEXP (exp
, 0, 0));
4166 rtx src
= SET_SRC (XVECEXP (exp
, 0, 0));
4168 if (ADDRESS_REG_P (dest
))
4170 else if (dest
!= pc_rtx
)
4172 cc_status
.flags
= 0;
4173 cc_status
.value1
= dest
;
4174 cc_status
.value2
= src
;
4179 if (cc_status
.value2
!= 0
4180 && ADDRESS_REG_P (cc_status
.value2
)
4181 && GET_MODE (cc_status
.value2
) == QImode
)
4183 if (cc_status
.value2
!= 0)
4184 switch (GET_CODE (cc_status
.value2
))
4186 case ASHIFT
: case ASHIFTRT
: case LSHIFTRT
:
4187 case ROTATE
: case ROTATERT
:
4188 /* These instructions always clear the overflow bit, and set
4189 the carry to the bit shifted out. */
4190 cc_status
.flags
|= CC_OVERFLOW_UNUSABLE
| CC_NO_CARRY
;
4193 case PLUS
: case MINUS
: case MULT
:
4194 case DIV
: case UDIV
: case MOD
: case UMOD
: case NEG
:
4195 if (GET_MODE (cc_status
.value2
) != VOIDmode
)
4196 cc_status
.flags
|= CC_NO_OVERFLOW
;
4199 /* (SET r1 (ZERO_EXTEND r2)) on this machine
4200 ends with a move insn moving r2 in r2's mode.
4201 Thus, the cc's are set for r2.
4202 This can set N bit spuriously. */
4203 cc_status
.flags
|= CC_NOT_NEGATIVE
;
4208 if (cc_status
.value1
&& GET_CODE (cc_status
.value1
) == REG
4210 && reg_overlap_mentioned_p (cc_status
.value1
, cc_status
.value2
))
4211 cc_status
.value2
= 0;
4212 /* Check for PRE_DEC in dest modifying a register used in src. */
4213 if (cc_status
.value1
&& GET_CODE (cc_status
.value1
) == MEM
4214 && GET_CODE (XEXP (cc_status
.value1
, 0)) == PRE_DEC
4216 && reg_overlap_mentioned_p (XEXP (XEXP (cc_status
.value1
, 0), 0),
4218 cc_status
.value2
= 0;
4219 if (((cc_status
.value1
&& FP_REG_P (cc_status
.value1
))
4220 || (cc_status
.value2
&& FP_REG_P (cc_status
.value2
))))
4221 cc_status
.flags
= CC_IN_68881
;
4222 if (cc_status
.value2
&& GET_CODE (cc_status
.value2
) == COMPARE
4223 && GET_MODE_CLASS (GET_MODE (XEXP (cc_status
.value2
, 0))) == MODE_FLOAT
)
4225 cc_status
.flags
= CC_IN_68881
;
4226 if (!FP_REG_P (XEXP (cc_status
.value2
, 0))
4227 && FP_REG_P (XEXP (cc_status
.value2
, 1)))
4228 cc_status
.flags
|= CC_REVERSED
;
4233 output_move_const_double (rtx
*operands
)
4235 int code
= standard_68881_constant_p (operands
[1]);
4239 static char buf
[40];
4241 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4244 return "fmove%.d %1,%0";
4248 output_move_const_single (rtx
*operands
)
4250 int code
= standard_68881_constant_p (operands
[1]);
4254 static char buf
[40];
4256 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4259 return "fmove%.s %f1,%0";
4262 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4263 from the "fmovecr" instruction.
4264 The value, anded with 0xff, gives the code to use in fmovecr
4265 to get the desired constant. */
4267 /* This code has been fixed for cross-compilation. */
4269 static int inited_68881_table
= 0;
4271 static const char *const strings_68881
[7] = {
4281 static const int codes_68881
[7] = {
4291 REAL_VALUE_TYPE values_68881
[7];
4293 /* Set up values_68881 array by converting the decimal values
4294 strings_68881 to binary. */
4297 init_68881_table (void)
4301 enum machine_mode mode
;
4304 for (i
= 0; i
< 7; i
++)
4308 r
= REAL_VALUE_ATOF (strings_68881
[i
], mode
);
4309 values_68881
[i
] = r
;
4311 inited_68881_table
= 1;
4315 standard_68881_constant_p (rtx x
)
4320 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4321 used at all on those chips. */
4325 if (! inited_68881_table
)
4326 init_68881_table ();
4328 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4330 /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4332 for (i
= 0; i
< 6; i
++)
4334 if (REAL_VALUES_IDENTICAL (r
, values_68881
[i
]))
4335 return (codes_68881
[i
]);
4338 if (GET_MODE (x
) == SFmode
)
4341 if (REAL_VALUES_EQUAL (r
, values_68881
[6]))
4342 return (codes_68881
[6]);
4344 /* larger powers of ten in the constants ram are not used
4345 because they are not equal to a `double' C constant. */
4349 /* If X is a floating-point constant, return the logarithm of X base 2,
4350 or 0 if X is not a power of 2. */
4353 floating_exact_log2 (rtx x
)
4355 REAL_VALUE_TYPE r
, r1
;
4358 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4360 if (REAL_VALUES_LESS (r
, dconst1
))
4363 exp
= real_exponent (&r
);
4364 real_2expN (&r1
, exp
, DFmode
);
4365 if (REAL_VALUES_EQUAL (r1
, r
))
4371 /* A C compound statement to output to stdio stream STREAM the
4372 assembler syntax for an instruction operand X. X is an RTL
4375 CODE is a value that can be used to specify one of several ways
4376 of printing the operand. It is used when identical operands
4377 must be printed differently depending on the context. CODE
4378 comes from the `%' specification that was used to request
4379 printing of the operand. If the specification was just `%DIGIT'
4380 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4381 is the ASCII code for LTR.
4383 If X is a register, this macro should print the register's name.
4384 The names can be found in an array `reg_names' whose type is
4385 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4387 When the machine description has a specification `%PUNCT' (a `%'
4388 followed by a punctuation character), this macro is called with
4389 a null pointer for X and the punctuation character for CODE.
4391 The m68k specific codes are:
4393 '.' for dot needed in Motorola-style opcode names.
4394 '-' for an operand pushing on the stack:
4395 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4396 '+' for an operand pushing on the stack:
4397 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4398 '@' for a reference to the top word on the stack:
4399 sp@, (sp) or (%sp) depending on the style of syntax.
4400 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4401 but & in SGS syntax).
4402 '!' for the cc register (used in an `and to cc' insn).
4403 '$' for the letter `s' in an op code, but only on the 68040.
4404 '&' for the letter `d' in an op code, but only on the 68040.
4405 '/' for register prefix needed by longlong.h.
4406 '?' for m68k_library_id_string
4408 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4409 'd' to force memory addressing to be absolute, not relative.
4410 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4411 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4412 or print pair of registers as rx:ry.
4413 'p' print an address with @PLTPC attached, but only if the operand
4414 is not locally-bound. */
4417 print_operand (FILE *file
, rtx op
, int letter
)
4422 fprintf (file
, ".");
4424 else if (letter
== '#')
4425 asm_fprintf (file
, "%I");
4426 else if (letter
== '-')
4427 asm_fprintf (file
, MOTOROLA
? "-(%Rsp)" : "%Rsp@-");
4428 else if (letter
== '+')
4429 asm_fprintf (file
, MOTOROLA
? "(%Rsp)+" : "%Rsp@+");
4430 else if (letter
== '@')
4431 asm_fprintf (file
, MOTOROLA
? "(%Rsp)" : "%Rsp@");
4432 else if (letter
== '!')
4433 asm_fprintf (file
, "%Rfpcr");
4434 else if (letter
== '$')
4437 fprintf (file
, "s");
4439 else if (letter
== '&')
4442 fprintf (file
, "d");
4444 else if (letter
== '/')
4445 asm_fprintf (file
, "%R");
4446 else if (letter
== '?')
4447 asm_fprintf (file
, m68k_library_id_string
);
4448 else if (letter
== 'p')
4450 output_addr_const (file
, op
);
4451 if (!(GET_CODE (op
) == SYMBOL_REF
&& SYMBOL_REF_LOCAL_P (op
)))
4452 fprintf (file
, "@PLTPC");
4454 else if (GET_CODE (op
) == REG
)
4457 /* Print out the second register name of a register pair.
4458 I.e., R (6) => 7. */
4459 fputs (M68K_REGNAME(REGNO (op
) + 1), file
);
4461 fputs (M68K_REGNAME(REGNO (op
)), file
);
4463 else if (GET_CODE (op
) == MEM
)
4465 output_address (XEXP (op
, 0));
4466 if (letter
== 'd' && ! TARGET_68020
4467 && CONSTANT_ADDRESS_P (XEXP (op
, 0))
4468 && !(GET_CODE (XEXP (op
, 0)) == CONST_INT
4469 && INTVAL (XEXP (op
, 0)) < 0x8000
4470 && INTVAL (XEXP (op
, 0)) >= -0x8000))
4471 fprintf (file
, MOTOROLA
? ".l" : ":l");
4473 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == SFmode
)
4477 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
4478 REAL_VALUE_TO_TARGET_SINGLE (r
, l
);
4479 asm_fprintf (file
, "%I0x%lx", l
& 0xFFFFFFFF);
4481 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == XFmode
)
4485 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
4486 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r
, l
);
4487 asm_fprintf (file
, "%I0x%lx%08lx%08lx", l
[0] & 0xFFFFFFFF,
4488 l
[1] & 0xFFFFFFFF, l
[2] & 0xFFFFFFFF);
4490 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == DFmode
)
4494 REAL_VALUE_FROM_CONST_DOUBLE (r
, op
);
4495 REAL_VALUE_TO_TARGET_DOUBLE (r
, l
);
4496 asm_fprintf (file
, "%I0x%lx%08lx", l
[0] & 0xFFFFFFFF, l
[1] & 0xFFFFFFFF);
4500 /* Use `print_operand_address' instead of `output_addr_const'
4501 to ensure that we print relevant PIC stuff. */
4502 asm_fprintf (file
, "%I");
4504 && (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == CONST
))
4505 print_operand_address (file
, op
);
4507 output_addr_const (file
, op
);
4511 /* Return string for TLS relocation RELOC. */
4514 m68k_get_reloc_decoration (enum m68k_reloc reloc
)
4516 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4517 gcc_assert (MOTOROLA
|| reloc
== RELOC_GOT
);
4524 if (flag_pic
== 1 && TARGET_68020
)
4565 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4568 m68k_output_addr_const_extra (FILE *file
, rtx x
)
4570 if (GET_CODE (x
) == UNSPEC
)
4572 switch (XINT (x
, 1))
4574 case UNSPEC_RELOC16
:
4575 case UNSPEC_RELOC32
:
4576 output_addr_const (file
, XVECEXP (x
, 0, 0));
4577 fputs (m68k_get_reloc_decoration
4578 ((enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1))), file
);
4589 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4592 m68k_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
4594 gcc_assert (size
== 4);
4595 fputs ("\t.long\t", file
);
4596 output_addr_const (file
, x
);
4597 fputs ("@TLSLDO+0x8000", file
);
4600 /* In the name of slightly smaller debug output, and to cater to
4601 general assembler lossage, recognize various UNSPEC sequences
4602 and turn them back into a direct symbol reference. */
4605 m68k_delegitimize_address (rtx orig_x
)
4608 struct m68k_address addr
;
4611 orig_x
= delegitimize_mem_from_attrs (orig_x
);
4616 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
4619 if (!m68k_decompose_address (GET_MODE (x
), x
, false, &addr
)
4620 || addr
.offset
== NULL_RTX
4621 || GET_CODE (addr
.offset
) != CONST
)
4624 unspec
= XEXP (addr
.offset
, 0);
4625 if (GET_CODE (unspec
) == PLUS
&& CONST_INT_P (XEXP (unspec
, 1)))
4626 unspec
= XEXP (unspec
, 0);
4627 if (GET_CODE (unspec
) != UNSPEC
4628 || (XINT (unspec
, 1) != UNSPEC_RELOC16
4629 && XINT (unspec
, 1) != UNSPEC_RELOC32
))
4631 x
= XVECEXP (unspec
, 0, 0);
4632 gcc_assert (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
);
4633 if (unspec
!= XEXP (addr
.offset
, 0))
4634 x
= gen_rtx_PLUS (Pmode
, x
, XEXP (XEXP (addr
.offset
, 0), 1));
4637 rtx idx
= addr
.index
;
4638 if (addr
.scale
!= 1)
4639 idx
= gen_rtx_MULT (Pmode
, idx
, GEN_INT (addr
.scale
));
4640 x
= gen_rtx_PLUS (Pmode
, idx
, x
);
4643 x
= gen_rtx_PLUS (Pmode
, addr
.base
, x
);
4645 x
= replace_equiv_address_nv (orig_x
, x
);
4650 /* A C compound statement to output to stdio stream STREAM the
4651 assembler syntax for an instruction operand that is a memory
4652 reference whose address is ADDR. ADDR is an RTL expression.
4654 Note that this contains a kludge that knows that the only reason
4655 we have an address (plus (label_ref...) (reg...)) when not generating
4656 PIC code is in the insn before a tablejump, and we know that m68k.md
4657 generates a label LInnn: on such an insn.
4659 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4660 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4662 This routine is responsible for distinguishing between -fpic and -fPIC
4663 style relocations in an address. When generating -fpic code the
4664 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4665 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4668 print_operand_address (FILE *file
, rtx addr
)
4670 struct m68k_address address
;
4672 if (!m68k_decompose_address (QImode
, addr
, true, &address
))
4675 if (address
.code
== PRE_DEC
)
4676 fprintf (file
, MOTOROLA
? "-(%s)" : "%s@-",
4677 M68K_REGNAME (REGNO (address
.base
)));
4678 else if (address
.code
== POST_INC
)
4679 fprintf (file
, MOTOROLA
? "(%s)+" : "%s@+",
4680 M68K_REGNAME (REGNO (address
.base
)));
4681 else if (!address
.base
&& !address
.index
)
4683 /* A constant address. */
4684 gcc_assert (address
.offset
== addr
);
4685 if (GET_CODE (addr
) == CONST_INT
)
4687 /* (xxx).w or (xxx).l. */
4688 if (IN_RANGE (INTVAL (addr
), -0x8000, 0x7fff))
4689 fprintf (file
, MOTOROLA
? "%d.w" : "%d:w", (int) INTVAL (addr
));
4691 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (addr
));
4693 else if (TARGET_PCREL
)
4695 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4697 output_addr_const (file
, addr
);
4698 asm_fprintf (file
, flag_pic
== 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4702 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4703 name ends in `.<letter>', as the last 2 characters can be
4704 mistaken as a size suffix. Put the name in parentheses. */
4705 if (GET_CODE (addr
) == SYMBOL_REF
4706 && strlen (XSTR (addr
, 0)) > 2
4707 && XSTR (addr
, 0)[strlen (XSTR (addr
, 0)) - 2] == '.')
4710 output_addr_const (file
, addr
);
4714 output_addr_const (file
, addr
);
4721 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4722 label being accessed, otherwise it is -1. */
4723 labelno
= (address
.offset
4725 && GET_CODE (address
.offset
) == LABEL_REF
4726 ? CODE_LABEL_NUMBER (XEXP (address
.offset
, 0))
4730 /* Print the "offset(base" component. */
4732 asm_fprintf (file
, "%LL%d(%Rpc,", labelno
);
4736 output_addr_const (file
, address
.offset
);
4740 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
4742 /* Print the ",index" component, if any. */
4747 fprintf (file
, "%s.%c",
4748 M68K_REGNAME (REGNO (address
.index
)),
4749 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
4750 if (address
.scale
!= 1)
4751 fprintf (file
, "*%d", address
.scale
);
4755 else /* !MOTOROLA */
4757 if (!address
.offset
&& !address
.index
)
4758 fprintf (file
, "%s@", M68K_REGNAME (REGNO (address
.base
)));
4761 /* Print the "base@(offset" component. */
4763 asm_fprintf (file
, "%Rpc@(%LL%d", labelno
);
4767 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
4768 fprintf (file
, "@(");
4770 output_addr_const (file
, address
.offset
);
4772 /* Print the ",index" component, if any. */
4775 fprintf (file
, ",%s:%c",
4776 M68K_REGNAME (REGNO (address
.index
)),
4777 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
4778 if (address
.scale
!= 1)
4779 fprintf (file
, ":%d", address
.scale
);
4787 /* Check for cases where a clr insns can be omitted from code using
4788 strict_low_part sets. For example, the second clrl here is not needed:
4789 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4791 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4792 insn we are checking for redundancy. TARGET is the register set by the
4796 strict_low_part_peephole_ok (enum machine_mode mode
, rtx first_insn
,
4801 while ((p
= PREV_INSN (p
)))
4803 if (NOTE_INSN_BASIC_BLOCK_P (p
))
4809 /* If it isn't an insn, then give up. */
4813 if (reg_set_p (target
, p
))
4815 rtx set
= single_set (p
);
4818 /* If it isn't an easy to recognize insn, then give up. */
4822 dest
= SET_DEST (set
);
4824 /* If this sets the entire target register to zero, then our
4825 first_insn is redundant. */
4826 if (rtx_equal_p (dest
, target
)
4827 && SET_SRC (set
) == const0_rtx
)
4829 else if (GET_CODE (dest
) == STRICT_LOW_PART
4830 && GET_CODE (XEXP (dest
, 0)) == REG
4831 && REGNO (XEXP (dest
, 0)) == REGNO (target
)
4832 && (GET_MODE_SIZE (GET_MODE (XEXP (dest
, 0)))
4833 <= GET_MODE_SIZE (mode
)))
4834 /* This is a strict low part set which modifies less than
4835 we are using, so it is safe. */
4845 /* Operand predicates for implementing asymmetric pc-relative addressing
4846 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4847 when used as a source operand, but not as a destination operand.
4849 We model this by restricting the meaning of the basic predicates
4850 (general_operand, memory_operand, etc) to forbid the use of this
4851 addressing mode, and then define the following predicates that permit
4852 this addressing mode. These predicates can then be used for the
4853 source operands of the appropriate instructions.
4855 n.b. While it is theoretically possible to change all machine patterns
4856 to use this addressing mode where permitted by the architecture,
4857 it has only been implemented for "common" cases: SImode, HImode, and
4858 QImode operands, and only for the principal operations that would
4859 require this addressing mode: data movement and simple integer operations.
4861 In parallel with these new predicates, two new constraint letters
4862 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4863 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4864 In the pcrel case 's' is only valid in combination with 'a' registers.
4865 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4866 of how these constraints are used.
4868 The use of these predicates is strictly optional, though patterns that
4869 don't will cause an extra reload register to be allocated where one
4872 lea (abc:w,%pc),%a0 ; need to reload address
4873 moveq &1,%d1 ; since write to pc-relative space
4874 movel %d1,%a0@ ; is not allowed
4876 lea (abc:w,%pc),%a1 ; no need to reload address here
4877 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4879 For more info, consult tiemann@cygnus.com.
4882 All of the ugliness with predicates and constraints is due to the
4883 simple fact that the m68k does not allow a pc-relative addressing
4884 mode as a destination. gcc does not distinguish between source and
4885 destination addresses. Hence, if we claim that pc-relative address
4886 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4887 end up with invalid code. To get around this problem, we left
4888 pc-relative modes as invalid addresses, and then added special
4889 predicates and constraints to accept them.
4891 A cleaner way to handle this is to modify gcc to distinguish
4892 between source and destination addresses. We can then say that
4893 pc-relative is a valid source address but not a valid destination
4894 address, and hopefully avoid a lot of the predicate and constraint
4895 hackery. Unfortunately, this would be a pretty big change. It would
4896 be a useful change for a number of ports, but there aren't any current
4897 plans to undertake this.
4899 ***************************************************************************/
4903 output_andsi3 (rtx
*operands
)
4906 if (GET_CODE (operands
[2]) == CONST_INT
4907 && (INTVAL (operands
[2]) | 0xffff) == -1
4908 && (DATA_REG_P (operands
[0])
4909 || offsettable_memref_p (operands
[0]))
4910 && !TARGET_COLDFIRE
)
4912 if (GET_CODE (operands
[0]) != REG
)
4913 operands
[0] = adjust_address (operands
[0], HImode
, 2);
4914 operands
[2] = GEN_INT (INTVAL (operands
[2]) & 0xffff);
4915 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4917 if (operands
[2] == const0_rtx
)
4919 return "and%.w %2,%0";
4921 if (GET_CODE (operands
[2]) == CONST_INT
4922 && (logval
= exact_log2 (~ INTVAL (operands
[2]) & 0xffffffff)) >= 0
4923 && (DATA_REG_P (operands
[0])
4924 || offsettable_memref_p (operands
[0])))
4926 if (DATA_REG_P (operands
[0]))
4927 operands
[1] = GEN_INT (logval
);
4930 operands
[0] = adjust_address (operands
[0], SImode
, 3 - (logval
/ 8));
4931 operands
[1] = GEN_INT (logval
% 8);
4933 /* This does not set condition codes in a standard way. */
4935 return "bclr %1,%0";
4937 return "and%.l %2,%0";
4941 output_iorsi3 (rtx
*operands
)
4943 register int logval
;
4944 if (GET_CODE (operands
[2]) == CONST_INT
4945 && INTVAL (operands
[2]) >> 16 == 0
4946 && (DATA_REG_P (operands
[0])
4947 || offsettable_memref_p (operands
[0]))
4948 && !TARGET_COLDFIRE
)
4950 if (GET_CODE (operands
[0]) != REG
)
4951 operands
[0] = adjust_address (operands
[0], HImode
, 2);
4952 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4954 if (INTVAL (operands
[2]) == 0xffff)
4955 return "mov%.w %2,%0";
4956 return "or%.w %2,%0";
4958 if (GET_CODE (operands
[2]) == CONST_INT
4959 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
4960 && (DATA_REG_P (operands
[0])
4961 || offsettable_memref_p (operands
[0])))
4963 if (DATA_REG_P (operands
[0]))
4964 operands
[1] = GEN_INT (logval
);
4967 operands
[0] = adjust_address (operands
[0], SImode
, 3 - (logval
/ 8));
4968 operands
[1] = GEN_INT (logval
% 8);
4971 return "bset %1,%0";
4973 return "or%.l %2,%0";
4977 output_xorsi3 (rtx
*operands
)
4979 register int logval
;
4980 if (GET_CODE (operands
[2]) == CONST_INT
4981 && INTVAL (operands
[2]) >> 16 == 0
4982 && (offsettable_memref_p (operands
[0]) || DATA_REG_P (operands
[0]))
4983 && !TARGET_COLDFIRE
)
4985 if (! DATA_REG_P (operands
[0]))
4986 operands
[0] = adjust_address (operands
[0], HImode
, 2);
4987 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4989 if (INTVAL (operands
[2]) == 0xffff)
4991 return "eor%.w %2,%0";
4993 if (GET_CODE (operands
[2]) == CONST_INT
4994 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
4995 && (DATA_REG_P (operands
[0])
4996 || offsettable_memref_p (operands
[0])))
4998 if (DATA_REG_P (operands
[0]))
4999 operands
[1] = GEN_INT (logval
);
5002 operands
[0] = adjust_address (operands
[0], SImode
, 3 - (logval
/ 8));
5003 operands
[1] = GEN_INT (logval
% 8);
5006 return "bchg %1,%0";
5008 return "eor%.l %2,%0";
5011 /* Return the instruction that should be used for a call to address X,
5012 which is known to be in operand 0. */
5017 if (symbolic_operand (x
, VOIDmode
))
5018 return m68k_symbolic_call
;
5023 /* Likewise sibling calls. */
5026 output_sibcall (rtx x
)
5028 if (symbolic_operand (x
, VOIDmode
))
5029 return m68k_symbolic_jump
;
5035 m68k_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
5036 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
5039 rtx this_slot
, offset
, addr
, mem
, insn
, tmp
;
5041 /* Avoid clobbering the struct value reg by using the
5042 static chain reg as a temporary. */
5043 tmp
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
5045 /* Pretend to be a post-reload pass while generating rtl. */
5046 reload_completed
= 1;
5048 /* The "this" pointer is stored at 4(%sp). */
5049 this_slot
= gen_rtx_MEM (Pmode
, plus_constant (Pmode
,
5050 stack_pointer_rtx
, 4));
5052 /* Add DELTA to THIS. */
5055 /* Make the offset a legitimate operand for memory addition. */
5056 offset
= GEN_INT (delta
);
5057 if ((delta
< -8 || delta
> 8)
5058 && (TARGET_COLDFIRE
|| USE_MOVQ (delta
)))
5060 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), offset
);
5061 offset
= gen_rtx_REG (Pmode
, D0_REG
);
5063 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5064 copy_rtx (this_slot
), offset
));
5067 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5068 if (vcall_offset
!= 0)
5070 /* Set the static chain register to *THIS. */
5071 emit_move_insn (tmp
, this_slot
);
5072 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
5074 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5075 addr
= plus_constant (Pmode
, tmp
, vcall_offset
);
5076 if (!m68k_legitimate_address_p (Pmode
, addr
, true))
5078 emit_insn (gen_rtx_SET (VOIDmode
, tmp
, addr
));
5082 /* Load the offset into %d0 and add it to THIS. */
5083 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
),
5084 gen_rtx_MEM (Pmode
, addr
));
5085 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5086 copy_rtx (this_slot
),
5087 gen_rtx_REG (Pmode
, D0_REG
)));
5090 /* Jump to the target function. Use a sibcall if direct jumps are
5091 allowed, otherwise load the address into a register first. */
5092 mem
= DECL_RTL (function
);
5093 if (!sibcall_operand (XEXP (mem
, 0), VOIDmode
))
5095 gcc_assert (flag_pic
);
5097 if (!TARGET_SEP_DATA
)
5099 /* Use the static chain register as a temporary (call-clobbered)
5100 GOT pointer for this function. We can use the static chain
5101 register because it isn't live on entry to the thunk. */
5102 SET_REGNO (pic_offset_table_rtx
, STATIC_CHAIN_REGNUM
);
5103 emit_insn (gen_load_got (pic_offset_table_rtx
));
5105 legitimize_pic_address (XEXP (mem
, 0), Pmode
, tmp
);
5106 mem
= replace_equiv_address (mem
, tmp
);
5108 insn
= emit_call_insn (gen_sibcall (mem
, const0_rtx
));
5109 SIBLING_CALL_P (insn
) = 1;
5111 /* Run just enough of rest_of_compilation. */
5112 insn
= get_insns ();
5113 split_all_insns_noflow ();
5114 final_start_function (insn
, file
, 1);
5115 final (insn
, file
, 1);
5116 final_end_function ();
5118 /* Clean up the vars set above. */
5119 reload_completed
= 0;
5121 /* Restore the original PIC register. */
5123 SET_REGNO (pic_offset_table_rtx
, PIC_REG
);
5126 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5129 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED
,
5130 int incoming ATTRIBUTE_UNUSED
)
5132 return gen_rtx_REG (Pmode
, M68K_STRUCT_VALUE_REGNUM
);
5135 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5137 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED
,
5138 unsigned int new_reg
)
5141 /* Interrupt functions can only use registers that have already been
5142 saved by the prologue, even if they would normally be
5145 if ((m68k_get_function_kind (current_function_decl
)
5146 == m68k_fk_interrupt_handler
)
5147 && !df_regs_ever_live_p (new_reg
))
5153 /* Value is true if hard register REGNO can hold a value of machine-mode
5154 MODE. On the 68000, we let the cpu registers can hold any mode, but
5155 restrict the 68881 registers to floating-point modes. */
5158 m68k_regno_mode_ok (int regno
, enum machine_mode mode
)
5160 if (DATA_REGNO_P (regno
))
5162 /* Data Registers, can hold aggregate if fits in. */
5163 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 8)
5166 else if (ADDRESS_REGNO_P (regno
))
5168 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 16)
5171 else if (FP_REGNO_P (regno
))
5173 /* FPU registers, hold float or complex float of long double or
5175 if ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5176 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5177 && GET_MODE_UNIT_SIZE (mode
) <= TARGET_FP_REG_SIZE
)
5183 /* Implement SECONDARY_RELOAD_CLASS. */
5186 m68k_secondary_reload_class (enum reg_class rclass
,
5187 enum machine_mode mode
, rtx x
)
5191 regno
= true_regnum (x
);
5193 /* If one operand of a movqi is an address register, the other
5194 operand must be a general register or constant. Other types
5195 of operand must be reloaded through a data register. */
5196 if (GET_MODE_SIZE (mode
) == 1
5197 && reg_classes_intersect_p (rclass
, ADDR_REGS
)
5198 && !(INT_REGNO_P (regno
) || CONSTANT_P (x
)))
5201 /* PC-relative addresses must be loaded into an address register first. */
5203 && !reg_class_subset_p (rclass
, ADDR_REGS
)
5204 && symbolic_operand (x
, VOIDmode
))
5210 /* Implement PREFERRED_RELOAD_CLASS. */
5213 m68k_preferred_reload_class (rtx x
, enum reg_class rclass
)
5215 enum reg_class secondary_class
;
5217 /* If RCLASS might need a secondary reload, try restricting it to
5218 a class that doesn't. */
5219 secondary_class
= m68k_secondary_reload_class (rclass
, GET_MODE (x
), x
);
5220 if (secondary_class
!= NO_REGS
5221 && reg_class_subset_p (secondary_class
, rclass
))
5222 return secondary_class
;
5224 /* Prefer to use moveq for in-range constants. */
5225 if (GET_CODE (x
) == CONST_INT
5226 && reg_class_subset_p (DATA_REGS
, rclass
)
5227 && IN_RANGE (INTVAL (x
), -0x80, 0x7f))
5230 /* ??? Do we really need this now? */
5231 if (GET_CODE (x
) == CONST_DOUBLE
5232 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
5234 if (TARGET_HARD_FLOAT
&& reg_class_subset_p (FP_REGS
, rclass
))
5243 /* Return floating point values in a 68881 register. This makes 68881 code
5244 a little bit faster. It also makes -msoft-float code incompatible with
5245 hard-float code, so people have to be careful not to mix the two.
5246 For ColdFire it was decided the ABI incompatibility is undesirable.
5247 If there is need for a hard-float ABI it is probably worth doing it
5248 properly and also passing function arguments in FP registers. */
5250 m68k_libcall_value (enum machine_mode mode
)
5257 return gen_rtx_REG (mode
, FP0_REG
);
5263 return gen_rtx_REG (mode
, m68k_libcall_value_in_a0_p
? A0_REG
: D0_REG
);
5266 /* Location in which function value is returned.
5267 NOTE: Due to differences in ABIs, don't call this function directly,
5268 use FUNCTION_VALUE instead. */
5270 m68k_function_value (const_tree valtype
, const_tree func ATTRIBUTE_UNUSED
)
5272 enum machine_mode mode
;
5274 mode
= TYPE_MODE (valtype
);
5280 return gen_rtx_REG (mode
, FP0_REG
);
5286 /* If the function returns a pointer, push that into %a0. */
5287 if (func
&& POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func
))))
5288 /* For compatibility with the large body of existing code which
5289 does not always properly declare external functions returning
5290 pointer types, the m68k/SVR4 convention is to copy the value
5291 returned for pointer functions from a0 to d0 in the function
5292 epilogue, so that callers that have neglected to properly
5293 declare the callee can still find the correct return value in
5295 return gen_rtx_PARALLEL
5298 gen_rtx_EXPR_LIST (VOIDmode
,
5299 gen_rtx_REG (mode
, A0_REG
),
5301 gen_rtx_EXPR_LIST (VOIDmode
,
5302 gen_rtx_REG (mode
, D0_REG
),
5304 else if (POINTER_TYPE_P (valtype
))
5305 return gen_rtx_REG (mode
, A0_REG
);
5307 return gen_rtx_REG (mode
, D0_REG
);
5310 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5311 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5313 m68k_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
5315 enum machine_mode mode
= TYPE_MODE (type
);
5317 if (mode
== BLKmode
)
5320 /* If TYPE's known alignment is less than the alignment of MODE that
5321 would contain the structure, then return in memory. We need to
5322 do so to maintain the compatibility between code compiled with
5323 -mstrict-align and that compiled with -mno-strict-align. */
5324 if (AGGREGATE_TYPE_P (type
)
5325 && TYPE_ALIGN (type
) < GET_MODE_ALIGNMENT (mode
))
5332 /* CPU to schedule the program for. */
5333 enum attr_cpu m68k_sched_cpu
;
5335 /* MAC to schedule the program for. */
5336 enum attr_mac m68k_sched_mac
;
5344 /* Integer register. */
5350 /* Implicit mem reference (e.g. stack). */
5353 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5356 /* Memory with offset but without indexing. EA mode 5. */
5359 /* Memory with indexing. EA mode 6. */
5362 /* Memory referenced by absolute address. EA mode 7. */
5365 /* Immediate operand that doesn't require extension word. */
5368 /* Immediate 16 bit operand. */
5371 /* Immediate 32 bit operand. */
5375 /* Return type of memory ADDR_RTX refers to. */
5376 static enum attr_op_type
5377 sched_address_type (enum machine_mode mode
, rtx addr_rtx
)
5379 struct m68k_address address
;
5381 if (symbolic_operand (addr_rtx
, VOIDmode
))
5382 return OP_TYPE_MEM7
;
5384 if (!m68k_decompose_address (mode
, addr_rtx
,
5385 reload_completed
, &address
))
5387 gcc_assert (!reload_completed
);
5388 /* Reload will likely fix the address to be in the register. */
5389 return OP_TYPE_MEM234
;
5392 if (address
.scale
!= 0)
5393 return OP_TYPE_MEM6
;
5395 if (address
.base
!= NULL_RTX
)
5397 if (address
.offset
== NULL_RTX
)
5398 return OP_TYPE_MEM234
;
5400 return OP_TYPE_MEM5
;
5403 gcc_assert (address
.offset
!= NULL_RTX
);
5405 return OP_TYPE_MEM7
;
5408 /* Return X or Y (depending on OPX_P) operand of INSN. */
5410 sched_get_operand (rtx insn
, bool opx_p
)
5414 if (recog_memoized (insn
) < 0)
5417 extract_constrain_insn_cached (insn
);
5420 i
= get_attr_opx (insn
);
5422 i
= get_attr_opy (insn
);
5424 if (i
>= recog_data
.n_operands
)
5427 return recog_data
.operand
[i
];
5430 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5431 If ADDRESS_P is true, return type of memory location operand refers to. */
5432 static enum attr_op_type
5433 sched_attr_op_type (rtx insn
, bool opx_p
, bool address_p
)
5437 op
= sched_get_operand (insn
, opx_p
);
5441 gcc_assert (!reload_completed
);
5446 return sched_address_type (QImode
, op
);
5448 if (memory_operand (op
, VOIDmode
))
5449 return sched_address_type (GET_MODE (op
), XEXP (op
, 0));
5451 if (register_operand (op
, VOIDmode
))
5453 if ((!reload_completed
&& FLOAT_MODE_P (GET_MODE (op
)))
5454 || (reload_completed
&& FP_REG_P (op
)))
5460 if (GET_CODE (op
) == CONST_INT
)
5466 /* Check for quick constants. */
5467 switch (get_attr_type (insn
))
5470 if (IN_RANGE (ival
, 1, 8) || IN_RANGE (ival
, -8, -1))
5471 return OP_TYPE_IMM_Q
;
5473 gcc_assert (!reload_completed
);
5477 if (USE_MOVQ (ival
))
5478 return OP_TYPE_IMM_Q
;
5480 gcc_assert (!reload_completed
);
5484 if (valid_mov3q_const (ival
))
5485 return OP_TYPE_IMM_Q
;
5487 gcc_assert (!reload_completed
);
5494 if (IN_RANGE (ival
, -0x8000, 0x7fff))
5495 return OP_TYPE_IMM_W
;
5497 return OP_TYPE_IMM_L
;
5500 if (GET_CODE (op
) == CONST_DOUBLE
)
5502 switch (GET_MODE (op
))
5505 return OP_TYPE_IMM_W
;
5509 return OP_TYPE_IMM_L
;
5516 if (GET_CODE (op
) == CONST
5517 || symbolic_operand (op
, VOIDmode
)
5520 switch (GET_MODE (op
))
5523 return OP_TYPE_IMM_Q
;
5526 return OP_TYPE_IMM_W
;
5529 return OP_TYPE_IMM_L
;
5532 if (symbolic_operand (m68k_unwrap_symbol (op
, false), VOIDmode
))
5534 return OP_TYPE_IMM_W
;
5536 return OP_TYPE_IMM_L
;
5540 gcc_assert (!reload_completed
);
5542 if (FLOAT_MODE_P (GET_MODE (op
)))
5548 /* Implement opx_type attribute.
5549 Return type of INSN's operand X.
5550 If ADDRESS_P is true, return type of memory location operand refers to. */
5552 m68k_sched_attr_opx_type (rtx insn
, int address_p
)
5554 switch (sched_attr_op_type (insn
, true, address_p
!= 0))
5560 return OPX_TYPE_FPN
;
5563 return OPX_TYPE_MEM1
;
5565 case OP_TYPE_MEM234
:
5566 return OPX_TYPE_MEM234
;
5569 return OPX_TYPE_MEM5
;
5572 return OPX_TYPE_MEM6
;
5575 return OPX_TYPE_MEM7
;
5578 return OPX_TYPE_IMM_Q
;
5581 return OPX_TYPE_IMM_W
;
5584 return OPX_TYPE_IMM_L
;
5591 /* Implement opy_type attribute.
5592 Return type of INSN's operand Y.
5593 If ADDRESS_P is true, return type of memory location operand refers to. */
5595 m68k_sched_attr_opy_type (rtx insn
, int address_p
)
5597 switch (sched_attr_op_type (insn
, false, address_p
!= 0))
5603 return OPY_TYPE_FPN
;
5606 return OPY_TYPE_MEM1
;
5608 case OP_TYPE_MEM234
:
5609 return OPY_TYPE_MEM234
;
5612 return OPY_TYPE_MEM5
;
5615 return OPY_TYPE_MEM6
;
5618 return OPY_TYPE_MEM7
;
5621 return OPY_TYPE_IMM_Q
;
5624 return OPY_TYPE_IMM_W
;
5627 return OPY_TYPE_IMM_L
;
5634 /* Return size of INSN as int. */
5636 sched_get_attr_size_int (rtx insn
)
5640 switch (get_attr_type (insn
))
5643 /* There should be no references to m68k_sched_attr_size for 'ignore'
5657 switch (get_attr_opx_type (insn
))
5663 case OPX_TYPE_MEM234
:
5664 case OPY_TYPE_IMM_Q
:
5669 /* Here we assume that most absolute references are short. */
5671 case OPY_TYPE_IMM_W
:
5675 case OPY_TYPE_IMM_L
:
5683 switch (get_attr_opy_type (insn
))
5689 case OPY_TYPE_MEM234
:
5690 case OPY_TYPE_IMM_Q
:
5695 /* Here we assume that most absolute references are short. */
5697 case OPY_TYPE_IMM_W
:
5701 case OPY_TYPE_IMM_L
:
5711 gcc_assert (!reload_completed
);
5719 /* Return size of INSN as attribute enum value. */
5721 m68k_sched_attr_size (rtx insn
)
5723 switch (sched_get_attr_size_int (insn
))
5739 /* Return operand X or Y (depending on OPX_P) of INSN,
5740 if it is a MEM, or NULL otherwise. */
5741 static enum attr_op_type
5742 sched_get_opxy_mem_type (rtx insn
, bool opx_p
)
5746 switch (get_attr_opx_type (insn
))
5751 case OPX_TYPE_IMM_Q
:
5752 case OPX_TYPE_IMM_W
:
5753 case OPX_TYPE_IMM_L
:
5757 case OPX_TYPE_MEM234
:
5760 return OP_TYPE_MEM1
;
5763 return OP_TYPE_MEM6
;
5771 switch (get_attr_opy_type (insn
))
5776 case OPY_TYPE_IMM_Q
:
5777 case OPY_TYPE_IMM_W
:
5778 case OPY_TYPE_IMM_L
:
5782 case OPY_TYPE_MEM234
:
5785 return OP_TYPE_MEM1
;
5788 return OP_TYPE_MEM6
;
5796 /* Implement op_mem attribute. */
5798 m68k_sched_attr_op_mem (rtx insn
)
5800 enum attr_op_type opx
;
5801 enum attr_op_type opy
;
5803 opx
= sched_get_opxy_mem_type (insn
, true);
5804 opy
= sched_get_opxy_mem_type (insn
, false);
5806 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_RN
)
5809 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM1
)
5811 switch (get_attr_opx_access (insn
))
5827 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM6
)
5829 switch (get_attr_opx_access (insn
))
5845 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_RN
)
5848 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM1
)
5850 switch (get_attr_opx_access (insn
))
5856 gcc_assert (!reload_completed
);
5861 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM6
)
5863 switch (get_attr_opx_access (insn
))
5869 gcc_assert (!reload_completed
);
5874 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_RN
)
5877 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM1
)
5879 switch (get_attr_opx_access (insn
))
5885 gcc_assert (!reload_completed
);
5890 gcc_assert (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM6
);
5891 gcc_assert (!reload_completed
);
5895 /* Data for ColdFire V4 index bypass.
5896 Producer modifies register that is used as index in consumer with
5900 /* Producer instruction. */
5903 /* Consumer instruction. */
5906 /* Scale of indexed memory access within consumer.
5907 Or zero if bypass should not be effective at the moment. */
5909 } sched_cfv4_bypass_data
;
5911 /* An empty state that is used in m68k_sched_adjust_cost. */
5912 static state_t sched_adjust_cost_state
;
5914 /* Implement adjust_cost scheduler hook.
5915 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5917 m68k_sched_adjust_cost (rtx insn
, rtx link ATTRIBUTE_UNUSED
, rtx def_insn
,
5922 if (recog_memoized (def_insn
) < 0
5923 || recog_memoized (insn
) < 0)
5926 if (sched_cfv4_bypass_data
.scale
== 1)
5927 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5929 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5930 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5931 that the data in sched_cfv4_bypass_data is up to date. */
5932 gcc_assert (sched_cfv4_bypass_data
.pro
== def_insn
5933 && sched_cfv4_bypass_data
.con
== insn
);
5938 sched_cfv4_bypass_data
.pro
= NULL
;
5939 sched_cfv4_bypass_data
.con
= NULL
;
5940 sched_cfv4_bypass_data
.scale
= 0;
5943 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
5944 && sched_cfv4_bypass_data
.con
== NULL
5945 && sched_cfv4_bypass_data
.scale
== 0);
5947 /* Don't try to issue INSN earlier than DFA permits.
5948 This is especially useful for instructions that write to memory,
5949 as their true dependence (default) latency is better to be set to 0
5950 to workaround alias analysis limitations.
5951 This is, in fact, a machine independent tweak, so, probably,
5952 it should be moved to haifa-sched.c: insn_cost (). */
5953 delay
= min_insn_conflict_delay (sched_adjust_cost_state
, def_insn
, insn
);
5960 /* Return maximal number of insns that can be scheduled on a single cycle. */
5962 m68k_sched_issue_rate (void)
5964 switch (m68k_sched_cpu
)
5980 /* Maximal length of instruction for current CPU.
5981 E.g. it is 3 for any ColdFire core. */
5982 static int max_insn_size
;
5984 /* Data to model instruction buffer of CPU. */
5987 /* True if instruction buffer model is modeled for current CPU. */
5990 /* Size of the instruction buffer in words. */
5993 /* Number of filled words in the instruction buffer. */
5996 /* Additional information about instruction buffer for CPUs that have
5997 a buffer of instruction records, rather than a plain buffer
5998 of instruction words. */
5999 struct _sched_ib_records
6001 /* Size of buffer in records. */
6004 /* Array to hold data on adjustements made to the size of the buffer. */
6007 /* Index of the above array. */
6011 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6015 static struct _sched_ib sched_ib
;
6017 /* ID of memory unit. */
6018 static int sched_mem_unit_code
;
6020 /* Implementation of the targetm.sched.variable_issue () hook.
6021 It is called after INSN was issued. It returns the number of insns
6022 that can possibly get scheduled on the current cycle.
6023 It is used here to determine the effect of INSN on the instruction
6026 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED
,
6027 int sched_verbose ATTRIBUTE_UNUSED
,
6028 rtx insn
, int can_issue_more
)
6032 if (recog_memoized (insn
) >= 0 && get_attr_type (insn
) != TYPE_IGNORE
)
6034 switch (m68k_sched_cpu
)
6038 insn_size
= sched_get_attr_size_int (insn
);
6042 insn_size
= sched_get_attr_size_int (insn
);
6044 /* ColdFire V3 and V4 cores have instruction buffers that can
6045 accumulate up to 8 instructions regardless of instructions'
6046 sizes. So we should take care not to "prefetch" 24 one-word
6047 or 12 two-words instructions.
6048 To model this behavior we temporarily decrease size of the
6049 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6053 adjust
= max_insn_size
- insn_size
;
6054 sched_ib
.size
-= adjust
;
6056 if (sched_ib
.filled
> sched_ib
.size
)
6057 sched_ib
.filled
= sched_ib
.size
;
6059 sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
] = adjust
;
6062 ++sched_ib
.records
.adjust_index
;
6063 if (sched_ib
.records
.adjust_index
== sched_ib
.records
.n_insns
)
6064 sched_ib
.records
.adjust_index
= 0;
6066 /* Undo adjustement we did 7 instructions ago. */
6068 += sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
];
6073 gcc_assert (!sched_ib
.enabled_p
);
6081 if (insn_size
> sched_ib
.filled
)
6082 /* Scheduling for register pressure does not always take DFA into
6083 account. Workaround instruction buffer not being filled enough. */
6085 gcc_assert (sched_pressure
== SCHED_PRESSURE_WEIGHTED
);
6086 insn_size
= sched_ib
.filled
;
6091 else if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6092 || asm_noperands (PATTERN (insn
)) >= 0)
6093 insn_size
= sched_ib
.filled
;
6097 sched_ib
.filled
-= insn_size
;
6099 return can_issue_more
;
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */

static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}
6110 /* Implementation of targetm.sched.init_global () hook.
6111 It is invoked once per scheduling pass and is used here
6112 to initialize scheduler constants. */
6114 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED
,
6115 int sched_verbose ATTRIBUTE_UNUSED
,
6116 int n_insns ATTRIBUTE_UNUSED
)
6118 #ifdef ENABLE_CHECKING
6119 /* Check that all instructions have DFA reservations and
6120 that all instructions can be issued from a clean state. */
6125 state
= alloca (state_size ());
6127 for (insn
= get_insns (); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
6129 if (INSN_P (insn
) && recog_memoized (insn
) >= 0)
6131 gcc_assert (insn_has_dfa_reservation_p (insn
));
6133 state_reset (state
);
6134 if (state_transition (state
, insn
) >= 0)
6141 /* Setup target cpu. */
6143 /* ColdFire V4 has a set of features to keep its instruction buffer full
6144 (e.g., a separate memory bus for instructions) and, hence, we do not model
6145 buffer for this CPU. */
6146 sched_ib
.enabled_p
= (m68k_sched_cpu
!= CPU_CFV4
);
6148 switch (m68k_sched_cpu
)
6151 sched_ib
.filled
= 0;
6158 sched_ib
.records
.n_insns
= 0;
6159 sched_ib
.records
.adjust
= NULL
;
6164 sched_ib
.records
.n_insns
= 8;
6165 sched_ib
.records
.adjust
= XNEWVEC (int, sched_ib
.records
.n_insns
);
6172 sched_mem_unit_code
= get_cpu_unit_code ("cf_mem1");
6174 sched_adjust_cost_state
= xmalloc (state_size ());
6175 state_reset (sched_adjust_cost_state
);
6178 emit_insn (gen_ib ());
6179 sched_ib
.insn
= get_insns ();
6183 /* Scheduling pass is now finished. Free/reset static variables. */
6185 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED
,
6186 int verbose ATTRIBUTE_UNUSED
)
6188 sched_ib
.insn
= NULL
;
6190 free (sched_adjust_cost_state
);
6191 sched_adjust_cost_state
= NULL
;
6193 sched_mem_unit_code
= 0;
6195 free (sched_ib
.records
.adjust
);
6196 sched_ib
.records
.adjust
= NULL
;
6197 sched_ib
.records
.n_insns
= 0;
6201 /* Implementation of targetm.sched.init () hook.
6202 It is invoked each time scheduler starts on the new block (basic block or
6203 extended basic block). */
6205 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED
,
6206 int sched_verbose ATTRIBUTE_UNUSED
,
6207 int n_insns ATTRIBUTE_UNUSED
)
6209 switch (m68k_sched_cpu
)
6217 sched_ib
.size
= sched_ib
.records
.n_insns
* max_insn_size
;
6219 memset (sched_ib
.records
.adjust
, 0,
6220 sched_ib
.records
.n_insns
* sizeof (*sched_ib
.records
.adjust
));
6221 sched_ib
.records
.adjust_index
= 0;
6225 gcc_assert (!sched_ib
.enabled_p
);
6233 if (sched_ib
.enabled_p
)
6234 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6235 the first cycle. Workaround that. */
6236 sched_ib
.filled
= -2;
6239 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6240 It is invoked just before current cycle finishes and is used here
6241 to track if instruction buffer got its two words this cycle. */
6243 m68k_sched_dfa_pre_advance_cycle (void)
6245 if (!sched_ib
.enabled_p
)
6248 if (!cpu_unit_reservation_p (curr_state
, sched_mem_unit_code
))
6250 sched_ib
.filled
+= 2;
6252 if (sched_ib
.filled
> sched_ib
.size
)
6253 sched_ib
.filled
= sched_ib
.size
;
6257 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6258 It is invoked just after new cycle begins and is used here
6259 to setup number of filled words in the instruction buffer so that
6260 instructions which won't have all their words prefetched would be
6261 stalled for a cycle. */
6263 m68k_sched_dfa_post_advance_cycle (void)
6267 if (!sched_ib
.enabled_p
)
6270 /* Setup number of prefetched instruction words in the instruction
6272 i
= max_insn_size
- sched_ib
.filled
;
6276 if (state_transition (curr_state
, sched_ib
.insn
) >= 0)
6277 /* Pick up scheduler state. */
6282 /* Return X or Y (depending on OPX_P) operand of INSN,
6283 if it is an integer register, or NULL otherwise. */
6285 sched_get_reg_operand (rtx insn
, bool opx_p
)
6291 if (get_attr_opx_type (insn
) == OPX_TYPE_RN
)
6293 op
= sched_get_operand (insn
, true);
6294 gcc_assert (op
!= NULL
);
6296 if (!reload_completed
&& !REG_P (op
))
6302 if (get_attr_opy_type (insn
) == OPY_TYPE_RN
)
6304 op
= sched_get_operand (insn
, false);
6305 gcc_assert (op
!= NULL
);
6307 if (!reload_completed
&& !REG_P (op
))
6315 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6318 sched_mem_operand_p (rtx insn
, bool opx_p
)
6320 switch (sched_get_opxy_mem_type (insn
, opx_p
))
6331 /* Return X or Y (depending on OPX_P) operand of INSN,
6332 if it is a MEM, or NULL otherwise. */
6334 sched_get_mem_operand (rtx insn
, bool must_read_p
, bool must_write_p
)
6354 if (opy_p
&& sched_mem_operand_p (insn
, false))
6355 return sched_get_operand (insn
, false);
6357 if (opx_p
&& sched_mem_operand_p (insn
, true))
6358 return sched_get_operand (insn
, true);
6364 /* Return non-zero if PRO modifies register used as part of
6367 m68k_sched_address_bypass_p (rtx pro
, rtx con
)
6372 pro_x
= sched_get_reg_operand (pro
, true);
6376 con_mem_read
= sched_get_mem_operand (con
, true, false);
6377 gcc_assert (con_mem_read
!= NULL
);
6379 if (reg_mentioned_p (pro_x
, con_mem_read
))
6385 /* Helper function for m68k_sched_indexed_address_bypass_p.
6386 if PRO modifies register used as index in CON,
6387 return scale of indexed memory access in CON. Return zero otherwise. */
6389 sched_get_indexed_address_scale (rtx pro
, rtx con
)
6393 struct m68k_address address
;
6395 reg
= sched_get_reg_operand (pro
, true);
6399 mem
= sched_get_mem_operand (con
, true, false);
6400 gcc_assert (mem
!= NULL
&& MEM_P (mem
));
6402 if (!m68k_decompose_address (GET_MODE (mem
), XEXP (mem
, 0), reload_completed
,
6406 if (REGNO (reg
) == REGNO (address
.index
))
6408 gcc_assert (address
.scale
!= 0);
6409 return address
.scale
;
6415 /* Return non-zero if PRO modifies register used
6416 as index with scale 2 or 4 in CON. */
6418 m68k_sched_indexed_address_bypass_p (rtx pro
, rtx con
)
6420 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
6421 && sched_cfv4_bypass_data
.con
== NULL
6422 && sched_cfv4_bypass_data
.scale
== 0);
6424 switch (sched_get_indexed_address_scale (pro
, con
))
6427 /* We can't have a variable latency bypass, so
6428 remember to adjust the insn cost in adjust_cost hook. */
6429 sched_cfv4_bypass_data
.pro
= pro
;
6430 sched_cfv4_bypass_data
.con
= con
;
6431 sched_cfv4_bypass_data
.scale
= 1;
6443 /* We generate a two-instructions program at M_TRAMP :
6444 movea.l &CHAIN_VALUE,%a0
6446 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6449 m68k_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
6451 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
6454 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM
));
6456 mem
= adjust_address (m_tramp
, HImode
, 0);
6457 emit_move_insn (mem
, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM
-8) << 9)));
6458 mem
= adjust_address (m_tramp
, SImode
, 2);
6459 emit_move_insn (mem
, chain_value
);
6461 mem
= adjust_address (m_tramp
, HImode
, 6);
6462 emit_move_insn (mem
, GEN_INT(0x4EF9));
6463 mem
= adjust_address (m_tramp
, SImode
, 8);
6464 emit_move_insn (mem
, fnaddr
);
6466 FINALIZE_TRAMPOLINE (XEXP (m_tramp
, 0));
6469 /* On the 68000, the RTS insn cannot pop anything.
6470 On the 68010, the RTD insn may be used to pop them if the number
6471 of args is fixed, but if the number is variable then the caller
6472 must pop them all. RTD can't be used for library calls now
6473 because the library is compiled with the Unix compiler.
6474 Use of RTD is a selectable option, since it is incompatible with
6475 standard Unix calling sequences. If the option is not selected,
6476 the caller must always pop the args. */
6479 m68k_return_pops_args (tree fundecl
, tree funtype
, int size
)
6483 || TREE_CODE (fundecl
) != IDENTIFIER_NODE
)
6484 && (!stdarg_p (funtype
)))
6488 /* Make sure everything's fine if we *don't* have a given processor.
6489 This assumes that putting a register in fixed_regs will keep the
6490 compiler's mitts completely off it. We don't bother to zero it out
6491 of register classes. */
6494 m68k_conditional_register_usage (void)
6498 if (!TARGET_HARD_FLOAT
)
6500 COPY_HARD_REG_SET (x
, reg_class_contents
[(int)FP_REGS
]);
6501 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6502 if (TEST_HARD_REG_BIT (x
, i
))
6503 fixed_regs
[i
] = call_used_regs
[i
] = 1;
6506 fixed_regs
[PIC_REG
] = call_used_regs
[PIC_REG
] = 1;
6510 m68k_init_sync_libfuncs (void)
6512 init_sync_libfuncs (UNITS_PER_WORD
);
6515 /* Implements EPILOGUE_USES. All registers are live on exit from an
6516 interrupt routine. */
6518 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
6520 return (reload_completed
6521 && (m68k_get_function_kind (current_function_decl
)
6522 == m68k_fk_interrupt_handler
));
6525 #include "gt-m68k.h"