2018-07-04 Denys Vlasenko <dvlasenk@redhat.com>
[official-gcc.git] / gcc / config / m68k / m68k.c
blobcea5c0ecab5f0df79e25f5b1bf0728909ed33666
1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "cfghooks.h"
27 #include "tree.h"
28 #include "stringpool.h"
29 #include "attribs.h"
30 #include "rtl.h"
31 #include "df.h"
32 #include "alias.h"
33 #include "fold-const.h"
34 #include "calls.h"
35 #include "stor-layout.h"
36 #include "varasm.h"
37 #include "regs.h"
38 #include "insn-config.h"
39 #include "conditions.h"
40 #include "output.h"
41 #include "insn-attr.h"
42 #include "recog.h"
43 #include "diagnostic-core.h"
44 #include "flags.h"
45 #include "expmed.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "memmodel.h"
49 #include "emit-rtl.h"
50 #include "stmt.h"
51 #include "expr.h"
52 #include "reload.h"
53 #include "tm_p.h"
54 #include "target.h"
55 #include "debug.h"
56 #include "cfgrtl.h"
57 #include "cfganal.h"
58 #include "lcm.h"
59 #include "cfgbuild.h"
60 #include "cfgcleanup.h"
61 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
62 #include "sched-int.h"
63 #include "insn-codes.h"
64 #include "opts.h"
65 #include "optabs.h"
66 #include "builtins.h"
67 #include "rtl-iter.h"
68 #include "toplev.h"
70 /* This file should be included last. */
71 #include "target-def.h"
73 enum reg_class regno_reg_class[] =
75 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
76 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
77 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
78 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
79 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
80 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
81 ADDR_REGS
85 /* The minimum number of integer registers that we want to save with the
86 movem instruction. Using two movel instructions instead of a single
87 moveml is about 15% faster for the 68020 and 68030 at no expense in
88 code size. */
89 #define MIN_MOVEM_REGS 3
91 /* The minimum number of floating point registers that we want to save
92 with the fmovem instruction. */
93 #define MIN_FMOVEM_REGS 1
95 /* Structure describing stack frame layout. */
96 struct m68k_frame
98 /* Stack pointer to frame pointer offset. */
99 HOST_WIDE_INT offset;
101 /* Offset of FPU registers. */
102 HOST_WIDE_INT foffset;
104 /* Frame size in bytes (rounded up). */
105 HOST_WIDE_INT size;
107 /* Data and address register. */
108 int reg_no;
109 unsigned int reg_mask;
111 /* FPU registers. */
112 int fpu_no;
113 unsigned int fpu_mask;
115 /* Offsets relative to ARG_POINTER. */
116 HOST_WIDE_INT frame_pointer_offset;
117 HOST_WIDE_INT stack_pointer_offset;
119 /* Function which the above information refers to. */
120 int funcdef_no;
123 /* Current frame information calculated by m68k_compute_frame_layout(). */
124 static struct m68k_frame current_frame;
126 /* Structure describing an m68k address.
128 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
129 with null fields evaluating to 0. Here:
131 - BASE satisfies m68k_legitimate_base_reg_p
132 - INDEX satisfies m68k_legitimate_index_reg_p
133 - OFFSET satisfies m68k_legitimate_constant_address_p
135 INDEX is either HImode or SImode. The other fields are SImode.
137 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
138 the address is (BASE)+. */
139 struct m68k_address {
140 enum rtx_code code;
141 rtx base;
142 rtx index;
143 rtx offset;
144 int scale;
147 static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
148 unsigned int);
149 static int m68k_sched_issue_rate (void);
150 static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
151 static void m68k_sched_md_init_global (FILE *, int, int);
152 static void m68k_sched_md_finish_global (FILE *, int);
153 static void m68k_sched_md_init (FILE *, int, int);
154 static void m68k_sched_dfa_pre_advance_cycle (void);
155 static void m68k_sched_dfa_post_advance_cycle (void);
156 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
158 static bool m68k_can_eliminate (const int, const int);
159 static void m68k_conditional_register_usage (void);
160 static bool m68k_legitimate_address_p (machine_mode, rtx, bool);
161 static void m68k_option_override (void);
162 static void m68k_override_options_after_change (void);
163 static rtx find_addr_reg (rtx);
164 static const char *singlemove_string (rtx *);
165 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
166 HOST_WIDE_INT, tree);
167 static rtx m68k_struct_value_rtx (tree, int);
168 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
169 tree args, int flags,
170 bool *no_add_attrs);
171 static void m68k_compute_frame_layout (void);
172 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
173 static bool m68k_ok_for_sibcall_p (tree, tree);
174 static bool m68k_tls_symbol_p (rtx);
175 static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
176 static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
177 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
178 static bool m68k_return_in_memory (const_tree, const_tree);
179 #endif
180 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
181 static void m68k_trampoline_init (rtx, tree, rtx);
182 static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
183 static rtx m68k_delegitimize_address (rtx);
184 static void m68k_function_arg_advance (cumulative_args_t, machine_mode,
185 const_tree, bool);
186 static rtx m68k_function_arg (cumulative_args_t, machine_mode,
187 const_tree, bool);
188 static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
189 static bool m68k_output_addr_const_extra (FILE *, rtx);
190 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
191 static enum flt_eval_method
192 m68k_excess_precision (enum excess_precision_type);
193 static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
194 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
195 static bool m68k_modes_tieable_p (machine_mode, machine_mode);
196 static machine_mode m68k_promote_function_mode (const_tree, machine_mode,
197 int *, const_tree, int);
199 /* Initialize the GCC target structure. */
201 #if INT_OP_GROUP == INT_OP_DOT_WORD
202 #undef TARGET_ASM_ALIGNED_HI_OP
203 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
204 #endif
206 #if INT_OP_GROUP == INT_OP_NO_DOT
207 #undef TARGET_ASM_BYTE_OP
208 #define TARGET_ASM_BYTE_OP "\tbyte\t"
209 #undef TARGET_ASM_ALIGNED_HI_OP
210 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
211 #undef TARGET_ASM_ALIGNED_SI_OP
212 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
213 #endif
215 #if INT_OP_GROUP == INT_OP_DC
216 #undef TARGET_ASM_BYTE_OP
217 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
218 #undef TARGET_ASM_ALIGNED_HI_OP
219 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
220 #undef TARGET_ASM_ALIGNED_SI_OP
221 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
222 #endif
224 #undef TARGET_ASM_UNALIGNED_HI_OP
225 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
226 #undef TARGET_ASM_UNALIGNED_SI_OP
227 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
229 #undef TARGET_ASM_OUTPUT_MI_THUNK
230 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
231 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
232 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
234 #undef TARGET_ASM_FILE_START_APP_OFF
235 #define TARGET_ASM_FILE_START_APP_OFF true
237 #undef TARGET_LEGITIMIZE_ADDRESS
238 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
240 #undef TARGET_SCHED_ADJUST_COST
241 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
243 #undef TARGET_SCHED_ISSUE_RATE
244 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
246 #undef TARGET_SCHED_VARIABLE_ISSUE
247 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
249 #undef TARGET_SCHED_INIT_GLOBAL
250 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
252 #undef TARGET_SCHED_FINISH_GLOBAL
253 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
255 #undef TARGET_SCHED_INIT
256 #define TARGET_SCHED_INIT m68k_sched_md_init
258 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
259 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
261 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
262 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
264 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
265 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
266 m68k_sched_first_cycle_multipass_dfa_lookahead
268 #undef TARGET_OPTION_OVERRIDE
269 #define TARGET_OPTION_OVERRIDE m68k_option_override
271 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
272 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
274 #undef TARGET_RTX_COSTS
275 #define TARGET_RTX_COSTS m68k_rtx_costs
277 #undef TARGET_ATTRIBUTE_TABLE
278 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
280 #undef TARGET_PROMOTE_PROTOTYPES
281 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
283 #undef TARGET_STRUCT_VALUE_RTX
284 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
286 #undef TARGET_CANNOT_FORCE_CONST_MEM
287 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
289 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
290 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
292 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
293 #undef TARGET_RETURN_IN_MEMORY
294 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
295 #endif
297 #ifdef HAVE_AS_TLS
298 #undef TARGET_HAVE_TLS
299 #define TARGET_HAVE_TLS (true)
301 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
302 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
303 #endif
305 #undef TARGET_LRA_P
306 #define TARGET_LRA_P hook_bool_void_false
308 #undef TARGET_LEGITIMATE_ADDRESS_P
309 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
311 #undef TARGET_CAN_ELIMINATE
312 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
314 #undef TARGET_CONDITIONAL_REGISTER_USAGE
315 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
317 #undef TARGET_TRAMPOLINE_INIT
318 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
320 #undef TARGET_RETURN_POPS_ARGS
321 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
323 #undef TARGET_DELEGITIMIZE_ADDRESS
324 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
326 #undef TARGET_FUNCTION_ARG
327 #define TARGET_FUNCTION_ARG m68k_function_arg
329 #undef TARGET_FUNCTION_ARG_ADVANCE
330 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
332 #undef TARGET_LEGITIMATE_CONSTANT_P
333 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
335 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
336 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
338 #undef TARGET_C_EXCESS_PRECISION
339 #define TARGET_C_EXCESS_PRECISION m68k_excess_precision
341 /* The value stored by TAS. */
342 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
343 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
345 #undef TARGET_HARD_REGNO_NREGS
346 #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
347 #undef TARGET_HARD_REGNO_MODE_OK
348 #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
350 #undef TARGET_MODES_TIEABLE_P
351 #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
353 #undef TARGET_PROMOTE_FUNCTION_MODE
354 #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode
356 static const struct attribute_spec m68k_attribute_table[] =
358 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
359 affects_type_identity, handler, exclude } */
360 { "interrupt", 0, 0, true, false, false, false,
361 m68k_handle_fndecl_attribute, NULL },
362 { "interrupt_handler", 0, 0, true, false, false, false,
363 m68k_handle_fndecl_attribute, NULL },
364 { "interrupt_thread", 0, 0, true, false, false, false,
365 m68k_handle_fndecl_attribute, NULL },
366 { NULL, 0, 0, false, false, false, false, NULL, NULL }
369 struct gcc_target targetm = TARGET_INITIALIZER;
371 /* Base flags for 68k ISAs. */
372 #define FL_FOR_isa_00 FL_ISA_68000
373 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
374 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
375 generated 68881 code for 68020 and 68030 targets unless explicitly told
376 not to. */
377 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
378 | FL_BITFIELD | FL_68881 | FL_CAS)
379 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
380 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
382 /* Base flags for ColdFire ISAs. */
383 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
384 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
385 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
386 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
387 /* ISA_C is not upwardly compatible with ISA_B. */
388 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
/* The instruction-set revisions the backend distinguishes, covering
   both the traditional 680x0 line and the ColdFire variants.  Used as
   the `isa' field of struct m68k_target_selection.  */
enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  /* Sentinel / number of ISAs.  */
  isa_max
};
406 /* Information about one of the -march, -mcpu or -mtune arguments. */
407 struct m68k_target_selection
409 /* The argument being described. */
410 const char *name;
412 /* For -mcpu, this is the device selected by the option.
413 For -mtune and -march, it is a representative device
414 for the microarchitecture or ISA respectively. */
415 enum target_device device;
417 /* The M68K_DEVICE fields associated with DEVICE. See the comment
418 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
419 const char *family;
420 enum uarch_type microarch;
421 enum m68k_isa isa;
422 unsigned long flags;
425 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
426 static const struct m68k_target_selection all_devices[] =
428 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
429 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
430 #include "m68k-devices.def"
431 #undef M68K_DEVICE
432 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
435 /* A list of all ISAs, mapping each one to a representative device.
436 Used for -march selection. */
437 static const struct m68k_target_selection all_isas[] =
439 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
440 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
441 #include "m68k-isas.def"
442 #undef M68K_ISA
443 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
446 /* A list of all microarchitectures, mapping each one to a representative
447 device. Used for -mtune selection. */
448 static const struct m68k_target_selection all_microarchs[] =
450 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
451 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
452 #include "m68k-microarchs.def"
453 #undef M68K_MICROARCH
454 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  Resolved from the
   command line by m68k_option_override.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use (none, ColdFire FPU, or 68881).  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
488 /* Implement TARGET_OPTION_OVERRIDE. */
490 static void
491 m68k_option_override (void)
493 const struct m68k_target_selection *entry;
494 unsigned long target_mask;
496 if (global_options_set.x_m68k_arch_option)
497 m68k_arch_entry = &all_isas[m68k_arch_option];
499 if (global_options_set.x_m68k_cpu_option)
500 m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];
502 if (global_options_set.x_m68k_tune_option)
503 m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];
505 /* User can choose:
507 -mcpu=
508 -march=
509 -mtune=
511 -march=ARCH should generate code that runs any processor
512 implementing architecture ARCH. -mcpu=CPU should override -march
513 and should generate code that runs on processor CPU, making free
514 use of any instructions that CPU understands. -mtune=UARCH applies
515 on top of -mcpu or -march and optimizes the code for UARCH. It does
516 not change the target architecture. */
517 if (m68k_cpu_entry)
519 /* Complain if the -march setting is for a different microarchitecture,
520 or includes flags that the -mcpu setting doesn't. */
521 if (m68k_arch_entry
522 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
523 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
524 warning (0, "-mcpu=%s conflicts with -march=%s",
525 m68k_cpu_entry->name, m68k_arch_entry->name);
527 entry = m68k_cpu_entry;
529 else
530 entry = m68k_arch_entry;
532 if (!entry)
533 entry = all_devices + TARGET_CPU_DEFAULT;
535 m68k_cpu_flags = entry->flags;
537 /* Use the architecture setting to derive default values for
538 certain flags. */
539 target_mask = 0;
541 /* ColdFire is lenient about alignment. */
542 if (!TARGET_COLDFIRE)
543 target_mask |= MASK_STRICT_ALIGNMENT;
545 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
546 target_mask |= MASK_BITFIELD;
547 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
548 target_mask |= MASK_CF_HWDIV;
549 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
550 target_mask |= MASK_HARD_FLOAT;
551 target_flags |= target_mask & ~target_flags_explicit;
553 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
554 m68k_cpu = entry->device;
555 if (m68k_tune_entry)
557 m68k_tune = m68k_tune_entry->microarch;
558 m68k_tune_flags = m68k_tune_entry->flags;
560 #ifdef M68K_DEFAULT_TUNE
561 else if (!m68k_cpu_entry && !m68k_arch_entry)
563 enum target_device dev;
564 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
565 m68k_tune_flags = all_devices[dev].flags;
567 #endif
568 else
570 m68k_tune = entry->microarch;
571 m68k_tune_flags = entry->flags;
574 /* Set the type of FPU. */
575 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
576 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
577 : FPUTYPE_68881);
579 /* Sanity check to ensure that msep-data and mid-sahred-library are not
580 * both specified together. Doing so simply doesn't make sense.
582 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
583 error ("cannot specify both -msep-data and -mid-shared-library");
585 /* If we're generating code for a separate A5 relative data segment,
586 * we've got to enable -fPIC as well. This might be relaxable to
587 * -fpic but it hasn't been tested properly.
589 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
590 flag_pic = 2;
592 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
593 error if the target does not support them. */
594 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
595 error ("-mpcrel -fPIC is not currently supported on selected cpu");
597 /* ??? A historic way of turning on pic, or is this intended to
598 be an embedded thing that doesn't have the same name binding
599 significance that it does on hosted ELF systems? */
600 if (TARGET_PCREL && flag_pic == 0)
601 flag_pic = 1;
603 if (!flag_pic)
605 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
607 m68k_symbolic_jump = "jra %a0";
609 else if (TARGET_ID_SHARED_LIBRARY)
610 /* All addresses must be loaded from the GOT. */
612 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
614 if (TARGET_PCREL)
615 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
616 else
617 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
619 if (TARGET_ISAC)
620 /* No unconditional long branch */;
621 else if (TARGET_PCREL)
622 m68k_symbolic_jump = "bra%.l %c0";
623 else
624 m68k_symbolic_jump = "bra%.l %p0";
625 /* Turn off function cse if we are doing PIC. We always want
626 function call to be done as `bsr foo@PLTPC'. */
627 /* ??? It's traditional to do this for -mpcrel too, but it isn't
628 clear how intentional that is. */
629 flag_no_function_cse = 1;
632 switch (m68k_symbolic_call_var)
634 case M68K_SYMBOLIC_CALL_JSR:
635 m68k_symbolic_call = "jsr %a0";
636 break;
638 case M68K_SYMBOLIC_CALL_BSR_C:
639 m68k_symbolic_call = "bsr%.l %c0";
640 break;
642 case M68K_SYMBOLIC_CALL_BSR_P:
643 m68k_symbolic_call = "bsr%.l %p0";
644 break;
646 case M68K_SYMBOLIC_CALL_NONE:
647 gcc_assert (m68k_symbolic_call == NULL);
648 break;
650 default:
651 gcc_unreachable ();
654 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
655 parse_alignment_opts ();
656 if (align_labels_value > 2)
658 warning (0, "-falign-labels=%d is not supported", align_labels_value);
659 str_align_labels = "1";
662 if (align_loops_value > 2)
664 warning (0, "-falign-loops=%d is not supported", align_loops_value);
665 str_align_loops = "1";
667 #endif
669 if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
670 && !TARGET_68020)
672 warning (0, "-fstack-limit- options are not supported on this cpu");
673 opt_fstack_limit_symbol_arg = NULL;
674 opt_fstack_limit_register_no = -1;
677 SUBTARGET_OVERRIDE_OPTIONS;
679 /* Setup scheduling options. */
680 if (TUNE_CFV1)
681 m68k_sched_cpu = CPU_CFV1;
682 else if (TUNE_CFV2)
683 m68k_sched_cpu = CPU_CFV2;
684 else if (TUNE_CFV3)
685 m68k_sched_cpu = CPU_CFV3;
686 else if (TUNE_CFV4)
687 m68k_sched_cpu = CPU_CFV4;
688 else
690 m68k_sched_cpu = CPU_UNKNOWN;
691 flag_schedule_insns = 0;
692 flag_schedule_insns_after_reload = 0;
693 flag_modulo_sched = 0;
694 flag_live_range_shrinkage = 0;
697 if (m68k_sched_cpu != CPU_UNKNOWN)
699 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
700 m68k_sched_mac = MAC_CF_EMAC;
701 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
702 m68k_sched_mac = MAC_CF_MAC;
703 else
704 m68k_sched_mac = MAC_NO;
708 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
710 static void
711 m68k_override_options_after_change (void)
713 if (m68k_sched_cpu == CPU_UNKNOWN)
715 flag_schedule_insns = 0;
716 flag_schedule_insns_after_reload = 0;
717 flag_modulo_sched = 0;
718 flag_live_range_shrinkage = 0;
722 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
723 given argument and NAME is the argument passed to -mcpu. Return NULL
724 if -mcpu was not passed. */
726 const char *
727 m68k_cpp_cpu_ident (const char *prefix)
729 if (!m68k_cpu_entry)
730 return NULL;
731 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
734 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
735 given argument and NAME is the name of the representative device for
736 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
738 const char *
739 m68k_cpp_cpu_family (const char *prefix)
741 if (!m68k_cpu_entry)
742 return NULL;
743 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
746 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
747 "interrupt_handler" attribute and interrupt_thread if FUNC has an
748 "interrupt_thread" attribute. Otherwise, return
749 m68k_fk_normal_function. */
751 enum m68k_function_kind
752 m68k_get_function_kind (tree func)
754 tree a;
756 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
758 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
759 if (a != NULL_TREE)
760 return m68k_fk_interrupt_handler;
762 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
763 if (a != NULL_TREE)
764 return m68k_fk_interrupt_handler;
766 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
767 if (a != NULL_TREE)
768 return m68k_fk_interrupt_thread;
770 return m68k_fk_normal_function;
773 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
774 struct attribute_spec.handler. */
775 static tree
776 m68k_handle_fndecl_attribute (tree *node, tree name,
777 tree args ATTRIBUTE_UNUSED,
778 int flags ATTRIBUTE_UNUSED,
779 bool *no_add_attrs)
781 if (TREE_CODE (*node) != FUNCTION_DECL)
783 warning (OPT_Wattributes, "%qE attribute only applies to functions",
784 name);
785 *no_add_attrs = true;
788 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
790 error ("multiple interrupt attributes not allowed");
791 *no_add_attrs = true;
794 if (!TARGET_FIDOA
795 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
797 error ("interrupt_thread is available only on fido");
798 *no_add_attrs = true;
801 return NULL_TREE;
804 static void
805 m68k_compute_frame_layout (void)
807 int regno, saved;
808 unsigned int mask;
809 enum m68k_function_kind func_kind =
810 m68k_get_function_kind (current_function_decl);
811 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
812 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
814 /* Only compute the frame once per function.
815 Don't cache information until reload has been completed. */
816 if (current_frame.funcdef_no == current_function_funcdef_no
817 && reload_completed)
818 return;
820 current_frame.size = (get_frame_size () + 3) & -4;
822 mask = saved = 0;
824 /* Interrupt thread does not need to save any register. */
825 if (!interrupt_thread)
826 for (regno = 0; regno < 16; regno++)
827 if (m68k_save_reg (regno, interrupt_handler))
829 mask |= 1 << (regno - D0_REG);
830 saved++;
832 current_frame.offset = saved * 4;
833 current_frame.reg_no = saved;
834 current_frame.reg_mask = mask;
836 current_frame.foffset = 0;
837 mask = saved = 0;
838 if (TARGET_HARD_FLOAT)
840 /* Interrupt thread does not need to save any register. */
841 if (!interrupt_thread)
842 for (regno = 16; regno < 24; regno++)
843 if (m68k_save_reg (regno, interrupt_handler))
845 mask |= 1 << (regno - FP0_REG);
846 saved++;
848 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
849 current_frame.offset += current_frame.foffset;
851 current_frame.fpu_no = saved;
852 current_frame.fpu_mask = mask;
854 /* Remember what function this frame refers to. */
855 current_frame.funcdef_no = current_function_funcdef_no;
858 /* Worker function for TARGET_CAN_ELIMINATE. */
860 bool
861 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
863 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
866 HOST_WIDE_INT
867 m68k_initial_elimination_offset (int from, int to)
869 int argptr_offset;
870 /* The arg pointer points 8 bytes before the start of the arguments,
871 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
872 frame pointer in most frames. */
873 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
874 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
875 return argptr_offset;
877 m68k_compute_frame_layout ();
879 gcc_assert (to == STACK_POINTER_REGNUM);
880 switch (from)
882 case ARG_POINTER_REGNUM:
883 return current_frame.offset + current_frame.size - argptr_offset;
884 case FRAME_POINTER_REGNUM:
885 return current_frame.offset + current_frame.size;
886 default:
887 gcc_unreachable ();
891 /* Refer to the array `regs_ever_live' to determine which registers
892 to save; `regs_ever_live[I]' is nonzero if register number I
893 is ever used in the function. This function is responsible for
894 knowing which registers should not be saved even if used.
895 Return true if we need to save REGNO. */
897 static bool
898 m68k_save_reg (unsigned int regno, bool interrupt_handler)
900 if (flag_pic && regno == PIC_REG)
902 if (crtl->saves_all_registers)
903 return true;
904 if (crtl->uses_pic_offset_table)
905 return true;
906 /* Reload may introduce constant pool references into a function
907 that thitherto didn't need a PIC register. Note that the test
908 above will not catch that case because we will only set
909 crtl->uses_pic_offset_table when emitting
910 the address reloads. */
911 if (crtl->uses_const_pool)
912 return true;
915 if (crtl->calls_eh_return)
917 unsigned int i;
918 for (i = 0; ; i++)
920 unsigned int test = EH_RETURN_DATA_REGNO (i);
921 if (test == INVALID_REGNUM)
922 break;
923 if (test == regno)
924 return true;
928 /* Fixed regs we never touch. */
929 if (fixed_regs[regno])
930 return false;
932 /* The frame pointer (if it is such) is handled specially. */
933 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
934 return false;
936 /* Interrupt handlers must also save call_used_regs
937 if they are live or when calling nested functions. */
938 if (interrupt_handler)
940 if (df_regs_ever_live_p (regno))
941 return true;
943 if (!crtl->is_leaf && call_used_regs[regno])
944 return true;
947 /* Never need to save registers that aren't touched. */
948 if (!df_regs_ever_live_p (regno))
949 return false;
951 /* Otherwise save everything that isn't call-clobbered. */
952 return !call_used_regs[regno];
955 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
956 the lowest memory address. COUNT is the number of registers to be
957 moved, with register REGNO + I being moved if bit I of MASK is set.
958 STORE_P specifies the direction of the move and ADJUST_STACK_P says
959 whether or not this is pre-decrement (if STORE_P) or post-increment
960 (if !STORE_P) operation. */
962 static rtx_insn *
963 m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
964 unsigned int count, unsigned int regno,
965 unsigned int mask, bool store_p, bool adjust_stack_p)
967 int i;
968 rtx body, addr, src, operands[2];
969 machine_mode mode;
971 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
972 mode = reg_raw_mode[regno];
973 i = 0;
975 if (adjust_stack_p)
977 src = plus_constant (Pmode, base,
978 (count
979 * GET_MODE_SIZE (mode)
980 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
981 XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
984 for (; mask != 0; mask >>= 1, regno++)
985 if (mask & 1)
987 addr = plus_constant (Pmode, base, offset);
988 operands[!store_p] = gen_frame_mem (mode, addr);
989 operands[store_p] = gen_rtx_REG (mode, regno);
990 XVECEXP (body, 0, i++)
991 = gen_rtx_SET (operands[0], operands[1]);
992 offset += GET_MODE_SIZE (mode);
994 gcc_assert (i == XVECLEN (body, 0));
996 return emit_insn (body);
999 /* Make INSN a frame-related instruction. */
1001 static void
1002 m68k_set_frame_related (rtx_insn *insn)
1004 rtx body;
1005 int i;
1007 RTX_FRAME_RELATED_P (insn) = 1;
1008 body = PATTERN (insn);
1009 if (GET_CODE (body) == PARALLEL)
1010 for (i = 0; i < XVECLEN (body, 0); i++)
1011 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
/* Emit RTL for the "prologue" define_expand: allocate the frame, save
   call-saved integer and FP registers, and set up the PIC register if
   needed.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
        {
          /* The limit expression is not directly usable; load it into
             D0 first and trap against the register instead.  */
          emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
          limit = gen_rtx_REG (Pmode, D0_REG);
        }
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
                                            stack_pointer_rtx, limit),
                               stack_pointer_rtx, limit,
                               const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
         addressing.  Add the size of movem saves to the initial stack
         allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
        {
          /* On the 68040, two separate moves are faster than link.w 0.  */
          dest = gen_frame_mem (Pmode,
                                gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
          m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
                                                  stack_pointer_rtx));
        }
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
        /* A single link instruction covers the whole allocation; below
           0x8000 link.w suffices, and the 68020 has link.l.  */
        m68k_set_frame_related
          (emit_insn (gen_link (frame_pointer_rtx,
                                GEN_INT (-4 - fsize_with_regs))));
      else
        {
          /* Large frame without link.l: link with no allocation, then
             adjust the stack pointer separately.  */
          m68k_set_frame_related
            (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
          m68k_set_frame_related
            (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-fsize_with_regs))));
        }

      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
        /* 68881 fmovem saves XFmode registers with pre-decrement-style
           addressing (adjust_stack_p == true).  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
                            current_frame.fpu_no, FP0_REG,
                            current_frame.fpu_mask, true, true));
      else
        {
          int offset;

          /* If we're using moveml to save the integer registers,
             the stack pointer will point to the bottom of the moveml
             save area.  Find the stack offset of the first FP register.  */
          if (current_frame.reg_no < MIN_MOVEM_REGS)
            offset = 0;
          else
            offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
          m68k_set_frame_related
            (m68k_emit_movem (stack_pointer_rtx, offset,
                              current_frame.fpu_no, FP0_REG,
                              current_frame.fpu_mask, true, false));
        }
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
                                              stack_limit_rtx),
                                 stack_pointer_rtx, stack_limit_rtx,
                                 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
        warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
        if (current_frame.reg_mask & (1 << i))
          {
            src = gen_rtx_REG (SImode, D0_REG + i);
            dest = gen_frame_mem (SImode,
                                  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
            m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
          }
    }
  else
    {
      if (TARGET_COLDFIRE)
        /* The required register save space has already been allocated.
           The first register should be stored at (%sp).  */
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx, 0,
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, false));
      else
        m68k_set_frame_related
          (m68k_emit_movem (stack_pointer_rtx,
                            current_frame.reg_no * -GET_MODE_SIZE (SImode),
                            current_frame.reg_no, D0_REG,
                            current_frame.reg_mask, true, true));
    }

  /* Load the GOT pointer when this function uses it and the data
     sections are not separately addressed.  */
  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1170 /* Return true if a simple (return) instruction is sufficient for this
1171 instruction (i.e. if no epilogue is needed). */
1173 bool
1174 m68k_use_return_insn (void)
1176 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1177 return false;
1179 m68k_compute_frame_layout ();
1180 return current_frame.offset == 0;
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
                     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
         addressing.  Add the size of movem loads to the final deallocation
         instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
        fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
        fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Register restores at displacements of 0x8000 or more from the frame
     pointer do not fit a 16-bit displacement; arrange an alternative
     addressing scheme through A1.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
          && (current_frame.reg_no >= MIN_MOVEM_REGS
              || current_frame.fpu_no >= MIN_FMOVEM_REGS))
        {
          /* ColdFire's move multiple instructions do not support the
             (d8,Ax,Xi) addressing mode, so we're as well using a normal
             stack-based restore.  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
                          GEN_INT (-(current_frame.offset + fsize)));
          emit_insn (gen_blockage ());
          emit_insn (gen_addsi3 (stack_pointer_rtx,
                                 gen_rtx_REG (Pmode, A1_REG),
                                 frame_pointer_rtx));
          restore_from_sp = true;
        }
      else
        {
          /* Put -FSIZE into A1 and use (d8,%fp,%a1.l) addressing for
             the restores (the "big" scheme below).  */
          emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
          fsize = 0;
          big = true;
        }
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
            rtx addr;

            if (big)
              {
                /* Generate the address -OFFSET(%fp,%a1.l).  */
                addr = gen_rtx_REG (Pmode, A1_REG);
                addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
                addr = plus_constant (Pmode, addr, -offset);
              }
            else if (restore_from_sp)
              addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
            else
              addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
            emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
                            gen_frame_mem (SImode, addr));
            offset -= GET_MODE_SIZE (SImode);
          }
    }
  else if (current_frame.reg_mask)
    {
      /* Enough integer registers for a single moveml.  */
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
      else if (restore_from_sp)
        m68k_emit_movem (stack_pointer_rtx, 0,
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false,
                         !TARGET_COLDFIRE);
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.offset + fsize),
                         current_frame.reg_no, D0_REG,
                         current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      /* Restore the FP registers with fmovem, mirroring the integer
         restore scheme chosen above.  */
      if (big)
        m68k_emit_movem (gen_rtx_PLUS (Pmode,
                                       gen_rtx_REG (Pmode, A1_REG),
                                       frame_pointer_rtx),
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
        {
          if (TARGET_COLDFIRE)
            {
              int offset;

              /* If we used moveml to restore the integer registers, the
                 stack pointer will still point to the bottom of the moveml
                 save area.  Find the stack offset of the first FP
                 register.  */
              if (current_frame.reg_no < MIN_MOVEM_REGS)
                offset = 0;
              else
                offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
              m68k_emit_movem (stack_pointer_rtx, offset,
                               current_frame.fpu_no, FP0_REG,
                               current_frame.fpu_mask, false, false);
            }
          else
            m68k_emit_movem (stack_pointer_rtx, 0,
                             current_frame.fpu_no, FP0_REG,
                             current_frame.fpu_mask, false, true);
        }
      else
        m68k_emit_movem (frame_pointer_rtx,
                         -(current_frame.foffset + fsize),
                         current_frame.fpu_no, FP0_REG,
                         current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the final stack
     deallocation.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
                           stack_pointer_rtx,
                           EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1355 /* Return true if X is a valid comparison operator for the dbcc
1356 instruction.
1358 Note it rejects floating point comparison operators.
1359 (In the future we could use Fdbcc).
1361 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1364 valid_dbcc_comparison_p_2 (rtx x, machine_mode mode ATTRIBUTE_UNUSED)
1366 switch (GET_CODE (x))
1368 case EQ: case NE: case GTU: case LTU:
1369 case GEU: case LEU:
1370 return 1;
1372 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1373 conservative */
1374 case GT: case LT: case GE: case LE:
1375 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1376 default:
1377 return 0;
1381 /* Return nonzero if flags are currently in the 68881 flag register. */
1383 flags_in_68881 (void)
1385 /* We could add support for these in the future */
1386 return cc_status.flags & CC_IN_68881;
1389 /* Return true if PARALLEL contains register REGNO. */
1390 static bool
1391 m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1393 int i;
1395 if (REG_P (parallel) && REGNO (parallel) == regno)
1396 return true;
1398 if (GET_CODE (parallel) != PARALLEL)
1399 return false;
1401 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1403 const_rtx x;
1405 x = XEXP (XVECEXP (parallel, 0, i), 0);
1406 if (REG_P (x) && REGNO (x) == regno)
1407 return true;
1410 return false;
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  DECL is the called
   function's declaration (may be NULL for indirect calls) and EXP is
   the CALL_EXPR.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
         example that we aren't returning a value from the sibling in
         a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
                                   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
         function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
            || (REG_P (cfun_value)
                && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
        return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}
1459 /* On the m68k all args are always pushed. */
1461 static rtx
1462 m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
1463 machine_mode mode ATTRIBUTE_UNUSED,
1464 const_tree type ATTRIBUTE_UNUSED,
1465 bool named ATTRIBUTE_UNUSED)
1467 return NULL_RTX;
1470 static void
1471 m68k_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
1472 const_tree type, bool named ATTRIBUTE_UNUSED)
1474 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1476 *cum += (mode != BLKmode
1477 ? (GET_MODE_SIZE (mode) + 3) & ~3
1478 : (int_size_in_bytes (type) + 3) & ~3);
1481 /* Convert X to a legitimate function call memory reference and return the
1482 result. */
1485 m68k_legitimize_call_address (rtx x)
1487 gcc_assert (MEM_P (x));
1488 if (call_operand (XEXP (x, 0), VOIDmode))
1489 return x;
1490 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1493 /* Likewise for sibling calls. */
1496 m68k_legitimize_sibcall_address (rtx x)
1498 gcc_assert (MEM_P (x));
1499 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1500 return x;
1502 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1503 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X differs from OLDX (or has been rebuilt);
         COPIED ensures we copy X at most once before mutating it
         in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out multiplications on either side into registers.  */
      if (GET_CODE (XEXP (x, 0)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
        }
      if (GET_CODE (XEXP (x, 1)) == MULT)
        {
          COPY_ONCE (x);
          XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
        }
      if (ch)
        {
          if (GET_CODE (XEXP (x, 1)) == REG
              && GET_CODE (XEXP (x, 0)) == REG)
            {
              /* REG+REG is directly usable, except that the ColdFire
                 FPU cannot use an indexed address for float modes.  */
              if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
                {
                  COPY_ONCE (x);
                  x = force_operand (x, 0);
                }
              return x;
            }
          if (memory_address_p (mode, x))
            return x;
        }
      /* One side is a REG (or a sign-extended HImode REG, which can be
         used as an index); load the other side into a fresh register.  */
      if (GET_CODE (XEXP (x, 0)) == REG
          || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 1) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 0)) == REG)
            x = force_operand (x, 0);
        }
      else if (GET_CODE (XEXP (x, 1)) == REG
               || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
                   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
                   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), 0);
          emit_move_insn (temp, val);
          COPY_ONCE (x);
          XEXP (x, 0) = temp;
          if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
              && GET_CODE (XEXP (x, 1)) == REG)
            x = force_operand (x, 0);
        }
    }

  return x;
}
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.

   OPERANDS[0] is the loop counter, OPERANDS[1] the loop label,
   OPERANDS[2] the exit label, and OPERANDS[3] the comparison.  */

void
output_dbcc_and_branch (rtx *operands)
{
  /* Emit the dbCC on the loop label plus the matching jCC to the exit
     label for the comparison code.  */
  switch (GET_CODE (operands[3]))
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
/* Output assembly for a DImode set-on-condition: compare OPERAND1
   against OPERAND2 (each a 64-bit register pair or memory double-word)
   under comparison OP and set byte DEST via an Scc instruction.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of OPERAND1, and
     loperands[2]/[3] those of OPERAND2 (when it is not zero).  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
        loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
        loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* loperands[4] is a label reached as soon as the high-word compare
     already decides the result.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* Comparing against zero: tst works everywhere except on address
         registers before the 68020, where cmp.w #0 is used instead.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
        output_asm_insn ("tst%.l %0", loperands);
      else
        output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
        output_asm_insn ("tst%.l %1", loperands);
      else
        output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For the signed orderings a different Scc must be used depending on
     whether the high words differed (signed condition) or only the low
     words decide (unsigned condition); loperands[6] labels the join
     point in those cases.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
/* Output a bit-test instruction for testing bit COUNTOP of DATAOP.
   INSN is the insn being output and SIGNPOS appears to be the index of
   the top bit of the unit being accessed (e.g. 7 for a byte access) --
   it is used both as a mask and as the sign-bit position.  Returns the
   asm template; also updates cc_status to describe the resulting
   flags.  */

const char *
output_btst (rtx *operands, rtx countop, rtx dataop, rtx_insn *insn, int signpos)
{
  operands[0] = countop;
  operands[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      register int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
         advance to the containing unit of same size.  */
      if (count > signpos)
        {
          int offset = (count & ~signpos) / 8;
          count = count & signpos;
          operands[1] = dataop = adjust_address (dataop, QImode, offset);
        }
      /* Testing the sign bit tells us the value is non-positive when
         set; testing any other bit only tells us it is non-negative.  */
      if (count == signpos)
        cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
      else
        cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;

      /* These three statements used to use next_insns_test_no...
         but it appears that this should do the same job.  */
      if (count == 31
          && next_insn_tests_no_inequality (insn))
        return "tst%.l %1";
      if (count == 15
          && next_insn_tests_no_inequality (insn))
        return "tst%.w %1";
      if (count == 7
          && next_insn_tests_no_inequality (insn))
        return "tst%.b %1";
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
         On some m68k variants unfortunately that's slower than btst.
         On 68000 and higher, that should also work for all HImode operands. */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
        {
          if (count == 3 && DATA_REG_P (operands[1])
              && next_insn_tests_no_inequality (insn))
            {
              cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
              return "move%.w %1,%%ccr";
            }
          if (count == 2 && DATA_REG_P (operands[1])
              && next_insn_tests_no_inequality (insn))
            {
              cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
              return "move%.w %1,%%ccr";
            }
          /* count == 1 followed by bvc/bvs and
             count == 0 followed by bcc/bcs are also possible, but need
             m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
        }
    }
  /* General case: a plain btst, which only sets the Z flag.  */
  cc_status.flags = CC_NOT_NEGATIVE;
  return "btst %0,%1";
}
1854 /* Return true if X is a legitimate base register. STRICT_P says
1855 whether we need strict checking. */
1857 bool
1858 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1860 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1861 if (!strict_p && GET_CODE (x) == SUBREG)
1862 x = SUBREG_REG (x);
1864 return (REG_P (x)
1865 && (strict_p
1866 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1867 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1870 /* Return true if X is a legitimate index register. STRICT_P says
1871 whether we need strict checking. */
1873 bool
1874 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1876 if (!strict_p && GET_CODE (x) == SUBREG)
1877 x = SUBREG_REG (x);
1879 return (REG_P (x)
1880 && (strict_p
1881 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1882 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1885 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1886 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1887 ADDRESS if so. STRICT_P says whether we need strict checking. */
1889 static bool
1890 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1892 int scale;
1894 /* Check for a scale factor. */
1895 scale = 1;
1896 if ((TARGET_68020 || TARGET_COLDFIRE)
1897 && GET_CODE (x) == MULT
1898 && GET_CODE (XEXP (x, 1)) == CONST_INT
1899 && (INTVAL (XEXP (x, 1)) == 2
1900 || INTVAL (XEXP (x, 1)) == 4
1901 || (INTVAL (XEXP (x, 1)) == 8
1902 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1904 scale = INTVAL (XEXP (x, 1));
1905 x = XEXP (x, 0);
1908 /* Check for a word extension. */
1909 if (!TARGET_COLDFIRE
1910 && GET_CODE (x) == SIGN_EXTEND
1911 && GET_MODE (XEXP (x, 0)) == HImode)
1912 x = XEXP (x, 0);
1914 if (m68k_legitimate_index_reg_p (x, strict_p))
1916 address->scale = scale;
1917 address->index = x;
1918 return true;
1921 return false;
1924 /* Return true if X is an illegitimate symbolic constant. */
1926 bool
1927 m68k_illegitimate_symbolic_constant_p (rtx x)
1929 rtx base, offset;
1931 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1933 split_const (x, &base, &offset);
1934 if (GET_CODE (base) == SYMBOL_REF
1935 && !offset_within_block_p (base, INTVAL (offset)))
1936 return true;
1938 return m68k_tls_reference_p (x, false);
1941 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1943 static bool
1944 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1946 return m68k_illegitimate_symbolic_constant_p (x);
1949 /* Return true if X is a legitimate constant address that can reach
1950 bytes in the range [X, X + REACH). STRICT_P says whether we need
1951 strict checking. */
1953 static bool
1954 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1956 rtx base, offset;
1958 if (!CONSTANT_ADDRESS_P (x))
1959 return false;
1961 if (flag_pic
1962 && !(strict_p && TARGET_PCREL)
1963 && symbolic_operand (x, VOIDmode))
1964 return false;
1966 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1968 split_const (x, &base, &offset);
1969 if (GET_CODE (base) == SYMBOL_REF
1970 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1971 return false;
1974 return !m68k_tls_reference_p (x, false);
1977 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1978 labels will become jump tables. */
1980 static bool
1981 m68k_jump_table_ref_p (rtx x)
1983 if (GET_CODE (x) != LABEL_REF)
1984 return false;
1986 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
1987 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
1988 return true;
1990 insn = next_nonnote_insn (insn);
1991 return insn && JUMP_TABLE_DATA_P (insn);
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
                        bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is the number of bytes the access must be able to address
     past X; BLKmode accesses are treated as one byte.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
         they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
        {
          address->base = XEXP (x, 0);
          address->offset = XEXP (x, 1);
          return true;
        }
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  */
  if (GET_CODE (x) == PLUS
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
          && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
        {
          address->offset = XEXP (x, 1);
          x = XEXP (x, 0);
        }

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
        {
          address->base = x;
          return true;
        }

      /* Check for a suppressed base register.  Do not allow this case
         for non-symbolic offsets as it effectively gives gcc freedom
         to treat data registers as base registers, which can generate
         worse code.  */
      if (address->offset
          && symbolic_operand (address->offset, VOIDmode)
          && m68k_decompose_index (x, strict_p, address))
        return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
        {
          address->offset = XEXP (x, 1);
          x = XEXP (x, 0);
        }
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
          && m68k_decompose_index (XEXP (x, 1), strict_p, address))
        {
          address->base = XEXP (x, 0);
          return true;
        }

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
          && m68k_decompose_index (XEXP (x, 0), strict_p, address))
        {
          address->base = XEXP (x, 1);
          return true;
        }
    }
  return false;
}
2143 /* Return true if X is a legitimate address for values of mode MODE.
2144 STRICT_P says whether strict checking is needed. */
2146 bool
2147 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2149 struct m68k_address address;
2151 return m68k_decompose_address (mode, x, strict_p, &address);
2154 /* Return true if X is a memory, describing its address in ADDRESS if so.
2155 Apply strict checking if called during or after reload. */
2157 static bool
2158 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2160 return (MEM_P (x)
2161 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2162 reload_in_progress || reload_completed,
2163 address));
2166 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2168 bool
2169 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2171 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2174 /* Return true if X matches the 'Q' constraint. It must be a memory
2175 with a base address and no constant offset or index. */
2177 bool
2178 m68k_matches_q_p (rtx x)
2180 struct m68k_address address;
2182 return (m68k_legitimate_mem_p (x, &address)
2183 && address.code == UNKNOWN
2184 && address.base
2185 && !address.offset
2186 && !address.index);
2189 /* Return true if X matches the 'U' constraint. It must be a base address
2190 with a constant offset and no index. */
2192 bool
2193 m68k_matches_u_p (rtx x)
2195 struct m68k_address address;
2197 return (m68k_legitimate_mem_p (x, &address)
2198 && address.code == UNKNOWN
2199 && address.base
2200 && address.offset
2201 && !address.index);
2204 /* Return GOT pointer. */
2206 static rtx
2207 m68k_get_gp (void)
2209 if (pic_offset_table_rtx == NULL_RTX)
2210 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2212 crtl->uses_pic_offset_table = 1;
2214 return pic_offset_table_rtx;
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  RELOC_GOT marks ordinary GOT references; the remaining
   values correspond to the TLS access models (general dynamic, local
   dynamic module/offset, initial exec, local exec).  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True iff RELOC is one of the TLS relocation kinds.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register; a fresh pseudo is
   allocated when it is null and one is needed.
   Returns either the temporary register holding the sum (ColdFire
   -mxgot/-mxtls case) or a (plus BASE_REG (const (unspec ...))) rtx.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* Pick the "extended" flavour depending on whether we are building a
     GOT reference (base is the PIC register) or a TLS reference.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      /* 16-bit displacement form: (plus BASE_REG (const (unspec ...))).  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
/* Helper for m68k_unwrap_symbol.
   Strip a (const (unspec ...)) wrapper (UNSPEC_RELOC16 always;
   UNSPEC_RELOC32 only if UNWRAP_RELOC32_P), looking through an
   optional integer addend.  Returns ORIG unchanged if it is not a
   recognized wrapper.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow the caller to pass NULL when it does not care about
	 the relocation kind.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
   UNSPEC_RELOC32 wrappers.  The relocation kind is discarded;
   use m68k_unwrap_symbol_1 directly when it is needed.  */

rtx
m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
{
  return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
}
/* Prescan insn before outputing assembler for it.  Rewrites, in
   place, operands of the form (const (plus (unspec ...) (const_int)))
   so the addend is emitted after the @GOT/@TLS decoration.  */

void
m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
			 rtx *operands, int n_operands)
{
  int i;

  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
		    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  for (i = 0; i < n_operands; ++i)
    {
      rtx op;

      op = operands[i];

      FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
	{
	  rtx x = *iter;
	  /* Only rewrapped (const ...) expressions need fixing.  */
	  if (m68k_unwrap_symbol (x, true) != x)
	    {
	      rtx plus;

	      gcc_assert (GET_CODE (x) == CONST);
	      plus = XEXP (x, 0);

	      if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
		{
		  rtx unspec;
		  rtx addend;

		  unspec = XEXP (plus, 0);
		  gcc_assert (GET_CODE (unspec) == UNSPEC);
		  addend = XEXP (plus, 1);
		  gcc_assert (CONST_INT_P (addend));

		  /* We now have all the pieces, rearrange them.  */

		  /* Move symbol to plus.  */
		  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

		  /* Move plus inside unspec.  */
		  XVECEXP (unspec, 0, 0) = plus;

		  /* Move unspec to top level of const.  */
		  XEXP (x, 0) = unspec;
		}

	      /* The subtree was just rewritten; don't walk into it.  */
	      iter.skip_subrtxes ();
	    }
	}
    }
}
2396 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2397 If REG is non-null, use it; generate new pseudo otherwise. */
2399 static rtx
2400 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2402 rtx_insn *insn;
2404 if (reg == NULL_RTX)
2406 gcc_assert (can_create_pseudo_p ());
2407 reg = gen_reg_rtx (Pmode);
2410 insn = emit_move_insn (reg, x);
2411 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2412 by loop. */
2413 set_unique_reg_note (insn, REG_EQUAL, orig);
2415 return reg;
2418 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2419 GOT slot. */
2421 static rtx
2422 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2424 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2426 x = gen_rtx_MEM (Pmode, x);
2427 MEM_READONLY_P (x) = 1;
2429 return x;
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel   a5@(_foo:w), a0
	movel   #12345, a0@

   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Recurse on each operand; reuse REG for the second operand only
	 when the first did not claim it.  */
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
2512 /* The __tls_get_addr symbol. */
2513 static GTY(()) rtx m68k_tls_get_addr;
2515 /* Return SYMBOL_REF for __tls_get_addr. */
2517 static rtx
2518 m68k_get_tls_get_addr (void)
2520 if (m68k_tls_get_addr == NULL_RTX)
2521 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2523 return m68k_tls_get_addr;
/* Return libcall result in A0 instead of usual D0.  Set temporarily
   around the __tls_get_addr and __m68k_read_tp call sequences below,
   which return their result in A0.  */
static bool m68k_libcall_value_in_a0_p = false;
/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes can not later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.c:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole sequence in a libcall block carrying EQV so the
     optimizers can CSE equal TLS accesses.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
/* Cached SYMBOL_REF for __m68k_read_tp.  */
static GTY(()) rtx m68k_read_tp;

/* Return SYMBOL_REF for __m68k_read_tp, creating it lazily on
   first use.  */

static rtx
m68k_get_m68k_read_tp (void)
{
  if (m68k_read_tp == NULL_RTX)
    m68k_read_tp = init_one_libfunc ("__m68k_read_tp");

  return m68k_read_tp;
}
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simplest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  Dispatches on the symbol's TLS model (GD/LD/IE/LE).  */

static rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* One __tls_get_addr call per symbol; the symbol itself is the
	 REG_EQUAL value.  */
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	/* Module base plus per-symbol @TLSLDO offset.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* Thread pointer plus the @TLSIE GOT slot contents.  */
	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* Thread pointer plus a link-time constant @TLSLE offset.  */
	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2704 /* Return true if X is a TLS symbol. */
2706 static bool
2707 m68k_tls_symbol_p (rtx x)
2709 if (!TARGET_HAVE_TLS)
2710 return false;
2712 if (GET_CODE (x) != SYMBOL_REF)
2713 return false;
2715 return SYMBOL_REF_TLS_MODEL (x) != 0;
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
   though illegitimate one.
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference
   (i.e. already wrapped in a TLS-kind unspec).  */

bool
m68k_tls_reference_p (rtx x, bool legitimate_p)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (!legitimate_p)
    {
      /* Walk all sub-rtxes looking for a raw TLS SYMBOL_REF.  */
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
	{
	  rtx x = *iter;

	  /* Note: this is not the same as m68k_tls_symbol_p.  */
	  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
	    return true;

	  /* Don't recurse into legitimate TLS references.  */
	  if (m68k_tls_reference_p (x, true))
	    iter.skip_subrtxes ();
	}
      return false;
    }
  else
    {
      enum m68k_reloc reloc = RELOC_GOT;

      /* Legitimate means: unwrapping succeeds and the wrapper carried
	 a TLS relocation kind.  */
      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
	      && TLS_RELOC_P (reloc));
    }
}
/* True if I fits in a moveq immediate (signed 8-bit, -128..127).  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)

/* Return the type of move that should be used for integer I.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
2802 /* Return the cost of moving constant I into a data register. */
2804 static int
2805 const_int_cost (HOST_WIDE_INT i)
2807 switch (m68k_const_method (i))
2809 case MOVQ:
2810 /* Constants between -128 and 127 are cheap due to moveq. */
2811 return 0;
2812 case MVZ:
2813 case MVS:
2814 case NOTB:
2815 case NOTW:
2816 case NEGW:
2817 case SWAP:
2818 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2819 return 1;
2820 case MOVL:
2821 return 2;
2822 default:
2823 gcc_unreachable ();
/* Implement TARGET_RTX_COSTS.  Set *TOTAL to the cost of X appearing
   as operand OPNO of OUTER_CODE in mode MODE; return true when the
   cost is final, false to let the generic code cost the operands.  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if (outer_code == COMPARE
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && GET_CODE (XEXP (x, 0)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field extraction compared against zero is free (btst).  */
      if (outer_code == COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0], using the cheapest sequence chosen by m68k_const_method.
   May rewrite OPERANDS[1] in place for the NOTB/NOTW/SWAP sequences.  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      /* The not.b clobbers the CC in a way the CC tracker can't model.  */
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	/* Load the halfword-swapped value, then swap it back.  */
	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3018 /* Return true if I can be handled by ISA B's mov3q instruction. */
3020 bool
3021 valid_mov3q_const (HOST_WIDE_INT i)
3023 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
   I is the value of OPERANDS[1].  Picks clr/mov3q/sub/moveq-based
   sequences/pea before falling back to a plain move.l.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    /* Clearing an address register: clr does not work, subtract it
       from itself instead.  */
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      /* move.w to an address register sign-extends to 32 bits.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      /* Push a small constant with pea.  */
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
3067 const char *
3068 output_move_simode (rtx *operands)
3070 if (GET_CODE (operands[1]) == CONST_INT)
3071 return output_move_simode_const (operands);
3072 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3073 || GET_CODE (operands[1]) == CONST)
3074 && push_operand (operands[0], SImode))
3075 return "pea %a1";
3076 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3077 || GET_CODE (operands[1]) == CONST)
3078 && ADDRESS_REG_P (operands[0]))
3079 return "lea %a1,%0";
3080 return "move%.l %1,%0";
/* Return the assembler template for a halfword (HImode) move of
   OPERANDS[1] into OPERANDS[0].  */

const char *
output_move_himode (rtx *operands)
{
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      if (operands[1] == const0_rtx
	  && (DATA_REG_P (operands[0])
	      || GET_CODE (operands[0]) == MEM)
	  /* clr insns on 68000 read before writing.  */
	  && ((TARGET_68010 || TARGET_COLDFIRE)
	      || !(GET_CODE (operands[0]) == MEM
		   && MEM_VOLATILE_P (operands[0]))))
	return "clr%.w %0";
      else if (operands[1] == const0_rtx
	       && ADDRESS_REG_P (operands[0]))
	/* Zero an address register by subtracting it from itself.  */
	return "sub%.l %0,%0";
      else if (DATA_REG_P (operands[0])
	       && INTVAL (operands[1]) < 128
	       && INTVAL (operands[1]) >= -128)
	return "moveq %1,%0";
      else if (INTVAL (operands[1]) < 0x8000
	       && INTVAL (operands[1]) >= -0x8000)
	return "move%.w %1,%0";
    }
  else if (CONSTANT_P (operands[1]))
    /* Non-CONST_INT constants (symbolic) need a full move.l.  */
    return "move%.l %1,%0";
  return "move%.w %1,%0";
}
/* Return the assembler template for a byte (QImode) move of
   OPERANDS[1] into OPERANDS[0].  */

const char *
output_move_qimode (rtx *operands)
{
  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* All-ones bytes can be set with st.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  /* st leaves the condition codes in an untracked state.  */
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    return "move%.l %1,%0";
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    return "move%.w %1,%0";
  return "move%.b %1,%0";
}
3156 const char *
3157 output_move_stricthi (rtx *operands)
3159 if (operands[1] == const0_rtx
3160 /* clr insns on 68000 read before writing. */
3161 && ((TARGET_68010 || TARGET_COLDFIRE)
3162 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3163 return "clr%.w %0";
3164 return "move%.w %1,%0";
3167 const char *
3168 output_move_strictqi (rtx *operands)
3170 if (operands[1] == const0_rtx
3171 /* clr insns on 68000 read before writing. */
3172 && ((TARGET_68010 || TARGET_COLDFIRE)
3173 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3174 return "clr%.b %0";
3175 return "move%.b %1,%0";
3178 /* Return the best assembler insn template
3179 for moving operands[1] into operands[0] as a fullword. */
3181 static const char *
3182 singlemove_string (rtx *operands)
3184 if (GET_CODE (operands[1]) == CONST_INT)
3185 return output_move_simode_const (operands);
3186 return "move%.l %1,%0";
3190 /* Output assembler or rtl code to perform a doubleword move insn
3191 with operands OPERANDS.
3192 Pointers to 3 helper functions should be specified:
3193 HANDLE_REG_ADJUST to adjust a register by a small value,
3194 HANDLE_COMPADR to compute an address and
3195 HANDLE_MOVSI to move 4 bytes. */
3197 static void
3198 handle_move_double (rtx operands[2],
3199 void (*handle_reg_adjust) (rtx, int),
3200 void (*handle_compadr) (rtx [2]),
3201 void (*handle_movsi) (rtx [2]))
3203 enum
3205 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3206 } optype0, optype1;
3207 rtx latehalf[2];
3208 rtx middlehalf[2];
3209 rtx xops[2];
3210 rtx addreg0 = 0, addreg1 = 0;
3211 int dest_overlapped_low = 0;
3212 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3214 middlehalf[0] = 0;
3215 middlehalf[1] = 0;
3217 /* First classify both operands. */
3219 if (REG_P (operands[0]))
3220 optype0 = REGOP;
3221 else if (offsettable_memref_p (operands[0]))
3222 optype0 = OFFSOP;
3223 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3224 optype0 = POPOP;
3225 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3226 optype0 = PUSHOP;
3227 else if (GET_CODE (operands[0]) == MEM)
3228 optype0 = MEMOP;
3229 else
3230 optype0 = RNDOP;
3232 if (REG_P (operands[1]))
3233 optype1 = REGOP;
3234 else if (CONSTANT_P (operands[1]))
3235 optype1 = CNSTOP;
3236 else if (offsettable_memref_p (operands[1]))
3237 optype1 = OFFSOP;
3238 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3239 optype1 = POPOP;
3240 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3241 optype1 = PUSHOP;
3242 else if (GET_CODE (operands[1]) == MEM)
3243 optype1 = MEMOP;
3244 else
3245 optype1 = RNDOP;
3247 /* Check for the cases that the operand constraints are not supposed
3248 to allow to happen. Generating code for these cases is
3249 painful. */
3250 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3252 /* If one operand is decrementing and one is incrementing
3253 decrement the former register explicitly
3254 and change that operand into ordinary indexing. */
3256 if (optype0 == PUSHOP && optype1 == POPOP)
3258 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3260 handle_reg_adjust (operands[0], -size);
3262 if (GET_MODE (operands[1]) == XFmode)
3263 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3264 else if (GET_MODE (operands[0]) == DFmode)
3265 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3266 else
3267 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3268 optype0 = OFFSOP;
3270 if (optype0 == POPOP && optype1 == PUSHOP)
3272 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3274 handle_reg_adjust (operands[1], -size);
3276 if (GET_MODE (operands[1]) == XFmode)
3277 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3278 else if (GET_MODE (operands[1]) == DFmode)
3279 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3280 else
3281 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3282 optype1 = OFFSOP;
3285 /* If an operand is an unoffsettable memory ref, find a register
3286 we can increment temporarily to make it refer to the second word. */
3288 if (optype0 == MEMOP)
3289 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3291 if (optype1 == MEMOP)
3292 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3294 /* Ok, we can do one word at a time.
3295 Normally we do the low-numbered word first,
3296 but if either operand is autodecrementing then we
3297 do the high-numbered word first.
3299 In either case, set up in LATEHALF the operands to use
3300 for the high-numbered word and in some cases alter the
3301 operands in OPERANDS to be suitable for the low-numbered word. */
3303 if (size == 12)
3305 if (optype0 == REGOP)
3307 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3308 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3310 else if (optype0 == OFFSOP)
3312 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3313 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3315 else
3317 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3318 latehalf[0] = adjust_address (operands[0], SImode, 0);
3321 if (optype1 == REGOP)
3323 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3324 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3326 else if (optype1 == OFFSOP)
3328 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3329 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3331 else if (optype1 == CNSTOP)
3333 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3335 long l[3];
3337 REAL_VALUE_TO_TARGET_LONG_DOUBLE
3338 (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
3339 operands[1] = GEN_INT (l[0]);
3340 middlehalf[1] = GEN_INT (l[1]);
3341 latehalf[1] = GEN_INT (l[2]);
3343 else
3345 /* No non-CONST_DOUBLE constant should ever appear
3346 here. */
3347 gcc_assert (!CONSTANT_P (operands[1]));
3350 else
3352 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3353 latehalf[1] = adjust_address (operands[1], SImode, 0);
3356 else
3357 /* size is not 12: */
3359 if (optype0 == REGOP)
3360 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3361 else if (optype0 == OFFSOP)
3362 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3363 else
3364 latehalf[0] = adjust_address (operands[0], SImode, 0);
3366 if (optype1 == REGOP)
3367 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3368 else if (optype1 == OFFSOP)
3369 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3370 else if (optype1 == CNSTOP)
3371 split_double (operands[1], &operands[1], &latehalf[1]);
3372 else
3373 latehalf[1] = adjust_address (operands[1], SImode, 0);
3376 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3377 word first. We should use the adjusted operand 1 (which is N+4(REG))
3378 for the low word as well, to compensate for the first decrement of
3379 REG. */
3380 if (optype0 == PUSHOP
3381 && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
3382 operands[1] = middlehalf[1] = latehalf[1];
3384 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3385 if the upper part of reg N does not appear in the MEM, arrange to
3386 emit the move late-half first. Otherwise, compute the MEM address
3387 into the upper part of N and use that as a pointer to the memory
3388 operand. */
3389 if (optype0 == REGOP
3390 && (optype1 == OFFSOP || optype1 == MEMOP))
3392 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3394 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3395 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3397 /* If both halves of dest are used in the src memory address,
3398 compute the address into latehalf of dest.
3399 Note that this can't happen if the dest is two data regs. */
3400 compadr:
3401 xops[0] = latehalf[0];
3402 xops[1] = XEXP (operands[1], 0);
3404 handle_compadr (xops);
3405 if (GET_MODE (operands[1]) == XFmode)
3407 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3408 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3409 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3411 else
3413 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3414 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3417 else if (size == 12
3418 && reg_overlap_mentioned_p (middlehalf[0],
3419 XEXP (operands[1], 0)))
3421 /* Check for two regs used by both source and dest.
3422 Note that this can't happen if the dest is all data regs.
3423 It can happen if the dest is d6, d7, a0.
3424 But in that case, latehalf is an addr reg, so
3425 the code at compadr does ok. */
3427 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3428 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3429 goto compadr;
3431 /* JRV says this can't happen: */
3432 gcc_assert (!addreg0 && !addreg1);
3434 /* Only the middle reg conflicts; simply put it last. */
3435 handle_movsi (operands);
3436 handle_movsi (latehalf);
3437 handle_movsi (middlehalf);
3439 return;
3441 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3442 /* If the low half of dest is mentioned in the source memory
3443 address, the arrange to emit the move late half first. */
3444 dest_overlapped_low = 1;
3447 /* If one or both operands autodecrementing,
3448 do the two words, high-numbered first. */
3450 /* Likewise, the first move would clobber the source of the second one,
3451 do them in the other order. This happens only for registers;
3452 such overlap can't happen in memory unless the user explicitly
3453 sets it up, and that is an undefined circumstance. */
3455 if (optype0 == PUSHOP || optype1 == PUSHOP
3456 || (optype0 == REGOP && optype1 == REGOP
3457 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3458 || REGNO (operands[0]) == REGNO (latehalf[1])))
3459 || dest_overlapped_low)
3461 /* Make any unoffsettable addresses point at high-numbered word. */
3462 if (addreg0)
3463 handle_reg_adjust (addreg0, size - 4);
3464 if (addreg1)
3465 handle_reg_adjust (addreg1, size - 4);
3467 /* Do that word. */
3468 handle_movsi (latehalf);
3470 /* Undo the adds we just did. */
3471 if (addreg0)
3472 handle_reg_adjust (addreg0, -4);
3473 if (addreg1)
3474 handle_reg_adjust (addreg1, -4);
3476 if (size == 12)
3478 handle_movsi (middlehalf);
3480 if (addreg0)
3481 handle_reg_adjust (addreg0, -4);
3482 if (addreg1)
3483 handle_reg_adjust (addreg1, -4);
3486 /* Do low-numbered word. */
3488 handle_movsi (operands);
3489 return;
3492 /* Normal case: do the two words, low-numbered first. */
3494 m68k_final_prescan_insn (NULL, operands, 2);
3495 handle_movsi (operands);
3497 /* Do the middle one of the three words for long double */
3498 if (size == 12)
3500 if (addreg0)
3501 handle_reg_adjust (addreg0, 4);
3502 if (addreg1)
3503 handle_reg_adjust (addreg1, 4);
3505 m68k_final_prescan_insn (NULL, middlehalf, 2);
3506 handle_movsi (middlehalf);
3509 /* Make any unoffsettable addresses point at high-numbered word. */
3510 if (addreg0)
3511 handle_reg_adjust (addreg0, 4);
3512 if (addreg1)
3513 handle_reg_adjust (addreg1, 4);
3515 /* Do that word. */
3516 m68k_final_prescan_insn (NULL, latehalf, 2);
3517 handle_movsi (latehalf);
3519 /* Undo the adds we just did. */
3520 if (addreg0)
3521 handle_reg_adjust (addreg0, -(size - 4));
3522 if (addreg1)
3523 handle_reg_adjust (addreg1, -(size - 4));
3525 return;
/* Output assembler code to adjust REG by N.  */
static void
output_reg_adjust (rtx reg, int n)
{
  const char *s;

  /* REG must be SImode and N in [-12, 12], nonzero; the switch below
     additionally only handles multiples of 4 (anything else hits
     gcc_unreachable).  */
  gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);

  switch (n)
    {
    case 12:
      /* addq/subq only accept #1..#8, so 12 needs the full-size insn.  */
      s = "add%.l #12,%0";
      break;

    case 8:
      s = "addq%.l #8,%0";
      break;

    case 4:
      s = "addq%.l #4,%0";
      break;

    case -12:
      s = "sub%.l #12,%0";
      break;

    case -8:
      s = "subq%.l #8,%0";
      break;

    case -4:
      s = "subq%.l #4,%0";
      break;

    default:
      gcc_unreachable ();
      s = NULL;  /* Not reached; silences uninitialized-use warnings.  */
    }

  output_asm_insn (s, &reg);
}
3570 /* Emit rtl code to adjust REG by N. */
3571 static void
3572 emit_reg_adjust (rtx reg1, int n)
3574 rtx reg2;
3576 gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);
3578 reg1 = copy_rtx (reg1);
3579 reg2 = copy_rtx (reg1);
3581 if (n < 0)
3582 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3583 else if (n > 0)
3584 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3585 else
3586 gcc_unreachable ();
/* Output assembler to load the address OPERANDS[1] into register
   OPERANDS[0].  (Note: %0 is the destination of the lea; the operands
   are not the other way around.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  The template is chosen by singlemove_string (defined
   earlier in this file).  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
3604 /* Copy OP and change its mode to MODE. */
3605 static rtx
3606 copy_operand (rtx op, machine_mode mode)
3608 /* ??? This looks really ugly. There must be a better way
3609 to change a mode on the operand. */
3610 if (GET_MODE (op) != VOIDmode)
3612 if (REG_P (op))
3613 op = gen_rtx_REG (mode, REGNO (op));
3614 else
3616 op = copy_rtx (op);
3617 PUT_MODE (op, mode);
3621 return op;
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.  */
static void
emit_movsi (rtx operands[2])
{
  /* Re-mode both operands to SImode so gen_movsi's predicates match.  */
  operands[0] = copy_operand (operands[0], SImode);
  operands[1] = copy_operand (operands[1], SImode);

  emit_insn (gen_movsi (operands[0], operands[1]));
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  All output happens through the
   handle_move_double callbacks, so the returned template is empty.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  This is the RTL-emitting twin of
   output_move_double, driving handle_move_double with insn-emitting
   callbacks instead of assembler-printing ones.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
3653 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3654 new rtx with the correct mode. */
3656 static rtx
3657 force_mode (machine_mode mode, rtx orig)
3659 if (mode == GET_MODE (orig))
3660 return orig;
3662 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3663 abort ();
3665 return gen_rtx_REG (mode, REGNO (orig));
/* Return nonzero if OP is a floating-point hard register.
   reg_renumber is non-null only once register allocation has begun, so
   this deliberately answers 0 before then -- NOTE(review): presumably
   because the secondary-reload paths in emit_move_sequence are only
   meaningful at that point; confirm against callers.  MODE is unused.  */
static int
fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): this routine appears adapted from the PA port (see the
   "14 bits" comments below, which match PA displacement limits, not
   m68k ones) -- the logic is nevertheless self-consistent here.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, a pseudo that received no hard register has an
     equivalent stack slot; substitute that MEM so the secondary-reload
     cases below see the real memory operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  /* Likewise for the source operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* If reload has recorded a replacement for a MEM address, use it.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* Load the displacement, then rebuild the address as
	     (op base scratch) and load that into the scratch reg.  */
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3835 /* Split one or more DImode RTL references into pairs of SImode
3836 references. The RTL can be REG, offsettable MEM, integer constant, or
3837 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3838 split and "num" is its length. lo_half and hi_half are output arrays
3839 that parallel "operands". */
3841 void
3842 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3844 while (num--)
3846 rtx op = operands[num];
3848 /* simplify_subreg refuses to split volatile memory addresses,
3849 but we still have to handle it. */
3850 if (GET_CODE (op) == MEM)
3852 lo_half[num] = adjust_address (op, SImode, 4);
3853 hi_half[num] = adjust_address (op, SImode, 0);
3855 else
3857 lo_half[num] = simplify_gen_subreg (SImode, op,
3858 GET_MODE (op) == VOIDmode
3859 ? DImode : GET_MODE (op), 4);
3860 hi_half[num] = simplify_gen_subreg (SImode, op,
3861 GET_MODE (op) == VOIDmode
3862 ? DImode : GET_MODE (op), 0);
3867 /* Split X into a base and a constant offset, storing them in *BASE
3868 and *OFFSET respectively. */
3870 static void
3871 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3873 *offset = 0;
3874 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3876 *offset += INTVAL (XEXP (x, 1));
3877 x = XEXP (x, 0);
3879 *base = x;
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  (for a store the MEM
	 is the SET_DEST, operand 0; for a load it is SET_SRC.)  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address: each access must be exactly
	 GET_MODE_SIZE above the previous one.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* With automodification, element 0 is the base-register adjustment,
     so the actual moves start at element 1.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The emitted insn clobbers the condition codes in ways the cc0
     tracking cannot describe.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly auto-modified) address; operand 1 the
     register mask computed above.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4065 /* Return a REG that occurs in ADDR with coefficient 1.
4066 ADDR can be effectively incremented by incrementing REG. */
4068 static rtx
4069 find_addr_reg (rtx addr)
4071 while (GET_CODE (addr) == PLUS)
4073 if (GET_CODE (XEXP (addr, 0)) == REG)
4074 addr = XEXP (addr, 0);
4075 else if (GET_CODE (XEXP (addr, 1)) == REG)
4076 addr = XEXP (addr, 1);
4077 else if (CONSTANT_P (XEXP (addr, 0)))
4078 addr = XEXP (addr, 1);
4079 else if (CONSTANT_P (XEXP (addr, 1)))
4080 addr = XEXP (addr, 0);
4081 else
4082 gcc_unreachable ();
4084 gcc_assert (GET_CODE (addr) == REG);
4085 return addr;
/* Output assembler code to perform a 32-bit 3-operand add.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* True 3-operand case: we must use LEA, whose base must be an
	 address register, so swap the source operands if needed.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.
	 LEA's displacement is 16-bit, so fall back to move+add.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  /* Two-operand case (dest == src1): pick the cheapest immediate form.  */
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates 1..8 only.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* Address-register destination with a 16-bit immediate: add.w
	 sign-extends into the full register on address regs.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	/* A call clobbers the flags entirely.  */
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into an address register leave the flags alone, but
	     invalidate any saved cc expressions this insn modifies.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Record that the flags now describe SET_DEST vs. SET_SRC.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* Falls through to default (no further action needed).  */

      default:
	break;
      }
  /* If the source mentions the destination register, the recorded
     source expression is stale after the insn executes.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  /* FP-register results live in the 68881's FPSR, not the CPU CCR.  */
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	/* The comparison was emitted with its operands swapped.  */
	cc_status.flags |= CC_REVERSED;
    }
}
4275 const char *
4276 output_move_const_double (rtx *operands)
4278 int code = standard_68881_constant_p (operands[1]);
4280 if (code != 0)
4282 static char buf[40];
4284 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4285 return buf;
4287 return "fmove%.d %1,%0";
4290 const char *
4291 output_move_const_single (rtx *operands)
4293 int code = standard_68881_constant_p (operands[1]);
4295 if (code != 0)
4297 static char buf[40];
4299 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4300 return buf;
4302 return "fmove%.s %f1,%0";
/* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
   from the "fmovecr" instruction.
   The value, anded with 0xff, gives the code to use in fmovecr
   to get the desired constant.  */

/* This code has been fixed for cross-compilation.  */

/* Nonzero once values_68881 has been filled in from strings_68881.  */
static int inited_68881_table = 0;

/* Decimal spellings of the 68881 on-chip ROM constants we use.  Per
   the comment in standard_68881_constant_p, larger powers of ten in
   the ROM are excluded because they are not exactly equal to a C
   `double'.  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets, parallel to strings_68881 -- NOTE(review):
   presumably matching the MC68881 constant-ROM table; confirm against
   the 68881/68882 user's manual.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary forms of strings_68881, filled in by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
/* Set up values_68881 array by converting the decimal values
   strings_68881 to binary.  */

void
init_68881_table (void)
{
  int i;
  REAL_VALUE_TYPE r;
  machine_mode mode;

  mode = SFmode;
  for (i = 0; i < 7; i++)
    {
      /* The final entry (1e16) is converted in DFmode; the earlier ones
	 in SFmode (standard_68881_constant_p only matches entry 6 for
	 non-SFmode constants).  */
      if (i == 6)
	mode = DFmode;
      r = REAL_VALUE_ATOF (strings_68881[i], mode);
      values_68881[i] = r;
    }
  inited_68881_table = 1;
}
/* Return the fmovecr ROM code for X (a CONST_DOUBLE), or 0 if X is not
   one of the usable 68881 ROM constants.  */
int
standard_68881_constant_p (rtx x)
{
  const REAL_VALUE_TYPE *r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  if (! inited_68881_table)
    init_68881_table ();

  r = CONST_DOUBLE_REAL_VALUE (x);

  /* Use real_identical instead of real_equal so that -0.0 is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (real_identical (r, &values_68881[i]))
	return (codes_68881[i]);
    }

  /* Entry 6 (1e16) is only exact in double precision.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (real_equal (r, &values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
/* If X is a floating-point constant, return the logarithm of X base 2,
   or 0 if X is not a power of 2.  */

int
floating_exact_log2 (rtx x)
{
  const REAL_VALUE_TYPE *r;
  REAL_VALUE_TYPE r1;
  int exp;

  r = CONST_DOUBLE_REAL_VALUE (x);

  /* Only values >= 1.0 are considered; anything smaller (including
     negative values and fractional powers of two) returns 0.  */
  if (real_less (r, &dconst1))
    return 0;

  /* X is a power of two exactly when 2^exponent reproduces it.  */
  exp = real_exponent (r);
  real_2expN (&r1, exp, DFmode);
  if (real_equal (&r1, r))
    return exp;

  return 0;
}
4414 /* A C compound statement to output to stdio stream STREAM the
4415 assembler syntax for an instruction operand X. X is an RTL
4416 expression.
4418 CODE is a value that can be used to specify one of several ways
4419 of printing the operand. It is used when identical operands
4420 must be printed differently depending on the context. CODE
4421 comes from the `%' specification that was used to request
4422 printing of the operand. If the specification was just `%DIGIT'
4423 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4424 is the ASCII code for LTR.
4426 If X is a register, this macro should print the register's name.
4427 The names can be found in an array `reg_names' whose type is
4428 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4430 When the machine description has a specification `%PUNCT' (a `%'
4431 followed by a punctuation character), this macro is called with
4432 a null pointer for X and the punctuation character for CODE.
4434 The m68k specific codes are:
4436 '.' for dot needed in Motorola-style opcode names.
4437 '-' for an operand pushing on the stack:
4438 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4439 '+' for an operand pushing on the stack:
4440 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4441 '@' for a reference to the top word on the stack:
4442 sp@, (sp) or (%sp) depending on the style of syntax.
4443 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4444 but & in SGS syntax).
4445 '!' for the cc register (used in an `and to cc' insn).
4446 '$' for the letter `s' in an op code, but only on the 68040.
4447 '&' for the letter `d' in an op code, but only on the 68040.
4448 '/' for register prefix needed by longlong.h.
4449 '?' for m68k_library_id_string
4451 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4452 'd' to force memory addressing to be absolute, not relative.
4453 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4454 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4455 or print pair of registers as rx:ry.
4456 'p' print an address with @PLTPC attached, but only if the operand
4457 is not locally-bound. */
4459 void
4460 print_operand (FILE *file, rtx op, int letter)
/* Punctuation codes come first: they take no operand and only emit
   syntax-dependent boilerplate.  */
4462 if (letter == '.')
4464 if (MOTOROLA)
4465 fprintf (file, ".");
4467 else if (letter == '#')
4468 asm_fprintf (file, "%I");
4469 else if (letter == '-')
4470 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4471 else if (letter == '+')
4472 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4473 else if (letter == '@')
4474 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
/* NOTE(review): '!' is documented above as the cc register but prints
   %fpcr here -- confirm against the md patterns that use it.  */
4475 else if (letter == '!')
4476 asm_fprintf (file, "%Rfpcr");
4477 else if (letter == '$')
4479 if (TARGET_68040)
4480 fprintf (file, "s");
4482 else if (letter == '&')
4484 if (TARGET_68040)
4485 fprintf (file, "d");
4487 else if (letter == '/')
4488 asm_fprintf (file, "%R");
4489 else if (letter == '?')
4490 asm_fprintf (file, m68k_library_id_string);
4491 else if (letter == 'p')
4493 output_addr_const (file, op);
4494 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4495 fprintf (file, "@PLTPC");
/* Remaining codes print the operand itself, dispatched on RTL class.  */
4497 else if (GET_CODE (op) == REG)
4499 if (letter == 'R')
4500 /* Print out the second register name of a register pair.
4501 I.e., R (6) => 7. */
4502 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4503 else
4504 fputs (M68K_REGNAME(REGNO (op)), file);
4506 else if (GET_CODE (op) == MEM)
4508 output_address (GET_MODE (op), XEXP (op, 0));
/* 'd' forces a pre-68020 constant address into the absolute long form
   unless it already fits the signed 16-bit short form.  */
4509 if (letter == 'd' && ! TARGET_68020
4510 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4511 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4512 && INTVAL (XEXP (op, 0)) < 0x8000
4513 && INTVAL (XEXP (op, 0)) >= -0x8000))
4514 fprintf (file, MOTOROLA ? ".l" : ":l");
/* Floating constants are emitted as immediate hex bit patterns:
   one longword for SFmode, three for XFmode, two for DFmode.  */
4516 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4518 long l;
4519 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4520 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4522 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4524 long l[3];
4525 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4526 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4527 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4529 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4531 long l[2];
4532 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
4533 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4535 else
4537 /* Use `print_operand_address' instead of `output_addr_const'
4538 to ensure that we print relevant PIC stuff. */
4539 asm_fprintf (file, "%I");
4540 if (TARGET_PCREL
4541 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4542 print_operand_address (file, op);
4543 else
4544 output_addr_const (file, op);
4548 /* Return string for TLS relocation RELOC. */
4550 static const char *
4551 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4553 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4554 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4556 switch (reloc)
4558 case RELOC_GOT:
4559 if (MOTOROLA)
4561 if (flag_pic == 1 && TARGET_68020)
4562 return "@GOT.w";
4563 else
4564 return "@GOT";
4566 else
4568 if (TARGET_68020)
4570 switch (flag_pic)
4572 case 1:
4573 return ":w";
4574 case 2:
4575 return ":l";
4576 default:
4577 return "";
4581 gcc_unreachable ();
4583 case RELOC_TLSGD:
4584 return "@TLSGD";
4586 case RELOC_TLSLDM:
4587 return "@TLSLDM";
4589 case RELOC_TLSLDO:
4590 return "@TLSLDO";
4592 case RELOC_TLSIE:
4593 return "@TLSIE";
4595 case RELOC_TLSLE:
4596 return "@TLSLE";
4598 default:
4599 gcc_unreachable ();
4603 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4605 static bool
4606 m68k_output_addr_const_extra (FILE *file, rtx x)
4608 if (GET_CODE (x) == UNSPEC)
4610 switch (XINT (x, 1))
4612 case UNSPEC_RELOC16:
4613 case UNSPEC_RELOC32:
4614 output_addr_const (file, XVECEXP (x, 0, 0));
4615 fputs (m68k_get_reloc_decoration
4616 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4617 return true;
4619 default:
4620 break;
4624 return false;
4627 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4629 static void
4630 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4632 gcc_assert (size == 4);
4633 fputs ("\t.long\t", file);
4634 output_addr_const (file, x);
4635 fputs ("@TLSLDO+0x8000", file);
4638 /* In the name of slightly smaller debug output, and to cater to
4639 general assembler lossage, recognize various UNSPEC sequences
4640 and turn them back into a direct symbol reference. */
4642 static rtx
4643 m68k_delegitimize_address (rtx orig_x)
4645 rtx x;
4646 struct m68k_address addr;
4647 rtx unspec;
4649 orig_x = delegitimize_mem_from_attrs (orig_x);
4650 x = orig_x;
/* Strip a MEM wrapper so we can inspect the address itself.  */
4651 if (MEM_P (x))
4652 x = XEXP (x, 0);
4654 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4655 return orig_x;
/* Only addresses whose constant term is a CONST (potentially wrapping
   a relocation UNSPEC) are candidates.  */
4657 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4658 || addr.offset == NULL_RTX
4659 || GET_CODE (addr.offset) != CONST)
4660 return orig_x;
4662 unspec = XEXP (addr.offset, 0);
/* Allow an extra constant addend around the UNSPEC.  */
4663 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4664 unspec = XEXP (unspec, 0);
4665 if (GET_CODE (unspec) != UNSPEC
4666 || (XINT (unspec, 1) != UNSPEC_RELOC16
4667 && XINT (unspec, 1) != UNSPEC_RELOC32))
4668 return orig_x;
4669 x = XVECEXP (unspec, 0, 0);
4670 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
/* Re-attach the constant addend that wrapped the UNSPEC, if any.  */
4671 if (unspec != XEXP (addr.offset, 0))
4672 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
/* Rebuild the rest of the decomposed address: index*scale + base.  */
4673 if (addr.index)
4675 rtx idx = addr.index;
4676 if (addr.scale != 1)
4677 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4678 x = gen_rtx_PLUS (Pmode, idx, x);
4680 if (addr.base)
4681 x = gen_rtx_PLUS (Pmode, addr.base, x);
/* If the input was a MEM, return a MEM over the rebuilt address.  */
4682 if (MEM_P (orig_x))
4683 x = replace_equiv_address_nv (orig_x, x);
4684 return x;
4688 /* A C compound statement to output to stdio stream STREAM the
4689 assembler syntax for an instruction operand that is a memory
4690 reference whose address is ADDR. ADDR is an RTL expression.
4692 Note that this contains a kludge that knows that the only reason
4693 we have an address (plus (label_ref...) (reg...)) when not generating
4694 PIC code is in the insn before a tablejump, and we know that m68k.md
4695 generates a label LInnn: on such an insn.
4697 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4698 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4700 This routine is responsible for distinguishing between -fpic and -fPIC
4701 style relocations in an address. When generating -fpic code the
4702 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4703 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4705 void
4706 print_operand_address (FILE *file, rtx addr)
4708 struct m68k_address address;
4710 if (!m68k_decompose_address (QImode, addr, true, &address))
4711 gcc_unreachable ();
/* Auto-modify addresses: predecrement and postincrement forms.  */
4713 if (address.code == PRE_DEC)
4714 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4715 M68K_REGNAME (REGNO (address.base)));
4716 else if (address.code == POST_INC)
4717 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4718 M68K_REGNAME (REGNO (address.base)));
4719 else if (!address.base && !address.index)
4721 /* A constant address. */
4722 gcc_assert (address.offset == addr);
4723 if (GET_CODE (addr) == CONST_INT)
4725 /* (xxx).w or (xxx).l. */
4726 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4727 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4728 else
4729 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4731 else if (TARGET_PCREL)
4733 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4734 fputc ('(', file);
4735 output_addr_const (file, addr);
4736 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4738 else
4740 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4741 name ends in `.<letter>', as the last 2 characters can be
4742 mistaken as a size suffix. Put the name in parentheses. */
4743 if (GET_CODE (addr) == SYMBOL_REF
4744 && strlen (XSTR (addr, 0)) > 2
4745 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4747 putc ('(', file);
4748 output_addr_const (file, addr);
4749 putc (')', file);
4751 else
4752 output_addr_const (file, addr);
/* A base/index address, or the pc-relative (d8,pc,Xn) form used for
   jump tables.  */
4755 else
4757 int labelno;
4759 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4760 label being accessed, otherwise it is -1. */
4761 labelno = (address.offset
4762 && !address.base
4763 && GET_CODE (address.offset) == LABEL_REF
4764 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4765 : -1);
4766 if (MOTOROLA)
4768 /* Print the "offset(base" component. */
4769 if (labelno >= 0)
4770 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4771 else
4773 if (address.offset)
4774 output_addr_const (file, address.offset);
4776 putc ('(', file);
4777 if (address.base)
4778 fputs (M68K_REGNAME (REGNO (address.base)), file);
4780 /* Print the ",index" component, if any. */
4781 if (address.index)
4783 if (address.base)
4784 putc (',', file);
4785 fprintf (file, "%s.%c",
4786 M68K_REGNAME (REGNO (address.index)),
4787 GET_MODE (address.index) == HImode ? 'w' : 'l');
4788 if (address.scale != 1)
4789 fprintf (file, "*%d", address.scale);
4791 putc (')', file);
4793 else /* !MOTOROLA */
4795 if (!address.offset && !address.index)
4796 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4797 else
4799 /* Print the "base@(offset" component. */
4800 if (labelno >= 0)
4801 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4802 else
4804 if (address.base)
4805 fputs (M68K_REGNAME (REGNO (address.base)), file);
4806 fprintf (file, "@(");
4807 if (address.offset)
4808 output_addr_const (file, address.offset);
4810 /* Print the ",index" component, if any. */
4811 if (address.index)
4813 fprintf (file, ",%s:%c",
4814 M68K_REGNAME (REGNO (address.index)),
4815 GET_MODE (address.index) == HImode ? 'w' : 'l');
4816 if (address.scale != 1)
4817 fprintf (file, ":%d", address.scale);
4819 putc (')', file);
4825 /* Check for cases where a clr insns can be omitted from code using
4826 strict_low_part sets. For example, the second clrl here is not needed:
4827 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4829 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4830 insn we are checking for redundancy. TARGET is the register set by the
4831 clear insn. */
4833 bool
4834 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
4835 rtx target)
4837 rtx_insn *p = first_insn;
4839 while ((p = PREV_INSN (p)))
4841 if (NOTE_INSN_BASIC_BLOCK_P (p))
4842 return false;
4844 if (NOTE_P (p))
4845 continue;
4847 /* If it isn't an insn, then give up. */
4848 if (!INSN_P (p))
4849 return false;
4851 if (reg_set_p (target, p))
4853 rtx set = single_set (p);
4854 rtx dest;
4856 /* If it isn't an easy to recognize insn, then give up. */
4857 if (! set)
4858 return false;
4860 dest = SET_DEST (set);
4862 /* If this sets the entire target register to zero, then our
4863 first_insn is redundant. */
4864 if (rtx_equal_p (dest, target)
4865 && SET_SRC (set) == const0_rtx)
4866 return true;
4867 else if (GET_CODE (dest) == STRICT_LOW_PART
4868 && GET_CODE (XEXP (dest, 0)) == REG
4869 && REGNO (XEXP (dest, 0)) == REGNO (target)
4870 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4871 <= GET_MODE_SIZE (mode)))
4872 /* This is a strict low part set which modifies less than
4873 we are using, so it is safe. */
4875 else
4876 return false;
4880 return false;
4883 /* Operand predicates for implementing asymmetric pc-relative addressing
4884 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4885 when used as a source operand, but not as a destination operand.
4887 We model this by restricting the meaning of the basic predicates
4888 (general_operand, memory_operand, etc) to forbid the use of this
4889 addressing mode, and then define the following predicates that permit
4890 this addressing mode. These predicates can then be used for the
4891 source operands of the appropriate instructions.
4893 n.b. While it is theoretically possible to change all machine patterns
4894 to use this addressing mode where permitted by the architecture,
4895 it has only been implemented for "common" cases: SImode, HImode, and
4896 QImode operands, and only for the principal operations that would
4897 require this addressing mode: data movement and simple integer operations.
4899 In parallel with these new predicates, two new constraint letters
4900 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4901 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4902 In the pcrel case 's' is only valid in combination with 'a' registers.
4903 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4904 of how these constraints are used.
4906 The use of these predicates is strictly optional, though patterns that
4907 don't will cause an extra reload register to be allocated where one
4908 was not necessary:
4910 lea (abc:w,%pc),%a0 ; need to reload address
4911 moveq &1,%d1 ; since write to pc-relative space
4912 movel %d1,%a0@ ; is not allowed
4914 lea (abc:w,%pc),%a1 ; no need to reload address here
4915 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4917 For more info, consult tiemann@cygnus.com.
4920 All of the ugliness with predicates and constraints is due to the
4921 simple fact that the m68k does not allow a pc-relative addressing
4922 mode as a destination. gcc does not distinguish between source and
4923 destination addresses. Hence, if we claim that pc-relative address
4924 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4925 end up with invalid code. To get around this problem, we left
4926 pc-relative modes as invalid addresses, and then added special
4927 predicates and constraints to accept them.
4929 A cleaner way to handle this is to modify gcc to distinguish
4930 between source and destination addresses. We can then say that
4931 pc-relative is a valid source address but not a valid destination
4932 address, and hopefully avoid a lot of the predicate and constraint
4933 hackery. Unfortunately, this would be a pretty big change. It would
4934 be a useful change for a number of ports, but there aren't any current
4935 plans to undertake this.
4937 ***************************************************************************/
4940 const char *
4941 output_andsi3 (rtx *operands)
4943 int logval;
4944 if (GET_CODE (operands[2]) == CONST_INT
4945 && (INTVAL (operands[2]) | 0xffff) == -1
4946 && (DATA_REG_P (operands[0])
4947 || offsettable_memref_p (operands[0]))
4948 && !TARGET_COLDFIRE)
4950 if (GET_CODE (operands[0]) != REG)
4951 operands[0] = adjust_address (operands[0], HImode, 2);
4952 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4953 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4954 CC_STATUS_INIT;
4955 if (operands[2] == const0_rtx)
4956 return "clr%.w %0";
4957 return "and%.w %2,%0";
4959 if (GET_CODE (operands[2]) == CONST_INT
4960 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4961 && (DATA_REG_P (operands[0])
4962 || offsettable_memref_p (operands[0])))
4964 if (DATA_REG_P (operands[0]))
4965 operands[1] = GEN_INT (logval);
4966 else
4968 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4969 operands[1] = GEN_INT (logval % 8);
4971 /* This does not set condition codes in a standard way. */
4972 CC_STATUS_INIT;
4973 return "bclr %1,%0";
4975 return "and%.l %2,%0";
4978 const char *
4979 output_iorsi3 (rtx *operands)
4981 register int logval;
4982 if (GET_CODE (operands[2]) == CONST_INT
4983 && INTVAL (operands[2]) >> 16 == 0
4984 && (DATA_REG_P (operands[0])
4985 || offsettable_memref_p (operands[0]))
4986 && !TARGET_COLDFIRE)
4988 if (GET_CODE (operands[0]) != REG)
4989 operands[0] = adjust_address (operands[0], HImode, 2);
4990 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4991 CC_STATUS_INIT;
4992 if (INTVAL (operands[2]) == 0xffff)
4993 return "mov%.w %2,%0";
4994 return "or%.w %2,%0";
4996 if (GET_CODE (operands[2]) == CONST_INT
4997 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4998 && (DATA_REG_P (operands[0])
4999 || offsettable_memref_p (operands[0])))
5001 if (DATA_REG_P (operands[0]))
5002 operands[1] = GEN_INT (logval);
5003 else
5005 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5006 operands[1] = GEN_INT (logval % 8);
5008 CC_STATUS_INIT;
5009 return "bset %1,%0";
5011 return "or%.l %2,%0";
5014 const char *
5015 output_xorsi3 (rtx *operands)
5017 register int logval;
5018 if (GET_CODE (operands[2]) == CONST_INT
5019 && INTVAL (operands[2]) >> 16 == 0
5020 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5021 && !TARGET_COLDFIRE)
5023 if (! DATA_REG_P (operands[0]))
5024 operands[0] = adjust_address (operands[0], HImode, 2);
5025 /* Do not delete a following tstl %0 insn; that would be incorrect. */
5026 CC_STATUS_INIT;
5027 if (INTVAL (operands[2]) == 0xffff)
5028 return "not%.w %0";
5029 return "eor%.w %2,%0";
5031 if (GET_CODE (operands[2]) == CONST_INT
5032 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5033 && (DATA_REG_P (operands[0])
5034 || offsettable_memref_p (operands[0])))
5036 if (DATA_REG_P (operands[0]))
5037 operands[1] = GEN_INT (logval);
5038 else
5040 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5041 operands[1] = GEN_INT (logval % 8);
5043 CC_STATUS_INIT;
5044 return "bchg %1,%0";
5046 return "eor%.l %2,%0";
5049 /* Return the instruction that should be used for a call to address X,
5050 which is known to be in operand 0. */
5052 const char *
5053 output_call (rtx x)
5055 if (symbolic_operand (x, VOIDmode))
5056 return m68k_symbolic_call;
5057 else
5058 return "jsr %a0";
5061 /* Likewise sibling calls. */
5063 const char *
5064 output_sibcall (rtx x)
5066 if (symbolic_operand (x, VOIDmode))
5067 return m68k_symbolic_jump;
5068 else
5069 return "jmp %a0";
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK: emit the assembly for a thunk
   that adjusts the incoming `this' pointer (stored at 4(%sp)) by DELTA
   and, if VCALL_OFFSET is nonzero, by *(*this + VCALL_OFFSET), then
   tail-calls FUNCTION.  */
5072 static void
5073 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5074 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5075 tree function)
5077 rtx this_slot, offset, addr, mem, tmp;
5078 rtx_insn *insn;
5080 /* Avoid clobbering the struct value reg by using the
5081 static chain reg as a temporary. */
5082 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5084 /* Pretend to be a post-reload pass while generating rtl. */
5085 reload_completed = 1;
5087 /* The "this" pointer is stored at 4(%sp). */
5088 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5089 stack_pointer_rtx, 4));
5091 /* Add DELTA to THIS. */
5092 if (delta != 0)
5094 /* Make the offset a legitimate operand for memory addition. */
5095 offset = GEN_INT (delta);
/* Constants outside the quick (-8..8) range may need to go through
   %d0 first.  */
5096 if ((delta < -8 || delta > 8)
5097 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5099 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5100 offset = gen_rtx_REG (Pmode, D0_REG);
5102 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5103 copy_rtx (this_slot), offset));
5106 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5107 if (vcall_offset != 0)
5109 /* Set the static chain register to *THIS. */
5110 emit_move_insn (tmp, this_slot)
5111 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5113 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5114 addr = plus_constant (Pmode, tmp, vcall_offset);
5115 if (!m68k_legitimate_address_p (Pmode, addr, true))
5117 emit_insn (gen_rtx_SET (tmp, addr));
5118 addr = tmp;
5121 /* Load the offset into %d0 and add it to THIS. */
5122 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5123 gen_rtx_MEM (Pmode, addr));
5124 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5125 copy_rtx (this_slot),
5126 gen_rtx_REG (Pmode, D0_REG)));
5129 /* Jump to the target function. Use a sibcall if direct jumps are
5130 allowed, otherwise load the address into a register first. */
5131 mem = DECL_RTL (function);
5132 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5134 gcc_assert (flag_pic);
5136 if (!TARGET_SEP_DATA)
5138 /* Use the static chain register as a temporary (call-clobbered)
5139 GOT pointer for this function. We can use the static chain
5140 register because it isn't live on entry to the thunk. */
5141 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5142 emit_insn (gen_load_got (pic_offset_table_rtx));
5144 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5145 mem = replace_equiv_address (mem, tmp);
5147 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5148 SIBLING_CALL_P (insn) = 1;
5150 /* Run just enough of rest_of_compilation. */
5151 insn = get_insns ();
5152 split_all_insns_noflow ();
5153 final_start_function (insn, file, 1);
5154 final (insn, file, 1);
5155 final_end_function ();
5157 /* Clean up the vars set above. */
5158 reload_completed = 0;
5160 /* Restore the original PIC register. */
5161 if (flag_pic)
5162 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5165 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5167 static rtx
5168 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5169 int incoming ATTRIBUTE_UNUSED)
5171 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5174 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5176 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5177 unsigned int new_reg)
5180 /* Interrupt functions can only use registers that have already been
5181 saved by the prologue, even if they would normally be
5182 call-clobbered. */
5184 if ((m68k_get_function_kind (current_function_decl)
5185 == m68k_fk_interrupt_handler)
5186 && !df_regs_ever_live_p (new_reg))
5187 return 0;
5189 return 1;
5192 /* Implement TARGET_HARD_REGNO_NREGS.
5194 On the m68k, ordinary registers hold 32 bits worth;
5195 for the 68881 registers, a single register is always enough for
5196 anything that can be stored in them at all. */
5198 static unsigned int
5199 m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5201 if (regno >= 16)
5202 return GET_MODE_NUNITS (mode);
5203 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5206 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5207 registers can hold any mode, but restrict the 68881 registers to
5208 floating-point modes. */
5210 static bool
5211 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5213 if (DATA_REGNO_P (regno))
5215 /* Data Registers, can hold aggregate if fits in. */
5216 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5217 return true;
5219 else if (ADDRESS_REGNO_P (regno))
5221 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5222 return true;
5224 else if (FP_REGNO_P (regno))
5226 /* FPU registers, hold float or complex float of long double or
5227 smaller. */
5228 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5229 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5230 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5231 return true;
5233 return false;
5236 /* Implement TARGET_MODES_TIEABLE_P. */
5238 static bool
5239 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5241 return (!TARGET_HARD_FLOAT
5242 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5243 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5244 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5245 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5248 /* Implement SECONDARY_RELOAD_CLASS. */
5250 enum reg_class
5251 m68k_secondary_reload_class (enum reg_class rclass,
5252 machine_mode mode, rtx x)
5254 int regno;
5256 regno = true_regnum (x);
5258 /* If one operand of a movqi is an address register, the other
5259 operand must be a general register or constant. Other types
5260 of operand must be reloaded through a data register. */
5261 if (GET_MODE_SIZE (mode) == 1
5262 && reg_classes_intersect_p (rclass, ADDR_REGS)
5263 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5264 return DATA_REGS;
5266 /* PC-relative addresses must be loaded into an address register first. */
5267 if (TARGET_PCREL
5268 && !reg_class_subset_p (rclass, ADDR_REGS)
5269 && symbolic_operand (x, VOIDmode))
5270 return ADDR_REGS;
5272 return NO_REGS;
5275 /* Implement PREFERRED_RELOAD_CLASS. */
5277 enum reg_class
5278 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5280 enum reg_class secondary_class;
5282 /* If RCLASS might need a secondary reload, try restricting it to
5283 a class that doesn't. */
5284 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5285 if (secondary_class != NO_REGS
5286 && reg_class_subset_p (secondary_class, rclass))
5287 return secondary_class;
5289 /* Prefer to use moveq for in-range constants. */
5290 if (GET_CODE (x) == CONST_INT
5291 && reg_class_subset_p (DATA_REGS, rclass)
5292 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5293 return DATA_REGS;
5295 /* ??? Do we really need this now? */
5296 if (GET_CODE (x) == CONST_DOUBLE
5297 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5299 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5300 return FP_REGS;
5302 return NO_REGS;
5305 return rclass;
5308 /* Return floating point values in a 68881 register. This makes 68881 code
5309 a little bit faster. It also makes -msoft-float code incompatible with
5310 hard-float code, so people have to be careful not to mix the two.
5311 For ColdFire it was decided the ABI incompatibility is undesirable.
5312 If there is need for a hard-float ABI it is probably worth doing it
5313 properly and also passing function arguments in FP registers. */
5315 m68k_libcall_value (machine_mode mode)
5317 switch (mode) {
5318 case E_SFmode:
5319 case E_DFmode:
5320 case E_XFmode:
5321 if (TARGET_68881)
5322 return gen_rtx_REG (mode, FP0_REG);
5323 break;
5324 default:
5325 break;
5328 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5331 /* Location in which function value is returned.
5332 NOTE: Due to differences in ABIs, don't call this function directly,
5333 use FUNCTION_VALUE instead. */
5335 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5337 machine_mode mode;
5339 mode = TYPE_MODE (valtype);
5340 switch (mode) {
5341 case E_SFmode:
5342 case E_DFmode:
5343 case E_XFmode:
5344 if (TARGET_68881)
5345 return gen_rtx_REG (mode, FP0_REG);
5346 break;
5347 default:
5348 break;
5351 /* If the function returns a pointer, push that into %a0. */
5352 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5353 /* For compatibility with the large body of existing code which
5354 does not always properly declare external functions returning
5355 pointer types, the m68k/SVR4 convention is to copy the value
5356 returned for pointer functions from a0 to d0 in the function
5357 epilogue, so that callers that have neglected to properly
5358 declare the callee can still find the correct return value in
5359 d0. */
5360 return gen_rtx_PARALLEL
5361 (mode,
5362 gen_rtvec (2,
5363 gen_rtx_EXPR_LIST (VOIDmode,
5364 gen_rtx_REG (mode, A0_REG),
5365 const0_rtx),
5366 gen_rtx_EXPR_LIST (VOIDmode,
5367 gen_rtx_REG (mode, D0_REG),
5368 const0_rtx)));
5369 else if (POINTER_TYPE_P (valtype))
5370 return gen_rtx_REG (mode, A0_REG);
5371 else
5372 return gen_rtx_REG (mode, D0_REG);
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  /* BLKmode values never fit in registers.  */
  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode));
}
#endif
5397 /* CPU to schedule the program for. */
5398 enum attr_cpu m68k_sched_cpu;
5400 /* MAC to schedule the program for. */
5401 enum attr_mac m68k_sched_mac;
/* Operand classification used by the scheduler helpers below
   (sched_address_type, sched_attr_op_type); the OP_TYPE_MEM* values
   follow the m68k effective-address (EA) mode numbering.  */
5403 /* Operand type. */
5404 enum attr_op_type
5406 /* No operand. */
5407 OP_TYPE_NONE,
5409 /* Integer register. */
5410 OP_TYPE_RN,
5412 /* FP register. */
5413 OP_TYPE_FPN,
5415 /* Implicit mem reference (e.g. stack). */
5416 OP_TYPE_MEM1,
5418 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5419 OP_TYPE_MEM234,
5421 /* Memory with offset but without indexing. EA mode 5. */
5422 OP_TYPE_MEM5,
5424 /* Memory with indexing. EA mode 6. */
5425 OP_TYPE_MEM6,
5427 /* Memory referenced by absolute address. EA mode 7. */
5428 OP_TYPE_MEM7,
5430 /* Immediate operand that doesn't require extension word. */
5431 OP_TYPE_IMM_Q,
5433 /* Immediate 16 bit operand. */
5434 OP_TYPE_IMM_W,
5436 /* Immediate 32 bit operand. */
5437 OP_TYPE_IMM_L
5440 /* Return type of memory ADDR_RTX refers to. */
5441 static enum attr_op_type
5442 sched_address_type (machine_mode mode, rtx addr_rtx)
5444 struct m68k_address address;
5446 if (symbolic_operand (addr_rtx, VOIDmode))
5447 return OP_TYPE_MEM7;
5449 if (!m68k_decompose_address (mode, addr_rtx,
5450 reload_completed, &address))
5452 gcc_assert (!reload_completed);
5453 /* Reload will likely fix the address to be in the register. */
5454 return OP_TYPE_MEM234;
5457 if (address.scale != 0)
5458 return OP_TYPE_MEM6;
5460 if (address.base != NULL_RTX)
5462 if (address.offset == NULL_RTX)
5463 return OP_TYPE_MEM234;
5465 return OP_TYPE_MEM5;
5468 gcc_assert (address.offset != NULL_RTX);
5470 return OP_TYPE_MEM7;
5473 /* Return X or Y (depending on OPX_P) operand of INSN. */
5474 static rtx
5475 sched_get_operand (rtx_insn *insn, bool opx_p)
5477 int i;
5479 if (recog_memoized (insn) < 0)
5480 gcc_unreachable ();
5482 extract_constrain_insn_cached (insn);
5484 if (opx_p)
5485 i = get_attr_opx (insn);
5486 else
5487 i = get_attr_opy (insn);
5489 if (i >= recog_data.n_operands)
5490 return NULL;
5492 return recog_data.operand[i];
5495 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5496 If ADDRESS_P is true, return type of memory location operand refers to. */
5497 static enum attr_op_type
5498 sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
5500 rtx op;
5502 op = sched_get_operand (insn, opx_p);
5504 if (op == NULL)
5506 gcc_assert (!reload_completed);
5507 return OP_TYPE_RN;
5510 if (address_p)
5511 return sched_address_type (QImode, op);
5513 if (memory_operand (op, VOIDmode))
5514 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5516 if (register_operand (op, VOIDmode))
/* Before reload, FP-ness is judged by mode; after reload, by the
   actual hard register.  */
5518 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5519 || (reload_completed && FP_REG_P (op)))
5520 return OP_TYPE_FPN;
5522 return OP_TYPE_RN;
5525 if (GET_CODE (op) == CONST_INT)
5527 int ival;
5529 ival = INTVAL (op);
5531 /* Check for quick constants. */
5532 switch (get_attr_type (insn))
5534 case TYPE_ALUQ_L:
5535 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5536 return OP_TYPE_IMM_Q;
5538 gcc_assert (!reload_completed);
5539 break;
5541 case TYPE_MOVEQ_L:
5542 if (USE_MOVQ (ival))
5543 return OP_TYPE_IMM_Q;
5545 gcc_assert (!reload_completed);
5546 break;
5548 case TYPE_MOV3Q_L:
5549 if (valid_mov3q_const (ival))
5550 return OP_TYPE_IMM_Q;
5552 gcc_assert (!reload_completed);
5553 break;
5555 default:
5556 break;
5559 if (IN_RANGE (ival, -0x8000, 0x7fff))
5560 return OP_TYPE_IMM_W;
5562 return OP_TYPE_IMM_L;
/* NOTE(review): SFmode immediates are classed as 16-bit (IMM_W) --
   presumably a scheduling-cost approximation; confirm.  */
5565 if (GET_CODE (op) == CONST_DOUBLE)
5567 switch (GET_MODE (op))
5569 case E_SFmode:
5570 return OP_TYPE_IMM_W;
5572 case E_VOIDmode:
5573 case E_DFmode:
5574 return OP_TYPE_IMM_L;
5576 default:
5577 gcc_unreachable ();
5581 if (GET_CODE (op) == CONST
5582 || symbolic_operand (op, VOIDmode)
5583 || LABEL_P (op))
5585 switch (GET_MODE (op))
5587 case E_QImode:
5588 return OP_TYPE_IMM_Q;
5590 case E_HImode:
5591 return OP_TYPE_IMM_W;
5593 case E_SImode:
5594 return OP_TYPE_IMM_L;
5596 default:
5597 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5598 /* Just a guess. */
5599 return OP_TYPE_IMM_W;
5601 return OP_TYPE_IMM_L;
/* Pre-reload only: operand not yet in a recognizable form; classify
   by its mode.  */
5605 gcc_assert (!reload_completed);
5607 if (FLOAT_MODE_P (GET_MODE (op)))
5608 return OP_TYPE_FPN;
5610 return OP_TYPE_RN;
5613 /* Implement opx_type attribute.
5614 Return type of INSN's operand X.
5615 If ADDRESS_P is true, return type of memory location operand refers to. */
5616 enum attr_opx_type
5617 m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
5619 switch (sched_attr_op_type (insn, true, address_p != 0))
5621 case OP_TYPE_RN:
5622 return OPX_TYPE_RN;
5624 case OP_TYPE_FPN:
5625 return OPX_TYPE_FPN;
5627 case OP_TYPE_MEM1:
5628 return OPX_TYPE_MEM1;
5630 case OP_TYPE_MEM234:
5631 return OPX_TYPE_MEM234;
5633 case OP_TYPE_MEM5:
5634 return OPX_TYPE_MEM5;
5636 case OP_TYPE_MEM6:
5637 return OPX_TYPE_MEM6;
5639 case OP_TYPE_MEM7:
5640 return OPX_TYPE_MEM7;
5642 case OP_TYPE_IMM_Q:
5643 return OPX_TYPE_IMM_Q;
5645 case OP_TYPE_IMM_W:
5646 return OPX_TYPE_IMM_W;
5648 case OP_TYPE_IMM_L:
5649 return OPX_TYPE_IMM_L;
5651 default:
5652 gcc_unreachable ();
5656 /* Implement opy_type attribute.
5657 Return type of INSN's operand Y.
5658 If ADDRESS_P is true, return type of memory location operand refers to. */
5659 enum attr_opy_type
5660 m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
5662 switch (sched_attr_op_type (insn, false, address_p != 0))
5664 case OP_TYPE_RN:
5665 return OPY_TYPE_RN;
5667 case OP_TYPE_FPN:
5668 return OPY_TYPE_FPN;
5670 case OP_TYPE_MEM1:
5671 return OPY_TYPE_MEM1;
5673 case OP_TYPE_MEM234:
5674 return OPY_TYPE_MEM234;
5676 case OP_TYPE_MEM5:
5677 return OPY_TYPE_MEM5;
5679 case OP_TYPE_MEM6:
5680 return OPY_TYPE_MEM6;
5682 case OP_TYPE_MEM7:
5683 return OPY_TYPE_MEM7;
5685 case OP_TYPE_IMM_Q:
5686 return OPY_TYPE_IMM_Q;
5688 case OP_TYPE_IMM_W:
5689 return OPY_TYPE_IMM_W;
5691 case OP_TYPE_IMM_L:
5692 return OPY_TYPE_IMM_L;
5694 default:
5695 gcc_unreachable ();
5699 /* Return size of INSN as int. */
5700 static int
5701 sched_get_attr_size_int (rtx_insn *insn)
5703 int size;
5705 switch (get_attr_type (insn))
5707 case TYPE_IGNORE:
5708 /* There should be no references to m68k_sched_attr_size for 'ignore'
5709 instructions. */
5710 gcc_unreachable ();
5711 return 0;
5713 case TYPE_MUL_L:
5714 size = 2;
5715 break;
5717 default:
5718 size = 1;
5719 break;
5722 switch (get_attr_opx_type (insn))
5724 case OPX_TYPE_NONE:
5725 case OPX_TYPE_RN:
5726 case OPX_TYPE_FPN:
5727 case OPX_TYPE_MEM1:
5728 case OPX_TYPE_MEM234:
5729 case OPY_TYPE_IMM_Q:
5730 break;
5732 case OPX_TYPE_MEM5:
5733 case OPX_TYPE_MEM6:
5734 /* Here we assume that most absolute references are short. */
5735 case OPX_TYPE_MEM7:
5736 case OPY_TYPE_IMM_W:
5737 ++size;
5738 break;
5740 case OPY_TYPE_IMM_L:
5741 size += 2;
5742 break;
5744 default:
5745 gcc_unreachable ();
5748 switch (get_attr_opy_type (insn))
5750 case OPY_TYPE_NONE:
5751 case OPY_TYPE_RN:
5752 case OPY_TYPE_FPN:
5753 case OPY_TYPE_MEM1:
5754 case OPY_TYPE_MEM234:
5755 case OPY_TYPE_IMM_Q:
5756 break;
5758 case OPY_TYPE_MEM5:
5759 case OPY_TYPE_MEM6:
5760 /* Here we assume that most absolute references are short. */
5761 case OPY_TYPE_MEM7:
5762 case OPY_TYPE_IMM_W:
5763 ++size;
5764 break;
5766 case OPY_TYPE_IMM_L:
5767 size += 2;
5768 break;
5770 default:
5771 gcc_unreachable ();
5774 if (size > 3)
5776 gcc_assert (!reload_completed);
5778 size = 3;
5781 return size;
5784 /* Return size of INSN as attribute enum value. */
5785 enum attr_size
5786 m68k_sched_attr_size (rtx_insn *insn)
5788 switch (sched_get_attr_size_int (insn))
5790 case 1:
5791 return SIZE_1;
5793 case 2:
5794 return SIZE_2;
5796 case 3:
5797 return SIZE_3;
5799 default:
5800 gcc_unreachable ();
5804 /* Return operand X or Y (depending on OPX_P) of INSN,
5805 if it is a MEM, or NULL overwise. */
5806 static enum attr_op_type
5807 sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
5809 if (opx_p)
5811 switch (get_attr_opx_type (insn))
5813 case OPX_TYPE_NONE:
5814 case OPX_TYPE_RN:
5815 case OPX_TYPE_FPN:
5816 case OPX_TYPE_IMM_Q:
5817 case OPX_TYPE_IMM_W:
5818 case OPX_TYPE_IMM_L:
5819 return OP_TYPE_RN;
5821 case OPX_TYPE_MEM1:
5822 case OPX_TYPE_MEM234:
5823 case OPX_TYPE_MEM5:
5824 case OPX_TYPE_MEM7:
5825 return OP_TYPE_MEM1;
5827 case OPX_TYPE_MEM6:
5828 return OP_TYPE_MEM6;
5830 default:
5831 gcc_unreachable ();
5834 else
5836 switch (get_attr_opy_type (insn))
5838 case OPY_TYPE_NONE:
5839 case OPY_TYPE_RN:
5840 case OPY_TYPE_FPN:
5841 case OPY_TYPE_IMM_Q:
5842 case OPY_TYPE_IMM_W:
5843 case OPY_TYPE_IMM_L:
5844 return OP_TYPE_RN;
5846 case OPY_TYPE_MEM1:
5847 case OPY_TYPE_MEM234:
5848 case OPY_TYPE_MEM5:
5849 case OPY_TYPE_MEM7:
5850 return OP_TYPE_MEM1;
5852 case OPY_TYPE_MEM6:
5853 return OP_TYPE_MEM6;
5855 default:
5856 gcc_unreachable ();
5861 /* Implement op_mem attribute. */
5862 enum attr_op_mem
5863 m68k_sched_attr_op_mem (rtx_insn *insn)
5865 enum attr_op_type opx;
5866 enum attr_op_type opy;
5868 opx = sched_get_opxy_mem_type (insn, true);
5869 opy = sched_get_opxy_mem_type (insn, false);
5871 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
5872 return OP_MEM_00;
5874 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
5876 switch (get_attr_opx_access (insn))
5878 case OPX_ACCESS_R:
5879 return OP_MEM_10;
5881 case OPX_ACCESS_W:
5882 return OP_MEM_01;
5884 case OPX_ACCESS_RW:
5885 return OP_MEM_11;
5887 default:
5888 gcc_unreachable ();
5892 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
5894 switch (get_attr_opx_access (insn))
5896 case OPX_ACCESS_R:
5897 return OP_MEM_I0;
5899 case OPX_ACCESS_W:
5900 return OP_MEM_0I;
5902 case OPX_ACCESS_RW:
5903 return OP_MEM_I1;
5905 default:
5906 gcc_unreachable ();
5910 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
5911 return OP_MEM_10;
5913 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
5915 switch (get_attr_opx_access (insn))
5917 case OPX_ACCESS_W:
5918 return OP_MEM_11;
5920 default:
5921 gcc_assert (!reload_completed);
5922 return OP_MEM_11;
5926 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
5928 switch (get_attr_opx_access (insn))
5930 case OPX_ACCESS_W:
5931 return OP_MEM_1I;
5933 default:
5934 gcc_assert (!reload_completed);
5935 return OP_MEM_1I;
5939 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
5940 return OP_MEM_I0;
5942 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
5944 switch (get_attr_opx_access (insn))
5946 case OPX_ACCESS_W:
5947 return OP_MEM_I1;
5949 default:
5950 gcc_assert (!reload_completed);
5951 return OP_MEM_I1;
5955 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
5956 gcc_assert (!reload_completed);
5957 return OP_MEM_I1;
5960 /* Data for ColdFire V4 index bypass.
5961 Producer modifies register that is used as index in consumer with
5962 specified scale. */
5963 static struct
5965 /* Producer instruction. */
5966 rtx pro;
5968 /* Consumer instruction. */
5969 rtx con;
5971 /* Scale of indexed memory access within consumer.
5972 Or zero if bypass should not be effective at the moment. */
5973 int scale;
5974 } sched_cfv4_bypass_data;
5976 /* An empty state that is used in m68k_sched_adjust_cost. */
5977 static state_t sched_adjust_cost_state;
5979 /* Implement adjust_cost scheduler hook.
5980 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
5981 static int
5982 m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
5983 unsigned int)
5985 int delay;
5987 if (recog_memoized (def_insn) < 0
5988 || recog_memoized (insn) < 0)
5989 return cost;
5991 if (sched_cfv4_bypass_data.scale == 1)
5992 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
5994 /* haifa-sched.c: insn_cost () calls bypass_p () just before
5995 targetm.sched.adjust_cost (). Hence, we can be relatively sure
5996 that the data in sched_cfv4_bypass_data is up to date. */
5997 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
5998 && sched_cfv4_bypass_data.con == insn);
6000 if (cost < 3)
6001 cost = 3;
6003 sched_cfv4_bypass_data.pro = NULL;
6004 sched_cfv4_bypass_data.con = NULL;
6005 sched_cfv4_bypass_data.scale = 0;
6007 else
6008 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6009 && sched_cfv4_bypass_data.con == NULL
6010 && sched_cfv4_bypass_data.scale == 0);
6012 /* Don't try to issue INSN earlier than DFA permits.
6013 This is especially useful for instructions that write to memory,
6014 as their true dependence (default) latency is better to be set to 0
6015 to workaround alias analysis limitations.
6016 This is, in fact, a machine independent tweak, so, probably,
6017 it should be moved to haifa-sched.c: insn_cost (). */
6018 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
6019 if (delay > cost)
6020 cost = delay;
6022 return cost;
6025 /* Return maximal number of insns that can be scheduled on a single cycle. */
6026 static int
6027 m68k_sched_issue_rate (void)
6029 switch (m68k_sched_cpu)
6031 case CPU_CFV1:
6032 case CPU_CFV2:
6033 case CPU_CFV3:
6034 return 1;
6036 case CPU_CFV4:
6037 return 2;
6039 default:
6040 gcc_unreachable ();
6041 return 0;
6045 /* Maximal length of instruction for current CPU.
6046 E.g. it is 3 for any ColdFire core. */
6047 static int max_insn_size;
6049 /* Data to model instruction buffer of CPU. */
6050 struct _sched_ib
6052 /* True if instruction buffer model is modeled for current CPU. */
6053 bool enabled_p;
6055 /* Size of the instruction buffer in words. */
6056 int size;
6058 /* Number of filled words in the instruction buffer. */
6059 int filled;
6061 /* Additional information about instruction buffer for CPUs that have
6062 a buffer of instruction records, rather then a plain buffer
6063 of instruction words. */
6064 struct _sched_ib_records
6066 /* Size of buffer in records. */
6067 int n_insns;
6069 /* Array to hold data on adjustments made to the size of the buffer. */
6070 int *adjust;
6072 /* Index of the above array. */
6073 int adjust_index;
6074 } records;
6076 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6077 rtx insn;
6080 static struct _sched_ib sched_ib;
6082 /* ID of memory unit. */
6083 static int sched_mem_unit_code;
6085 /* Implementation of the targetm.sched.variable_issue () hook.
6086 It is called after INSN was issued. It returns the number of insns
6087 that can possibly get scheduled on the current cycle.
6088 It is used here to determine the effect of INSN on the instruction
6089 buffer. */
6090 static int
6091 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6092 int sched_verbose ATTRIBUTE_UNUSED,
6093 rtx_insn *insn, int can_issue_more)
6095 int insn_size;
6097 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6099 switch (m68k_sched_cpu)
6101 case CPU_CFV1:
6102 case CPU_CFV2:
6103 insn_size = sched_get_attr_size_int (insn);
6104 break;
6106 case CPU_CFV3:
6107 insn_size = sched_get_attr_size_int (insn);
6109 /* ColdFire V3 and V4 cores have instruction buffers that can
6110 accumulate up to 8 instructions regardless of instructions'
6111 sizes. So we should take care not to "prefetch" 24 one-word
6112 or 12 two-words instructions.
6113 To model this behavior we temporarily decrease size of the
6114 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6116 int adjust;
6118 adjust = max_insn_size - insn_size;
6119 sched_ib.size -= adjust;
6121 if (sched_ib.filled > sched_ib.size)
6122 sched_ib.filled = sched_ib.size;
6124 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6127 ++sched_ib.records.adjust_index;
6128 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6129 sched_ib.records.adjust_index = 0;
6131 /* Undo adjustment we did 7 instructions ago. */
6132 sched_ib.size
6133 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6135 break;
6137 case CPU_CFV4:
6138 gcc_assert (!sched_ib.enabled_p);
6139 insn_size = 0;
6140 break;
6142 default:
6143 gcc_unreachable ();
6146 if (insn_size > sched_ib.filled)
6147 /* Scheduling for register pressure does not always take DFA into
6148 account. Workaround instruction buffer not being filled enough. */
6150 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6151 insn_size = sched_ib.filled;
6154 --can_issue_more;
6156 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6157 || asm_noperands (PATTERN (insn)) >= 0)
6158 insn_size = sched_ib.filled;
6159 else
6160 insn_size = 0;
6162 sched_ib.filled -= insn_size;
6164 return can_issue_more;
/* Return how many instructions the scheduler should look ahead to choose
   the best one — one less than the issue rate.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}
6175 /* Implementation of targetm.sched.init_global () hook.
6176 It is invoked once per scheduling pass and is used here
6177 to initialize scheduler constants. */
6178 static void
6179 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6180 int sched_verbose ATTRIBUTE_UNUSED,
6181 int n_insns ATTRIBUTE_UNUSED)
6183 /* Check that all instructions have DFA reservations and
6184 that all instructions can be issued from a clean state. */
6185 if (flag_checking)
6187 rtx_insn *insn;
6188 state_t state;
6190 state = alloca (state_size ());
6192 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6194 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6196 gcc_assert (insn_has_dfa_reservation_p (insn));
6198 state_reset (state);
6199 if (state_transition (state, insn) >= 0)
6200 gcc_unreachable ();
6205 /* Setup target cpu. */
6207 /* ColdFire V4 has a set of features to keep its instruction buffer full
6208 (e.g., a separate memory bus for instructions) and, hence, we do not model
6209 buffer for this CPU. */
6210 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6212 switch (m68k_sched_cpu)
6214 case CPU_CFV4:
6215 sched_ib.filled = 0;
6217 /* FALLTHRU */
6219 case CPU_CFV1:
6220 case CPU_CFV2:
6221 max_insn_size = 3;
6222 sched_ib.records.n_insns = 0;
6223 sched_ib.records.adjust = NULL;
6224 break;
6226 case CPU_CFV3:
6227 max_insn_size = 3;
6228 sched_ib.records.n_insns = 8;
6229 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6230 break;
6232 default:
6233 gcc_unreachable ();
6236 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6238 sched_adjust_cost_state = xmalloc (state_size ());
6239 state_reset (sched_adjust_cost_state);
6241 start_sequence ();
6242 emit_insn (gen_ib ());
6243 sched_ib.insn = get_insns ();
6244 end_sequence ();
6247 /* Scheduling pass is now finished. Free/reset static variables. */
6248 static void
6249 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6250 int verbose ATTRIBUTE_UNUSED)
6252 sched_ib.insn = NULL;
6254 free (sched_adjust_cost_state);
6255 sched_adjust_cost_state = NULL;
6257 sched_mem_unit_code = 0;
6259 free (sched_ib.records.adjust);
6260 sched_ib.records.adjust = NULL;
6261 sched_ib.records.n_insns = 0;
6262 max_insn_size = 0;
6265 /* Implementation of targetm.sched.init () hook.
6266 It is invoked each time scheduler starts on the new block (basic block or
6267 extended basic block). */
6268 static void
6269 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6270 int sched_verbose ATTRIBUTE_UNUSED,
6271 int n_insns ATTRIBUTE_UNUSED)
6273 switch (m68k_sched_cpu)
6275 case CPU_CFV1:
6276 case CPU_CFV2:
6277 sched_ib.size = 6;
6278 break;
6280 case CPU_CFV3:
6281 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6283 memset (sched_ib.records.adjust, 0,
6284 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6285 sched_ib.records.adjust_index = 0;
6286 break;
6288 case CPU_CFV4:
6289 gcc_assert (!sched_ib.enabled_p);
6290 sched_ib.size = 0;
6291 break;
6293 default:
6294 gcc_unreachable ();
6297 if (sched_ib.enabled_p)
6298 /* haifa-sched.c: schedule_block () calls advance_cycle () just before
6299 the first cycle. Workaround that. */
6300 sched_ib.filled = -2;
6303 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6304 It is invoked just before current cycle finishes and is used here
6305 to track if instruction buffer got its two words this cycle. */
6306 static void
6307 m68k_sched_dfa_pre_advance_cycle (void)
6309 if (!sched_ib.enabled_p)
6310 return;
6312 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6314 sched_ib.filled += 2;
6316 if (sched_ib.filled > sched_ib.size)
6317 sched_ib.filled = sched_ib.size;
6321 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6322 It is invoked just after new cycle begins and is used here
6323 to setup number of filled words in the instruction buffer so that
6324 instructions which won't have all their words prefetched would be
6325 stalled for a cycle. */
6326 static void
6327 m68k_sched_dfa_post_advance_cycle (void)
6329 int i;
6331 if (!sched_ib.enabled_p)
6332 return;
6334 /* Setup number of prefetched instruction words in the instruction
6335 buffer. */
6336 i = max_insn_size - sched_ib.filled;
6338 while (--i >= 0)
6340 if (state_transition (curr_state, sched_ib.insn) >= 0)
6341 /* Pick up scheduler state. */
6342 ++sched_ib.filled;
6346 /* Return X or Y (depending on OPX_P) operand of INSN,
6347 if it is an integer register, or NULL overwise. */
6348 static rtx
6349 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6351 rtx op = NULL;
6353 if (opx_p)
6355 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6357 op = sched_get_operand (insn, true);
6358 gcc_assert (op != NULL);
6360 if (!reload_completed && !REG_P (op))
6361 return NULL;
6364 else
6366 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6368 op = sched_get_operand (insn, false);
6369 gcc_assert (op != NULL);
6371 if (!reload_completed && !REG_P (op))
6372 return NULL;
6376 return op;
6379 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6380 is a MEM. */
6381 static bool
6382 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6384 switch (sched_get_opxy_mem_type (insn, opx_p))
6386 case OP_TYPE_MEM1:
6387 case OP_TYPE_MEM6:
6388 return true;
6390 default:
6391 return false;
6395 /* Return X or Y (depending on OPX_P) operand of INSN,
6396 if it is a MEM, or NULL overwise. */
6397 static rtx
6398 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6400 bool opx_p;
6401 bool opy_p;
6403 opx_p = false;
6404 opy_p = false;
6406 if (must_read_p)
6408 opx_p = true;
6409 opy_p = true;
6412 if (must_write_p)
6414 opx_p = true;
6415 opy_p = false;
6418 if (opy_p && sched_mem_operand_p (insn, false))
6419 return sched_get_operand (insn, false);
6421 if (opx_p && sched_mem_operand_p (insn, true))
6422 return sched_get_operand (insn, true);
6424 gcc_unreachable ();
6425 return NULL;
6428 /* Return non-zero if PRO modifies register used as part of
6429 address in CON. */
6431 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6433 rtx pro_x;
6434 rtx con_mem_read;
6436 pro_x = sched_get_reg_operand (pro, true);
6437 if (pro_x == NULL)
6438 return 0;
6440 con_mem_read = sched_get_mem_operand (con, true, false);
6441 gcc_assert (con_mem_read != NULL);
6443 if (reg_mentioned_p (pro_x, con_mem_read))
6444 return 1;
6446 return 0;
6449 /* Helper function for m68k_sched_indexed_address_bypass_p.
6450 if PRO modifies register used as index in CON,
6451 return scale of indexed memory access in CON. Return zero overwise. */
6452 static int
6453 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
6455 rtx reg;
6456 rtx mem;
6457 struct m68k_address address;
6459 reg = sched_get_reg_operand (pro, true);
6460 if (reg == NULL)
6461 return 0;
6463 mem = sched_get_mem_operand (con, true, false);
6464 gcc_assert (mem != NULL && MEM_P (mem));
6466 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6467 &address))
6468 gcc_unreachable ();
6470 if (REGNO (reg) == REGNO (address.index))
6472 gcc_assert (address.scale != 0);
6473 return address.scale;
6476 return 0;
6479 /* Return non-zero if PRO modifies register used
6480 as index with scale 2 or 4 in CON. */
6482 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6484 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6485 && sched_cfv4_bypass_data.con == NULL
6486 && sched_cfv4_bypass_data.scale == 0);
6488 switch (sched_get_indexed_address_scale (pro, con))
6490 case 1:
6491 /* We can't have a variable latency bypass, so
6492 remember to adjust the insn cost in adjust_cost hook. */
6493 sched_cfv4_bypass_data.pro = pro;
6494 sched_cfv4_bypass_data.con = con;
6495 sched_cfv4_bypass_data.scale = 1;
6496 return 0;
6498 case 2:
6499 case 4:
6500 return 1;
6502 default:
6503 return 0;
6507 /* We generate a two-instructions program at M_TRAMP :
6508 movea.l &CHAIN_VALUE,%a0
6509 jmp FNADDR
6510 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
6512 static void
6513 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
6515 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
6516 rtx mem;
6518 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
6520 mem = adjust_address (m_tramp, HImode, 0);
6521 emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
6522 mem = adjust_address (m_tramp, SImode, 2);
6523 emit_move_insn (mem, chain_value);
6525 mem = adjust_address (m_tramp, HImode, 6);
6526 emit_move_insn (mem, GEN_INT(0x4EF9));
6527 mem = adjust_address (m_tramp, SImode, 8);
6528 emit_move_insn (mem, fnaddr);
6530 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
6533 /* On the 68000, the RTS insn cannot pop anything.
6534 On the 68010, the RTD insn may be used to pop them if the number
6535 of args is fixed, but if the number is variable then the caller
6536 must pop them all. RTD can't be used for library calls now
6537 because the library is compiled with the Unix compiler.
6538 Use of RTD is a selectable option, since it is incompatible with
6539 standard Unix calling sequences. If the option is not selected,
6540 the caller must always pop the args. */
6542 static poly_int64
6543 m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
6545 return ((TARGET_RTD
6546 && (!fundecl
6547 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6548 && (!stdarg_p (funtype)))
6549 ? (HOST_WIDE_INT) size : 0);
6552 /* Make sure everything's fine if we *don't* have a given processor.
6553 This assumes that putting a register in fixed_regs will keep the
6554 compiler's mitts completely off it. We don't bother to zero it out
6555 of register classes. */
6557 static void
6558 m68k_conditional_register_usage (void)
6560 int i;
6561 HARD_REG_SET x;
6562 if (!TARGET_HARD_FLOAT)
6564 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6565 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6566 if (TEST_HARD_REG_BIT (x, i))
6567 fixed_regs[i] = call_used_regs[i] = 1;
6569 if (flag_pic)
6570 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
6573 static void
6574 m68k_init_sync_libfuncs (void)
6576 init_sync_libfuncs (UNITS_PER_WORD);
6579 /* Implements EPILOGUE_USES. All registers are live on exit from an
6580 interrupt routine. */
6581 bool
6582 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6584 return (reload_completed
6585 && (m68k_get_function_kind (current_function_decl)
6586 == m68k_fk_interrupt_handler));
6590 /* Implement TARGET_C_EXCESS_PRECISION.
6592 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
6593 instructions, we get proper intermediate rounding, otherwise we
6594 get extended precision results. */
6596 static enum flt_eval_method
6597 m68k_excess_precision (enum excess_precision_type type)
6599 switch (type)
6601 case EXCESS_PRECISION_TYPE_FAST:
6602 /* The fastest type to promote to will always be the native type,
6603 whether that occurs with implicit excess precision or
6604 otherwise. */
6605 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6606 case EXCESS_PRECISION_TYPE_STANDARD:
6607 case EXCESS_PRECISION_TYPE_IMPLICIT:
6608 /* Otherwise, the excess precision we want when we are
6609 in a standards compliant mode, and the implicit precision we
6610 provide can be identical. */
6611 if (TARGET_68040 || ! TARGET_68881)
6612 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
6614 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
6615 default:
6616 gcc_unreachable ();
6618 return FLT_EVAL_METHOD_UNPREDICTABLE;
6621 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
6622 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
6624 poly_int64
6625 m68k_push_rounding (poly_int64 bytes)
6627 if (TARGET_COLDFIRE)
6628 return bytes;
6629 return (bytes + 1) & ~1;
6632 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
6634 static machine_mode
6635 m68k_promote_function_mode (const_tree type, machine_mode mode,
6636 int *punsignedp ATTRIBUTE_UNUSED,
6637 const_tree fntype ATTRIBUTE_UNUSED,
6638 int for_return)
6640 /* Promote libcall arguments narrower than int to match the normal C
6641 ABI (for which promotions are handled via
6642 TARGET_PROMOTE_PROTOTYPES). */
6643 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode))
6644 return SImode;
6645 return mode;
6648 #include "gt-m68k.h"