* config/m68k/m68k.c (notice_update_cc): Handle register conflict
[official-gcc.git] / gcc / config / m68k / m68k.c
blob7035504bfe3c03112f7c9b82f83ac3a05ac899f2
1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
#include "df.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "ggc.h"
#include "opts.h"
#include "optabs.h"
50 enum reg_class regno_reg_class[] =
52 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 ADDR_REGS
/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1
72 /* Structure describing stack frame layout. */
73 struct m68k_frame
75 /* Stack pointer to frame pointer offset. */
76 HOST_WIDE_INT offset;
78 /* Offset of FPU registers. */
79 HOST_WIDE_INT foffset;
81 /* Frame size in bytes (rounded up). */
82 HOST_WIDE_INT size;
84 /* Data and address register. */
85 int reg_no;
86 unsigned int reg_mask;
88 /* FPU registers. */
89 int fpu_no;
90 unsigned int fpu_mask;
92 /* Offsets relative to ARG_POINTER. */
93 HOST_WIDE_INT frame_pointer_offset;
94 HOST_WIDE_INT stack_pointer_offset;
96 /* Function which the above information refers to. */
97 int funcdef_no;
100 /* Current frame information calculated by m68k_compute_frame_layout(). */
101 static struct m68k_frame current_frame;
103 /* Structure describing an m68k address.
105 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
106 with null fields evaluating to 0. Here:
108 - BASE satisfies m68k_legitimate_base_reg_p
109 - INDEX satisfies m68k_legitimate_index_reg_p
110 - OFFSET satisfies m68k_legitimate_constant_address_p
112 INDEX is either HImode or SImode. The other fields are SImode.
114 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
115 the address is (BASE)+. */
116 struct m68k_address {
117 enum rtx_code code;
118 rtx base;
119 rtx index;
120 rtx offset;
121 int scale;
124 static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
125 static int m68k_sched_issue_rate (void);
126 static int m68k_sched_variable_issue (FILE *, int, rtx, int);
127 static void m68k_sched_md_init_global (FILE *, int, int);
128 static void m68k_sched_md_finish_global (FILE *, int);
129 static void m68k_sched_md_init (FILE *, int, int);
130 static void m68k_sched_dfa_pre_advance_cycle (void);
131 static void m68k_sched_dfa_post_advance_cycle (void);
132 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
134 static bool m68k_can_eliminate (const int, const int);
135 static void m68k_conditional_register_usage (void);
136 static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
137 static void m68k_option_override (void);
138 static void m68k_override_options_after_change (void);
139 static rtx find_addr_reg (rtx);
140 static const char *singlemove_string (rtx *);
141 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
142 HOST_WIDE_INT, tree);
143 static rtx m68k_struct_value_rtx (tree, int);
144 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
145 tree args, int flags,
146 bool *no_add_attrs);
147 static void m68k_compute_frame_layout (void);
148 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
149 static bool m68k_ok_for_sibcall_p (tree, tree);
150 static bool m68k_tls_symbol_p (rtx);
151 static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
152 static bool m68k_rtx_costs (rtx, int, int, int, int *, bool);
153 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
154 static bool m68k_return_in_memory (const_tree, const_tree);
155 #endif
156 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
157 static void m68k_trampoline_init (rtx, tree, rtx);
158 static int m68k_return_pops_args (tree, tree, int);
159 static rtx m68k_delegitimize_address (rtx);
160 static void m68k_function_arg_advance (cumulative_args_t, enum machine_mode,
161 const_tree, bool);
162 static rtx m68k_function_arg (cumulative_args_t, enum machine_mode,
163 const_tree, bool);
164 static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
165 static bool m68k_output_addr_const_extra (FILE *, rtx);
166 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD	\
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
308 static const struct attribute_spec m68k_attribute_table[] =
310 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
311 affects_type_identity } */
312 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute,
313 false },
314 { "interrupt_handler", 0, 0, true, false, false,
315 m68k_handle_fndecl_attribute, false },
316 { "interrupt_thread", 0, 0, true, false, false,
317 m68k_handle_fndecl_attribute, false },
318 { NULL, 0, 0, false, false, false, NULL, false }
321 struct gcc_target targetm = TARGET_INITIALIZER;
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};
358 /* Information about one of the -march, -mcpu or -mtune arguments. */
359 struct m68k_target_selection
361 /* The argument being described. */
362 const char *name;
364 /* For -mcpu, this is the device selected by the option.
365 For -mtune and -march, it is a representative device
366 for the microarchitecture or ISA respectively. */
367 enum target_device device;
369 /* The M68K_DEVICE fields associated with DEVICE. See the comment
370 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
371 const char *family;
372 enum uarch_type microarch;
373 enum m68k_isa isa;
374 unsigned long flags;
377 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
378 static const struct m68k_target_selection all_devices[] =
380 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
381 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
382 #include "m68k-devices.def"
383 #undef M68K_DEVICE
384 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
387 /* A list of all ISAs, mapping each one to a representative device.
388 Used for -march selection. */
389 static const struct m68k_target_selection all_isas[] =
391 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
392 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
393 #include "m68k-isas.def"
394 #undef M68K_ISA
395 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
398 /* A list of all microarchitectures, mapping each one to a representative
399 device. Used for -mtune selection. */
400 static const struct m68k_target_selection all_microarchs[] =
402 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
403 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
404 #include "m68k-microarchs.def"
405 #undef M68K_MICROARCH
406 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
409 /* The entries associated with the -mcpu, -march and -mtune settings,
410 or null for options that have not been used. */
411 const struct m68k_target_selection *m68k_cpu_entry;
412 const struct m68k_target_selection *m68k_arch_entry;
413 const struct m68k_target_selection *m68k_tune_entry;
415 /* Which CPU we are generating code for. */
416 enum target_device m68k_cpu;
418 /* Which microarchitecture to tune for. */
419 enum uarch_type m68k_tune;
421 /* Which FPU to use. */
422 enum fpu_type m68k_fpu;
424 /* The set of FL_* flags that apply to the target processor. */
425 unsigned int m68k_cpu_flags;
427 /* The set of FL_* flags that apply to the processor to be tuned for. */
428 unsigned int m68k_tune_flags;
430 /* Asm templates for calling or jumping to an arbitrary symbolic address,
431 or NULL if such calls or jumps are not supported. The address is held
432 in operand 0. */
433 const char *m68k_symbolic_call;
434 const char *m68k_symbolic_jump;
436 /* Enum variable that corresponds to m68k_symbolic_call values. */
437 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
440 /* Implement TARGET_OPTION_OVERRIDE. */
442 static void
443 m68k_option_override (void)
445 const struct m68k_target_selection *entry;
446 unsigned long target_mask;
448 if (global_options_set.x_m68k_arch_option)
449 m68k_arch_entry = &all_isas[m68k_arch_option];
451 if (global_options_set.x_m68k_cpu_option)
452 m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];
454 if (global_options_set.x_m68k_tune_option)
455 m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];
457 /* User can choose:
459 -mcpu=
460 -march=
461 -mtune=
463 -march=ARCH should generate code that runs any processor
464 implementing architecture ARCH. -mcpu=CPU should override -march
465 and should generate code that runs on processor CPU, making free
466 use of any instructions that CPU understands. -mtune=UARCH applies
467 on top of -mcpu or -march and optimizes the code for UARCH. It does
468 not change the target architecture. */
469 if (m68k_cpu_entry)
471 /* Complain if the -march setting is for a different microarchitecture,
472 or includes flags that the -mcpu setting doesn't. */
473 if (m68k_arch_entry
474 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
475 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
476 warning (0, "-mcpu=%s conflicts with -march=%s",
477 m68k_cpu_entry->name, m68k_arch_entry->name);
479 entry = m68k_cpu_entry;
481 else
482 entry = m68k_arch_entry;
484 if (!entry)
485 entry = all_devices + TARGET_CPU_DEFAULT;
487 m68k_cpu_flags = entry->flags;
489 /* Use the architecture setting to derive default values for
490 certain flags. */
491 target_mask = 0;
493 /* ColdFire is lenient about alignment. */
494 if (!TARGET_COLDFIRE)
495 target_mask |= MASK_STRICT_ALIGNMENT;
497 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
498 target_mask |= MASK_BITFIELD;
499 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
500 target_mask |= MASK_CF_HWDIV;
501 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
502 target_mask |= MASK_HARD_FLOAT;
503 target_flags |= target_mask & ~target_flags_explicit;
505 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
506 m68k_cpu = entry->device;
507 if (m68k_tune_entry)
509 m68k_tune = m68k_tune_entry->microarch;
510 m68k_tune_flags = m68k_tune_entry->flags;
512 #ifdef M68K_DEFAULT_TUNE
513 else if (!m68k_cpu_entry && !m68k_arch_entry)
515 enum target_device dev;
516 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
517 m68k_tune_flags = all_devices[dev]->flags;
519 #endif
520 else
522 m68k_tune = entry->microarch;
523 m68k_tune_flags = entry->flags;
526 /* Set the type of FPU. */
527 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
528 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
529 : FPUTYPE_68881);
531 /* Sanity check to ensure that msep-data and mid-sahred-library are not
532 * both specified together. Doing so simply doesn't make sense.
534 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
535 error ("cannot specify both -msep-data and -mid-shared-library");
537 /* If we're generating code for a separate A5 relative data segment,
538 * we've got to enable -fPIC as well. This might be relaxable to
539 * -fpic but it hasn't been tested properly.
541 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
542 flag_pic = 2;
544 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
545 error if the target does not support them. */
546 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
547 error ("-mpcrel -fPIC is not currently supported on selected cpu");
549 /* ??? A historic way of turning on pic, or is this intended to
550 be an embedded thing that doesn't have the same name binding
551 significance that it does on hosted ELF systems? */
552 if (TARGET_PCREL && flag_pic == 0)
553 flag_pic = 1;
555 if (!flag_pic)
557 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
559 m68k_symbolic_jump = "jra %a0";
561 else if (TARGET_ID_SHARED_LIBRARY)
562 /* All addresses must be loaded from the GOT. */
564 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
566 if (TARGET_PCREL)
567 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
568 else
569 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
571 if (TARGET_ISAC)
572 /* No unconditional long branch */;
573 else if (TARGET_PCREL)
574 m68k_symbolic_jump = "bra%.l %c0";
575 else
576 m68k_symbolic_jump = "bra%.l %p0";
577 /* Turn off function cse if we are doing PIC. We always want
578 function call to be done as `bsr foo@PLTPC'. */
579 /* ??? It's traditional to do this for -mpcrel too, but it isn't
580 clear how intentional that is. */
581 flag_no_function_cse = 1;
584 switch (m68k_symbolic_call_var)
586 case M68K_SYMBOLIC_CALL_JSR:
587 m68k_symbolic_call = "jsr %a0";
588 break;
590 case M68K_SYMBOLIC_CALL_BSR_C:
591 m68k_symbolic_call = "bsr%.l %c0";
592 break;
594 case M68K_SYMBOLIC_CALL_BSR_P:
595 m68k_symbolic_call = "bsr%.l %p0";
596 break;
598 case M68K_SYMBOLIC_CALL_NONE:
599 gcc_assert (m68k_symbolic_call == NULL);
600 break;
602 default:
603 gcc_unreachable ();
606 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
607 if (align_labels > 2)
609 warning (0, "-falign-labels=%d is not supported", align_labels);
610 align_labels = 0;
612 if (align_loops > 2)
614 warning (0, "-falign-loops=%d is not supported", align_loops);
615 align_loops = 0;
617 #endif
619 if (stack_limit_rtx != NULL_RTX && !TARGET_68020)
621 warning (0, "-fstack-limit- options are not supported on this cpu");
622 stack_limit_rtx = NULL_RTX;
625 SUBTARGET_OVERRIDE_OPTIONS;
627 /* Setup scheduling options. */
628 if (TUNE_CFV1)
629 m68k_sched_cpu = CPU_CFV1;
630 else if (TUNE_CFV2)
631 m68k_sched_cpu = CPU_CFV2;
632 else if (TUNE_CFV3)
633 m68k_sched_cpu = CPU_CFV3;
634 else if (TUNE_CFV4)
635 m68k_sched_cpu = CPU_CFV4;
636 else
638 m68k_sched_cpu = CPU_UNKNOWN;
639 flag_schedule_insns = 0;
640 flag_schedule_insns_after_reload = 0;
641 flag_modulo_sched = 0;
644 if (m68k_sched_cpu != CPU_UNKNOWN)
646 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
647 m68k_sched_mac = MAC_CF_EMAC;
648 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
649 m68k_sched_mac = MAC_CF_MAC;
650 else
651 m68k_sched_mac = MAC_NO;
655 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
657 static void
658 m68k_override_options_after_change (void)
660 if (m68k_sched_cpu == CPU_UNKNOWN)
662 flag_schedule_insns = 0;
663 flag_schedule_insns_after_reload = 0;
664 flag_modulo_sched = 0;
668 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
669 given argument and NAME is the argument passed to -mcpu. Return NULL
670 if -mcpu was not passed. */
672 const char *
673 m68k_cpp_cpu_ident (const char *prefix)
675 if (!m68k_cpu_entry)
676 return NULL;
677 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
680 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
681 given argument and NAME is the name of the representative device for
682 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
684 const char *
685 m68k_cpp_cpu_family (const char *prefix)
687 if (!m68k_cpu_entry)
688 return NULL;
689 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
692 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
693 "interrupt_handler" attribute and interrupt_thread if FUNC has an
694 "interrupt_thread" attribute. Otherwise, return
695 m68k_fk_normal_function. */
697 enum m68k_function_kind
698 m68k_get_function_kind (tree func)
700 tree a;
702 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
704 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
705 if (a != NULL_TREE)
706 return m68k_fk_interrupt_handler;
708 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
709 if (a != NULL_TREE)
710 return m68k_fk_interrupt_handler;
712 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
713 if (a != NULL_TREE)
714 return m68k_fk_interrupt_thread;
716 return m68k_fk_normal_function;
719 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
720 struct attribute_spec.handler. */
721 static tree
722 m68k_handle_fndecl_attribute (tree *node, tree name,
723 tree args ATTRIBUTE_UNUSED,
724 int flags ATTRIBUTE_UNUSED,
725 bool *no_add_attrs)
727 if (TREE_CODE (*node) != FUNCTION_DECL)
729 warning (OPT_Wattributes, "%qE attribute only applies to functions",
730 name);
731 *no_add_attrs = true;
734 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
736 error ("multiple interrupt attributes not allowed");
737 *no_add_attrs = true;
740 if (!TARGET_FIDOA
741 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
743 error ("interrupt_thread is available only on fido");
744 *no_add_attrs = true;
747 return NULL_TREE;
750 static void
751 m68k_compute_frame_layout (void)
753 int regno, saved;
754 unsigned int mask;
755 enum m68k_function_kind func_kind =
756 m68k_get_function_kind (current_function_decl);
757 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
758 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
760 /* Only compute the frame once per function.
761 Don't cache information until reload has been completed. */
762 if (current_frame.funcdef_no == current_function_funcdef_no
763 && reload_completed)
764 return;
766 current_frame.size = (get_frame_size () + 3) & -4;
768 mask = saved = 0;
770 /* Interrupt thread does not need to save any register. */
771 if (!interrupt_thread)
772 for (regno = 0; regno < 16; regno++)
773 if (m68k_save_reg (regno, interrupt_handler))
775 mask |= 1 << (regno - D0_REG);
776 saved++;
778 current_frame.offset = saved * 4;
779 current_frame.reg_no = saved;
780 current_frame.reg_mask = mask;
782 current_frame.foffset = 0;
783 mask = saved = 0;
784 if (TARGET_HARD_FLOAT)
786 /* Interrupt thread does not need to save any register. */
787 if (!interrupt_thread)
788 for (regno = 16; regno < 24; regno++)
789 if (m68k_save_reg (regno, interrupt_handler))
791 mask |= 1 << (regno - FP0_REG);
792 saved++;
794 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
795 current_frame.offset += current_frame.foffset;
797 current_frame.fpu_no = saved;
798 current_frame.fpu_mask = mask;
800 /* Remember what function this frame refers to. */
801 current_frame.funcdef_no = current_function_funcdef_no;
804 /* Worker function for TARGET_CAN_ELIMINATE. */
806 bool
807 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
809 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
812 HOST_WIDE_INT
813 m68k_initial_elimination_offset (int from, int to)
815 int argptr_offset;
816 /* The arg pointer points 8 bytes before the start of the arguments,
817 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
818 frame pointer in most frames. */
819 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
820 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
821 return argptr_offset;
823 m68k_compute_frame_layout ();
825 gcc_assert (to == STACK_POINTER_REGNUM);
826 switch (from)
828 case ARG_POINTER_REGNUM:
829 return current_frame.offset + current_frame.size - argptr_offset;
830 case FRAME_POINTER_REGNUM:
831 return current_frame.offset + current_frame.size;
832 default:
833 gcc_unreachable ();
837 /* Refer to the array `regs_ever_live' to determine which registers
838 to save; `regs_ever_live[I]' is nonzero if register number I
839 is ever used in the function. This function is responsible for
840 knowing which registers should not be saved even if used.
841 Return true if we need to save REGNO. */
843 static bool
844 m68k_save_reg (unsigned int regno, bool interrupt_handler)
846 if (flag_pic && regno == PIC_REG)
848 if (crtl->saves_all_registers)
849 return true;
850 if (crtl->uses_pic_offset_table)
851 return true;
852 /* Reload may introduce constant pool references into a function
853 that thitherto didn't need a PIC register. Note that the test
854 above will not catch that case because we will only set
855 crtl->uses_pic_offset_table when emitting
856 the address reloads. */
857 if (crtl->uses_const_pool)
858 return true;
861 if (crtl->calls_eh_return)
863 unsigned int i;
864 for (i = 0; ; i++)
866 unsigned int test = EH_RETURN_DATA_REGNO (i);
867 if (test == INVALID_REGNUM)
868 break;
869 if (test == regno)
870 return true;
874 /* Fixed regs we never touch. */
875 if (fixed_regs[regno])
876 return false;
878 /* The frame pointer (if it is such) is handled specially. */
879 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
880 return false;
882 /* Interrupt handlers must also save call_used_regs
883 if they are live or when calling nested functions. */
884 if (interrupt_handler)
886 if (df_regs_ever_live_p (regno))
887 return true;
889 if (!crtl->is_leaf && call_used_regs[regno])
890 return true;
893 /* Never need to save registers that aren't touched. */
894 if (!df_regs_ever_live_p (regno))
895 return false;
897 /* Otherwise save everything that isn't call-clobbered. */
898 return !call_used_regs[regno];
901 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
902 the lowest memory address. COUNT is the number of registers to be
903 moved, with register REGNO + I being moved if bit I of MASK is set.
904 STORE_P specifies the direction of the move and ADJUST_STACK_P says
905 whether or not this is pre-decrement (if STORE_P) or post-increment
906 (if !STORE_P) operation. */
908 static rtx
909 m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
910 unsigned int count, unsigned int regno,
911 unsigned int mask, bool store_p, bool adjust_stack_p)
913 int i;
914 rtx body, addr, src, operands[2];
915 enum machine_mode mode;
917 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
918 mode = reg_raw_mode[regno];
919 i = 0;
921 if (adjust_stack_p)
923 src = plus_constant (Pmode, base,
924 (count
925 * GET_MODE_SIZE (mode)
926 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
927 XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
930 for (; mask != 0; mask >>= 1, regno++)
931 if (mask & 1)
933 addr = plus_constant (Pmode, base, offset);
934 operands[!store_p] = gen_frame_mem (mode, addr);
935 operands[store_p] = gen_rtx_REG (mode, regno);
936 XVECEXP (body, 0, i++)
937 = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
938 offset += GET_MODE_SIZE (mode);
940 gcc_assert (i == XVECLEN (body, 0));
942 return emit_insn (body);
945 /* Make INSN a frame-related instruction. */
947 static void
948 m68k_set_frame_related (rtx insn)
950 rtx body;
951 int i;
953 RTX_FRAME_RELATED_P (insn) = 1;
954 body = PATTERN (insn);
955 if (GET_CODE (body) == PARALLEL)
956 for (i = 0; i < XVECLEN (body, 0); i++)
957 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
960 /* Emit RTL for the "prologue" define_expand. */
962 void
963 m68k_expand_prologue (void)
965 HOST_WIDE_INT fsize_with_regs;
966 rtx limit, src, dest;
968 m68k_compute_frame_layout ();
970 if (flag_stack_usage_info)
971 current_function_static_stack_size
972 = current_frame.size + current_frame.offset;
974 /* If the stack limit is a symbol, we can check it here,
975 before actually allocating the space. */
976 if (crtl->limit_stack
977 && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
979 limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
980 if (!m68k_legitimate_constant_p (Pmode, limit))
982 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
983 limit = gen_rtx_REG (Pmode, D0_REG);
985 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
986 stack_pointer_rtx, limit),
987 stack_pointer_rtx, limit,
988 const1_rtx));
991 fsize_with_regs = current_frame.size;
992 if (TARGET_COLDFIRE)
994 /* ColdFire's move multiple instructions do not allow pre-decrement
995 addressing. Add the size of movem saves to the initial stack
996 allocation instead. */
997 if (current_frame.reg_no >= MIN_MOVEM_REGS)
998 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
999 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1000 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
1003 if (frame_pointer_needed)
1005 if (fsize_with_regs == 0 && TUNE_68040)
1007 /* On the 68040, two separate moves are faster than link.w 0. */
1008 dest = gen_frame_mem (Pmode,
1009 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1010 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
1011 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
1012 stack_pointer_rtx));
1014 else if (fsize_with_regs < 0x8000 || TARGET_68020)
1015 m68k_set_frame_related
1016 (emit_insn (gen_link (frame_pointer_rtx,
1017 GEN_INT (-4 - fsize_with_regs))));
1018 else
1020 m68k_set_frame_related
1021 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
1022 m68k_set_frame_related
1023 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1024 stack_pointer_rtx,
1025 GEN_INT (-fsize_with_regs))));
1028 /* If the frame pointer is needed, emit a special barrier that
1029 will prevent the scheduler from moving stores to the frame
1030 before the stack adjustment. */
1031 emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
1033 else if (fsize_with_regs != 0)
1034 m68k_set_frame_related
1035 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1036 stack_pointer_rtx,
1037 GEN_INT (-fsize_with_regs))));
1039 if (current_frame.fpu_mask)
1041 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
1042 if (TARGET_68881)
1043 m68k_set_frame_related
1044 (m68k_emit_movem (stack_pointer_rtx,
1045 current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
1046 current_frame.fpu_no, FP0_REG,
1047 current_frame.fpu_mask, true, true));
1048 else
1050 int offset;
1052 /* If we're using moveml to save the integer registers,
1053 the stack pointer will point to the bottom of the moveml
1054 save area. Find the stack offset of the first FP register. */
1055 if (current_frame.reg_no < MIN_MOVEM_REGS)
1056 offset = 0;
1057 else
1058 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1059 m68k_set_frame_related
1060 (m68k_emit_movem (stack_pointer_rtx, offset,
1061 current_frame.fpu_no, FP0_REG,
1062 current_frame.fpu_mask, true, false));
1066 /* If the stack limit is not a symbol, check it here.
1067 This has the disadvantage that it may be too late... */
1068 if (crtl->limit_stack)
1070 if (REG_P (stack_limit_rtx))
1071 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
1072 stack_limit_rtx),
1073 stack_pointer_rtx, stack_limit_rtx,
1074 const1_rtx));
1076 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
1077 warning (0, "stack limit expression is not supported");
1080 if (current_frame.reg_no < MIN_MOVEM_REGS)
1082 /* Store each register separately in the same order moveml does. */
1083 int i;
1085 for (i = 16; i-- > 0; )
1086 if (current_frame.reg_mask & (1 << i))
1088 src = gen_rtx_REG (SImode, D0_REG + i);
1089 dest = gen_frame_mem (SImode,
1090 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1091 m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
1094 else
1096 if (TARGET_COLDFIRE)
1097 /* The required register save space has already been allocated.
1098 The first register should be stored at (%sp). */
1099 m68k_set_frame_related
1100 (m68k_emit_movem (stack_pointer_rtx, 0,
1101 current_frame.reg_no, D0_REG,
1102 current_frame.reg_mask, true, false));
1103 else
1104 m68k_set_frame_related
1105 (m68k_emit_movem (stack_pointer_rtx,
1106 current_frame.reg_no * -GET_MODE_SIZE (SImode),
1107 current_frame.reg_no, D0_REG,
1108 current_frame.reg_mask, true, true));
1111 if (!TARGET_SEP_DATA
1112 && crtl->uses_pic_offset_table)
1113 emit_insn (gen_load_got (pic_offset_table_rtx));
1116 /* Return true if a simple (return) instruction is sufficient for this
1117 instruction (i.e. if no epilogue is needed). */
1119 bool
1120 m68k_use_return_insn (void)
1122 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1123 return false;
1125 m68k_compute_frame_layout ();
1126 return current_frame.offset == 0;
1129 /* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1130 SIBCALL_P says which.
1132 The function epilogue should not depend on the current stack pointer!
1133 It should use the frame pointer only, if there is a frame pointer.
1134 This is mandatory because of alloca; we also take advantage of it to
1135 omit stack adjustments before returning. */
1137 void
1138 m68k_expand_epilogue (bool sibcall_p)
1140 HOST_WIDE_INT fsize, fsize_with_regs;
1141 bool big, restore_from_sp;
1143 m68k_compute_frame_layout ();
1145 fsize = current_frame.size;
1146 big = false;
1147 restore_from_sp = false;
1149 /* FIXME : crtl->is_leaf below is too strong.
1150 What we really need to know there is if there could be pending
1151 stack adjustment needed at that point. */
1152 restore_from_sp = (!frame_pointer_needed
1153 || (!cfun->calls_alloca && crtl->is_leaf));
1155 /* fsize_with_regs is the size we need to adjust the sp when
1156 popping the frame. */
1157 fsize_with_regs = fsize;
1158 if (TARGET_COLDFIRE && restore_from_sp)
1160 /* ColdFire's move multiple instructions do not allow post-increment
1161 addressing. Add the size of movem loads to the final deallocation
1162 instead. */
1163 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1164 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1165 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1166 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
1169 if (current_frame.offset + fsize >= 0x8000
1170 && !restore_from_sp
1171 && (current_frame.reg_mask || current_frame.fpu_mask))
1173 if (TARGET_COLDFIRE
1174 && (current_frame.reg_no >= MIN_MOVEM_REGS
1175 || current_frame.fpu_no >= MIN_FMOVEM_REGS))
1177 /* ColdFire's move multiple instructions do not support the
1178 (d8,Ax,Xi) addressing mode, so we're as well using a normal
1179 stack-based restore. */
1180 emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
1181 GEN_INT (-(current_frame.offset + fsize)));
1182 emit_insn (gen_addsi3 (stack_pointer_rtx,
1183 gen_rtx_REG (Pmode, A1_REG),
1184 frame_pointer_rtx));
1185 restore_from_sp = true;
1187 else
1189 emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
1190 fsize = 0;
1191 big = true;
1195 if (current_frame.reg_no < MIN_MOVEM_REGS)
1197 /* Restore each register separately in the same order moveml does. */
1198 int i;
1199 HOST_WIDE_INT offset;
1201 offset = current_frame.offset + fsize;
1202 for (i = 0; i < 16; i++)
1203 if (current_frame.reg_mask & (1 << i))
1205 rtx addr;
1207 if (big)
1209 /* Generate the address -OFFSET(%fp,%a1.l). */
1210 addr = gen_rtx_REG (Pmode, A1_REG);
1211 addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
1212 addr = plus_constant (Pmode, addr, -offset);
1214 else if (restore_from_sp)
1215 addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
1216 else
1217 addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
1218 emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
1219 gen_frame_mem (SImode, addr));
1220 offset -= GET_MODE_SIZE (SImode);
1223 else if (current_frame.reg_mask)
1225 if (big)
1226 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1227 gen_rtx_REG (Pmode, A1_REG),
1228 frame_pointer_rtx),
1229 -(current_frame.offset + fsize),
1230 current_frame.reg_no, D0_REG,
1231 current_frame.reg_mask, false, false);
1232 else if (restore_from_sp)
1233 m68k_emit_movem (stack_pointer_rtx, 0,
1234 current_frame.reg_no, D0_REG,
1235 current_frame.reg_mask, false,
1236 !TARGET_COLDFIRE);
1237 else
1238 m68k_emit_movem (frame_pointer_rtx,
1239 -(current_frame.offset + fsize),
1240 current_frame.reg_no, D0_REG,
1241 current_frame.reg_mask, false, false);
1244 if (current_frame.fpu_no > 0)
1246 if (big)
1247 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1248 gen_rtx_REG (Pmode, A1_REG),
1249 frame_pointer_rtx),
1250 -(current_frame.foffset + fsize),
1251 current_frame.fpu_no, FP0_REG,
1252 current_frame.fpu_mask, false, false);
1253 else if (restore_from_sp)
1255 if (TARGET_COLDFIRE)
1257 int offset;
1259 /* If we used moveml to restore the integer registers, the
1260 stack pointer will still point to the bottom of the moveml
1261 save area. Find the stack offset of the first FP
1262 register. */
1263 if (current_frame.reg_no < MIN_MOVEM_REGS)
1264 offset = 0;
1265 else
1266 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1267 m68k_emit_movem (stack_pointer_rtx, offset,
1268 current_frame.fpu_no, FP0_REG,
1269 current_frame.fpu_mask, false, false);
1271 else
1272 m68k_emit_movem (stack_pointer_rtx, 0,
1273 current_frame.fpu_no, FP0_REG,
1274 current_frame.fpu_mask, false, true);
1276 else
1277 m68k_emit_movem (frame_pointer_rtx,
1278 -(current_frame.foffset + fsize),
1279 current_frame.fpu_no, FP0_REG,
1280 current_frame.fpu_mask, false, false);
1283 if (frame_pointer_needed)
1284 emit_insn (gen_unlink (frame_pointer_rtx));
1285 else if (fsize_with_regs)
1286 emit_insn (gen_addsi3 (stack_pointer_rtx,
1287 stack_pointer_rtx,
1288 GEN_INT (fsize_with_regs)));
1290 if (crtl->calls_eh_return)
1291 emit_insn (gen_addsi3 (stack_pointer_rtx,
1292 stack_pointer_rtx,
1293 EH_RETURN_STACKADJ_RTX));
1295 if (!sibcall_p)
1296 emit_jump_insn (ret_rtx);
1299 /* Return true if X is a valid comparison operator for the dbcc
1300 instruction.
1302 Note it rejects floating point comparison operators.
1303 (In the future we could use Fdbcc).
1305 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
1308 valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
1310 switch (GET_CODE (x))
1312 case EQ: case NE: case GTU: case LTU:
1313 case GEU: case LEU:
1314 return 1;
1316 /* Reject some when CC_NO_OVERFLOW is set. This may be over
1317 conservative */
1318 case GT: case LT: case GE: case LE:
1319 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1320 default:
1321 return 0;
1325 /* Return nonzero if flags are currently in the 68881 flag register. */
1327 flags_in_68881 (void)
1329 /* We could add support for these in the future */
1330 return cc_status.flags & CC_IN_68881;
1333 /* Return true if PARALLEL contains register REGNO. */
1334 static bool
1335 m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1337 int i;
1339 if (REG_P (parallel) && REGNO (parallel) == regno)
1340 return true;
1342 if (GET_CODE (parallel) != PARALLEL)
1343 return false;
1345 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1347 const_rtx x;
1349 x = XEXP (XVECEXP (parallel, 0, i), 0);
1350 if (REG_P (x) && REGNO (x) == regno)
1351 return true;
1354 return false;
1357 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
1359 static bool
1360 m68k_ok_for_sibcall_p (tree decl, tree exp)
1362 enum m68k_function_kind kind;
1364 /* We cannot use sibcalls for nested functions because we use the
1365 static chain register for indirect calls. */
1366 if (CALL_EXPR_STATIC_CHAIN (exp))
1367 return false;
1369 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1371 /* Check that the return value locations are the same. For
1372 example that we aren't returning a value from the sibling in
1373 a D0 register but then need to transfer it to a A0 register. */
1374 rtx cfun_value;
1375 rtx call_value;
1377 cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
1378 cfun->decl);
1379 call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);
1381 /* Check that the values are equal or that the result the callee
1382 function returns is superset of what the current function returns. */
1383 if (!(rtx_equal_p (cfun_value, call_value)
1384 || (REG_P (cfun_value)
1385 && m68k_reg_present_p (call_value, REGNO (cfun_value)))))
1386 return false;
1389 kind = m68k_get_function_kind (current_function_decl);
1390 if (kind == m68k_fk_normal_function)
1391 /* We can always sibcall from a normal function, because it's
1392 undefined if it is calling an interrupt function. */
1393 return true;
1395 /* Otherwise we can only sibcall if the function kind is known to be
1396 the same. */
1397 if (decl && m68k_get_function_kind (decl) == kind)
1398 return true;
1400 return false;
1403 /* On the m68k all args are always pushed. */
1405 static rtx
1406 m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
1407 enum machine_mode mode ATTRIBUTE_UNUSED,
1408 const_tree type ATTRIBUTE_UNUSED,
1409 bool named ATTRIBUTE_UNUSED)
1411 return NULL_RTX;
1414 static void
1415 m68k_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
1416 const_tree type, bool named ATTRIBUTE_UNUSED)
1418 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1420 *cum += (mode != BLKmode
1421 ? (GET_MODE_SIZE (mode) + 3) & ~3
1422 : (int_size_in_bytes (type) + 3) & ~3);
1425 /* Convert X to a legitimate function call memory reference and return the
1426 result. */
1429 m68k_legitimize_call_address (rtx x)
1431 gcc_assert (MEM_P (x));
1432 if (call_operand (XEXP (x, 0), VOIDmode))
1433 return x;
1434 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1437 /* Likewise for sibling calls. */
1440 m68k_legitimize_sibcall_address (rtx x)
1442 gcc_assert (MEM_P (x));
1443 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1444 return x;
1446 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1447 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1450 /* Convert X to a legitimate address and return it if successful. Otherwise
1451 return X.
1453 For the 68000, we handle X+REG by loading X into a register R and
1454 using R+REG. R will go in an address reg and indexing will be used.
1455 However, if REG is a broken-out memory address or multiplication,
1456 nothing needs to be done because REG can certainly go in an address reg. */
1458 static rtx
1459 m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
1461 if (m68k_tls_symbol_p (x))
1462 return m68k_legitimize_tls_address (x);
1464 if (GET_CODE (x) == PLUS)
1466 int ch = (x) != (oldx);
1467 int copied = 0;
1469 #define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
1471 if (GET_CODE (XEXP (x, 0)) == MULT)
1473 COPY_ONCE (x);
1474 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
1476 if (GET_CODE (XEXP (x, 1)) == MULT)
1478 COPY_ONCE (x);
1479 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
1481 if (ch)
1483 if (GET_CODE (XEXP (x, 1)) == REG
1484 && GET_CODE (XEXP (x, 0)) == REG)
1486 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1488 COPY_ONCE (x);
1489 x = force_operand (x, 0);
1491 return x;
1493 if (memory_address_p (mode, x))
1494 return x;
1496 if (GET_CODE (XEXP (x, 0)) == REG
1497 || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
1498 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1499 && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
1501 rtx temp = gen_reg_rtx (Pmode);
1502 rtx val = force_operand (XEXP (x, 1), 0);
1503 emit_move_insn (temp, val);
1504 COPY_ONCE (x);
1505 XEXP (x, 1) = temp;
1506 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
1507 && GET_CODE (XEXP (x, 0)) == REG)
1508 x = force_operand (x, 0);
1510 else if (GET_CODE (XEXP (x, 1)) == REG
1511 || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
1512 && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
1513 && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
1515 rtx temp = gen_reg_rtx (Pmode);
1516 rtx val = force_operand (XEXP (x, 0), 0);
1517 emit_move_insn (temp, val);
1518 COPY_ONCE (x);
1519 XEXP (x, 0) = temp;
1520 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
1521 && GET_CODE (XEXP (x, 1)) == REG)
1522 x = force_operand (x, 0);
1526 return x;
1530 /* Output a dbCC; jCC sequence. Note we do not handle the
1531 floating point version of this sequence (Fdbcc). We also
1532 do not handle alternative conditions when CC_NO_OVERFLOW is
1533 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1534 kick those out before we get here. */
1536 void
1537 output_dbcc_and_branch (rtx *operands)
1539 switch (GET_CODE (operands[3]))
1541 case EQ:
1542 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1543 break;
1545 case NE:
1546 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1547 break;
1549 case GT:
1550 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1551 break;
1553 case GTU:
1554 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1555 break;
1557 case LT:
1558 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1559 break;
1561 case LTU:
1562 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1563 break;
1565 case GE:
1566 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1567 break;
1569 case GEU:
1570 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1571 break;
1573 case LE:
1574 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1575 break;
1577 case LEU:
1578 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1579 break;
1581 default:
1582 gcc_unreachable ();
1585 /* If the decrement is to be done in SImode, then we have
1586 to compensate for the fact that dbcc decrements in HImode. */
1587 switch (GET_MODE (operands[0]))
1589 case SImode:
1590 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
1591 break;
1593 case HImode:
1594 break;
1596 default:
1597 gcc_unreachable ();
1601 const char *
1602 output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
1604 rtx loperands[7];
1605 enum rtx_code op_code = GET_CODE (op);
1607 /* This does not produce a useful cc. */
1608 CC_STATUS_INIT;
1610 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1611 below. Swap the operands and change the op if these requirements
1612 are not fulfilled. */
1613 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1615 rtx tmp = operand1;
1617 operand1 = operand2;
1618 operand2 = tmp;
1619 op_code = swap_condition (op_code);
1621 loperands[0] = operand1;
1622 if (GET_CODE (operand1) == REG)
1623 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
1624 else
1625 loperands[1] = adjust_address (operand1, SImode, 4);
1626 if (operand2 != const0_rtx)
1628 loperands[2] = operand2;
1629 if (GET_CODE (operand2) == REG)
1630 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
1631 else
1632 loperands[3] = adjust_address (operand2, SImode, 4);
1634 loperands[4] = gen_label_rtx ();
1635 if (operand2 != const0_rtx)
1636 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
1637 else
1639 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
1640 output_asm_insn ("tst%.l %0", loperands);
1641 else
1642 output_asm_insn ("cmp%.w #0,%0", loperands);
1644 output_asm_insn ("jne %l4", loperands);
1646 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
1647 output_asm_insn ("tst%.l %1", loperands);
1648 else
1649 output_asm_insn ("cmp%.w #0,%1", loperands);
1652 loperands[5] = dest;
1654 switch (op_code)
1656 case EQ:
1657 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1658 CODE_LABEL_NUMBER (loperands[4]));
1659 output_asm_insn ("seq %5", loperands);
1660 break;
1662 case NE:
1663 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1664 CODE_LABEL_NUMBER (loperands[4]));
1665 output_asm_insn ("sne %5", loperands);
1666 break;
1668 case GT:
1669 loperands[6] = gen_label_rtx ();
1670 output_asm_insn ("shi %5\n\tjra %l6", loperands);
1671 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1672 CODE_LABEL_NUMBER (loperands[4]));
1673 output_asm_insn ("sgt %5", loperands);
1674 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1675 CODE_LABEL_NUMBER (loperands[6]));
1676 break;
1678 case GTU:
1679 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1680 CODE_LABEL_NUMBER (loperands[4]));
1681 output_asm_insn ("shi %5", loperands);
1682 break;
1684 case LT:
1685 loperands[6] = gen_label_rtx ();
1686 output_asm_insn ("scs %5\n\tjra %l6", loperands);
1687 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1688 CODE_LABEL_NUMBER (loperands[4]));
1689 output_asm_insn ("slt %5", loperands);
1690 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1691 CODE_LABEL_NUMBER (loperands[6]));
1692 break;
1694 case LTU:
1695 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1696 CODE_LABEL_NUMBER (loperands[4]));
1697 output_asm_insn ("scs %5", loperands);
1698 break;
1700 case GE:
1701 loperands[6] = gen_label_rtx ();
1702 output_asm_insn ("scc %5\n\tjra %l6", loperands);
1703 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1704 CODE_LABEL_NUMBER (loperands[4]));
1705 output_asm_insn ("sge %5", loperands);
1706 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1707 CODE_LABEL_NUMBER (loperands[6]));
1708 break;
1710 case GEU:
1711 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1712 CODE_LABEL_NUMBER (loperands[4]));
1713 output_asm_insn ("scc %5", loperands);
1714 break;
1716 case LE:
1717 loperands[6] = gen_label_rtx ();
1718 output_asm_insn ("sls %5\n\tjra %l6", loperands);
1719 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1720 CODE_LABEL_NUMBER (loperands[4]));
1721 output_asm_insn ("sle %5", loperands);
1722 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1723 CODE_LABEL_NUMBER (loperands[6]));
1724 break;
1726 case LEU:
1727 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1728 CODE_LABEL_NUMBER (loperands[4]));
1729 output_asm_insn ("sls %5", loperands);
1730 break;
1732 default:
1733 gcc_unreachable ();
1735 return "";
1738 const char *
1739 output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
1741 operands[0] = countop;
1742 operands[1] = dataop;
1744 if (GET_CODE (countop) == CONST_INT)
1746 register int count = INTVAL (countop);
1747 /* If COUNT is bigger than size of storage unit in use,
1748 advance to the containing unit of same size. */
1749 if (count > signpos)
1751 int offset = (count & ~signpos) / 8;
1752 count = count & signpos;
1753 operands[1] = dataop = adjust_address (dataop, QImode, offset);
1755 if (count == signpos)
1756 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1757 else
1758 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1760 /* These three statements used to use next_insns_test_no...
1761 but it appears that this should do the same job. */
1762 if (count == 31
1763 && next_insn_tests_no_inequality (insn))
1764 return "tst%.l %1";
1765 if (count == 15
1766 && next_insn_tests_no_inequality (insn))
1767 return "tst%.w %1";
1768 if (count == 7
1769 && next_insn_tests_no_inequality (insn))
1770 return "tst%.b %1";
1771 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1772 On some m68k variants unfortunately that's slower than btst.
1773 On 68000 and higher, that should also work for all HImode operands. */
1774 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1776 if (count == 3 && DATA_REG_P (operands[1])
1777 && next_insn_tests_no_inequality (insn))
1779 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1780 return "move%.w %1,%%ccr";
1782 if (count == 2 && DATA_REG_P (operands[1])
1783 && next_insn_tests_no_inequality (insn))
1785 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1786 return "move%.w %1,%%ccr";
1788 /* count == 1 followed by bvc/bvs and
1789 count == 0 followed by bcc/bcs are also possible, but need
1790 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1793 cc_status.flags = CC_NOT_NEGATIVE;
1795 return "btst %0,%1";
1798 /* Return true if X is a legitimate base register. STRICT_P says
1799 whether we need strict checking. */
1801 bool
1802 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1804 /* Allow SUBREG everywhere we allow REG. This results in better code. */
1805 if (!strict_p && GET_CODE (x) == SUBREG)
1806 x = SUBREG_REG (x);
1808 return (REG_P (x)
1809 && (strict_p
1810 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1811 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1814 /* Return true if X is a legitimate index register. STRICT_P says
1815 whether we need strict checking. */
1817 bool
1818 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1820 if (!strict_p && GET_CODE (x) == SUBREG)
1821 x = SUBREG_REG (x);
1823 return (REG_P (x)
1824 && (strict_p
1825 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1826 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1829 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1830 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1831 ADDRESS if so. STRICT_P says whether we need strict checking. */
1833 static bool
1834 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1836 int scale;
1838 /* Check for a scale factor. */
1839 scale = 1;
1840 if ((TARGET_68020 || TARGET_COLDFIRE)
1841 && GET_CODE (x) == MULT
1842 && GET_CODE (XEXP (x, 1)) == CONST_INT
1843 && (INTVAL (XEXP (x, 1)) == 2
1844 || INTVAL (XEXP (x, 1)) == 4
1845 || (INTVAL (XEXP (x, 1)) == 8
1846 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1848 scale = INTVAL (XEXP (x, 1));
1849 x = XEXP (x, 0);
1852 /* Check for a word extension. */
1853 if (!TARGET_COLDFIRE
1854 && GET_CODE (x) == SIGN_EXTEND
1855 && GET_MODE (XEXP (x, 0)) == HImode)
1856 x = XEXP (x, 0);
1858 if (m68k_legitimate_index_reg_p (x, strict_p))
1860 address->scale = scale;
1861 address->index = x;
1862 return true;
1865 return false;
1868 /* Return true if X is an illegitimate symbolic constant. */
1870 bool
1871 m68k_illegitimate_symbolic_constant_p (rtx x)
1873 rtx base, offset;
1875 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1877 split_const (x, &base, &offset);
1878 if (GET_CODE (base) == SYMBOL_REF
1879 && !offset_within_block_p (base, INTVAL (offset)))
1880 return true;
1882 return m68k_tls_reference_p (x, false);
1885 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1887 static bool
1888 m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1890 return m68k_illegitimate_symbolic_constant_p (x);
1893 /* Return true if X is a legitimate constant address that can reach
1894 bytes in the range [X, X + REACH). STRICT_P says whether we need
1895 strict checking. */
1897 static bool
1898 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1900 rtx base, offset;
1902 if (!CONSTANT_ADDRESS_P (x))
1903 return false;
1905 if (flag_pic
1906 && !(strict_p && TARGET_PCREL)
1907 && symbolic_operand (x, VOIDmode))
1908 return false;
1910 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1912 split_const (x, &base, &offset);
1913 if (GET_CODE (base) == SYMBOL_REF
1914 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1915 return false;
1918 return !m68k_tls_reference_p (x, false);
1921 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1922 labels will become jump tables. */
1924 static bool
1925 m68k_jump_table_ref_p (rtx x)
1927 if (GET_CODE (x) != LABEL_REF)
1928 return false;
1930 x = XEXP (x, 0);
1931 if (!NEXT_INSN (x) && !PREV_INSN (x))
1932 return true;
1934 x = next_nonnote_insn (x);
1935 return x && JUMP_TABLE_DATA_P (x);
1938 /* Return true if X is a legitimate address for values of mode MODE.
1939 STRICT_P says whether strict checking is needed. If the address
1940 is valid, describe its components in *ADDRESS. */
1942 static bool
1943 m68k_decompose_address (enum machine_mode mode, rtx x,
1944 bool strict_p, struct m68k_address *address)
1946 unsigned int reach;
1948 memset (address, 0, sizeof (*address));
1950 if (mode == BLKmode)
1951 reach = 1;
1952 else
1953 reach = GET_MODE_SIZE (mode);
1955 /* Check for (An) (mode 2). */
1956 if (m68k_legitimate_base_reg_p (x, strict_p))
1958 address->base = x;
1959 return true;
1962 /* Check for -(An) and (An)+ (modes 3 and 4). */
1963 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1964 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1966 address->code = GET_CODE (x);
1967 address->base = XEXP (x, 0);
1968 return true;
1971 /* Check for (d16,An) (mode 5). */
1972 if (GET_CODE (x) == PLUS
1973 && GET_CODE (XEXP (x, 1)) == CONST_INT
1974 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1975 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1977 address->base = XEXP (x, 0);
1978 address->offset = XEXP (x, 1);
1979 return true;
1982 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1983 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1984 addresses. */
1985 if (GET_CODE (x) == PLUS
1986 && XEXP (x, 0) == pic_offset_table_rtx)
1988 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
1989 they are invalid in this context. */
1990 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
1992 address->base = XEXP (x, 0);
1993 address->offset = XEXP (x, 1);
1994 return true;
1998 /* The ColdFire FPU only accepts addressing modes 2-5. */
1999 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2000 return false;
2002 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2003 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2004 All these modes are variations of mode 7. */
2005 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2007 address->offset = x;
2008 return true;
2011 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2012 tablejumps.
2014 ??? do_tablejump creates these addresses before placing the target
2015 label, so we have to assume that unplaced labels are jump table
2016 references. It seems unlikely that we would ever generate indexed
2017 accesses to unplaced labels in other cases. */
2018 if (GET_CODE (x) == PLUS
2019 && m68k_jump_table_ref_p (XEXP (x, 1))
2020 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2022 address->offset = XEXP (x, 1);
2023 return true;
2026 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2027 (bd,An,Xn.SIZE*SCALE) addresses. */
2029 if (TARGET_68020)
2031 /* Check for a nonzero base displacement. */
2032 if (GET_CODE (x) == PLUS
2033 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2035 address->offset = XEXP (x, 1);
2036 x = XEXP (x, 0);
2039 /* Check for a suppressed index register. */
2040 if (m68k_legitimate_base_reg_p (x, strict_p))
2042 address->base = x;
2043 return true;
2046 /* Check for a suppressed base register. Do not allow this case
2047 for non-symbolic offsets as it effectively gives gcc freedom
2048 to treat data registers as base registers, which can generate
2049 worse code. */
2050 if (address->offset
2051 && symbolic_operand (address->offset, VOIDmode)
2052 && m68k_decompose_index (x, strict_p, address))
2053 return true;
2055 else
2057 /* Check for a nonzero base displacement. */
2058 if (GET_CODE (x) == PLUS
2059 && GET_CODE (XEXP (x, 1)) == CONST_INT
2060 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2062 address->offset = XEXP (x, 1);
2063 x = XEXP (x, 0);
2067 /* We now expect the sum of a base and an index. */
2068 if (GET_CODE (x) == PLUS)
2070 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2071 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2073 address->base = XEXP (x, 0);
2074 return true;
2077 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2078 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2080 address->base = XEXP (x, 1);
2081 return true;
2084 return false;
2087 /* Return true if X is a legitimate address for values of mode MODE.
2088 STRICT_P says whether strict checking is needed. */
2090 bool
2091 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2093 struct m68k_address address;
2095 return m68k_decompose_address (mode, x, strict_p, &address);
2098 /* Return true if X is a memory, describing its address in ADDRESS if so.
2099 Apply strict checking if called during or after reload. */
2101 static bool
2102 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2104 return (MEM_P (x)
2105 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2106 reload_in_progress || reload_completed,
2107 address));
2110 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2112 bool
2113 m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2115 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2118 /* Return true if X matches the 'Q' constraint. It must be a memory
2119 with a base address and no constant offset or index. */
2121 bool
2122 m68k_matches_q_p (rtx x)
2124 struct m68k_address address;
2126 return (m68k_legitimate_mem_p (x, &address)
2127 && address.code == UNKNOWN
2128 && address.base
2129 && !address.offset
2130 && !address.index);
2133 /* Return true if X matches the 'U' constraint. It must be a base address
2134 with a constant offset and no index. */
2136 bool
2137 m68k_matches_u_p (rtx x)
2139 struct m68k_address address;
2141 return (m68k_legitimate_mem_p (x, &address)
2142 && address.code == UNKNOWN
2143 && address.base
2144 && address.offset
2145 && !address.index);
2148 /* Return GOT pointer. */
2150 static rtx
2151 m68k_get_gp (void)
2153 if (pic_offset_table_rtx == NULL_RTX)
2154 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2156 crtl->uses_pic_offset_table = 1;
2158 return pic_offset_table_rtx;
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True for every relocation kind except the plain GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2168 /* Wrap symbol X into unspec representing relocation RELOC.
2169 BASE_REG - register that should be added to the result.
2170 TEMP_REG - if non-null, temporary register. */
2172 static rtx
2173 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2175 bool use_x_p;
/* Which large-model switch applies depends on the kind of access:
   -mxgot when the base is the PIC register, -mxtls otherwise.  */
2177 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2179 if (TARGET_COLDFIRE && use_x_p)
2180 /* When compiling with -mx{got, tls} switch the code will look like this:
2182 move.l <X>@<RELOC>,<TEMP_REG>
2183 add.l <BASE_REG>,<TEMP_REG> */
2185 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2186 to put @RELOC after reference. */
2187 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2188 UNSPEC_RELOC32);
2189 x = gen_rtx_CONST (Pmode, x);
2191 if (temp_reg == NULL)
2193 gcc_assert (can_create_pseudo_p ());
2194 temp_reg = gen_reg_rtx (Pmode);
2197 emit_move_insn (temp_reg, x);
2198 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2199 x = temp_reg;
2201 else
/* Small model: fold a 16-bit relocation into the address itself,
   yielding <BASE_REG> + <X>@<RELOC>.  No insns are emitted here.  */
2203 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2204 UNSPEC_RELOC16);
2205 x = gen_rtx_CONST (Pmode, x);
2207 x = gen_rtx_PLUS (Pmode, base_reg, x);
2210 return x;
2213 /* Helper for m68k_unwrap_symbol.
2214 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2215 sets *RELOC_PTR to relocation type for the symbol. */
/* UNSPEC_RELOC16 wrappers are always unwrapped; UNSPEC_RELOC32 ones only
   when UNWRAP_RELOC32_P.  Anything unrecognized is returned unchanged.  */
2217 static rtx
2218 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2219 enum m68k_reloc *reloc_ptr)
2221 if (GET_CODE (orig) == CONST)
2223 rtx x;
2224 enum m68k_reloc dummy;
2226 x = XEXP (orig, 0);
/* Point RELOC_PTR at a local so the cases below can store through it
   unconditionally.  */
2228 if (reloc_ptr == NULL)
2229 reloc_ptr = &dummy;
2231 /* Handle an addend. */
2232 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2233 && CONST_INT_P (XEXP (x, 1)))
2234 x = XEXP (x, 0);
2236 if (GET_CODE (x) == UNSPEC)
2238 switch (XINT (x, 1))
2240 case UNSPEC_RELOC16:
2241 orig = XVECEXP (x, 0, 0);
2242 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2243 break;
2245 case UNSPEC_RELOC32:
2246 if (unwrap_reloc32_p)
2248 orig = XVECEXP (x, 0, 0);
2249 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2251 break;
2253 default:
2254 break;
2259 return orig;
2262 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2263 UNSPEC_RELOC32 wrappers. */
/* Thin wrapper over m68k_unwrap_symbol_1 that discards the relocation
   type.  */
2266 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2268 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2271 /* Helper for m68k_final_prescan_insn. */
/* for_each_rtx callback: returning -1 tells the walker not to recurse
   into this expression's sub-rtxes; 0 continues the traversal.  */
2273 static int
2274 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2276 rtx x = *x_ptr;
/* Only act on (const ...) forms that actually wrap a relocation.  */
2278 if (m68k_unwrap_symbol (x, true) != x)
2279 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2281 rtx plus;
2283 gcc_assert (GET_CODE (x) == CONST);
2284 plus = XEXP (x, 0);
2286 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2288 rtx unspec;
2289 rtx addend;
2291 unspec = XEXP (plus, 0);
2292 gcc_assert (GET_CODE (unspec) == UNSPEC);
2293 addend = XEXP (plus, 1);
2294 gcc_assert (CONST_INT_P (addend));
2296 /* We now have all the pieces, rearrange them. */
2298 /* Move symbol to plus. */
2299 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2301 /* Move plus inside unspec. */
2302 XVECEXP (unspec, 0, 0) = plus;
2304 /* Move unspec to top level of const. */
2305 XEXP (x, 0) = unspec;
2308 return -1;
2311 return 0;
2314 /* Prescan insn before outputing assembler for it. */
2316 void
2317 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2318 rtx *operands, int n_operands)
2320 int i;
2322 /* Combine and, possibly, other optimizations may do good job
2323 converting
2324 (const (unspec [(symbol)]))
2325 into
2326 (const (plus (unspec [(symbol)])
2327 (const_int N))).
2328 The problem with this is emitting @TLS or @GOT decorations.
2329 The decoration is emitted when processing (unspec), so the
2330 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2332 It seems that the easiest solution to this is to convert such
2333 operands to
2334 (const (unspec [(plus (symbol)
2335 (const_int N))])).
2336 Note, that the top level of operand remains intact, so we don't have
2337 to patch up anything outside of the operand. */
/* The callback rewrites the operand rtxes destructively, in place.  */
2339 for (i = 0; i < n_operands; ++i)
2341 rtx op;
2343 op = operands[i];
2345 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2349 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2350 If REG is non-null, use it; generate new pseudo otherwise. */
2352 static rtx
2353 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2355 rtx insn;
2357 if (reg == NULL_RTX)
/* A fresh pseudo can only be created before reload.  */
2359 gcc_assert (can_create_pseudo_p ());
2360 reg = gen_reg_rtx (Pmode);
2363 insn = emit_move_insn (reg, x);
2364 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2365 by loop. */
2366 set_unique_reg_note (insn, REG_EQUAL, orig)
2368 return reg;
2371 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2372 GOT slot. */
2374 static rtx
2375 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2377 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2379 x = gen_rtx_MEM (Pmode, x);
/* GOT slots are not written at run time; marking the load read-only
   lets it be CSEd/hoisted.  */
2380 MEM_READONLY_P (x) = 1;
2382 return x;
2385 /* Legitimize PIC addresses. If the address is already
2386 position-independent, we return ORIG. Newly generated
2387 position-independent addresses go to REG. If we need more
2388 than one register, we lose.
2390 An address is legitimized by making an indirect reference
2391 through the Global Offset Table with the name of the symbol
2392 used as an offset.
2394 The assembler and linker are responsible for placing the
2395 address of the symbol in the GOT. The function prologue
2396 is responsible for initializing a5 to the starting address
2397 of the GOT.
2399 The assembler is also responsible for translating a symbol name
2400 into a constant displacement from the start of the GOT.
2402 A quick example may make things a little clearer:
2404 When not generating PIC code to store the value 12345 into _foo
2405 we would generate the following code:
2407 movel #12345, _foo
2409 When generating PIC two transformations are made. First, the compiler
2410 loads the address of foo into a register. So the first transformation makes:
2412 lea _foo, a0
2413 movel #12345, a0@
2415 The code in movsi will intercept the lea instruction and call this
2416 routine which will transform the instructions into:
2418 movel a5@(_foo:w), a0
2419 movel #12345, a0@
2422 That (in a nutshell) is how *all* symbol and label references are
2423 handled. */
2426 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2427 rtx reg)
2429 rtx pic_ref = orig;
2431 /* First handle a simple SYMBOL_REF or LABEL_REF */
2432 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2434 gcc_assert (reg);
2436 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2437 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2439 else if (GET_CODE (orig) == CONST)
2441 rtx base;
2443 /* Make sure this has not already been legitimized. */
2444 if (m68k_unwrap_symbol (orig, true) != orig)
2445 return orig;
2447 gcc_assert (reg);
2449 /* legitimize both operands of the PLUS */
2450 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS)
2452 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2453 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2454 base == reg ? 0 : reg);
/* A constant addend survives legitimization; fold it into the base
   directly instead of emitting a PLUS.  */
2456 if (GET_CODE (orig) == CONST_INT)
2457 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2458 else
2459 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2462 return pic_ref;
2465 /* The __tls_get_addr symbol. */
2466 static GTY(()) rtx m68k_tls_get_addr;
2468 /* Return SYMBOL_REF for __tls_get_addr. */
/* Lazily initialized; GTY(()) keeps the rtx alive across garbage
   collection.  */
2470 static rtx
2471 m68k_get_tls_get_addr (void)
2473 if (m68k_tls_get_addr == NULL_RTX)
2474 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2476 return m68k_tls_get_addr;
2479 /* Return libcall result in A0 instead of usual D0. */
2480 static bool m68k_libcall_value_in_a0_p = false;
2482 /* Emit instruction sequence that calls __tls_get_addr. X is
2483 the TLS symbol we are referencing and RELOC is the symbol type to use
2484 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2485 emitted. A pseudo register with result of __tls_get_addr call is
2486 returned. */
2488 static rtx
2489 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2491 rtx a0;
2492 rtx insns;
2493 rtx dest;
2495 /* Emit the call sequence. */
2496 start_sequence ();
2498 /* FIXME: Unfortunately, emit_library_call_value does not
2499 consider (plus (%a5) (const (unspec))) to be a good enough
2500 operand for push, so it forces it into a register. The bad
2501 thing about this is that combiner, due to copy propagation and other
2502 optimizations, sometimes can not later fix this. As a consequence,
2503 additional register may be allocated resulting in a spill.
2504 For reference, see args processing loops in
2505 calls.c:emit_library_call_value_1.
2506 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2507 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2509 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2510 is the simpliest way of generating a call. The difference between
2511 __tls_get_addr() and libcall is that the result is returned in D0
2512 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2513 which temporarily switches returning the result to A0. */
2515 m68k_libcall_value_in_a0_p = true;
2516 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2517 Pmode, 1, x, Pmode);
2518 m68k_libcall_value_in_a0_p = false;
2520 insns = get_insns ();
2521 end_sequence ();
2523 gcc_assert (can_create_pseudo_p ());
2524 dest = gen_reg_rtx (Pmode);
/* emit_libcall_block attaches EQV as an equivalence for DEST, letting
   the optimizers share identical TLS accesses.  */
2525 emit_libcall_block (insns, dest, a0, eqv);
2527 return dest;
2530 /* The __m68k_read_tp symbol. */
2531 static GTY(()) rtx m68k_read_tp;
2533 /* Return SYMBOL_REF for __m68k_read_tp. */
/* Lazily initialized, same scheme as m68k_get_tls_get_addr.  */
2535 static rtx
2536 m68k_get_m68k_read_tp (void)
2538 if (m68k_read_tp == NULL_RTX)
2539 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2541 return m68k_read_tp;
2544 /* Emit instruction sequence that calls __m68k_read_tp.
2545 A pseudo register with result of __m68k_read_tp call is returned. */
2547 static rtx
2548 m68k_call_m68k_read_tp (void)
2550 rtx a0;
2551 rtx eqv;
2552 rtx insns;
2553 rtx dest;
2555 start_sequence ();
2557 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2558 is the simpliest way of generating a call. The difference between
2559 __m68k_read_tp() and libcall is that the result is returned in D0
2560 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2561 which temporarily switches returning the result to A0. */
2563 /* Emit the call sequence. */
2564 m68k_libcall_value_in_a0_p = true;
2565 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2566 Pmode, 0);
2567 m68k_libcall_value_in_a0_p = false;
2568 insns = get_insns ();
2569 end_sequence ();
2571 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2572 share the m68k_read_tp result with other IE/LE model accesses. */
/* const1_rtx distinguishes this equivalence from the TLSLDM one,
   which uses const0_rtx.  */
2573 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2575 gcc_assert (can_create_pseudo_p ());
2576 dest = gen_reg_rtx (Pmode);
2577 emit_libcall_block (insns, dest, a0, eqv);
2579 return dest;
2582 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2583 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2584 ColdFire. */
2587 m68k_legitimize_tls_address (rtx orig)
2589 switch (SYMBOL_REF_TLS_MODEL (orig))
/* GD: one __tls_get_addr call per symbol, keyed by the symbol itself.  */
2591 case TLS_MODEL_GLOBAL_DYNAMIC:
2592 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2593 break;
2595 case TLS_MODEL_LOCAL_DYNAMIC:
2597 rtx eqv;
2598 rtx a0;
2599 rtx x;
2601 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2602 share the LDM result with other LD model accesses. */
2603 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2604 UNSPEC_RELOC32);
2606 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM)
2608 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2610 if (can_create_pseudo_p ())
2611 x = m68k_move_to_reg (x, orig, NULL_RTX);
2613 orig = x;
2614 break;
/* IE: load the symbol's offset from its GOT slot, add thread pointer.  */
2617 case TLS_MODEL_INITIAL_EXEC:
2619 rtx a0;
2620 rtx x;
2622 a0 = m68k_call_m68k_read_tp ();
2624 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2625 x = gen_rtx_PLUS (Pmode, x, a0);
2627 if (can_create_pseudo_p ())
2628 x = m68k_move_to_reg (x, orig, NULL_RTX);
2630 orig = x;
2631 break;
/* LE: the offset is a link-time constant; add it to the thread
   pointer directly.  */
2634 case TLS_MODEL_LOCAL_EXEC:
2636 rtx a0;
2637 rtx x;
2639 a0 = m68k_call_m68k_read_tp ();
2641 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2643 if (can_create_pseudo_p ())
2644 x = m68k_move_to_reg (x, orig, NULL_RTX);
2646 orig = x;
2647 break;
2650 default:
2651 gcc_unreachable ();
2654 return orig;
2657 /* Return true if X is a TLS symbol. */
2659 static bool
2660 m68k_tls_symbol_p (rtx x)
2662 if (!TARGET_HAVE_TLS)
2663 return false;
2665 if (GET_CODE (x) != SYMBOL_REF)
2666 return false;
2668 return SYMBOL_REF_TLS_MODEL (x) != 0;
2671 /* Helper for m68k_tls_referenced_p. */
/* for_each_rtx callback: 1 stops the walk (TLS symbol found), -1 skips
   this expression's sub-rtxes, 0 keeps walking.  */
2673 static int
2674 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2676 /* Note: this is not the same as m68k_tls_symbol_p. */
2677 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2678 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2680 /* Don't recurse into legitimate TLS references. */
2681 if (m68k_tls_reference_p (*x_ptr, true))
2682 return -1;
2684 return 0;
2687 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2688 though illegitimate one.
2689 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2691 bool
2692 m68k_tls_reference_p (rtx x, bool legitimate_p)
2694 if (!TARGET_HAVE_TLS)
2695 return false;
2697 if (!legitimate_p)
/* Walk X looking for a TLS SYMBOL_REF outside a legitimate wrapper.  */
2698 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
2699 else
/* X is legitimate iff it unwraps to something else via a TLS reloc.  */
2701 enum m68k_reloc reloc = RELOC_GOT;
2703 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2704 && TLS_RELOC_P (reloc));
/* True iff I fits in a signed 8-bit immediate (-128..127), i.e. is a
   valid moveq operand.  */
2710 #define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
2712 /* Return the type of move that should be used for integer I. */
2714 M68K_CONST_METHOD
2715 m68k_const_method (HOST_WIDE_INT i)
2717 unsigned u;
2719 if (USE_MOVQ (i))
2720 return MOVQ;
2722 /* The ColdFire doesn't have byte or word operations. */
2723 /* FIXME: This may not be useful for the m68060 either. */
2724 if (!TARGET_COLDFIRE)
2726 /* if -256 < N < 256 but N is not in range for a moveq
2727 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2728 if (USE_MOVQ (i ^ 0xff))
2729 return NOTB;
2730 /* Likewise, try with not.w */
2731 if (USE_MOVQ (i ^ 0xffff))
2732 return NOTW;
2733 /* This is the only value where neg.w is useful */
2734 if (i == -65408)
2735 return NEGW;
2738 /* Try also with swap. */
2739 u = i;
2740 if (USE_MOVQ ((u >> 16) | (u << 16)))
2741 return SWAP;
2743 if (TARGET_ISAB)
2745 /* Try using MVZ/MVS with an immediate value to load constants. */
2746 if (i >= 0 && i <= 65535)
2747 return MVZ;
2748 if (i >= -32768 && i <= 32767)
2749 return MVS;
2752 /* Otherwise, use move.l */
2753 return MOVL;
2756 /* Return the cost of moving constant I into a data register. */
2758 static int
2759 const_int_cost (HOST_WIDE_INT i)
2761 switch (m68k_const_method (i))
2763 case MOVQ:
2764 /* Constants between -128 and 127 are cheap due to moveq. */
2765 return 0;
2766 case MVZ:
2767 case MVS:
2768 case NOTB:
2769 case NOTW:
2770 case NEGW:
2771 case SWAP:
2772 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
2773 return 1;
2774 case MOVL:
2775 return 2;
2776 default:
2777 gcc_unreachable ();
/* Estimate the cost of rtx X (rtx code CODE) appearing inside context
   OUTER_CODE.  On success store the cost in *TOTAL and return true;
   return false to fall back to the generic cost computation.  */
2781 static bool
2782 m68k_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2783 int *total, bool speed ATTRIBUTE_UNUSED)
2785 switch (code)
2787 case CONST_INT:
2788 /* Constant zero is super cheap due to clr instruction. */
2789 if (x == const0_rtx)
2790 *total = 0;
2791 else
2792 *total = const_int_cost (INTVAL (x));
2793 return true;
2795 case CONST:
2796 case LABEL_REF:
2797 case SYMBOL_REF:
2798 *total = 3;
2799 return true;
2801 case CONST_DOUBLE:
2802 /* Make 0.0 cheaper than other floating constants to
2803 encourage creating tstsf and tstdf insns. */
2804 if (outer_code == COMPARE
2805 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2806 *total = 4;
2807 else
2808 *total = 5;
2809 return true;
2811 /* These are vaguely right for a 68020. */
2812 /* The costs for long multiply have been adjusted to work properly
2813 in synth_mult on the 68020, relative to an average of the time
2814 for add and the time for shift, taking away a little more because
2815 sometimes move insns are needed. */
2816 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2817 terms. */
/* The cost macros below select a value for the CPU being tuned for.  */
2818 #define MULL_COST \
2819 (TUNE_68060 ? 2 \
2820 : TUNE_68040 ? 5 \
2821 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2822 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2823 : TUNE_CFV2 ? 8 \
2824 : TARGET_COLDFIRE ? 3 : 13)
2826 #define MULW_COST \
2827 (TUNE_68060 ? 2 \
2828 : TUNE_68040 ? 3 \
2829 : TUNE_68000_10 ? 5 \
2830 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2831 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2832 : TUNE_CFV2 ? 8 \
2833 : TARGET_COLDFIRE ? 2 : 8)
2835 #define DIVW_COST \
2836 (TARGET_CF_HWDIV ? 11 \
2837 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2839 case PLUS:
2840 /* An lea costs about three times as much as a simple add. */
2841 if (GET_MODE (x) == SImode
2842 && GET_CODE (XEXP (x, 1)) == REG
2843 && GET_CODE (XEXP (x, 0)) == MULT
2844 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2845 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2846 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2847 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2848 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2850 /* lea an@(dx:l:i),am */
2851 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2852 return true;
2854 return false;
2856 case ASHIFT:
2857 case ASHIFTRT:
2858 case LSHIFTRT:
2859 if (TUNE_68060)
2861 *total = COSTS_N_INSNS(1);
2862 return true;
2864 if (TUNE_68000_10)
2866 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2868 if (INTVAL (XEXP (x, 1)) < 16)
2869 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2870 else
2871 /* We're using clrw + swap for these cases. */
2872 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2874 else
2875 *total = COSTS_N_INSNS (10); /* Worst case. */
2876 return true;
2878 /* A shift by a big integer takes an extra instruction. */
2879 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2880 && (INTVAL (XEXP (x, 1)) == 16))
2882 *total = COSTS_N_INSNS (2); /* clrw;swap */
2883 return true;
2885 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2886 && !(INTVAL (XEXP (x, 1)) > 0
2887 && INTVAL (XEXP (x, 1)) <= 8))
2889 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
2890 return true;
2892 return false;
2894 case MULT:
/* Widening multiplies from a narrower mode use the cheaper mul?.w.  */
2895 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2896 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2897 && GET_MODE (x) == SImode)
2898 *total = COSTS_N_INSNS (MULW_COST);
2899 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2900 *total = COSTS_N_INSNS (MULW_COST);
2901 else
2902 *total = COSTS_N_INSNS (MULL_COST);
2903 return true;
2905 case DIV:
2906 case UDIV:
2907 case MOD:
2908 case UMOD:
2909 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2910 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
2911 else if (TARGET_CF_HWDIV)
2912 *total = COSTS_N_INSNS (18);
2913 else
2914 *total = COSTS_N_INSNS (43); /* div.l */
2915 return true;
2917 case ZERO_EXTRACT:
/* A bit-field extract used directly in a comparison costs nothing
   extra: *total = 0 and fall back to generic handling otherwise.  */
2918 if (outer_code == COMPARE)
2919 *total = 0;
2920 return false;
2922 default:
2923 return false;
2927 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2928 OPERANDS[0]. */
/* Chooses the sequence picked by m68k_const_method; two-insn sequences
   may rewrite OPERANDS[1] to the intermediate immediate.  */
2930 static const char *
2931 output_move_const_into_data_reg (rtx *operands)
2933 HOST_WIDE_INT i;
2935 i = INTVAL (operands[1]);
2936 switch (m68k_const_method (i))
2938 case MVZ:
2939 return "mvzw %1,%0";
2940 case MVS:
2941 return "mvsw %1,%0";
2942 case MOVQ:
2943 return "moveq %1,%0";
2944 case NOTB:
/* NOTE(review): cc tracking is discarded here, presumably because the
   final insn sets flags from only part of the 32-bit value.  */
2945 CC_STATUS_INIT;
2946 operands[1] = GEN_INT (i ^ 0xff);
2947 return "moveq %1,%0\n\tnot%.b %0";
2948 case NOTW:
2949 CC_STATUS_INIT;
2950 operands[1] = GEN_INT (i ^ 0xffff);
2951 return "moveq %1,%0\n\tnot%.w %0";
2952 case NEGW:
2953 CC_STATUS_INIT;
2954 return "moveq #-128,%0\n\tneg%.w %0";
2955 case SWAP:
2957 unsigned u = i;
2959 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2960 return "moveq %1,%0\n\tswap %0";
2962 case MOVL:
2963 return "move%.l %1,%0";
2964 default:
2965 gcc_unreachable ();
2969 /* Return true if I can be handled by ISA B's mov3q instruction. */
2971 bool
2972 valid_mov3q_const (HOST_WIDE_INT i)
2974 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2977 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2978 I is the value of OPERANDS[1]. */
2980 static const char *
2981 output_move_simode_const (rtx *operands)
2983 rtx dest;
2984 HOST_WIDE_INT src;
2986 dest = operands[0];
2987 src = INTVAL (operands[1]);
2988 if (src == 0
2989 && (DATA_REG_P (dest) || MEM_P (dest))
2990 /* clr insns on 68000 read before writing. */
2991 && ((TARGET_68010 || TARGET_COLDFIRE)
2992 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
2993 return "clr%.l %0";
2994 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
2995 return "mov3q%.l %1,%0";
/* An address register is zeroed with sub.l %0,%0 rather than clr.  */
2996 else if (src == 0 && ADDRESS_REG_P (dest))
2997 return "sub%.l %0,%0";
2998 else if (DATA_REG_P (dest))
2999 return output_move_const_into_data_reg (operands);
/* Address registers sign-extend word moves, so a 16-bit immediate is
   enough for the -0x8000..0x7fff range.  */
3000 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3002 if (valid_mov3q_const (src))
3003 return "mov3q%.l %1,%0";
3004 return "move%.w %1,%0";
3006 else if (MEM_P (dest)
3007 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3008 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3009 && IN_RANGE (src, -0x8000, 0x7fff))
3011 if (valid_mov3q_const (src))
3012 return "mov3q%.l %1,%-";
3013 return "pea %a1";
3015 return "move%.l %1,%0";
3018 const char *
3019 output_move_simode (rtx *operands)
3021 if (GET_CODE (operands[1]) == CONST_INT)
3022 return output_move_simode_const (operands);
3023 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3024 || GET_CODE (operands[1]) == CONST)
3025 && push_operand (operands[0], SImode))
3026 return "pea %a1";
3027 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3028 || GET_CODE (operands[1]) == CONST)
3029 && ADDRESS_REG_P (operands[0]))
3030 return "lea %a1,%0";
3031 return "move%.l %1,%0";
/* Return the assembler template for a halfword (HImode) move of
   OPERANDS[1] into OPERANDS[0].  */
3034 const char *
3035 output_move_himode (rtx *operands)
3037 if (GET_CODE (operands[1]) == CONST_INT)
3039 if (operands[1] == const0_rtx
3040 && (DATA_REG_P (operands[0])
3041 || GET_CODE (operands[0]) == MEM)
3042 /* clr insns on 68000 read before writing. */
3043 && ((TARGET_68010 || TARGET_COLDFIRE)
3044 || !(GET_CODE (operands[0]) == MEM
3045 && MEM_VOLATILE_P (operands[0]))))
3046 return "clr%.w %0";
3047 else if (operands[1] == const0_rtx
3048 && ADDRESS_REG_P (operands[0]))
3049 return "sub%.l %0,%0";
/* moveq only targets data registers and takes an 8-bit immediate.  */
3050 else if (DATA_REG_P (operands[0])
3051 && INTVAL (operands[1]) < 128
3052 && INTVAL (operands[1]) >= -128)
3053 return "moveq %1,%0";
3054 else if (INTVAL (operands[1]) < 0x8000
3055 && INTVAL (operands[1]) >= -0x8000)
3056 return "move%.w %1,%0";
/* Non-CONST_INT constants (symbolic) need a full 32-bit move.  */
3058 else if (CONSTANT_P (operands[1]))
3059 return "move%.l %1,%0";
3060 return "move%.w %1,%0";
/* Return the assembler template for a byte (QImode) move of
   OPERANDS[1] into OPERANDS[0].  */
3063 const char *
3064 output_move_qimode (rtx *operands)
3066 /* 68k family always modifies the stack pointer by at least 2, even for
3067 byte pushes. The 5200 (ColdFire) does not do this. */
3069 /* This case is generated by pushqi1 pattern now. */
3070 gcc_assert (!(GET_CODE (operands[0]) == MEM
3071 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3072 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3073 && ! ADDRESS_REG_P (operands[1])
3074 && ! TARGET_COLDFIRE));
3076 /* clr and st insns on 68000 read before writing. */
3077 if (!ADDRESS_REG_P (operands[0])
3078 && ((TARGET_68010 || TARGET_COLDFIRE)
3079 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3081 if (operands[1] == const0_rtx)
3082 return "clr%.b %0";
/* st sets all eight bits, hence the (value & 255) == 255 test; it does
   not update the condition codes, so tracked cc state is discarded.  */
3083 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3084 && GET_CODE (operands[1]) == CONST_INT
3085 && (INTVAL (operands[1]) & 255) == 255)
3087 CC_STATUS_INIT;
3088 return "st %0";
3091 if (GET_CODE (operands[1]) == CONST_INT
3092 && DATA_REG_P (operands[0])
3093 && INTVAL (operands[1]) < 128
3094 && INTVAL (operands[1]) >= -128)
3095 return "moveq %1,%0";
3096 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3097 return "sub%.l %0,%0";
3098 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3099 return "move%.l %1,%0";
3100 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3101 from address registers. */
3102 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3103 return "move%.w %1,%0";
3104 return "move%.b %1,%0";
3107 const char *
3108 output_move_stricthi (rtx *operands)
3110 if (operands[1] == const0_rtx
3111 /* clr insns on 68000 read before writing. */
3112 && ((TARGET_68010 || TARGET_COLDFIRE)
3113 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3114 return "clr%.w %0";
3115 return "move%.w %1,%0";
3118 const char *
3119 output_move_strictqi (rtx *operands)
3121 if (operands[1] == const0_rtx
3122 /* clr insns on 68000 read before writing. */
3123 && ((TARGET_68010 || TARGET_COLDFIRE)
3124 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3125 return "clr%.b %0";
3126 return "move%.b %1,%0";
3129 /* Return the best assembler insn template
3130 for moving operands[1] into operands[0] as a fullword. */
3132 static const char *
3133 singlemove_string (rtx *operands)
3135 if (GET_CODE (operands[1]) == CONST_INT)
3136 return output_move_simode_const (operands);
3137 return "move%.l %1,%0";
3141 /* Output assembler or rtl code to perform a doubleword move insn
3142 with operands OPERANDS.
3143 Pointers to 3 helper functions should be specified:
3144 HANDLE_REG_ADJUST to adjust a register by a small value,
3145 HANDLE_COMPADR to compute an address and
3146 HANDLE_MOVSI to move 4 bytes. */
3148 static void
3149 handle_move_double (rtx operands[2],
3150 void (*handle_reg_adjust) (rtx, int),
3151 void (*handle_compadr) (rtx [2]),
3152 void (*handle_movsi) (rtx [2]))
3154 enum
3156 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3157 } optype0, optype1;
3158 rtx latehalf[2];
3159 rtx middlehalf[2];
3160 rtx xops[2];
3161 rtx addreg0 = 0, addreg1 = 0;
3162 int dest_overlapped_low = 0;
3163 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3165 middlehalf[0] = 0;
3166 middlehalf[1] = 0;
3168 /* First classify both operands. */
3170 if (REG_P (operands[0]))
3171 optype0 = REGOP;
3172 else if (offsettable_memref_p (operands[0]))
3173 optype0 = OFFSOP;
3174 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3175 optype0 = POPOP;
3176 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3177 optype0 = PUSHOP;
3178 else if (GET_CODE (operands[0]) == MEM)
3179 optype0 = MEMOP;
3180 else
3181 optype0 = RNDOP;
3183 if (REG_P (operands[1]))
3184 optype1 = REGOP;
3185 else if (CONSTANT_P (operands[1]))
3186 optype1 = CNSTOP;
3187 else if (offsettable_memref_p (operands[1]))
3188 optype1 = OFFSOP;
3189 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3190 optype1 = POPOP;
3191 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3192 optype1 = PUSHOP;
3193 else if (GET_CODE (operands[1]) == MEM)
3194 optype1 = MEMOP;
3195 else
3196 optype1 = RNDOP;
3198 /* Check for the cases that the operand constraints are not supposed
3199 to allow to happen. Generating code for these cases is
3200 painful. */
3201 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3203 /* If one operand is decrementing and one is incrementing
3204 decrement the former register explicitly
3205 and change that operand into ordinary indexing. */
3207 if (optype0 == PUSHOP && optype1 == POPOP)
3209 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3211 handle_reg_adjust (operands[0], -size);
3213 if (GET_MODE (operands[1]) == XFmode)
3214 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3215 else if (GET_MODE (operands[0]) == DFmode)
3216 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3217 else
3218 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3219 optype0 = OFFSOP;
3221 if (optype0 == POPOP && optype1 == PUSHOP)
3223 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3225 handle_reg_adjust (operands[1], -size);
3227 if (GET_MODE (operands[1]) == XFmode)
3228 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3229 else if (GET_MODE (operands[1]) == DFmode)
3230 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3231 else
3232 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3233 optype1 = OFFSOP;
3236 /* If an operand is an unoffsettable memory ref, find a register
3237 we can increment temporarily to make it refer to the second word. */
3239 if (optype0 == MEMOP)
3240 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3242 if (optype1 == MEMOP)
3243 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3245 /* Ok, we can do one word at a time.
3246 Normally we do the low-numbered word first,
3247 but if either operand is autodecrementing then we
3248 do the high-numbered word first.
3250 In either case, set up in LATEHALF the operands to use
3251 for the high-numbered word and in some cases alter the
3252 operands in OPERANDS to be suitable for the low-numbered word. */
3254 if (size == 12)
3256 if (optype0 == REGOP)
3258 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3259 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3261 else if (optype0 == OFFSOP)
3263 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3264 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3266 else
3268 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3269 latehalf[0] = adjust_address (operands[0], SImode, 0);
3272 if (optype1 == REGOP)
3274 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3275 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3277 else if (optype1 == OFFSOP)
3279 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3280 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3282 else if (optype1 == CNSTOP)
3284 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3286 REAL_VALUE_TYPE r;
3287 long l[3];
3289 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3290 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3291 operands[1] = GEN_INT (l[0]);
3292 middlehalf[1] = GEN_INT (l[1]);
3293 latehalf[1] = GEN_INT (l[2]);
3295 else
3297 /* No non-CONST_DOUBLE constant should ever appear
3298 here. */
3299 gcc_assert (!CONSTANT_P (operands[1]));
3302 else
3304 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3305 latehalf[1] = adjust_address (operands[1], SImode, 0);
3308 else
3309 /* size is not 12: */
3311 if (optype0 == REGOP)
3312 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3313 else if (optype0 == OFFSOP)
3314 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3315 else
3316 latehalf[0] = adjust_address (operands[0], SImode, 0);
3318 if (optype1 == REGOP)
3319 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3320 else if (optype1 == OFFSOP)
3321 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3322 else if (optype1 == CNSTOP)
3323 split_double (operands[1], &operands[1], &latehalf[1]);
3324 else
3325 latehalf[1] = adjust_address (operands[1], SImode, 0);
3328 /* If insn is effectively movd N(sp),-(sp) then we will do the
3329 high word first. We should use the adjusted operand 1 (which is N+4(sp))
3330 for the low word as well, to compensate for the first decrement of sp. */
3331 if (optype0 == PUSHOP
3332 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
3333 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
3334 operands[1] = middlehalf[1] = latehalf[1];
3336 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3337 if the upper part of reg N does not appear in the MEM, arrange to
3338 emit the move late-half first. Otherwise, compute the MEM address
3339 into the upper part of N and use that as a pointer to the memory
3340 operand. */
3341 if (optype0 == REGOP
3342 && (optype1 == OFFSOP || optype1 == MEMOP))
3344 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3346 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3347 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3349 /* If both halves of dest are used in the src memory address,
3350 compute the address into latehalf of dest.
3351 Note that this can't happen if the dest is two data regs. */
3352 compadr:
3353 xops[0] = latehalf[0];
3354 xops[1] = XEXP (operands[1], 0);
3356 handle_compadr (xops);
3357 if (GET_MODE (operands[1]) == XFmode)
3359 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3360 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3361 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3363 else
3365 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3366 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3369 else if (size == 12
3370 && reg_overlap_mentioned_p (middlehalf[0],
3371 XEXP (operands[1], 0)))
3373 /* Check for two regs used by both source and dest.
3374 Note that this can't happen if the dest is all data regs.
3375 It can happen if the dest is d6, d7, a0.
3376 But in that case, latehalf is an addr reg, so
3377 the code at compadr does ok. */
3379 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3380 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3381 goto compadr;
3383 /* JRV says this can't happen: */
3384 gcc_assert (!addreg0 && !addreg1);
3386 /* Only the middle reg conflicts; simply put it last. */
3387 handle_movsi (operands);
3388 handle_movsi (latehalf);
3389 handle_movsi (middlehalf);
3391 return;
3393 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3394 /* If the low half of dest is mentioned in the source memory
3395 address, the arrange to emit the move late half first. */
3396 dest_overlapped_low = 1;
3399 /* If one or both operands autodecrementing,
3400 do the two words, high-numbered first. */
3402 /* Likewise, the first move would clobber the source of the second one,
3403 do them in the other order. This happens only for registers;
3404 such overlap can't happen in memory unless the user explicitly
3405 sets it up, and that is an undefined circumstance. */
3407 if (optype0 == PUSHOP || optype1 == PUSHOP
3408 || (optype0 == REGOP && optype1 == REGOP
3409 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3410 || REGNO (operands[0]) == REGNO (latehalf[1])))
3411 || dest_overlapped_low)
3413 /* Make any unoffsettable addresses point at high-numbered word. */
3414 if (addreg0)
3415 handle_reg_adjust (addreg0, size - 4);
3416 if (addreg1)
3417 handle_reg_adjust (addreg1, size - 4);
3419 /* Do that word. */
3420 handle_movsi (latehalf);
3422 /* Undo the adds we just did. */
3423 if (addreg0)
3424 handle_reg_adjust (addreg0, -4);
3425 if (addreg1)
3426 handle_reg_adjust (addreg1, -4);
3428 if (size == 12)
3430 handle_movsi (middlehalf);
3432 if (addreg0)
3433 handle_reg_adjust (addreg0, -4);
3434 if (addreg1)
3435 handle_reg_adjust (addreg1, -4);
3438 /* Do low-numbered word. */
3440 handle_movsi (operands);
3441 return;
3444 /* Normal case: do the two words, low-numbered first. */
3446 m68k_final_prescan_insn (NULL, operands, 2);
3447 handle_movsi (operands);
3449 /* Do the middle one of the three words for long double */
3450 if (size == 12)
3452 if (addreg0)
3453 handle_reg_adjust (addreg0, 4);
3454 if (addreg1)
3455 handle_reg_adjust (addreg1, 4);
3457 m68k_final_prescan_insn (NULL, middlehalf, 2);
3458 handle_movsi (middlehalf);
3461 /* Make any unoffsettable addresses point at high-numbered word. */
3462 if (addreg0)
3463 handle_reg_adjust (addreg0, 4);
3464 if (addreg1)
3465 handle_reg_adjust (addreg1, 4);
3467 /* Do that word. */
3468 m68k_final_prescan_insn (NULL, latehalf, 2);
3469 handle_movsi (latehalf);
3471 /* Undo the adds we just did. */
3472 if (addreg0)
3473 handle_reg_adjust (addreg0, -(size - 4));
3474 if (addreg1)
3475 handle_reg_adjust (addreg1, -(size - 4));
3477 return;
3480 /* Output assembler code to adjust REG by N. */
3481 static void
3482 output_reg_adjust (rtx reg, int n)
3484 const char *s;
3486 gcc_assert (GET_MODE (reg) == SImode
3487 && -12 <= n && n != 0 && n <= 12);
3489 switch (n)
3491 case 12:
3492 s = "add%.l #12,%0";
3493 break;
3495 case 8:
3496 s = "addq%.l #8,%0";
3497 break;
3499 case 4:
3500 s = "addq%.l #4,%0";
3501 break;
3503 case -12:
3504 s = "sub%.l #12,%0";
3505 break;
3507 case -8:
3508 s = "subq%.l #8,%0";
3509 break;
3511 case -4:
3512 s = "subq%.l #4,%0";
3513 break;
3515 default:
3516 gcc_unreachable ();
3517 s = NULL;
3520 output_asm_insn (s, &reg);
3523 /* Emit rtl code to adjust REG by N. */
3524 static void
3525 emit_reg_adjust (rtx reg1, int n)
3527 rtx reg2;
3529 gcc_assert (GET_MODE (reg1) == SImode
3530 && -12 <= n && n != 0 && n <= 12);
3532 reg1 = copy_rtx (reg1);
3533 reg2 = copy_rtx (reg1);
3535 if (n < 0)
3536 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3537 else if (n > 0)
3538 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3539 else
3540 gcc_unreachable ();
/* Output assembler to load the address of OPERANDS[1] into register
   OPERANDS[0].  (The template is "lea %a1,%0": operand 1 is the
   address source, operand 0 the destination register.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword (32 bits).  Template selection is delegated to
   singlemove_string.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
3558 /* Copy OP and change its mode to MODE. */
3559 static rtx
3560 copy_operand (rtx op, enum machine_mode mode)
3562 /* ??? This looks really ugly. There must be a better way
3563 to change a mode on the operand. */
3564 if (GET_MODE (op) != VOIDmode)
3566 if (REG_P (op))
3567 op = gen_rtx_REG (mode, REGNO (op));
3568 else
3570 op = copy_rtx (op);
3571 PUT_MODE (op, mode);
3575 return op;
3578 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3579 static void
3580 emit_movsi (rtx operands[2])
3582 operands[0] = copy_operand (operands[0], SImode);
3583 operands[1] = copy_operand (operands[1], SImode);
3585 emit_insn (gen_movsi (operands[0], operands[1]));
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  The splitting logic lives in
   handle_move_double; here it is parameterized with the
   text-emitting callbacks.  Returns "" because the callbacks
   emit all the assembly themselves.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
/* Emit RTL to perform a doubleword move insn with operands OPERANDS.
   Uses the same splitting logic as output_move_double, but with the
   RTL-emitting callbacks instead of the text-emitting ones.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
3607 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3608 new rtx with the correct mode. */
3610 static rtx
3611 force_mode (enum machine_mode mode, rtx orig)
3613 if (mode == GET_MODE (orig))
3614 return orig;
3616 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3617 abort ();
3619 return gen_rtx_REG (mode, REGNO (orig));
/* Return nonzero if OP is a hard floating-point register.  Only
   meaningful after register allocation has run (reg_renumber is
   non-null); MODE is ignored.  */
static int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   This only does real work during reload (reload_in_progress) and only
   when a scratch register has been provided; the first half rewrites
   pseudo/SUBREG/MEM operands into their reload equivalents, the second
   half handles the three secondary-reload cases for FP registers.  */
int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* During reload, replace an un-allocated pseudo destination by its
     stack slot.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  /* Likewise for the source operand.  */
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Pick up any address replacements reload has recorded for MEM
     operands.  */
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  (NOTE(review): this comment is inherited from the
	 PA port this code was copied from; the condition actually tests
	 whether the whole address is valid.)  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0), 0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								       0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
							     0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
3792 /* Split one or more DImode RTL references into pairs of SImode
3793 references. The RTL can be REG, offsettable MEM, integer constant, or
3794 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
3795 split and "num" is its length. lo_half and hi_half are output arrays
3796 that parallel "operands". */
3798 void
3799 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3801 while (num--)
3803 rtx op = operands[num];
3805 /* simplify_subreg refuses to split volatile memory addresses,
3806 but we still have to handle it. */
3807 if (GET_CODE (op) == MEM)
3809 lo_half[num] = adjust_address (op, SImode, 4);
3810 hi_half[num] = adjust_address (op, SImode, 0);
3812 else
3814 lo_half[num] = simplify_gen_subreg (SImode, op,
3815 GET_MODE (op) == VOIDmode
3816 ? DImode : GET_MODE (op), 4);
3817 hi_half[num] = simplify_gen_subreg (SImode, op,
3818 GET_MODE (op) == VOIDmode
3819 ? DImode : GET_MODE (op), 0);
3824 /* Split X into a base and a constant offset, storing them in *BASE
3825 and *OFFSET respectively. */
3827 static void
3828 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3830 *offset = 0;
3831 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3833 *offset += INTVAL (XEXP (x, 1));
3834 x = XEXP (x, 0);
3836 *base = x;
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */
bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  Each access must be exactly
	 GET_MODE_SIZE bytes above the previous one.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.

   PATTERN is assumed to have already been validated by
   m68k_movem_pattern_p; this function asserts rather than rejects.  */
const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* A movem/fmovem leaves the condition codes in an unusable state.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly automodified) memory address, operand 1
     the register mask.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4022 /* Return a REG that occurs in ADDR with coefficient 1.
4023 ADDR can be effectively incremented by incrementing REG. */
4025 static rtx
4026 find_addr_reg (rtx addr)
4028 while (GET_CODE (addr) == PLUS)
4030 if (GET_CODE (XEXP (addr, 0)) == REG)
4031 addr = XEXP (addr, 0);
4032 else if (GET_CODE (XEXP (addr, 1)) == REG)
4033 addr = XEXP (addr, 1);
4034 else if (CONSTANT_P (XEXP (addr, 0)))
4035 addr = XEXP (addr, 1);
4036 else if (CONSTANT_P (XEXP (addr, 1)))
4037 addr = XEXP (addr, 0);
4038 else
4039 gcc_unreachable ();
4041 gcc_assert (GET_CODE (addr) == REG);
4042 return addr;
/* Output assembler code to perform a 32-bit 3-operand add
   (operands[0] = operands[1] + operands[2]).  When the destination
   does not match operand 1 an LEA (or move/add pair) is used; when it
   does, quick-immediate and tuning-specific forms are preferred.  */
const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* LEA needs an address register as its base; swap the source
	 operands if necessary to get one into operand 1.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq only encode immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      /* 16-bit immediates into an address register: add.w on the '040
	 (where it is fast), LEA elsewhere.  */
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
/* Store in cc_status the expressions that the condition codes will
   describe after execution of an instruction whose pattern is EXP.
   Do not alter them if the instruction would not alter the cc's.  */

/* On the 68000, all the insns to store in an address register fail to
   set the cc's.  However, in some cases these instructions can make it
   possibly invalid to use the saved cc's.  In those cases we clear out
   some or all of the saved cc's so they won't be used.  */

void
notice_update_cc (rtx exp, rtx insn)
{
  if (GET_CODE (exp) == SET)
    {
      if (GET_CODE (SET_SRC (exp)) == CALL)
	CC_STATUS_INIT;
      else if (ADDRESS_REG_P (SET_DEST (exp)))
	{
	  /* Moves into an address register don't touch the cc's, but
	     they may invalidate the values we remembered, if INSN
	     clobbers a register those values mention.  */
	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;
	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && SET_DEST (exp) != cc0_rtx
	       && (FP_REG_P (SET_SRC (exp))
		   || GET_CODE (SET_SRC (exp)) == FIX
		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
	CC_STATUS_INIT;
      /* A pair of move insns doesn't produce a useful overall cc.  */
      else if (!FP_REG_P (SET_DEST (exp))
	       && !FP_REG_P (SET_SRC (exp))
	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
	       && (GET_CODE (SET_SRC (exp)) == REG
		   || GET_CODE (SET_SRC (exp)) == MEM
		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
	CC_STATUS_INIT;
      else if (SET_DEST (exp) != pc_rtx)
	{
	  /* Ordinary case: the insn leaves the cc's describing
	     SET_DEST compared against zero, computed from SET_SRC.  */
	  cc_status.flags = 0;
	  cc_status.value1 = SET_DEST (exp);
	  cc_status.value2 = SET_SRC (exp);
	}
    }
  else if (GET_CODE (exp) == PARALLEL
	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
    {
      /* For a PARALLEL, only the first SET determines the cc's.  */
      rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
      rtx src = SET_SRC (XVECEXP (exp, 0, 0));

      if (ADDRESS_REG_P (dest))
	CC_STATUS_INIT;
      else if (dest != pc_rtx)
	{
	  cc_status.flags = 0;
	  cc_status.value1 = dest;
	  cc_status.value2 = src;
	}
    }
  else
    CC_STATUS_INIT;
  /* A QImode test of an address register only examines the low byte,
     so the remembered value is unusable.  */
  if (cc_status.value2 != 0
      && ADDRESS_REG_P (cc_status.value2)
      && GET_MODE (cc_status.value2) == QImode)
    CC_STATUS_INIT;
  if (cc_status.value2 != 0)
    switch (GET_CODE (cc_status.value2))
      {
      case ASHIFT: case ASHIFTRT: case LSHIFTRT:
      case ROTATE: case ROTATERT:
	/* These instructions always clear the overflow bit, and set
	   the carry to the bit shifted out.  */
	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
	break;

      case PLUS: case MINUS: case MULT:
      case DIV: case UDIV: case MOD: case UMOD: case NEG:
	if (GET_MODE (cc_status.value2) != VOIDmode)
	  cc_status.flags |= CC_NO_OVERFLOW;
	break;
      case ZERO_EXTEND:
	/* (SET r1 (ZERO_EXTEND r2)) on this machine
	   ends with a move insn moving r2 in r2's mode.
	   Thus, the cc's are set for r2.
	   This can set N bit spuriously.  */
	cc_status.flags |= CC_NOT_NEGATIVE;
	/* Intentional fallthrough into default.  */

      default:
	break;
      }
  /* If the destination register also appears in the source expression,
     the remembered source value is stale after the insn; drop it.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
      && cc_status.value2
      && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
    cc_status.value2 = 0;
  /* Check for PRE_DEC in dest modifying a register used in src.  */
  if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
      && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
      && cc_status.value2
      && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
				  cc_status.value2))
    cc_status.value2 = 0;
  if (((cc_status.value1 && FP_REG_P (cc_status.value1))
       || (cc_status.value2 && FP_REG_P (cc_status.value2))))
    cc_status.flags = CC_IN_68881;
  if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
      && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
    {
      cc_status.flags = CC_IN_68881;
      if (!FP_REG_P (XEXP (cc_status.value2, 0))
	  && FP_REG_P (XEXP (cc_status.value2, 1)))
	cc_status.flags |= CC_REVERSED;
    }
}
4232 const char *
4233 output_move_const_double (rtx *operands)
4235 int code = standard_68881_constant_p (operands[1]);
4237 if (code != 0)
4239 static char buf[40];
4241 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4242 return buf;
4244 return "fmove%.d %1,%0";
4247 const char *
4248 output_move_const_single (rtx *operands)
4250 int code = standard_68881_constant_p (operands[1]);
4252 if (code != 0)
4254 static char buf[40];
4256 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4257 return buf;
4259 return "fmove%.s %f1,%0";
4262 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4263 from the "fmovecr" instruction.
4264 The value, anded with 0xff, gives the code to use in fmovecr
4265 to get the desired constant. */
4267 /* This code has been fixed for cross-compilation. */
/* Nonzero once values_68881 has been filled in by init_68881_table.  */
static int inited_68881_table = 0;

/* Decimal spellings of the constants available from the 68881's
   on-chip constant ROM (read with the fmovecr instruction).  */
static const char *const strings_68881[7] = {
  "0.0",
  "1.0",
  "10.0",
  "100.0",
  "10000.0",
  "1e8",
  "1e16"
};

/* fmovecr ROM offsets corresponding to strings_68881, same order.  */
static const int codes_68881[7] = {
  0x0f,
  0x32,
  0x33,
  0x34,
  0x35,
  0x36,
  0x37
};

/* Binary (REAL_VALUE_TYPE) forms of strings_68881, computed lazily
   by init_68881_table.  */
REAL_VALUE_TYPE values_68881[7];
4293 /* Set up values_68881 array by converting the decimal values
4294 strings_68881 to binary. */
4296 void
4297 init_68881_table (void)
4299 int i;
4300 REAL_VALUE_TYPE r;
4301 enum machine_mode mode;
4303 mode = SFmode;
4304 for (i = 0; i < 7; i++)
4306 if (i == 6)
4307 mode = DFmode;
4308 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4309 values_68881[i] = r;
4311 inited_68881_table = 1;
/* Return nonzero if X, a CONST_DOUBLE, has a value obtainable from the
   68881 "fmovecr" constant ROM; the return value anded with 0xff is
   the fmovecr offset.  Return 0 if X is unavailable there or fmovecr
   should not be used at all.  */
int
standard_68881_constant_p (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
     used at all on those chips.  */
  if (TUNE_68040_60)
    return 0;

  /* Lazily build the binary constant table.  */
  if (! inited_68881_table)
    init_68881_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
     is rejected.  */
  for (i = 0; i < 6; i++)
    {
      if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
	return (codes_68881[i]);
    }

  /* The last table entry (1e16) is only exact in double precision.  */
  if (GET_MODE (x) == SFmode)
    return 0;

  if (REAL_VALUES_EQUAL (r, values_68881[6]))
    return (codes_68881[6]);

  /* larger powers of ten in the constants ram are not used
     because they are not equal to a `double' C constant.  */
  return 0;
}
4349 /* If X is a floating-point constant, return the logarithm of X base 2,
4350 or 0 if X is not a power of 2. */
4353 floating_exact_log2 (rtx x)
4355 REAL_VALUE_TYPE r, r1;
4356 int exp;
4358 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4360 if (REAL_VALUES_LESS (r, dconst1))
4361 return 0;
4363 exp = real_exponent (&r);
4364 real_2expN (&r1, exp, DFmode);
4365 if (REAL_VALUES_EQUAL (r1, r))
4366 return exp;
4368 return 0;
4371 /* A C compound statement to output to stdio stream STREAM the
4372 assembler syntax for an instruction operand X. X is an RTL
4373 expression.
4375 CODE is a value that can be used to specify one of several ways
4376 of printing the operand. It is used when identical operands
4377 must be printed differently depending on the context. CODE
4378 comes from the `%' specification that was used to request
4379 printing of the operand. If the specification was just `%DIGIT'
4380 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4381 is the ASCII code for LTR.
4383 If X is a register, this macro should print the register's name.
4384 The names can be found in an array `reg_names' whose type is
4385 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4387 When the machine description has a specification `%PUNCT' (a `%'
4388 followed by a punctuation character), this macro is called with
4389 a null pointer for X and the punctuation character for CODE.
4391 The m68k specific codes are:
4393 '.' for dot needed in Motorola-style opcode names.
4394 '-' for an operand pushing on the stack:
4395 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4396 '+' for an operand pushing on the stack:
4397 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4398 '@' for a reference to the top word on the stack:
4399 sp@, (sp) or (%sp) depending on the style of syntax.
4400 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4401 but & in SGS syntax).
4402 '!' for the cc register (used in an `and to cc' insn).
4403 '$' for the letter `s' in an op code, but only on the 68040.
4404 '&' for the letter `d' in an op code, but only on the 68040.
4405 '/' for register prefix needed by longlong.h.
4406 '?' for m68k_library_id_string
4408 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4409 'd' to force memory addressing to be absolute, not relative.
4410 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4411 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4412 or print pair of registers as rx:ry.
4413 'p' print an address with @PLTPC attached, but only if the operand
4414 is not locally-bound. */
4416 void
4417 print_operand (FILE *file, rtx op, int letter)
4419 if (letter == '.')
4421 if (MOTOROLA)
4422 fprintf (file, ".");
4424 else if (letter == '#')
4425 asm_fprintf (file, "%I");
4426 else if (letter == '-')
4427 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4428 else if (letter == '+')
4429 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4430 else if (letter == '@')
4431 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4432 else if (letter == '!')
4433 asm_fprintf (file, "%Rfpcr");
4434 else if (letter == '$')
4436 if (TARGET_68040)
4437 fprintf (file, "s");
4439 else if (letter == '&')
4441 if (TARGET_68040)
4442 fprintf (file, "d");
4444 else if (letter == '/')
4445 asm_fprintf (file, "%R");
4446 else if (letter == '?')
4447 asm_fprintf (file, m68k_library_id_string);
4448 else if (letter == 'p')
4450 output_addr_const (file, op);
4451 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4452 fprintf (file, "@PLTPC");
4454 else if (GET_CODE (op) == REG)
4456 if (letter == 'R')
4457 /* Print out the second register name of a register pair.
4458 I.e., R (6) => 7. */
4459 fputs (M68K_REGNAME(REGNO (op) + 1), file);
4460 else
4461 fputs (M68K_REGNAME(REGNO (op)), file);
4463 else if (GET_CODE (op) == MEM)
4465 output_address (XEXP (op, 0));
4466 if (letter == 'd' && ! TARGET_68020
4467 && CONSTANT_ADDRESS_P (XEXP (op, 0))
4468 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4469 && INTVAL (XEXP (op, 0)) < 0x8000
4470 && INTVAL (XEXP (op, 0)) >= -0x8000))
4471 fprintf (file, MOTOROLA ? ".l" : ":l");
4473 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4475 REAL_VALUE_TYPE r;
4476 long l;
4477 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4478 REAL_VALUE_TO_TARGET_SINGLE (r, l);
4479 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4481 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4483 REAL_VALUE_TYPE r;
4484 long l[3];
4485 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4486 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4487 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4488 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4490 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4492 REAL_VALUE_TYPE r;
4493 long l[2];
4494 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4495 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4496 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4498 else
4500 /* Use `print_operand_address' instead of `output_addr_const'
4501 to ensure that we print relevant PIC stuff. */
4502 asm_fprintf (file, "%I");
4503 if (TARGET_PCREL
4504 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4505 print_operand_address (file, op);
4506 else
4507 output_addr_const (file, op);
4511 /* Return string for TLS relocation RELOC. */
4513 static const char *
4514 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4516 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
4517 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4519 switch (reloc)
4521 case RELOC_GOT:
4522 if (MOTOROLA)
4524 if (flag_pic == 1 && TARGET_68020)
4525 return "@GOT.w";
4526 else
4527 return "@GOT";
4529 else
4531 if (TARGET_68020)
4533 switch (flag_pic)
4535 case 1:
4536 return ":w";
4537 case 2:
4538 return ":l";
4539 default:
4540 return "";
4545 case RELOC_TLSGD:
4546 return "@TLSGD";
4548 case RELOC_TLSLDM:
4549 return "@TLSLDM";
4551 case RELOC_TLSLDO:
4552 return "@TLSLDO";
4554 case RELOC_TLSIE:
4555 return "@TLSIE";
4557 case RELOC_TLSLE:
4558 return "@TLSLE";
4560 default:
4561 gcc_unreachable ();
4565 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
4567 static bool
4568 m68k_output_addr_const_extra (FILE *file, rtx x)
4570 if (GET_CODE (x) == UNSPEC)
4572 switch (XINT (x, 1))
4574 case UNSPEC_RELOC16:
4575 case UNSPEC_RELOC32:
4576 output_addr_const (file, XVECEXP (x, 0, 0));
4577 fputs (m68k_get_reloc_decoration
4578 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4579 return true;
4581 default:
4582 break;
4586 return false;
4589 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
4591 static void
4592 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4594 gcc_assert (size == 4);
4595 fputs ("\t.long\t", file);
4596 output_addr_const (file, x);
4597 fputs ("@TLSLDO+0x8000", file);
4600 /* In the name of slightly smaller debug output, and to cater to
4601 general assembler lossage, recognize various UNSPEC sequences
4602 and turn them back into a direct symbol reference. */
4604 static rtx
4605 m68k_delegitimize_address (rtx orig_x)
4607 rtx x;
4608 struct m68k_address addr;
4609 rtx unspec;
4611 orig_x = delegitimize_mem_from_attrs (orig_x);
4612 x = orig_x;
4613 if (MEM_P (x))
4614 x = XEXP (x, 0);
4616 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4617 return orig_x;
4619 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4620 || addr.offset == NULL_RTX
4621 || GET_CODE (addr.offset) != CONST)
4622 return orig_x;
4624 unspec = XEXP (addr.offset, 0);
4625 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4626 unspec = XEXP (unspec, 0);
4627 if (GET_CODE (unspec) != UNSPEC
4628 || (XINT (unspec, 1) != UNSPEC_RELOC16
4629 && XINT (unspec, 1) != UNSPEC_RELOC32))
4630 return orig_x;
4631 x = XVECEXP (unspec, 0, 0);
4632 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4633 if (unspec != XEXP (addr.offset, 0))
4634 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4635 if (addr.index)
4637 rtx idx = addr.index;
4638 if (addr.scale != 1)
4639 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4640 x = gen_rtx_PLUS (Pmode, idx, x);
4642 if (addr.base)
4643 x = gen_rtx_PLUS (Pmode, addr.base, x);
4644 if (MEM_P (orig_x))
4645 x = replace_equiv_address_nv (orig_x, x);
4646 return x;
4650 /* A C compound statement to output to stdio stream STREAM the
4651 assembler syntax for an instruction operand that is a memory
4652 reference whose address is ADDR. ADDR is an RTL expression.
4654 Note that this contains a kludge that knows that the only reason
4655 we have an address (plus (label_ref...) (reg...)) when not generating
4656 PIC code is in the insn before a tablejump, and we know that m68k.md
4657 generates a label LInnn: on such an insn.
4659 It is possible for PIC to generate a (plus (label_ref...) (reg...))
4660 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4662 This routine is responsible for distinguishing between -fpic and -fPIC
4663 style relocations in an address. When generating -fpic code the
4664 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
4665 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
4667 void
4668 print_operand_address (FILE *file, rtx addr)
4670 struct m68k_address address;
4672 if (!m68k_decompose_address (QImode, addr, true, &address))
4673 gcc_unreachable ();
4675 if (address.code == PRE_DEC)
4676 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4677 M68K_REGNAME (REGNO (address.base)));
4678 else if (address.code == POST_INC)
4679 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4680 M68K_REGNAME (REGNO (address.base)));
4681 else if (!address.base && !address.index)
4683 /* A constant address. */
4684 gcc_assert (address.offset == addr);
4685 if (GET_CODE (addr) == CONST_INT)
4687 /* (xxx).w or (xxx).l. */
4688 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4689 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4690 else
4691 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4693 else if (TARGET_PCREL)
4695 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
4696 fputc ('(', file);
4697 output_addr_const (file, addr);
4698 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4700 else
4702 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
4703 name ends in `.<letter>', as the last 2 characters can be
4704 mistaken as a size suffix. Put the name in parentheses. */
4705 if (GET_CODE (addr) == SYMBOL_REF
4706 && strlen (XSTR (addr, 0)) > 2
4707 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4709 putc ('(', file);
4710 output_addr_const (file, addr);
4711 putc (')', file);
4713 else
4714 output_addr_const (file, addr);
4717 else
4719 int labelno;
4721 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4722 label being accessed, otherwise it is -1. */
4723 labelno = (address.offset
4724 && !address.base
4725 && GET_CODE (address.offset) == LABEL_REF
4726 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4727 : -1);
4728 if (MOTOROLA)
4730 /* Print the "offset(base" component. */
4731 if (labelno >= 0)
4732 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4733 else
4735 if (address.offset)
4736 output_addr_const (file, address.offset);
4738 putc ('(', file);
4739 if (address.base)
4740 fputs (M68K_REGNAME (REGNO (address.base)), file);
4742 /* Print the ",index" component, if any. */
4743 if (address.index)
4745 if (address.base)
4746 putc (',', file);
4747 fprintf (file, "%s.%c",
4748 M68K_REGNAME (REGNO (address.index)),
4749 GET_MODE (address.index) == HImode ? 'w' : 'l');
4750 if (address.scale != 1)
4751 fprintf (file, "*%d", address.scale);
4753 putc (')', file);
4755 else /* !MOTOROLA */
4757 if (!address.offset && !address.index)
4758 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4759 else
4761 /* Print the "base@(offset" component. */
4762 if (labelno >= 0)
4763 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4764 else
4766 if (address.base)
4767 fputs (M68K_REGNAME (REGNO (address.base)), file);
4768 fprintf (file, "@(");
4769 if (address.offset)
4770 output_addr_const (file, address.offset);
4772 /* Print the ",index" component, if any. */
4773 if (address.index)
4775 fprintf (file, ",%s:%c",
4776 M68K_REGNAME (REGNO (address.index)),
4777 GET_MODE (address.index) == HImode ? 'w' : 'l');
4778 if (address.scale != 1)
4779 fprintf (file, ":%d", address.scale);
4781 putc (')', file);
4787 /* Check for cases where a clr insns can be omitted from code using
4788 strict_low_part sets. For example, the second clrl here is not needed:
4789 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
4791 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
4792 insn we are checking for redundancy. TARGET is the register set by the
4793 clear insn. */
4795 bool
4796 strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4797 rtx target)
4799 rtx p = first_insn;
4801 while ((p = PREV_INSN (p)))
4803 if (NOTE_INSN_BASIC_BLOCK_P (p))
4804 return false;
4806 if (NOTE_P (p))
4807 continue;
4809 /* If it isn't an insn, then give up. */
4810 if (!INSN_P (p))
4811 return false;
4813 if (reg_set_p (target, p))
4815 rtx set = single_set (p);
4816 rtx dest;
4818 /* If it isn't an easy to recognize insn, then give up. */
4819 if (! set)
4820 return false;
4822 dest = SET_DEST (set);
4824 /* If this sets the entire target register to zero, then our
4825 first_insn is redundant. */
4826 if (rtx_equal_p (dest, target)
4827 && SET_SRC (set) == const0_rtx)
4828 return true;
4829 else if (GET_CODE (dest) == STRICT_LOW_PART
4830 && GET_CODE (XEXP (dest, 0)) == REG
4831 && REGNO (XEXP (dest, 0)) == REGNO (target)
4832 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4833 <= GET_MODE_SIZE (mode)))
4834 /* This is a strict low part set which modifies less than
4835 we are using, so it is safe. */
4837 else
4838 return false;
4842 return false;
4845 /* Operand predicates for implementing asymmetric pc-relative addressing
4846 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
4847 when used as a source operand, but not as a destination operand.
4849 We model this by restricting the meaning of the basic predicates
4850 (general_operand, memory_operand, etc) to forbid the use of this
4851 addressing mode, and then define the following predicates that permit
4852 this addressing mode. These predicates can then be used for the
4853 source operands of the appropriate instructions.
4855    n.b.  While it is theoretically possible to change all machine patterns
4856    to use this addressing mode where permitted by the architecture,
4857    it has only been implemented for "common" cases: SImode, HImode, and
4858    QImode operands, and only for the principal operations that would
4859 require this addressing mode: data movement and simple integer operations.
4861 In parallel with these new predicates, two new constraint letters
4862 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
4863 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
4864 In the pcrel case 's' is only valid in combination with 'a' registers.
4865 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4866 of how these constraints are used.
4868 The use of these predicates is strictly optional, though patterns that
4869 don't will cause an extra reload register to be allocated where one
4870 was not necessary:
4872 lea (abc:w,%pc),%a0 ; need to reload address
4873 moveq &1,%d1 ; since write to pc-relative space
4874 movel %d1,%a0@ ; is not allowed
4876 lea (abc:w,%pc),%a1 ; no need to reload address here
4877 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
4879 For more info, consult tiemann@cygnus.com.
4882 All of the ugliness with predicates and constraints is due to the
4883 simple fact that the m68k does not allow a pc-relative addressing
4884 mode as a destination. gcc does not distinguish between source and
4885 destination addresses. Hence, if we claim that pc-relative address
4886 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4887 end up with invalid code. To get around this problem, we left
4888 pc-relative modes as invalid addresses, and then added special
4889 predicates and constraints to accept them.
4891 A cleaner way to handle this is to modify gcc to distinguish
4892 between source and destination addresses. We can then say that
4893 pc-relative is a valid source address but not a valid destination
4894 address, and hopefully avoid a lot of the predicate and constraint
4895 hackery. Unfortunately, this would be a pretty big change. It would
4896 be a useful change for a number of ports, but there aren't any current
4897 plans to undertake this.
4899 ***************************************************************************/
4902 const char *
4903 output_andsi3 (rtx *operands)
4905 int logval;
4906 if (GET_CODE (operands[2]) == CONST_INT
4907 && (INTVAL (operands[2]) | 0xffff) == -1
4908 && (DATA_REG_P (operands[0])
4909 || offsettable_memref_p (operands[0]))
4910 && !TARGET_COLDFIRE)
4912 if (GET_CODE (operands[0]) != REG)
4913 operands[0] = adjust_address (operands[0], HImode, 2);
4914 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4915 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4916 CC_STATUS_INIT;
4917 if (operands[2] == const0_rtx)
4918 return "clr%.w %0";
4919 return "and%.w %2,%0";
4921 if (GET_CODE (operands[2]) == CONST_INT
4922 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4923 && (DATA_REG_P (operands[0])
4924 || offsettable_memref_p (operands[0])))
4926 if (DATA_REG_P (operands[0]))
4927 operands[1] = GEN_INT (logval);
4928 else
4930 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4931 operands[1] = GEN_INT (logval % 8);
4933 /* This does not set condition codes in a standard way. */
4934 CC_STATUS_INIT;
4935 return "bclr %1,%0";
4937 return "and%.l %2,%0";
4940 const char *
4941 output_iorsi3 (rtx *operands)
4943 register int logval;
4944 if (GET_CODE (operands[2]) == CONST_INT
4945 && INTVAL (operands[2]) >> 16 == 0
4946 && (DATA_REG_P (operands[0])
4947 || offsettable_memref_p (operands[0]))
4948 && !TARGET_COLDFIRE)
4950 if (GET_CODE (operands[0]) != REG)
4951 operands[0] = adjust_address (operands[0], HImode, 2);
4952 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4953 CC_STATUS_INIT;
4954 if (INTVAL (operands[2]) == 0xffff)
4955 return "mov%.w %2,%0";
4956 return "or%.w %2,%0";
4958 if (GET_CODE (operands[2]) == CONST_INT
4959 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4960 && (DATA_REG_P (operands[0])
4961 || offsettable_memref_p (operands[0])))
4963 if (DATA_REG_P (operands[0]))
4964 operands[1] = GEN_INT (logval);
4965 else
4967 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4968 operands[1] = GEN_INT (logval % 8);
4970 CC_STATUS_INIT;
4971 return "bset %1,%0";
4973 return "or%.l %2,%0";
4976 const char *
4977 output_xorsi3 (rtx *operands)
4979 register int logval;
4980 if (GET_CODE (operands[2]) == CONST_INT
4981 && INTVAL (operands[2]) >> 16 == 0
4982 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
4983 && !TARGET_COLDFIRE)
4985 if (! DATA_REG_P (operands[0]))
4986 operands[0] = adjust_address (operands[0], HImode, 2);
4987 /* Do not delete a following tstl %0 insn; that would be incorrect. */
4988 CC_STATUS_INIT;
4989 if (INTVAL (operands[2]) == 0xffff)
4990 return "not%.w %0";
4991 return "eor%.w %2,%0";
4993 if (GET_CODE (operands[2]) == CONST_INT
4994 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
4995 && (DATA_REG_P (operands[0])
4996 || offsettable_memref_p (operands[0])))
4998 if (DATA_REG_P (operands[0]))
4999 operands[1] = GEN_INT (logval);
5000 else
5002 operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
5003 operands[1] = GEN_INT (logval % 8);
5005 CC_STATUS_INIT;
5006 return "bchg %1,%0";
5008 return "eor%.l %2,%0";
5011 /* Return the instruction that should be used for a call to address X,
5012 which is known to be in operand 0. */
5014 const char *
5015 output_call (rtx x)
5017 if (symbolic_operand (x, VOIDmode))
5018 return m68k_symbolic_call;
5019 else
5020 return "jsr %a0";
5023 /* Likewise sibling calls. */
5025 const char *
5026 output_sibcall (rtx x)
5028 if (symbolic_operand (x, VOIDmode))
5029 return m68k_symbolic_jump;
5030 else
5031 return "jmp %a0";
5034 static void
5035 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5036 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5037 tree function)
5039 rtx this_slot, offset, addr, mem, insn, tmp;
5041 /* Avoid clobbering the struct value reg by using the
5042 static chain reg as a temporary. */
5043 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5045 /* Pretend to be a post-reload pass while generating rtl. */
5046 reload_completed = 1;
5048 /* The "this" pointer is stored at 4(%sp). */
5049 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5050 stack_pointer_rtx, 4));
5052 /* Add DELTA to THIS. */
5053 if (delta != 0)
5055 /* Make the offset a legitimate operand for memory addition. */
5056 offset = GEN_INT (delta);
5057 if ((delta < -8 || delta > 8)
5058 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5060 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5061 offset = gen_rtx_REG (Pmode, D0_REG);
5063 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5064 copy_rtx (this_slot), offset));
5067 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5068 if (vcall_offset != 0)
5070 /* Set the static chain register to *THIS. */
5071 emit_move_insn (tmp, this_slot);
5072 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5074 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5075 addr = plus_constant (Pmode, tmp, vcall_offset);
5076 if (!m68k_legitimate_address_p (Pmode, addr, true))
5078 emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
5079 addr = tmp;
5082 /* Load the offset into %d0 and add it to THIS. */
5083 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5084 gen_rtx_MEM (Pmode, addr));
5085 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5086 copy_rtx (this_slot),
5087 gen_rtx_REG (Pmode, D0_REG)));
5090 /* Jump to the target function. Use a sibcall if direct jumps are
5091 allowed, otherwise load the address into a register first. */
5092 mem = DECL_RTL (function);
5093 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5095 gcc_assert (flag_pic);
5097 if (!TARGET_SEP_DATA)
5099 /* Use the static chain register as a temporary (call-clobbered)
5100 GOT pointer for this function. We can use the static chain
5101 register because it isn't live on entry to the thunk. */
5102 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5103 emit_insn (gen_load_got (pic_offset_table_rtx));
5105 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5106 mem = replace_equiv_address (mem, tmp);
5108 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5109 SIBLING_CALL_P (insn) = 1;
5111 /* Run just enough of rest_of_compilation. */
5112 insn = get_insns ();
5113 split_all_insns_noflow ();
5114 final_start_function (insn, file, 1);
5115 final (insn, file, 1);
5116 final_end_function ();
5118 /* Clean up the vars set above. */
5119 reload_completed = 0;
5121 /* Restore the original PIC register. */
5122 if (flag_pic)
5123 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5126 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5128 static rtx
5129 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5130 int incoming ATTRIBUTE_UNUSED)
5132 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5135 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5137 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5138 unsigned int new_reg)
5141 /* Interrupt functions can only use registers that have already been
5142 saved by the prologue, even if they would normally be
5143 call-clobbered. */
5145 if ((m68k_get_function_kind (current_function_decl)
5146 == m68k_fk_interrupt_handler)
5147 && !df_regs_ever_live_p (new_reg))
5148 return 0;
5150 return 1;
5153 /* Value is true if hard register REGNO can hold a value of machine-mode
5154 MODE. On the 68000, we let the cpu registers can hold any mode, but
5155 restrict the 68881 registers to floating-point modes. */
5157 bool
5158 m68k_regno_mode_ok (int regno, enum machine_mode mode)
5160 if (DATA_REGNO_P (regno))
5162 /* Data Registers, can hold aggregate if fits in. */
5163 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5164 return true;
5166 else if (ADDRESS_REGNO_P (regno))
5168 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5169 return true;
5171 else if (FP_REGNO_P (regno))
5173 /* FPU registers, hold float or complex float of long double or
5174 smaller. */
5175 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5176 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5177 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5178 return true;
5180 return false;
5183 /* Implement SECONDARY_RELOAD_CLASS. */
5185 enum reg_class
5186 m68k_secondary_reload_class (enum reg_class rclass,
5187 enum machine_mode mode, rtx x)
5189 int regno;
5191 regno = true_regnum (x);
5193 /* If one operand of a movqi is an address register, the other
5194 operand must be a general register or constant. Other types
5195 of operand must be reloaded through a data register. */
5196 if (GET_MODE_SIZE (mode) == 1
5197 && reg_classes_intersect_p (rclass, ADDR_REGS)
5198 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5199 return DATA_REGS;
5201 /* PC-relative addresses must be loaded into an address register first. */
5202 if (TARGET_PCREL
5203 && !reg_class_subset_p (rclass, ADDR_REGS)
5204 && symbolic_operand (x, VOIDmode))
5205 return ADDR_REGS;
5207 return NO_REGS;
5210 /* Implement PREFERRED_RELOAD_CLASS. */
5212 enum reg_class
5213 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5215 enum reg_class secondary_class;
5217 /* If RCLASS might need a secondary reload, try restricting it to
5218 a class that doesn't. */
5219 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5220 if (secondary_class != NO_REGS
5221 && reg_class_subset_p (secondary_class, rclass))
5222 return secondary_class;
5224 /* Prefer to use moveq for in-range constants. */
5225 if (GET_CODE (x) == CONST_INT
5226 && reg_class_subset_p (DATA_REGS, rclass)
5227 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5228 return DATA_REGS;
5230 /* ??? Do we really need this now? */
5231 if (GET_CODE (x) == CONST_DOUBLE
5232 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5234 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5235 return FP_REGS;
5237 return NO_REGS;
5240 return rclass;
5243 /* Return floating point values in a 68881 register. This makes 68881 code
5244 a little bit faster. It also makes -msoft-float code incompatible with
5245 hard-float code, so people have to be careful not to mix the two.
5246 For ColdFire it was decided the ABI incompatibility is undesirable.
5247 If there is need for a hard-float ABI it is probably worth doing it
5248 properly and also passing function arguments in FP registers. */
5250 m68k_libcall_value (enum machine_mode mode)
5252 switch (mode) {
5253 case SFmode:
5254 case DFmode:
5255 case XFmode:
5256 if (TARGET_68881)
5257 return gen_rtx_REG (mode, FP0_REG);
5258 break;
5259 default:
5260 break;
5263 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5266 /* Location in which function value is returned.
5267 NOTE: Due to differences in ABIs, don't call this function directly,
5268 use FUNCTION_VALUE instead. */
5270 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5272 enum machine_mode mode;
5274 mode = TYPE_MODE (valtype);
5275 switch (mode) {
5276 case SFmode:
5277 case DFmode:
5278 case XFmode:
5279 if (TARGET_68881)
5280 return gen_rtx_REG (mode, FP0_REG);
5281 break;
5282 default:
5283 break;
5286 /* If the function returns a pointer, push that into %a0. */
5287 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5288 /* For compatibility with the large body of existing code which
5289 does not always properly declare external functions returning
5290 pointer types, the m68k/SVR4 convention is to copy the value
5291 returned for pointer functions from a0 to d0 in the function
5292 epilogue, so that callers that have neglected to properly
5293 declare the callee can still find the correct return value in
5294 d0. */
5295 return gen_rtx_PARALLEL
5296 (mode,
5297 gen_rtvec (2,
5298 gen_rtx_EXPR_LIST (VOIDmode,
5299 gen_rtx_REG (mode, A0_REG),
5300 const0_rtx),
5301 gen_rtx_EXPR_LIST (VOIDmode,
5302 gen_rtx_REG (mode, D0_REG),
5303 const0_rtx)));
5304 else if (POINTER_TYPE_P (valtype))
5305 return gen_rtx_REG (mode, A0_REG);
5306 else
5307 return gen_rtx_REG (mode, D0_REG);
5310 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5311 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5312 static bool
5313 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5315 enum machine_mode mode = TYPE_MODE (type);
5317 if (mode == BLKmode)
5318 return true;
5320 /* If TYPE's known alignment is less than the alignment of MODE that
5321 would contain the structure, then return in memory. We need to
5322 do so to maintain the compatibility between code compiled with
5323 -mstrict-align and that compiled with -mno-strict-align. */
5324 if (AGGREGATE_TYPE_P (type)
5325 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5326 return true;
5328 return false;
5330 #endif
5332 /* CPU to schedule the program for. */
5333 enum attr_cpu m68k_sched_cpu;
5335 /* MAC to schedule the program for. */
5336 enum attr_mac m68k_sched_mac;
5338 /* Operand type. */
5339 enum attr_op_type
5341 /* No operand. */
5342 OP_TYPE_NONE,
5344 /* Integer register. */
5345 OP_TYPE_RN,
5347 /* FP register. */
5348 OP_TYPE_FPN,
5350 /* Implicit mem reference (e.g. stack). */
5351 OP_TYPE_MEM1,
5353 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5354 OP_TYPE_MEM234,
5356 /* Memory with offset but without indexing. EA mode 5. */
5357 OP_TYPE_MEM5,
5359 /* Memory with indexing. EA mode 6. */
5360 OP_TYPE_MEM6,
5362 /* Memory referenced by absolute address. EA mode 7. */
5363 OP_TYPE_MEM7,
5365 /* Immediate operand that doesn't require extension word. */
5366 OP_TYPE_IMM_Q,
5368 /* Immediate 16 bit operand. */
5369 OP_TYPE_IMM_W,
5371 /* Immediate 32 bit operand. */
5372 OP_TYPE_IMM_L
5375 /* Return type of memory ADDR_RTX refers to. */
5376 static enum attr_op_type
5377 sched_address_type (enum machine_mode mode, rtx addr_rtx)
5379 struct m68k_address address;
5381 if (symbolic_operand (addr_rtx, VOIDmode))
5382 return OP_TYPE_MEM7;
5384 if (!m68k_decompose_address (mode, addr_rtx,
5385 reload_completed, &address))
5387 gcc_assert (!reload_completed);
5388 /* Reload will likely fix the address to be in the register. */
5389 return OP_TYPE_MEM234;
5392 if (address.scale != 0)
5393 return OP_TYPE_MEM6;
5395 if (address.base != NULL_RTX)
5397 if (address.offset == NULL_RTX)
5398 return OP_TYPE_MEM234;
5400 return OP_TYPE_MEM5;
5403 gcc_assert (address.offset != NULL_RTX);
5405 return OP_TYPE_MEM7;
5408 /* Return X or Y (depending on OPX_P) operand of INSN. */
5409 static rtx
5410 sched_get_operand (rtx insn, bool opx_p)
5412 int i;
5414 if (recog_memoized (insn) < 0)
5415 gcc_unreachable ();
5417 extract_constrain_insn_cached (insn);
5419 if (opx_p)
5420 i = get_attr_opx (insn);
5421 else
5422 i = get_attr_opy (insn);
5424 if (i >= recog_data.n_operands)
5425 return NULL;
5427 return recog_data.operand[i];
5430 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
5431 If ADDRESS_P is true, return type of memory location operand refers to. */
5432 static enum attr_op_type
5433 sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
5435 rtx op;
5437 op = sched_get_operand (insn, opx_p);
5439 if (op == NULL)
5441 gcc_assert (!reload_completed);
5442 return OP_TYPE_RN;
5445 if (address_p)
5446 return sched_address_type (QImode, op);
5448 if (memory_operand (op, VOIDmode))
5449 return sched_address_type (GET_MODE (op), XEXP (op, 0));
5451 if (register_operand (op, VOIDmode))
5453 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
5454 || (reload_completed && FP_REG_P (op)))
5455 return OP_TYPE_FPN;
5457 return OP_TYPE_RN;
5460 if (GET_CODE (op) == CONST_INT)
5462 int ival;
5464 ival = INTVAL (op);
5466 /* Check for quick constants. */
5467 switch (get_attr_type (insn))
5469 case TYPE_ALUQ_L:
5470 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
5471 return OP_TYPE_IMM_Q;
5473 gcc_assert (!reload_completed);
5474 break;
5476 case TYPE_MOVEQ_L:
5477 if (USE_MOVQ (ival))
5478 return OP_TYPE_IMM_Q;
5480 gcc_assert (!reload_completed);
5481 break;
5483 case TYPE_MOV3Q_L:
5484 if (valid_mov3q_const (ival))
5485 return OP_TYPE_IMM_Q;
5487 gcc_assert (!reload_completed);
5488 break;
5490 default:
5491 break;
5494 if (IN_RANGE (ival, -0x8000, 0x7fff))
5495 return OP_TYPE_IMM_W;
5497 return OP_TYPE_IMM_L;
5500 if (GET_CODE (op) == CONST_DOUBLE)
5502 switch (GET_MODE (op))
5504 case SFmode:
5505 return OP_TYPE_IMM_W;
5507 case VOIDmode:
5508 case DFmode:
5509 return OP_TYPE_IMM_L;
5511 default:
5512 gcc_unreachable ();
5516 if (GET_CODE (op) == CONST
5517 || symbolic_operand (op, VOIDmode)
5518 || LABEL_P (op))
5520 switch (GET_MODE (op))
5522 case QImode:
5523 return OP_TYPE_IMM_Q;
5525 case HImode:
5526 return OP_TYPE_IMM_W;
5528 case SImode:
5529 return OP_TYPE_IMM_L;
5531 default:
5532 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
5533 /* Just a guess. */
5534 return OP_TYPE_IMM_W;
5536 return OP_TYPE_IMM_L;
5540 gcc_assert (!reload_completed);
5542 if (FLOAT_MODE_P (GET_MODE (op)))
5543 return OP_TYPE_FPN;
5545 return OP_TYPE_RN;
/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.

   This is a 1:1 translation of the generic OP_TYPE_* classification
   computed by sched_attr_op_type into the OPX_TYPE_* namespace of the
   generated opx_type insn attribute.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      /* sched_attr_op_type never returns OP_TYPE_NONE for an existing
	 operand, so every valid value is covered above.  */
      gcc_unreachable ();
    }
}
/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.

   Mirror of m68k_sched_attr_opx_type: translates the generic OP_TYPE_*
   classification into the OPY_TYPE_* namespace of the generated
   opy_type insn attribute.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}
5634 /* Return size of INSN as int. */
5635 static int
5636 sched_get_attr_size_int (rtx insn)
5638 int size;
5640 switch (get_attr_type (insn))
5642 case TYPE_IGNORE:
5643 /* There should be no references to m68k_sched_attr_size for 'ignore'
5644 instructions. */
5645 gcc_unreachable ();
5646 return 0;
5648 case TYPE_MUL_L:
5649 size = 2;
5650 break;
5652 default:
5653 size = 1;
5654 break;
5657 switch (get_attr_opx_type (insn))
5659 case OPX_TYPE_NONE:
5660 case OPX_TYPE_RN:
5661 case OPX_TYPE_FPN:
5662 case OPX_TYPE_MEM1:
5663 case OPX_TYPE_MEM234:
5664 case OPY_TYPE_IMM_Q:
5665 break;
5667 case OPX_TYPE_MEM5:
5668 case OPX_TYPE_MEM6:
5669 /* Here we assume that most absolute references are short. */
5670 case OPX_TYPE_MEM7:
5671 case OPY_TYPE_IMM_W:
5672 ++size;
5673 break;
5675 case OPY_TYPE_IMM_L:
5676 size += 2;
5677 break;
5679 default:
5680 gcc_unreachable ();
5683 switch (get_attr_opy_type (insn))
5685 case OPY_TYPE_NONE:
5686 case OPY_TYPE_RN:
5687 case OPY_TYPE_FPN:
5688 case OPY_TYPE_MEM1:
5689 case OPY_TYPE_MEM234:
5690 case OPY_TYPE_IMM_Q:
5691 break;
5693 case OPY_TYPE_MEM5:
5694 case OPY_TYPE_MEM6:
5695 /* Here we assume that most absolute references are short. */
5696 case OPY_TYPE_MEM7:
5697 case OPY_TYPE_IMM_W:
5698 ++size;
5699 break;
5701 case OPY_TYPE_IMM_L:
5702 size += 2;
5703 break;
5705 default:
5706 gcc_unreachable ();
5709 if (size > 3)
5711 gcc_assert (!reload_completed);
5713 size = 3;
5716 return size;
5719 /* Return size of INSN as attribute enum value. */
5720 enum attr_size
5721 m68k_sched_attr_size (rtx insn)
5723 switch (sched_get_attr_size_int (insn))
5725 case 1:
5726 return SIZE_1;
5728 case 2:
5729 return SIZE_2;
5731 case 3:
5732 return SIZE_3;
5734 default:
5735 gcc_unreachable ();
/* Classify operand X or Y (depending on OPX_P) of INSN for the purposes
   of the op_mem attribute computation.  Collapse the detailed OPX/OPY
   operand types into one of three coarse classes: OP_TYPE_RN for
   register, FP-register and immediate operands, OP_TYPE_MEM6 for
   mem6-type memory operands, and OP_TYPE_MEM1 for every other kind of
   memory operand.  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}
/* Implement op_mem attribute.

   Combine the coarse memory classification of both operands
   (from sched_get_opxy_mem_type) with the read/write direction of
   operand X (from the opx_access attribute) into a single OP_MEM_*
   value.  The two digits name the Y and X memory activity in order;
   'I' marks a mem6-type access.  Some mixed combinations can only be
   produced before reload completes, and are conservatively mapped to
   the read-modify-write values.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  /* No memory operands at all.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  /* Memory only in operand X; direction comes from opx_access.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  /* Mem6-type memory only in operand X.  */
  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  /* Memory read in operand Y only.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  /* Memory in both operands; X must be a write after reload.  */
  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  /* Both operands mem6-type: only possible before reload.  */
  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
/* Data for ColdFire V4 index bypass.
   Producer modifies register that is used as index in consumer with
   specified scale.

   Filled in by m68k_sched_indexed_address_bypass_p when it sees a
   1x-scale index dependency, and consumed (and cleared) by
   m68k_sched_adjust_cost on the very next call.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of indexed memory access within consumer.
     Or zero if bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty DFA state, used by m68k_sched_adjust_cost to query
   min_insn_conflict_delay from a clean starting point.  Allocated in
   m68k_sched_md_init_global, freed in m68k_sched_md_finish_global.  */
static state_t sched_adjust_cost_state;
/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.

   Two adjustments are made: the ColdFire V4 1x-scale index bypass
   (recorded by m68k_sched_indexed_address_bypass_p) raises the cost to
   at least 3, and the DFA-derived conflict delay provides a lower
   bound on the cost.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
			int cost)
{
  int delay;

  /* Unrecognized insns carry no attribute data; leave cost alone.  */
  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      if (cost < 3)
	cost = 3;

      /* Consume the one-shot bypass record.  */
      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better to be set to 0
     to workaround alias analysis limitations.
     This is, in fact, a machine independent tweak, so, probably,
     it should be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}
5960 /* Return maximal number of insns that can be scheduled on a single cycle. */
5961 static int
5962 m68k_sched_issue_rate (void)
5964 switch (m68k_sched_cpu)
5966 case CPU_CFV1:
5967 case CPU_CFV2:
5968 case CPU_CFV3:
5969 return 1;
5971 case CPU_CFV4:
5972 return 2;
5974 default:
5975 gcc_unreachable ();
5976 return 0;
/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if instruction buffer model is modeled for current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather then a plain buffer
     of instruction words (ColdFire V3 in this file).  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustements made to the size of the buffer.
       Used as a circular log of the last N_INSNS adjustments; see
       m68k_sched_variable_issue.  */
    int *adjust;

    /* Index of the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction
     buffer.  Emitted once in m68k_sched_md_init_global and fed to the
     DFA in m68k_sched_dfa_post_advance_cycle.  */
  rtx insn;
};

static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;
/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-words instructions.
	     To model this behavior we temporarily decrease size of the
	     buffer by (max_insn_size - insn_size) for next 7 instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    /* Log the adjustment in the circular buffer so it can be
	       undone after n_insns more instructions.  */
	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo adjustement we did 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  /* V4 does not model the buffer at all (see init_global).  */
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Workaround instruction buffer not being filled enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    /* Inline asm has unknown size; conservatively drain the buffer.  */
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  int issue_rate = m68k_sched_issue_rate ();

  return issue_rate - 1;
}
/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants: the instruction-buffer model,
   the memory unit code, the clean DFA state for adjust_cost, and the
   prefetch pseudo-insn.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Setup target cpu.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not model
     buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  /* Create the pseudo-insn that models fetching one buffer word.  */
  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}
6183 /* Scheduling pass is now finished. Free/reset static variables. */
6184 static void
6185 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6186 int verbose ATTRIBUTE_UNUSED)
6188 sched_ib.insn = NULL;
6190 free (sched_adjust_cost_state);
6191 sched_adjust_cost_state = NULL;
6193 sched_mem_unit_code = 0;
6195 free (sched_ib.records.adjust);
6196 sched_ib.records.adjust = NULL;
6197 sched_ib.records.n_insns = 0;
6198 max_insn_size = 0;
/* Implementation of targetm.sched.init () hook.
   It is invoked each time scheduler starts on the new block (basic block or
   extended basic block).  Resets the per-block instruction-buffer state.  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      /* V1/V2 buffer holds a fixed six words.  */
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      /* Clear the circular log of size adjustments.  */
      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Workaround that.  */
    sched_ib.filled = -2;
}
6239 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6240 It is invoked just before current cycle finishes and is used here
6241 to track if instruction buffer got its two words this cycle. */
6242 static void
6243 m68k_sched_dfa_pre_advance_cycle (void)
6245 if (!sched_ib.enabled_p)
6246 return;
6248 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6250 sched_ib.filled += 2;
6252 if (sched_ib.filled > sched_ib.size)
6253 sched_ib.filled = sched_ib.size;
6257 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6258 It is invoked just after new cycle begins and is used here
6259 to setup number of filled words in the instruction buffer so that
6260 instructions which won't have all their words prefetched would be
6261 stalled for a cycle. */
6262 static void
6263 m68k_sched_dfa_post_advance_cycle (void)
6265 int i;
6267 if (!sched_ib.enabled_p)
6268 return;
6270 /* Setup number of prefetched instruction words in the instruction
6271 buffer. */
6272 i = max_insn_size - sched_ib.filled;
6274 while (--i >= 0)
6276 if (state_transition (curr_state, sched_ib.insn) >= 0)
6277 /* Pick up scheduler state. */
6278 ++sched_ib.filled;
6282 /* Return X or Y (depending on OPX_P) operand of INSN,
6283 if it is an integer register, or NULL overwise. */
6284 static rtx
6285 sched_get_reg_operand (rtx insn, bool opx_p)
6287 rtx op = NULL;
6289 if (opx_p)
6291 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6293 op = sched_get_operand (insn, true);
6294 gcc_assert (op != NULL);
6296 if (!reload_completed && !REG_P (op))
6297 return NULL;
6300 else
6302 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6304 op = sched_get_operand (insn, false);
6305 gcc_assert (op != NULL);
6307 if (!reload_completed && !REG_P (op))
6308 return NULL;
6312 return op;
6315 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6316 is a MEM. */
6317 static bool
6318 sched_mem_operand_p (rtx insn, bool opx_p)
6320 switch (sched_get_opxy_mem_type (insn, opx_p))
6322 case OP_TYPE_MEM1:
6323 case OP_TYPE_MEM6:
6324 return true;
6326 default:
6327 return false;
6331 /* Return X or Y (depending on OPX_P) operand of INSN,
6332 if it is a MEM, or NULL overwise. */
6333 static rtx
6334 sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
6336 bool opx_p;
6337 bool opy_p;
6339 opx_p = false;
6340 opy_p = false;
6342 if (must_read_p)
6344 opx_p = true;
6345 opy_p = true;
6348 if (must_write_p)
6350 opx_p = true;
6351 opy_p = false;
6354 if (opy_p && sched_mem_operand_p (insn, false))
6355 return sched_get_operand (insn, false);
6357 if (opx_p && sched_mem_operand_p (insn, true))
6358 return sched_get_operand (insn, true);
6360 gcc_unreachable ();
6361 return NULL;
6364 /* Return non-zero if PRO modifies register used as part of
6365 address in CON. */
6367 m68k_sched_address_bypass_p (rtx pro, rtx con)
6369 rtx pro_x;
6370 rtx con_mem_read;
6372 pro_x = sched_get_reg_operand (pro, true);
6373 if (pro_x == NULL)
6374 return 0;
6376 con_mem_read = sched_get_mem_operand (con, true, false);
6377 gcc_assert (con_mem_read != NULL);
6379 if (reg_mentioned_p (pro_x, con_mem_read))
6380 return 1;
6382 return 0;
6385 /* Helper function for m68k_sched_indexed_address_bypass_p.
6386 if PRO modifies register used as index in CON,
6387 return scale of indexed memory access in CON. Return zero overwise. */
6388 static int
6389 sched_get_indexed_address_scale (rtx pro, rtx con)
6391 rtx reg;
6392 rtx mem;
6393 struct m68k_address address;
6395 reg = sched_get_reg_operand (pro, true);
6396 if (reg == NULL)
6397 return 0;
6399 mem = sched_get_mem_operand (con, true, false);
6400 gcc_assert (mem != NULL && MEM_P (mem));
6402 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
6403 &address))
6404 gcc_unreachable ();
6406 if (REGNO (reg) == REGNO (address.index))
6408 gcc_assert (address.scale != 0);
6409 return address.scale;
6412 return 0;
/* Return non-zero if PRO modifies register used
   as index with scale 2 or 4 in CON.

   A 1x-scale match cannot be expressed as a fixed-latency bypass, so it
   is recorded in sched_cfv4_bypass_data instead, for
   m68k_sched_adjust_cost to pick up on the immediately following call.  */
int
m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
{
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
	      && sched_cfv4_bypass_data.con == NULL
	      && sched_cfv4_bypass_data.scale == 0);

  switch (sched_get_indexed_address_scale (pro, con))
    {
    case 1:
      /* We can't have a variable latency bypass, so
	 remember to adjust the insn cost in adjust_cost hook.  */
      sched_cfv4_bypass_data.pro = pro;
      sched_cfv4_bypass_data.con = con;
      sched_cfv4_bypass_data.scale = 1;
      return 0;

    case 2:
    case 4:
      return 1;

    default:
      return 0;
    }
}
/* We generate a two-instructions program at M_TRAMP :
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  /* 0x207C is the opcode of "movea.l #imm32,%a0"; the destination
     address-register field sits at bits 9-11, hence the shift of the
     register number (minus 8, the first address register).  */
  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  /* 0x4EF9 is the opcode of "jmp (xxx).L"; the 32-bit target follows.  */
  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT(0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  /* Flush the instruction cache over the freshly written trampoline.  */
  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
6469 /* On the 68000, the RTS insn cannot pop anything.
6470 On the 68010, the RTD insn may be used to pop them if the number
6471 of args is fixed, but if the number is variable then the caller
6472 must pop them all. RTD can't be used for library calls now
6473 because the library is compiled with the Unix compiler.
6474 Use of RTD is a selectable option, since it is incompatible with
6475 standard Unix calling sequences. If the option is not selected,
6476 the caller must always pop the args. */
6478 static int
6479 m68k_return_pops_args (tree fundecl, tree funtype, int size)
6481 return ((TARGET_RTD
6482 && (!fundecl
6483 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
6484 && (!stdarg_p (funtype)))
6485 ? size : 0);
6488 /* Make sure everything's fine if we *don't* have a given processor.
6489 This assumes that putting a register in fixed_regs will keep the
6490 compiler's mitts completely off it. We don't bother to zero it out
6491 of register classes. */
6493 static void
6494 m68k_conditional_register_usage (void)
6496 int i;
6497 HARD_REG_SET x;
6498 if (!TARGET_HARD_FLOAT)
6500 COPY_HARD_REG_SET (x, reg_class_contents[(int)FP_REGS]);
6501 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6502 if (TEST_HARD_REG_BIT (x, i))
6503 fixed_regs[i] = call_used_regs[i] = 1;
6505 if (flag_pic)
6506 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
/* Register the word-size out-of-line __sync_* library functions.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}
6515 /* Implements EPILOGUE_USES. All registers are live on exit from an
6516 interrupt routine. */
6517 bool
6518 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
6520 return (reload_completed
6521 && (m68k_get_function_kind (current_function_decl)
6522 == m68k_fk_interrupt_handler));
6525 #include "gt-m68k.h"