gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "hash-set.h"
29 #include "machmode.h"
30 #include "vec.h"
31 #include "double-int.h"
32 #include "input.h"
33 #include "alias.h"
34 #include "symtab.h"
35 #include "wide-int.h"
36 #include "inchash.h"
37 #include "tree.h"
38 #include "fold-const.h"
39 #include "print-tree.h"
40 #include "stringpool.h"
41 #include "stor-layout.h"
42 #include "varasm.h"
43 #include "calls.h"
44 #include "tm_p.h"
45 #include "regs.h"
46 #include "hard-reg-set.h"
47 #include "insn-config.h"
48 #include "conditions.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "flags.h"
52 #include "except.h"
53 #include "function.h"
54 #include "recog.h"
55 #include "hashtab.h"
56 #include "statistics.h"
57 #include "real.h"
58 #include "fixed-value.h"
59 #include "expmed.h"
60 #include "dojump.h"
61 #include "explow.h"
62 #include "emit-rtl.h"
63 #include "stmt.h"
64 #include "expr.h"
65 #include "reload.h"
66 #include "diagnostic-core.h"
67 #include "predict.h"
68 #include "dominance.h"
69 #include "cfg.h"
70 #include "cfgrtl.h"
71 #include "cfganal.h"
72 #include "lcm.h"
73 #include "cfgbuild.h"
74 #include "cfgcleanup.h"
75 #include "basic-block.h"
76 #include "ggc.h"
77 #include "target.h"
78 #include "target-def.h"
79 #include "debug.h"
80 #include "langhooks.h"
81 #include "insn-codes.h"
82 #include "optabs.h"
83 #include "hash-table.h"
84 #include "tree-ssa-alias.h"
85 #include "internal-fn.h"
86 #include "gimple-fold.h"
87 #include "tree-eh.h"
88 #include "gimple-expr.h"
89 #include "is-a.h"
90 #include "gimple.h"
91 #include "gimplify.h"
92 #include "df.h"
93 #include "params.h"
94 #include "cfgloop.h"
95 #include "opts.h"
96 #include "tree-pass.h"
97 #include "context.h"
98 #include "builtins.h"
99 #include "rtl-iter.h"
101 /* Define the specific costs for a given cpu. */
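/* Editorial note: the per-CPU cost tables below express each value with
   COSTS_N_INSNS, GCC's standard cost macro (defined in rtl.h as N times the
   cost of a single fast instruction).  COSTS_N_INSNS (10) for MGHI on the
   z900 therefore models that multiply as roughly ten times as expensive as
   a simple register-register instruction.  */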
103 struct processor_costs
105 /* multiplication */
106 const int m; /* cost of an M instruction. */
107 const int mghi; /* cost of an MGHI instruction. */
108 const int mh; /* cost of an MH instruction. */
109 const int mhi; /* cost of an MHI instruction. */
110 const int ml; /* cost of an ML instruction. */
111 const int mr; /* cost of an MR instruction. */
112 const int ms; /* cost of an MS instruction. */
113 const int msg; /* cost of an MSG instruction. */
114 const int msgf; /* cost of an MSGF instruction. */
115 const int msgfr; /* cost of an MSGFR instruction. */
116 const int msgr; /* cost of an MSGR instruction. */
117 const int msr; /* cost of an MSR instruction. */
118 const int mult_df; /* cost of multiplication in DFmode. */
119 const int mxbr;
120 /* square root */
121 const int sqxbr; /* cost of square root in TFmode. */
122 const int sqdbr; /* cost of square root in DFmode. */
123 const int sqebr; /* cost of square root in SFmode. */
124 /* multiply and add */
125 const int madbr; /* cost of multiply and add in DFmode. */
126 const int maebr; /* cost of multiply and add in SFmode. */
127 /* division */
128 const int dxbr;
129 const int ddbr;
130 const int debr;
131 const int dlgr;
132 const int dlr;
133 const int dr;
134 const int dsgfr;
135 const int dsgr;
138 const struct processor_costs *s390_cost;
140 static const
141 struct processor_costs z900_cost =
143 COSTS_N_INSNS (5), /* M */
144 COSTS_N_INSNS (10), /* MGHI */
145 COSTS_N_INSNS (5), /* MH */
146 COSTS_N_INSNS (4), /* MHI */
147 COSTS_N_INSNS (5), /* ML */
148 COSTS_N_INSNS (5), /* MR */
149 COSTS_N_INSNS (4), /* MS */
150 COSTS_N_INSNS (15), /* MSG */
151 COSTS_N_INSNS (7), /* MSGF */
152 COSTS_N_INSNS (7), /* MSGFR */
153 COSTS_N_INSNS (10), /* MSGR */
154 COSTS_N_INSNS (4), /* MSR */
155 COSTS_N_INSNS (7), /* multiplication in DFmode */
156 COSTS_N_INSNS (13), /* MXBR */
157 COSTS_N_INSNS (136), /* SQXBR */
158 COSTS_N_INSNS (44), /* SQDBR */
159 COSTS_N_INSNS (35), /* SQEBR */
160 COSTS_N_INSNS (18), /* MADBR */
161 COSTS_N_INSNS (13), /* MAEBR */
162 COSTS_N_INSNS (134), /* DXBR */
163 COSTS_N_INSNS (30), /* DDBR */
164 COSTS_N_INSNS (27), /* DEBR */
165 COSTS_N_INSNS (220), /* DLGR */
166 COSTS_N_INSNS (34), /* DLR */
167 COSTS_N_INSNS (34), /* DR */
168 COSTS_N_INSNS (32), /* DSGFR */
169 COSTS_N_INSNS (32), /* DSGR */
172 static const
173 struct processor_costs z990_cost =
175 COSTS_N_INSNS (4), /* M */
176 COSTS_N_INSNS (2), /* MGHI */
177 COSTS_N_INSNS (2), /* MH */
178 COSTS_N_INSNS (2), /* MHI */
179 COSTS_N_INSNS (4), /* ML */
180 COSTS_N_INSNS (4), /* MR */
181 COSTS_N_INSNS (5), /* MS */
182 COSTS_N_INSNS (6), /* MSG */
183 COSTS_N_INSNS (4), /* MSGF */
184 COSTS_N_INSNS (4), /* MSGFR */
185 COSTS_N_INSNS (4), /* MSGR */
186 COSTS_N_INSNS (4), /* MSR */
187 COSTS_N_INSNS (1), /* multiplication in DFmode */
188 COSTS_N_INSNS (28), /* MXBR */
189 COSTS_N_INSNS (130), /* SQXBR */
190 COSTS_N_INSNS (66), /* SQDBR */
191 COSTS_N_INSNS (38), /* SQEBR */
192 COSTS_N_INSNS (1), /* MADBR */
193 COSTS_N_INSNS (1), /* MAEBR */
194 COSTS_N_INSNS (60), /* DXBR */
195 COSTS_N_INSNS (40), /* DDBR */
196 COSTS_N_INSNS (26), /* DEBR */
197 COSTS_N_INSNS (176), /* DLGR */
198 COSTS_N_INSNS (31), /* DLR */
199 COSTS_N_INSNS (31), /* DR */
200 COSTS_N_INSNS (31), /* DSGFR */
201 COSTS_N_INSNS (31), /* DSGR */
204 static const
205 struct processor_costs z9_109_cost =
207 COSTS_N_INSNS (4), /* M */
208 COSTS_N_INSNS (2), /* MGHI */
209 COSTS_N_INSNS (2), /* MH */
210 COSTS_N_INSNS (2), /* MHI */
211 COSTS_N_INSNS (4), /* ML */
212 COSTS_N_INSNS (4), /* MR */
213 COSTS_N_INSNS (5), /* MS */
214 COSTS_N_INSNS (6), /* MSG */
215 COSTS_N_INSNS (4), /* MSGF */
216 COSTS_N_INSNS (4), /* MSGFR */
217 COSTS_N_INSNS (4), /* MSGR */
218 COSTS_N_INSNS (4), /* MSR */
219 COSTS_N_INSNS (1), /* multiplication in DFmode */
220 COSTS_N_INSNS (28), /* MXBR */
221 COSTS_N_INSNS (130), /* SQXBR */
222 COSTS_N_INSNS (66), /* SQDBR */
223 COSTS_N_INSNS (38), /* SQEBR */
224 COSTS_N_INSNS (1), /* MADBR */
225 COSTS_N_INSNS (1), /* MAEBR */
226 COSTS_N_INSNS (60), /* DXBR */
227 COSTS_N_INSNS (40), /* DDBR */
228 COSTS_N_INSNS (26), /* DEBR */
229 COSTS_N_INSNS (30), /* DLGR */
230 COSTS_N_INSNS (23), /* DLR */
231 COSTS_N_INSNS (23), /* DR */
232 COSTS_N_INSNS (24), /* DSGFR */
233 COSTS_N_INSNS (24), /* DSGR */
236 static const
237 struct processor_costs z10_cost =
239 COSTS_N_INSNS (10), /* M */
240 COSTS_N_INSNS (10), /* MGHI */
241 COSTS_N_INSNS (10), /* MH */
242 COSTS_N_INSNS (10), /* MHI */
243 COSTS_N_INSNS (10), /* ML */
244 COSTS_N_INSNS (10), /* MR */
245 COSTS_N_INSNS (10), /* MS */
246 COSTS_N_INSNS (10), /* MSG */
247 COSTS_N_INSNS (10), /* MSGF */
248 COSTS_N_INSNS (10), /* MSGFR */
249 COSTS_N_INSNS (10), /* MSGR */
250 COSTS_N_INSNS (10), /* MSR */
251 COSTS_N_INSNS (1), /* multiplication in DFmode */
252 COSTS_N_INSNS (50), /* MXBR */
253 COSTS_N_INSNS (120), /* SQXBR */
254 COSTS_N_INSNS (52), /* SQDBR */
255 COSTS_N_INSNS (38), /* SQEBR */
256 COSTS_N_INSNS (1), /* MADBR */
257 COSTS_N_INSNS (1), /* MAEBR */
258 COSTS_N_INSNS (111), /* DXBR */
259 COSTS_N_INSNS (39), /* DDBR */
260 COSTS_N_INSNS (32), /* DEBR */
261 COSTS_N_INSNS (160), /* DLGR */
262 COSTS_N_INSNS (71), /* DLR */
263 COSTS_N_INSNS (71), /* DR */
264 COSTS_N_INSNS (71), /* DSGFR */
265 COSTS_N_INSNS (71), /* DSGR */
268 static const
269 struct processor_costs z196_cost =
271 COSTS_N_INSNS (7), /* M */
272 COSTS_N_INSNS (5), /* MGHI */
273 COSTS_N_INSNS (5), /* MH */
274 COSTS_N_INSNS (5), /* MHI */
275 COSTS_N_INSNS (7), /* ML */
276 COSTS_N_INSNS (7), /* MR */
277 COSTS_N_INSNS (6), /* MS */
278 COSTS_N_INSNS (8), /* MSG */
279 COSTS_N_INSNS (6), /* MSGF */
280 COSTS_N_INSNS (6), /* MSGFR */
281 COSTS_N_INSNS (8), /* MSGR */
282 COSTS_N_INSNS (6), /* MSR */
283 COSTS_N_INSNS (1), /* multiplication in DFmode */
284 COSTS_N_INSNS (40), /* MXBR B+40 */
285 COSTS_N_INSNS (100), /* SQXBR B+100 */
286 COSTS_N_INSNS (42), /* SQDBR B+42 */
287 COSTS_N_INSNS (28), /* SQEBR B+28 */
288 COSTS_N_INSNS (1), /* MADBR B */
289 COSTS_N_INSNS (1), /* MAEBR B */
290 COSTS_N_INSNS (101), /* DXBR B+101 */
291 COSTS_N_INSNS (29), /* DDBR */
292 COSTS_N_INSNS (22), /* DEBR */
293 COSTS_N_INSNS (160), /* DLGR cracked */
294 COSTS_N_INSNS (160), /* DLR cracked */
295 COSTS_N_INSNS (160), /* DR expanded */
296 COSTS_N_INSNS (160), /* DSGFR cracked */
297 COSTS_N_INSNS (160), /* DSGR cracked */
300 static const
301 struct processor_costs zEC12_cost =
303 COSTS_N_INSNS (7), /* M */
304 COSTS_N_INSNS (5), /* MGHI */
305 COSTS_N_INSNS (5), /* MH */
306 COSTS_N_INSNS (5), /* MHI */
307 COSTS_N_INSNS (7), /* ML */
308 COSTS_N_INSNS (7), /* MR */
309 COSTS_N_INSNS (6), /* MS */
310 COSTS_N_INSNS (8), /* MSG */
311 COSTS_N_INSNS (6), /* MSGF */
312 COSTS_N_INSNS (6), /* MSGFR */
313 COSTS_N_INSNS (8), /* MSGR */
314 COSTS_N_INSNS (6), /* MSR */
315 COSTS_N_INSNS (1), /* multiplication in DFmode */
316 COSTS_N_INSNS (40), /* MXBR B+40 */
317 COSTS_N_INSNS (100), /* SQXBR B+100 */
318 COSTS_N_INSNS (42), /* SQDBR B+42 */
319 COSTS_N_INSNS (28), /* SQEBR B+28 */
320 COSTS_N_INSNS (1), /* MADBR B */
321 COSTS_N_INSNS (1), /* MAEBR B */
322 COSTS_N_INSNS (131), /* DXBR B+131 */
323 COSTS_N_INSNS (29), /* DDBR */
324 COSTS_N_INSNS (22), /* DEBR */
325 COSTS_N_INSNS (160), /* DLGR cracked */
326 COSTS_N_INSNS (160), /* DLR cracked */
327 COSTS_N_INSNS (160), /* DR expanded */
328 COSTS_N_INSNS (160), /* DSGFR cracked */
329 COSTS_N_INSNS (160), /* DSGR cracked */
332 extern int reload_completed;
334 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
335 static rtx_insn *last_scheduled_insn;
337 /* Structure used to hold the components of a S/390 memory
338 address. A legitimate address on S/390 is of the general
339 form
340 base + index + displacement
341 where any of the components is optional.
343 base and index are registers of the class ADDR_REGS,
344 displacement is an unsigned 12-bit immediate constant. */
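/* Illustrative example (not part of the original source): the RTL address
   (plus (plus (reg %r2) (reg %r3)) (const_int 4000)) decomposes into
   indx = %r2, base = %r3 and disp = 4000, matching the assembler operand
   form 4000(%r2,%r3).  */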
346 struct s390_address
348 rtx base;
349 rtx indx;
350 rtx disp;
351 bool pointer;
352 bool literal_pool;
355 /* The following structure is embedded in the machine
356 specific part of struct function. */
358 struct GTY (()) s390_frame_layout
360 /* Offset within stack frame. */
361 HOST_WIDE_INT gprs_offset;
362 HOST_WIDE_INT f0_offset;
363 HOST_WIDE_INT f4_offset;
364 HOST_WIDE_INT f8_offset;
365 HOST_WIDE_INT backchain_offset;
367 /* Numbers of the first and last GPRs for which slots in the register
368 save area are reserved. */
369 int first_save_gpr_slot;
370 int last_save_gpr_slot;
372 /* Location (FP register number) where GPRs (r0-r15) should
373 be saved to.
374 0 - does not need to be saved at all
375 -1 - stack slot */
376 signed char gpr_save_slots[16];
378 /* Numbers of the first and last GPRs to be saved and restored. */
379 int first_save_gpr;
380 int first_restore_gpr;
381 int last_save_gpr;
382 int last_restore_gpr;
384 /* Bits standing for floating point registers. Set, if the
385 respective register has to be saved. Starting with reg 16 (f0)
386 at the rightmost bit.
387      Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
388      fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
389      reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
390 unsigned int fpr_bitmap;
392 /* Number of floating point registers f8-f15 which must be saved. */
393 int high_fprs;
395 /* Set if return address needs to be saved.
396 This flag is set by s390_return_addr_rtx if it could not use
397 the initial value of r14 and therefore depends on r14 being
398 saved to the stack. */
399 bool save_return_addr_p;
401 /* Size of stack frame. */
402 HOST_WIDE_INT frame_size;
405 /* Define the structure for the machine field in struct function. */
407 struct GTY(()) machine_function
409 struct s390_frame_layout frame_layout;
411 /* Literal pool base register. */
412 rtx base_reg;
414 /* True if we may need to perform branch splitting. */
415 bool split_branches_pending_p;
417 bool has_landing_pad_p;
419 /* True if the current function may contain a tbegin clobbering
420 FPRs. */
421 bool tbegin_p;
424 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
426 #define cfun_frame_layout (cfun->machine->frame_layout)
427 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
428 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
429 ? cfun_frame_layout.fpr_bitmap & 0x0f \
430 : cfun_frame_layout.fpr_bitmap & 0x03))
431 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
432 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
433 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
434 (1 << (REGNO - FPR0_REGNUM)))
435 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
436 (1 << (REGNO - FPR0_REGNUM))))
437 #define cfun_gpr_save_slot(REGNO) \
438 cfun->machine->frame_layout.gpr_save_slots[REGNO]
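/* Usage sketch (illustrative only): after
     cfun_set_fpr_save (FPR0_REGNUM + 4);
   the predicate cfun_fpr_save_p (FPR0_REGNUM + 4) is true, i.e. f4 has been
   marked as needing a save slot.  FPR0_REGNUM is the hard register number
   of f0 as defined in s390.h.  */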
440 /* Number of GPRs and FPRs used for argument passing. */
441 #define GP_ARG_NUM_REG 5
442 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
444 /* A couple of shortcuts. */
445 #define CONST_OK_FOR_J(x) \
446 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
447 #define CONST_OK_FOR_K(x) \
448 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
449 #define CONST_OK_FOR_Os(x) \
450 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
451 #define CONST_OK_FOR_Op(x) \
452 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
453 #define CONST_OK_FOR_On(x) \
454 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
456 #define REGNO_PAIR_OK(REGNO, MODE) \
457 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
459 /* The read-ahead distance of the dynamic branch prediction unit,
460 in bytes, on a z10 (or newer) CPU. */
461 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
463 static const int s390_hotpatch_hw_max = 1000000;
464 static int s390_hotpatch_hw_before_label = 0;
465 static int s390_hotpatch_hw_after_label = 0;
467 /* Check whether the hotpatch attribute is applied to a function and, if it has
468 an argument, the argument is valid. */
470 static tree
471 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
472 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
474 tree expr;
475 tree expr2;
476 int err;
478 if (TREE_CODE (*node) != FUNCTION_DECL)
480 warning (OPT_Wattributes, "%qE attribute only applies to functions",
481 name);
482 *no_add_attrs = true;
484 if (args != NULL && TREE_CHAIN (args) != NULL)
486 expr = TREE_VALUE (args);
487 expr2 = TREE_VALUE (TREE_CHAIN (args));
489 if (args == NULL || TREE_CHAIN (args) == NULL)
490 err = 1;
491 else if (TREE_CODE (expr) != INTEGER_CST
492 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
493 || wi::gtu_p (expr, s390_hotpatch_hw_max))
494 err = 1;
495 else if (TREE_CODE (expr2) != INTEGER_CST
496 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
497 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
498 err = 1;
499 else
500 err = 0;
501 if (err)
503 error ("requested %qE attribute is not a comma separated pair of"
504 " non-negative integer constants or too large (max. %d)", name,
505 s390_hotpatch_hw_max);
506 *no_add_attrs = true;
509 return NULL_TREE;
512 static const struct attribute_spec s390_attribute_table[] = {
513 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false
515 /* End element. */
516 { NULL, 0, 0, false, false, false, NULL, false }
519 /* Return the alignment for LABEL. We default to the -falign-labels
520 value except for the literal pool base label. */
522 s390_label_align (rtx label)
524 rtx_insn *prev_insn = prev_active_insn (label);
525 rtx set, src;
527 if (prev_insn == NULL_RTX)
528 goto old;
530 set = single_set (prev_insn);
532 if (set == NULL_RTX)
533 goto old;
535 src = SET_SRC (set);
537 /* Don't align literal pool base labels. */
538 if (GET_CODE (src) == UNSPEC
539 && XINT (src, 1) == UNSPEC_MAIN_BASE)
540 return 0;
542 old:
543 return align_labels_log;
546 static machine_mode
547 s390_libgcc_cmp_return_mode (void)
549 return TARGET_64BIT ? DImode : SImode;
552 static machine_mode
553 s390_libgcc_shift_count_mode (void)
555 return TARGET_64BIT ? DImode : SImode;
558 static machine_mode
559 s390_unwind_word_mode (void)
561 return TARGET_64BIT ? DImode : SImode;
564 /* Return true if the back end supports mode MODE. */
565 static bool
566 s390_scalar_mode_supported_p (machine_mode mode)
568 /* In contrast to the default implementation, reject TImode constants on
569 31-bit TARGET_ZARCH for ABI compliance. */
570 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
571 return false;
573 if (DECIMAL_FLOAT_MODE_P (mode))
574 return default_decimal_float_supported_p ();
576 return default_scalar_mode_supported_p (mode);
579 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
581 void
582 s390_set_has_landing_pad_p (bool value)
584 cfun->machine->has_landing_pad_p = value;
587 /* If two condition code modes are compatible, return a condition code
588 mode which is compatible with both. Otherwise, return
589 VOIDmode. */
591 static machine_mode
592 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
594 if (m1 == m2)
595 return m1;
597 switch (m1)
599 case CCZmode:
600 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
601 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
602 return m2;
603 return VOIDmode;
605 case CCSmode:
606 case CCUmode:
607 case CCTmode:
608 case CCSRmode:
609 case CCURmode:
610 case CCZ1mode:
611 if (m2 == CCZmode)
612 return m1;
614 return VOIDmode;
616 default:
617 return VOIDmode;
619 return VOIDmode;
622 /* Return true if SET either doesn't set the CC register, or else
623 the source and destination have matching CC modes and that
624 CC mode is at least as constrained as REQ_MODE. */
626 static bool
627 s390_match_ccmode_set (rtx set, machine_mode req_mode)
629 machine_mode set_mode;
631 gcc_assert (GET_CODE (set) == SET);
633 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
634 return 1;
636 set_mode = GET_MODE (SET_DEST (set));
637 switch (set_mode)
639 case CCSmode:
640 case CCSRmode:
641 case CCUmode:
642 case CCURmode:
643 case CCLmode:
644 case CCL1mode:
645 case CCL2mode:
646 case CCL3mode:
647 case CCT1mode:
648 case CCT2mode:
649 case CCT3mode:
650 if (req_mode != set_mode)
651 return 0;
652 break;
654 case CCZmode:
655 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
656 && req_mode != CCSRmode && req_mode != CCURmode)
657 return 0;
658 break;
660 case CCAPmode:
661 case CCANmode:
662 if (req_mode != CCAmode)
663 return 0;
664 break;
666 default:
667 gcc_unreachable ();
670 return (GET_MODE (SET_SRC (set)) == set_mode);
673 /* Return true if every SET in INSN that sets the CC register
674 has source and destination with matching CC modes and that
675 CC mode is at least as constrained as REQ_MODE.
676 If REQ_MODE is VOIDmode, always return false. */
678 bool
679 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
681 int i;
683 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
684 if (req_mode == VOIDmode)
685 return false;
687 if (GET_CODE (PATTERN (insn)) == SET)
688 return s390_match_ccmode_set (PATTERN (insn), req_mode);
690 if (GET_CODE (PATTERN (insn)) == PARALLEL)
691 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
693 rtx set = XVECEXP (PATTERN (insn), 0, i);
694 if (GET_CODE (set) == SET)
695 if (!s390_match_ccmode_set (set, req_mode))
696 return false;
699 return true;
702 /* If a test-under-mask instruction can be used to implement
703 (compare (and ... OP1) OP2), return the CC mode required
704 to do that. Otherwise, return VOIDmode.
705 MIXED is true if the instruction can distinguish between
706 CC1 and CC2 for mixed selected bits (TMxx); it is false
707 if the instruction cannot (TM). */
709 machine_mode
710 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
712 int bit0, bit1;
714 /* ??? Fixme: should work on CONST_DOUBLE as well. */
715 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
716 return VOIDmode;
718 /* Selected bits all zero: CC0.
719 e.g.: int a; if ((a & (16 + 128)) == 0) */
720 if (INTVAL (op2) == 0)
721 return CCTmode;
723 /* Selected bits all one: CC3.
724 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
725 if (INTVAL (op2) == INTVAL (op1))
726 return CCT3mode;
728 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
729 int a;
730 if ((a & (16 + 128)) == 16) -> CCT1
731 if ((a & (16 + 128)) == 128) -> CCT2 */
732 if (mixed)
734 bit1 = exact_log2 (INTVAL (op2));
735 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
736 if (bit0 != -1 && bit1 != -1)
737 return bit0 > bit1 ? CCT1mode : CCT2mode;
740 return VOIDmode;
743 /* Given a comparison code OP (EQ, NE, etc.) and the operands
744 OP0 and OP1 of a COMPARE, return the mode to be used for the
745 comparison. */
747 machine_mode
748 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
750 switch (code)
752 case EQ:
753 case NE:
754 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
755 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
756 return CCAPmode;
757 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
758 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
759 return CCAPmode;
760 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
761 || GET_CODE (op1) == NEG)
762 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
763 return CCLmode;
765 if (GET_CODE (op0) == AND)
767 /* Check whether we can potentially do it via TM. */
768 machine_mode ccmode;
769 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
770 if (ccmode != VOIDmode)
772 /* Relax CCTmode to CCZmode to allow fall-back to AND
773 if that turns out to be beneficial. */
774 return ccmode == CCTmode ? CCZmode : ccmode;
778 if (register_operand (op0, HImode)
779 && GET_CODE (op1) == CONST_INT
780 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
781 return CCT3mode;
782 if (register_operand (op0, QImode)
783 && GET_CODE (op1) == CONST_INT
784 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
785 return CCT3mode;
787 return CCZmode;
789 case LE:
790 case LT:
791 case GE:
792 case GT:
793 /* The only overflow condition of NEG and ABS happens when
794 INT_MIN is used as parameter, which stays negative. So
795 we have an overflow from a positive value to a negative.
796 Using CCAP mode the resulting cc can be used for comparisons. */
797 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
798 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
799 return CCAPmode;
801 /* If constants are involved in an add instruction, it is possible to use
802 the resulting cc for comparisons with zero. Knowing the sign of the
803 constant, the overflow behavior becomes predictable. e.g.:
804 int a, b; if ((b = a + c) > 0)
805 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
806 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
807 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
808 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
809 /* Avoid INT32_MIN on 32 bit. */
810 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
812 if (INTVAL (XEXP((op0), 1)) < 0)
813 return CCANmode;
814 else
815 return CCAPmode;
817 /* Fall through. */
818 case UNORDERED:
819 case ORDERED:
820 case UNEQ:
821 case UNLE:
822 case UNLT:
823 case UNGE:
824 case UNGT:
825 case LTGT:
826 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
827 && GET_CODE (op1) != CONST_INT)
828 return CCSRmode;
829 return CCSmode;
831 case LTU:
832 case GEU:
833 if (GET_CODE (op0) == PLUS
834 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
835 return CCL1mode;
837 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
838 && GET_CODE (op1) != CONST_INT)
839 return CCURmode;
840 return CCUmode;
842 case LEU:
843 case GTU:
844 if (GET_CODE (op0) == MINUS
845 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
846 return CCL2mode;
848 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
849 && GET_CODE (op1) != CONST_INT)
850 return CCURmode;
851 return CCUmode;
853 default:
854 gcc_unreachable ();
858 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
859 that we can implement more efficiently. */
861 static void
862 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
863 bool op0_preserve_value)
865 if (op0_preserve_value)
866 return;
868 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
869 if ((*code == EQ || *code == NE)
870 && *op1 == const0_rtx
871 && GET_CODE (*op0) == ZERO_EXTRACT
872 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
873 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
874 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
876 rtx inner = XEXP (*op0, 0);
877 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
878 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
879 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
881 if (len > 0 && len < modesize
882 && pos >= 0 && pos + len <= modesize
883 && modesize <= HOST_BITS_PER_WIDE_INT)
885 unsigned HOST_WIDE_INT block;
886 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
887 block <<= modesize - pos - len;
889 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
890 gen_int_mode (block, GET_MODE (inner)));
894 /* Narrow AND of memory against immediate to enable TM. */
895 if ((*code == EQ || *code == NE)
896 && *op1 == const0_rtx
897 && GET_CODE (*op0) == AND
898 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
899 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
901 rtx inner = XEXP (*op0, 0);
902 rtx mask = XEXP (*op0, 1);
904 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
905 if (GET_CODE (inner) == SUBREG
906 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
907 && (GET_MODE_SIZE (GET_MODE (inner))
908 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
909 && ((INTVAL (mask)
910 & GET_MODE_MASK (GET_MODE (inner))
911 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
912 == 0))
913 inner = SUBREG_REG (inner);
915 /* Do not change volatile MEMs. */
916 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
918 int part = s390_single_part (XEXP (*op0, 1),
919 GET_MODE (inner), QImode, 0);
920 if (part >= 0)
922 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
923 inner = adjust_address_nv (inner, QImode, part);
924 *op0 = gen_rtx_AND (QImode, inner, mask);
929 /* Narrow comparisons against 0xffff to HImode if possible. */
930 if ((*code == EQ || *code == NE)
931 && GET_CODE (*op1) == CONST_INT
932 && INTVAL (*op1) == 0xffff
933 && SCALAR_INT_MODE_P (GET_MODE (*op0))
934 && (nonzero_bits (*op0, GET_MODE (*op0))
935 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
937 *op0 = gen_lowpart (HImode, *op0);
938 *op1 = constm1_rtx;
941 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
942 if (GET_CODE (*op0) == UNSPEC
943 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
944 && XVECLEN (*op0, 0) == 1
945 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
946 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
947 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
948 && *op1 == const0_rtx)
950 enum rtx_code new_code = UNKNOWN;
951 switch (*code)
953 case EQ: new_code = EQ; break;
954 case NE: new_code = NE; break;
955 case LT: new_code = GTU; break;
956 case GT: new_code = LTU; break;
957 case LE: new_code = GEU; break;
958 case GE: new_code = LEU; break;
959 default: break;
962 if (new_code != UNKNOWN)
964 *op0 = XVECEXP (*op0, 0, 0);
965 *code = new_code;
969 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
970 if (GET_CODE (*op0) == UNSPEC
971 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
972 && XVECLEN (*op0, 0) == 1
973 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
974 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
975 && CONST_INT_P (*op1))
977 enum rtx_code new_code = UNKNOWN;
978 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
980 case CCZmode:
981 case CCRAWmode:
982 switch (*code)
984 case EQ: new_code = EQ; break;
985 case NE: new_code = NE; break;
986 default: break;
988 break;
989 default: break;
992 if (new_code != UNKNOWN)
994 /* For CCRAWmode put the required cc mask into the second
995 operand. */
996 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
997 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
998 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
999 *op0 = XVECEXP (*op0, 0, 0);
1000 *code = new_code;
1004 /* Simplify cascaded EQ, NE with const0_rtx. */
1005 if ((*code == NE || *code == EQ)
1006 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1007 && GET_MODE (*op0) == SImode
1008 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1009 && REG_P (XEXP (*op0, 0))
1010 && XEXP (*op0, 1) == const0_rtx
1011 && *op1 == const0_rtx)
1013 if ((*code == EQ && GET_CODE (*op0) == NE)
1014 || (*code == NE && GET_CODE (*op0) == EQ))
1015 *code = EQ;
1016 else
1017 *code = NE;
1018 *op0 = XEXP (*op0, 0);
1021 /* Prefer register over memory as first operand. */
1022 if (MEM_P (*op0) && REG_P (*op1))
1024 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1025 *code = (int)swap_condition ((enum rtx_code)*code);
1029 /* Emit a compare instruction suitable to implement the comparison
1030 OP0 CODE OP1. Return the correct condition RTL to be placed in
1031 the IF_THEN_ELSE of the conditional branch testing the result. */
1034 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1036 machine_mode mode = s390_select_ccmode (code, op0, op1);
1037 rtx cc;
1039 /* Do not output a redundant compare instruction if a compare_and_swap
1040 pattern already computed the result and the machine modes are compatible. */
1041 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1043 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1044 == GET_MODE (op0));
1045 cc = op0;
1047 else
1049 cc = gen_rtx_REG (mode, CC_REGNUM);
1050 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1053 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1056 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1057 matches CMP.
1058 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1059 conditional branch testing the result. */
1061 static rtx
1062 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1063 rtx cmp, rtx new_rtx)
1065 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1066 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1067 const0_rtx);
1070 /* Emit a jump instruction to TARGET and return it. If COND is
1071 NULL_RTX, emit an unconditional jump, else a conditional jump under
1072 condition COND. */
1074 rtx_insn *
1075 s390_emit_jump (rtx target, rtx cond)
1077 rtx insn;
1079 target = gen_rtx_LABEL_REF (VOIDmode, target);
1080 if (cond)
1081 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1083 insn = gen_rtx_SET (pc_rtx, target);
1084 return emit_jump_insn (insn);
1087 /* Return branch condition mask to implement a branch
1088 specified by CODE. Return -1 for invalid comparisons. */
1091 s390_branch_condition_mask (rtx code)
1093 const int CC0 = 1 << 3;
1094 const int CC1 = 1 << 2;
1095 const int CC2 = 1 << 1;
1096 const int CC3 = 1 << 0;
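/* Editorial note: this mask has the same layout as the 4-bit mask field of
   the BRC/BRCL branch instructions, where the value 8 selects condition
   code 0.  For example, EQ in CCZmode yields CC0 == 8, i.e. "branch if the
   condition code is 0".  */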
1098 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1099 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1100 gcc_assert (XEXP (code, 1) == const0_rtx
1101 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1102 && CONST_INT_P (XEXP (code, 1))));
1105 switch (GET_MODE (XEXP (code, 0)))
1107 case CCZmode:
1108 case CCZ1mode:
1109 switch (GET_CODE (code))
1111 case EQ: return CC0;
1112 case NE: return CC1 | CC2 | CC3;
1113 default: return -1;
1115 break;
1117 case CCT1mode:
1118 switch (GET_CODE (code))
1120 case EQ: return CC1;
1121 case NE: return CC0 | CC2 | CC3;
1122 default: return -1;
1124 break;
1126 case CCT2mode:
1127 switch (GET_CODE (code))
1129 case EQ: return CC2;
1130 case NE: return CC0 | CC1 | CC3;
1131 default: return -1;
1133 break;
1135 case CCT3mode:
1136 switch (GET_CODE (code))
1138 case EQ: return CC3;
1139 case NE: return CC0 | CC1 | CC2;
1140 default: return -1;
1142 break;
1144 case CCLmode:
1145 switch (GET_CODE (code))
1147 case EQ: return CC0 | CC2;
1148 case NE: return CC1 | CC3;
1149 default: return -1;
1151 break;
1153 case CCL1mode:
1154 switch (GET_CODE (code))
1156 case LTU: return CC2 | CC3; /* carry */
1157 case GEU: return CC0 | CC1; /* no carry */
1158 default: return -1;
1160 break;
1162 case CCL2mode:
1163 switch (GET_CODE (code))
1165 case GTU: return CC0 | CC1; /* borrow */
1166 case LEU: return CC2 | CC3; /* no borrow */
1167 default: return -1;
1169 break;
1171 case CCL3mode:
1172 switch (GET_CODE (code))
1174 case EQ: return CC0 | CC2;
1175 case NE: return CC1 | CC3;
1176 case LTU: return CC1;
1177 case GTU: return CC3;
1178 case LEU: return CC1 | CC2;
1179 case GEU: return CC2 | CC3;
1180 default: return -1;
1183 case CCUmode:
1184 switch (GET_CODE (code))
1186 case EQ: return CC0;
1187 case NE: return CC1 | CC2 | CC3;
1188 case LTU: return CC1;
1189 case GTU: return CC2;
1190 case LEU: return CC0 | CC1;
1191 case GEU: return CC0 | CC2;
1192 default: return -1;
1194 break;
1196 case CCURmode:
1197 switch (GET_CODE (code))
1199 case EQ: return CC0;
1200 case NE: return CC2 | CC1 | CC3;
1201 case LTU: return CC2;
1202 case GTU: return CC1;
1203 case LEU: return CC0 | CC2;
1204 case GEU: return CC0 | CC1;
1205 default: return -1;
1207 break;
1209 case CCAPmode:
1210 switch (GET_CODE (code))
1212 case EQ: return CC0;
1213 case NE: return CC1 | CC2 | CC3;
1214 case LT: return CC1 | CC3;
1215 case GT: return CC2;
1216 case LE: return CC0 | CC1 | CC3;
1217 case GE: return CC0 | CC2;
1218 default: return -1;
1220 break;
1222 case CCANmode:
1223 switch (GET_CODE (code))
1225 case EQ: return CC0;
1226 case NE: return CC1 | CC2 | CC3;
1227 case LT: return CC1;
1228 case GT: return CC2 | CC3;
1229 case LE: return CC0 | CC1;
1230 case GE: return CC0 | CC2 | CC3;
1231 default: return -1;
1233 break;
1235 case CCSmode:
1236 switch (GET_CODE (code))
1238 case EQ: return CC0;
1239 case NE: return CC1 | CC2 | CC3;
1240 case LT: return CC1;
1241 case GT: return CC2;
1242 case LE: return CC0 | CC1;
1243 case GE: return CC0 | CC2;
1244 case UNORDERED: return CC3;
1245 case ORDERED: return CC0 | CC1 | CC2;
1246 case UNEQ: return CC0 | CC3;
1247 case UNLT: return CC1 | CC3;
1248 case UNGT: return CC2 | CC3;
1249 case UNLE: return CC0 | CC1 | CC3;
1250 case UNGE: return CC0 | CC2 | CC3;
1251 case LTGT: return CC1 | CC2;
1252 default: return -1;
1254 break;
1256 case CCSRmode:
1257 switch (GET_CODE (code))
1259 case EQ: return CC0;
1260 case NE: return CC2 | CC1 | CC3;
1261 case LT: return CC2;
1262 case GT: return CC1;
1263 case LE: return CC0 | CC2;
1264 case GE: return CC0 | CC1;
1265 case UNORDERED: return CC3;
1266 case ORDERED: return CC0 | CC2 | CC1;
1267 case UNEQ: return CC0 | CC3;
1268 case UNLT: return CC2 | CC3;
1269 case UNGT: return CC1 | CC3;
1270 case UNLE: return CC0 | CC2 | CC3;
1271 case UNGE: return CC0 | CC1 | CC3;
1272 case LTGT: return CC2 | CC1;
1273 default: return -1;
1275 break;
1277 case CCRAWmode:
1278 switch (GET_CODE (code))
1280 case EQ:
1281 return INTVAL (XEXP (code, 1));
1282 case NE:
1283 return (INTVAL (XEXP (code, 1))) ^ 0xf;
1284 default:
1285 gcc_unreachable ();
1288 default:
1289 return -1;
1294 /* Return branch condition mask to implement a compare and branch
1295 specified by CODE. Return -1 for invalid comparisons. */
1298 s390_compare_and_branch_condition_mask (rtx code)
1300 const int CC0 = 1 << 3;
1301 const int CC1 = 1 << 2;
1302 const int CC2 = 1 << 1;
1304 switch (GET_CODE (code))
1306 case EQ:
1307 return CC0;
1308 case NE:
1309 return CC1 | CC2;
1310 case LT:
1311 case LTU:
1312 return CC1;
1313 case GT:
1314 case GTU:
1315 return CC2;
1316 case LE:
1317 case LEU:
1318 return CC0 | CC1;
1319 case GE:
1320 case GEU:
1321 return CC0 | CC2;
1322 default:
1323 gcc_unreachable ();
1325 return -1;
1328 /* If INV is false, return assembler mnemonic string to implement
1329 a branch specified by CODE. If INV is true, return mnemonic
1330 for the corresponding inverted branch. */
1332 static const char *
1333 s390_branch_condition_mnemonic (rtx code, int inv)
1335 int mask;
1337 static const char *const mnemonic[16] =
1339 NULL, "o", "h", "nle",
1340 "l", "nhe", "lh", "ne",
1341 "e", "nlh", "he", "nl",
1342 "le", "nh", "no", NULL
1345 if (GET_CODE (XEXP (code, 0)) == REG
1346 && REGNO (XEXP (code, 0)) == CC_REGNUM
1347 && (XEXP (code, 1) == const0_rtx
1348 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1349 && CONST_INT_P (XEXP (code, 1)))))
1350 mask = s390_branch_condition_mask (code);
1351 else
1352 mask = s390_compare_and_branch_condition_mask (code);
1354 gcc_assert (mask >= 0);
1356 if (inv)
1357 mask ^= 15;
1359 gcc_assert (mask >= 1 && mask <= 14);
1361 return mnemonic[mask];
1364 /* Return the part of op which has a value different from def.
1365 The size of the part is determined by mode.
1366 Use this function only if you already know that op really
1367 contains such a part. */
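/* Worked example (illustrative): for op = (const_int 0x12340000),
   mode = HImode and def = 0, the lowest 16-bit part is zero, but the next
   part is 0x1234, so the function returns 0x1234.  */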
1369 unsigned HOST_WIDE_INT
1370 s390_extract_part (rtx op, machine_mode mode, int def)
1372 unsigned HOST_WIDE_INT value = 0;
1373 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1374 int part_bits = GET_MODE_BITSIZE (mode);
1375 unsigned HOST_WIDE_INT part_mask
1376 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1377 int i;
1379 for (i = 0; i < max_parts; i++)
1381 if (i == 0)
1382 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1383 else
1384 value >>= part_bits;
1386 if ((value & part_mask) != (def & part_mask))
1387 return value & part_mask;
1390 gcc_unreachable ();
1393 /* If OP is an integer constant of mode MODE with exactly one
1394 part of mode PART_MODE unequal to DEF, return the number of that
1395 part. Otherwise, return -1. */
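/* Worked example (illustrative): for op = (const_int 0x00ff0000),
   mode = SImode, part_mode = HImode and def = 0, only the upper halfword
   differs from DEF, so the function returns 0 (parts are numbered starting
   from the most significant one).  With def = -1 it returns -1, since both
   halfwords differ from 0xffff.  */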
1398 s390_single_part (rtx op,
1399 machine_mode mode,
1400 machine_mode part_mode,
1401 int def)
1403 unsigned HOST_WIDE_INT value = 0;
1404 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1405 unsigned HOST_WIDE_INT part_mask
1406 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1407 int i, part = -1;
1409 if (GET_CODE (op) != CONST_INT)
1410 return -1;
1412 for (i = 0; i < n_parts; i++)
1414 if (i == 0)
1415 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1416 else
1417 value >>= GET_MODE_BITSIZE (part_mode);
1419 if ((value & part_mask) != (def & part_mask))
1421 if (part != -1)
1422 return -1;
1423 else
1424 part = i;
1427 return part == -1 ? -1 : n_parts - 1 - part;
1430 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1431 bits and no other bits are set in IN. POS and LENGTH can be used
1432 to obtain the start position and the length of the bitfield.
1434 POS gives the position of the first bit of the bitfield counting
1435 from the lowest order bit starting with zero. In order to use this
1436 value for S/390 instructions this has to be converted to "bits big
1437 endian" style. */
1439 bool
1440 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1441 int *pos, int *length)
1443 int tmp_pos = 0;
1444 int tmp_length = 0;
1445 int i;
1446 unsigned HOST_WIDE_INT mask = 1ULL;
1447 bool contiguous = false;
1449 for (i = 0; i < size; mask <<= 1, i++)
1451 if (contiguous)
1453 if (mask & in)
1454 tmp_length++;
1455 else
1456 break;
1458 else
1460 if (mask & in)
1462 contiguous = true;
1463 tmp_length++;
1465 else
1466 tmp_pos++;
1470 if (!tmp_length)
1471 return false;
1473 /* Calculate a mask for all bits beyond the contiguous bits. */
1474 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1476 if (mask & in)
1477 return false;
1479 if (tmp_length + tmp_pos - 1 > size)
1480 return false;
1482 if (length)
1483 *length = tmp_length;
1485 if (pos)
1486 *pos = tmp_pos;
1488 return true;
1491 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1492 equivalent to a shift followed by the AND. In particular, CONTIG
1493 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1494 for ROTL indicate a rotate to the right. */
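/* Illustrative example: for bitsize = 64 and contig = 0xf0 (pos 4, len 4),
   any left rotate with 0 <= rotl <= 4 is accepted, while rotl = 6 is
   rejected because the selected field would then straddle the
   bit 0/bit 63 gap.  */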
1496 bool
1497 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1499 int pos, len;
1500 bool ok;
1502 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1503 gcc_assert (ok);
1505 return ((rotl >= 0 && rotl <= pos)
1506 || (rotl < 0 && -rotl <= bitsize - len - pos));
1509 /* Check whether we can (and want to) split a double-word
1510 move in mode MODE from SRC to DST into two single-word
1511 moves, moving the subword FIRST_SUBWORD first. */
1513 bool
1514 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
1516 /* Floating point registers cannot be split. */
1517 if (FP_REG_P (src) || FP_REG_P (dst))
1518 return false;
1520 /* We don't need to split if operands are directly accessible. */
1521 if (s_operand (src, mode) || s_operand (dst, mode))
1522 return false;
1524 /* Non-offsettable memory references cannot be split. */
1525 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1526 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1527 return false;
1529 /* Moving the first subword must not clobber a register
1530 needed to move the second subword. */
1531 if (register_operand (dst, mode))
1533 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1534 if (reg_overlap_mentioned_p (subreg, src))
1535 return false;
1538 return true;
1541 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1542 and [MEM2, MEM2 + SIZE] overlap, and false
1543 otherwise. */
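/* Illustrative example: two MEMs at addresses (reg %r2) and
   (plus (reg %r2) (const_int 4)) with size = 8 give delta = 4 < 8, so the
   function returns true; with size = 4 the two blocks are merely adjacent
   and it returns false.  */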
1545 bool
1546 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1548 rtx addr1, addr2, addr_delta;
1549 HOST_WIDE_INT delta;
1551 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1552 return true;
1554 if (size == 0)
1555 return false;
1557 addr1 = XEXP (mem1, 0);
1558 addr2 = XEXP (mem2, 0);
1560 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1562 /* This overlapping check is used by peepholes merging memory block operations.
1563 Overlapping operations would otherwise be recognized by the S/390 hardware
1564 and would fall back to a slower implementation. Allowing overlapping
1565 operations would lead to slow code but not to wrong code. Therefore we are
1566 somewhat optimistic if we cannot prove that the memory blocks are
1567 overlapping.
1568 That's why we return false here although this may accept operations on
1569 overlapping memory areas. */
1570 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1571 return false;
1573 delta = INTVAL (addr_delta);
1575 if (delta == 0
1576 || (delta > 0 && delta < size)
1577 || (delta < 0 && -delta < size))
1578 return true;
1580 return false;
1583 /* Check whether the address of memory reference MEM2 equals exactly
1584 the address of memory reference MEM1 plus DELTA. Return true if
1585 we can prove this to be the case, false otherwise. */
1587 bool
1588 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1590 rtx addr1, addr2, addr_delta;
1592 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1593 return false;
1595 addr1 = XEXP (mem1, 0);
1596 addr2 = XEXP (mem2, 0);
1598 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1599 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1600 return false;
1602 return true;
1605 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1607 void
1608 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
1609 rtx *operands)
1611 machine_mode wmode = mode;
1612 rtx dst = operands[0];
1613 rtx src1 = operands[1];
1614 rtx src2 = operands[2];
1615 rtx op, clob, tem;
1617 /* If we cannot handle the operation directly, use a temp register. */
1618 if (!s390_logical_operator_ok_p (operands))
1619 dst = gen_reg_rtx (mode);
1621 /* QImode and HImode patterns make sense only if we have a destination
1622 in memory. Otherwise perform the operation in SImode. */
1623 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1624 wmode = SImode;
1626 /* Widen operands if required. */
1627 if (mode != wmode)
1629 if (GET_CODE (dst) == SUBREG
1630 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1631 dst = tem;
1632 else if (REG_P (dst))
1633 dst = gen_rtx_SUBREG (wmode, dst, 0);
1634 else
1635 dst = gen_reg_rtx (wmode);
1637 if (GET_CODE (src1) == SUBREG
1638 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1639 src1 = tem;
1640 else if (GET_MODE (src1) != VOIDmode)
1641 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1643 if (GET_CODE (src2) == SUBREG
1644 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1645 src2 = tem;
1646 else if (GET_MODE (src2) != VOIDmode)
1647 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1650 /* Emit the instruction. */
1651 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1652 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1653 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1655 /* Fix up the destination if needed. */
1656 if (dst != operands[0])
1657 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1660 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1662 bool
1663 s390_logical_operator_ok_p (rtx *operands)
1665 /* If the destination operand is in memory, it needs to coincide
1666 with one of the source operands. After reload, it has to be
1667 the first source operand. */
1668 if (GET_CODE (operands[0]) == MEM)
1669 return rtx_equal_p (operands[0], operands[1])
1670 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1672 return true;
1675 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1676 operand IMMOP to switch from SS to SI type instructions. */
1678 void
1679 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1681 int def = code == AND ? -1 : 0;
1682 HOST_WIDE_INT mask;
1683 int part;
1685 gcc_assert (GET_CODE (*memop) == MEM);
1686 gcc_assert (!MEM_VOLATILE_P (*memop));
1688 mask = s390_extract_part (*immop, QImode, def);
1689 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1690 gcc_assert (part >= 0);
1692 *memop = adjust_address (*memop, QImode, part);
1693 *immop = gen_int_mode (mask, QImode);
1697 /* How to allocate a 'struct machine_function'. */
1699 static struct machine_function *
1700 s390_init_machine_status (void)
1702 return ggc_cleared_alloc<machine_function> ();
1705 /* Map for smallest class containing reg regno. */
1707 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1708 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1709 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1710 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1711 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1712 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1713 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1714 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1715 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1716 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1717 ACCESS_REGS, ACCESS_REGS
1720 /* Return attribute type of insn. */
1722 static enum attr_type
1723 s390_safe_attr_type (rtx_insn *insn)
1725 if (recog_memoized (insn) >= 0)
1726 return get_attr_type (insn);
1727 else
1728 return TYPE_NONE;
1731 /* Return true if DISP is a valid short displacement. */
1733 static bool
1734 s390_short_displacement (rtx disp)
1736 /* No displacement is OK. */
1737 if (!disp)
1738 return true;
1740 /* Without the long displacement facility we don't need to
1741 distinguish between long and short displacement. */
1742 if (!TARGET_LONG_DISPLACEMENT)
1743 return true;
1745 /* Integer displacement in range. */
1746 if (GET_CODE (disp) == CONST_INT)
1747 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1749 /* GOT offset is not OK, the GOT can be large. */
1750 if (GET_CODE (disp) == CONST
1751 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1752 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1753 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1754 return false;
1756 /* All other symbolic constants are literal pool references,
1757 which are OK as the literal pool must be small. */
1758 if (GET_CODE (disp) == CONST)
1759 return true;
1761 return false;
1764 /* Decompose a RTL expression ADDR for a memory address into
1765 its components, returned in OUT.
1767 Returns false if ADDR is not a valid memory address, true
1768 otherwise. If OUT is NULL, don't return the components,
1769 but check for validity only.
1771 Note: Only addresses in canonical form are recognized.
1772 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1773 canonical form so that they will be recognized. */
1775 static int
1776 s390_decompose_address (rtx addr, struct s390_address *out)
1778 HOST_WIDE_INT offset = 0;
1779 rtx base = NULL_RTX;
1780 rtx indx = NULL_RTX;
1781 rtx disp = NULL_RTX;
1782 rtx orig_disp;
1783 bool pointer = false;
1784 bool base_ptr = false;
1785 bool indx_ptr = false;
1786 bool literal_pool = false;
1788 /* We may need to substitute the literal pool base register into the address
1789 below. However, at this point we do not know which register is going to
1790 be used as base, so we substitute the arg pointer register. This is going
1791 to be treated as holding a pointer below -- it shouldn't be used for any
1792 other purpose. */
1793 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1795 /* Decompose address into base + index + displacement. */
1797 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1798 base = addr;
1800 else if (GET_CODE (addr) == PLUS)
1802 rtx op0 = XEXP (addr, 0);
1803 rtx op1 = XEXP (addr, 1);
1804 enum rtx_code code0 = GET_CODE (op0);
1805 enum rtx_code code1 = GET_CODE (op1);
1807 if (code0 == REG || code0 == UNSPEC)
1809 if (code1 == REG || code1 == UNSPEC)
1811 indx = op0; /* index + base */
1812 base = op1;
1815 else
1817 base = op0; /* base + displacement */
1818 disp = op1;
1822 else if (code0 == PLUS)
1824 indx = XEXP (op0, 0); /* index + base + disp */
1825 base = XEXP (op0, 1);
1826 disp = op1;
1829 else
1831 return false;
1835 else
1836 disp = addr; /* displacement */
1838 /* Extract integer part of displacement. */
1839 orig_disp = disp;
1840 if (disp)
1842 if (GET_CODE (disp) == CONST_INT)
1844 offset = INTVAL (disp);
1845 disp = NULL_RTX;
1847 else if (GET_CODE (disp) == CONST
1848 && GET_CODE (XEXP (disp, 0)) == PLUS
1849 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1851 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1852 disp = XEXP (XEXP (disp, 0), 0);
1856 /* Strip off CONST here to avoid special case tests later. */
1857 if (disp && GET_CODE (disp) == CONST)
1858 disp = XEXP (disp, 0);
1860 /* We can convert literal pool addresses to
1861 displacements by basing them off the base register. */
1862 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1864 /* Either base or index must be free to hold the base register. */
1865 if (!base)
1866 base = fake_pool_base, literal_pool = true;
1867 else if (!indx)
1868 indx = fake_pool_base, literal_pool = true;
1869 else
1870 return false;
1872 /* Mark up the displacement. */
1873 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1874 UNSPEC_LTREL_OFFSET);
1877 /* Validate base register. */
1878 if (base)
1880 if (GET_CODE (base) == UNSPEC)
1881 switch (XINT (base, 1))
1883 case UNSPEC_LTREF:
1884 if (!disp)
1885 disp = gen_rtx_UNSPEC (Pmode,
1886 gen_rtvec (1, XVECEXP (base, 0, 0)),
1887 UNSPEC_LTREL_OFFSET);
1888 else
1889 return false;
1891 base = XVECEXP (base, 0, 1);
1892 break;
1894 case UNSPEC_LTREL_BASE:
1895 if (XVECLEN (base, 0) == 1)
1896 base = fake_pool_base, literal_pool = true;
1897 else
1898 base = XVECEXP (base, 0, 1);
1899 break;
1901 default:
1902 return false;
1905 if (!REG_P (base)
1906 || (GET_MODE (base) != SImode
1907 && GET_MODE (base) != Pmode))
1908 return false;
1910 if (REGNO (base) == STACK_POINTER_REGNUM
1911 || REGNO (base) == FRAME_POINTER_REGNUM
1912 || ((reload_completed || reload_in_progress)
1913 && frame_pointer_needed
1914 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1915 || REGNO (base) == ARG_POINTER_REGNUM
1916 || (flag_pic
1917 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1918 pointer = base_ptr = true;
1920 if ((reload_completed || reload_in_progress)
1921 && base == cfun->machine->base_reg)
1922 pointer = base_ptr = literal_pool = true;
1925 /* Validate index register. */
1926 if (indx)
1928 if (GET_CODE (indx) == UNSPEC)
1929 switch (XINT (indx, 1))
1931 case UNSPEC_LTREF:
1932 if (!disp)
1933 disp = gen_rtx_UNSPEC (Pmode,
1934 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1935 UNSPEC_LTREL_OFFSET);
1936 else
1937 return false;
1939 indx = XVECEXP (indx, 0, 1);
1940 break;
1942 case UNSPEC_LTREL_BASE:
1943 if (XVECLEN (indx, 0) == 1)
1944 indx = fake_pool_base, literal_pool = true;
1945 else
1946 indx = XVECEXP (indx, 0, 1);
1947 break;
1949 default:
1950 return false;
1953 if (!REG_P (indx)
1954 || (GET_MODE (indx) != SImode
1955 && GET_MODE (indx) != Pmode))
1956 return false;
1958 if (REGNO (indx) == STACK_POINTER_REGNUM
1959 || REGNO (indx) == FRAME_POINTER_REGNUM
1960 || ((reload_completed || reload_in_progress)
1961 && frame_pointer_needed
1962 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1963 || REGNO (indx) == ARG_POINTER_REGNUM
1964 || (flag_pic
1965 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1966 pointer = indx_ptr = true;
1968 if ((reload_completed || reload_in_progress)
1969 && indx == cfun->machine->base_reg)
1970 pointer = indx_ptr = literal_pool = true;
1973 /* Prefer to use pointer as base, not index. */
1974 if (base && indx && !base_ptr
1975 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1977 rtx tmp = base;
1978 base = indx;
1979 indx = tmp;
1982 /* Validate displacement. */
1983 if (!disp)
1985 /* If virtual registers are involved, the displacement will change later
1986 anyway as the virtual registers get eliminated. This could make a
1987 valid displacement invalid, but it is more likely to make an invalid
1988 displacement valid, because we sometimes access the register save area
1989 via negative offsets to one of those registers.
1990 Thus we don't check the displacement for validity here. If after
1991 elimination the displacement turns out to be invalid after all,
1992 this is fixed up by reload in any case. */
1993 /* LRA always keeps displacements up to date, and we need to
1994 know that the displacement is valid during all of LRA, not only
1995 at the final elimination. */
1996 if (lra_in_progress
1997 || (base != arg_pointer_rtx
1998 && indx != arg_pointer_rtx
1999 && base != return_address_pointer_rtx
2000 && indx != return_address_pointer_rtx
2001 && base != frame_pointer_rtx
2002 && indx != frame_pointer_rtx
2003 && base != virtual_stack_vars_rtx
2004 && indx != virtual_stack_vars_rtx))
2005 if (!DISP_IN_RANGE (offset))
2006 return false;
2008 else
2010 /* All the special cases are pointers. */
2011 pointer = true;
2013 /* In the small-PIC case, the linker converts @GOT
2014 and @GOTNTPOFF offsets to possible displacements. */
2015 if (GET_CODE (disp) == UNSPEC
2016 && (XINT (disp, 1) == UNSPEC_GOT
2017 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2018 && flag_pic == 1)
2023 /* Accept pool label offsets. */
2024 else if (GET_CODE (disp) == UNSPEC
2025 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2028 /* Accept literal pool references. */
2029 else if (GET_CODE (disp) == UNSPEC
2030 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2032 /* In case CSE pulled a non literal pool reference out of
2033 the pool we have to reject the address. This is
2034 especially important when loading the GOT pointer on non
2035 zarch CPUs. In this case the literal pool contains an lt
2036 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2037 will most likely exceed the displacement. */
2038 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2039 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2040 return false;
2042 orig_disp = gen_rtx_CONST (Pmode, disp);
2043 if (offset)
2045 /* If we have an offset, make sure it does not
2046 exceed the size of the constant pool entry. */
2047 rtx sym = XVECEXP (disp, 0, 0);
2048 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2049 return false;
2051 orig_disp = plus_constant (Pmode, orig_disp, offset);
2055 else
2056 return false;
2059 if (!base && !indx)
2060 pointer = true;
2062 if (out)
2064 out->base = base;
2065 out->indx = indx;
2066 out->disp = orig_disp;
2067 out->pointer = pointer;
2068 out->literal_pool = literal_pool;
2071 return true;
2074 /* Decompose an RTL expression OP for a shift count into its components,
2075 and return the base register in BASE and the offset in OFFSET.
2077 Return true if OP is a valid shift count, false if not. */
2079 bool
2080 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2082 HOST_WIDE_INT off = 0;
2084 /* We can have an integer constant, an address register,
2085 or a sum of the two. */
2086 if (GET_CODE (op) == CONST_INT)
2088 off = INTVAL (op);
2089 op = NULL_RTX;
2091 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2093 off = INTVAL (XEXP (op, 1));
2094 op = XEXP (op, 0);
2096 while (op && GET_CODE (op) == SUBREG)
2097 op = SUBREG_REG (op);
2099 if (op && GET_CODE (op) != REG)
2100 return false;
2102 if (offset)
2103 *offset = off;
2104 if (base)
2105 *base = op;
2107 return true;
2111 /* Return true if OP is a valid address without index. */
2113 bool
2114 s390_legitimate_address_without_index_p (rtx op)
2116 struct s390_address addr;
2118 if (!s390_decompose_address (XEXP (op, 0), &addr))
2119 return false;
2120 if (addr.indx)
2121 return false;
2123 return true;
2127 /* Return TRUE if ADDR is an operand valid for a load/store relative
2128 instruction. Be aware that the alignment of the operand needs to
2129 be checked separately.
2130 Valid addresses are single references or a sum of a reference and a
2131 constant integer. Return these parts in SYMREF and ADDEND. You can
2132 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2133 values. Literal pool references are *not* considered symbol
2134 references. */
2136 static bool
2137 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2139 HOST_WIDE_INT tmpaddend = 0;
2141 if (GET_CODE (addr) == CONST)
2142 addr = XEXP (addr, 0);
2144 if (GET_CODE (addr) == PLUS)
2146 if (!CONST_INT_P (XEXP (addr, 1)))
2147 return false;
2149 tmpaddend = INTVAL (XEXP (addr, 1));
2150 addr = XEXP (addr, 0);
2153 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2154 || (GET_CODE (addr) == UNSPEC
2155 && (XINT (addr, 1) == UNSPEC_GOTENT
2156 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2158 if (symref)
2159 *symref = addr;
2160 if (addend)
2161 *addend = tmpaddend;
2163 return true;
2165 return false;
2168 /* Return true if the address in OP is valid for constraint letter C
2169 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2170 pool MEMs should be accepted. Only the Q, R, S, T constraint
2171 letters are allowed for C. */
2173 static int
2174 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2176 struct s390_address addr;
2177 bool decomposed = false;
2179 /* This check makes sure that no symbolic addresses (except literal
2180 pool references) are accepted by the R or T constraints. */
2181 if (s390_loadrelative_operand_p (op, NULL, NULL))
2182 return 0;
2184 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2185 if (!lit_pool_ok)
2187 if (!s390_decompose_address (op, &addr))
2188 return 0;
2189 if (addr.literal_pool)
2190 return 0;
2191 decomposed = true;
2194 switch (c)
2196 case 'Q': /* no index short displacement */
2197 if (!decomposed && !s390_decompose_address (op, &addr))
2198 return 0;
2199 if (addr.indx)
2200 return 0;
2201 if (!s390_short_displacement (addr.disp))
2202 return 0;
2203 break;
2205 case 'R': /* with index short displacement */
2206 if (TARGET_LONG_DISPLACEMENT)
2208 if (!decomposed && !s390_decompose_address (op, &addr))
2209 return 0;
2210 if (!s390_short_displacement (addr.disp))
2211 return 0;
2213 /* Any invalid address here will be fixed up by reload,
2214 so accept it for the most generic constraint. */
2215 break;
2217 case 'S': /* no index long displacement */
2218 if (!TARGET_LONG_DISPLACEMENT)
2219 return 0;
2220 if (!decomposed && !s390_decompose_address (op, &addr))
2221 return 0;
2222 if (addr.indx)
2223 return 0;
2224 if (s390_short_displacement (addr.disp))
2225 return 0;
2226 break;
2228 case 'T': /* with index long displacement */
2229 if (!TARGET_LONG_DISPLACEMENT)
2230 return 0;
2231 /* Any invalid address here will be fixed up by reload,
2232 so accept it for the most generic constraint. */
2233 if ((decomposed || s390_decompose_address (op, &addr))
2234 && s390_short_displacement (addr.disp))
2235 return 0;
2236 break;
2237 default:
2238 return 0;
2240 return 1;
2244 /* Evaluates constraint strings described by the regular expression
2245 ([ABZ](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2246 the constraint given in STR, and 0 otherwise. */
2249 s390_mem_constraint (const char *str, rtx op)
2251 char c = str[0];
2253 switch (c)
2255 case 'A':
2256 /* Check for offsettable variants of memory constraints. */
2257 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2258 return 0;
2259 if ((reload_completed || reload_in_progress)
2260 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2261 return 0;
2262 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2263 case 'B':
2264 /* Check for non-literal-pool variants of memory constraints. */
2265 if (!MEM_P (op))
2266 return 0;
2267 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2268 case 'Q':
2269 case 'R':
2270 case 'S':
2271 case 'T':
2272 if (GET_CODE (op) != MEM)
2273 return 0;
2274 return s390_check_qrst_address (c, XEXP (op, 0), true);
2275 case 'U':
2276 return (s390_check_qrst_address ('Q', op, true)
2277 || s390_check_qrst_address ('R', op, true));
2278 case 'W':
2279 return (s390_check_qrst_address ('S', op, true)
2280 || s390_check_qrst_address ('T', op, true));
2281 case 'Y':
2282 /* Simply check for the basic form of a shift count. Reload will
2283 take care of making sure we have a proper base register. */
2284 if (!s390_decompose_shift_count (op, NULL, NULL))
2285 return 0;
2286 break;
2287 case 'Z':
2288 return s390_check_qrst_address (str[1], op, true);
2289 default:
2290 return 0;
2292 return 1;
2296 /* Evaluates constraint strings starting with letter O. Input
2297 parameter C is the letter following the "O" in the constraint
2298 string. Returns 1 if VALUE meets the respective constraint and 0
2299 otherwise. */
2302 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2304 if (!TARGET_EXTIMM)
2305 return 0;
2307 switch (c)
2309 case 's':
2310 return trunc_int_for_mode (value, SImode) == value;
2312 case 'p':
2313 return value == 0
2314 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2316 case 'n':
2317 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2319 default:
2320 gcc_unreachable ();
2325 /* Evaluates constraint strings starting with letter N. Parameter STR
2326 contains the letters following letter "N" in the constraint string.
2327 Returns true if VALUE matches the constraint. */
2330 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2332 machine_mode mode, part_mode;
2333 int def;
2334 int part, part_goal;
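/* An N constraint string has the form <part><part_mode><mode><filler>:
   str[0] is the number of the part that may differ ('x' for any part),
   str[1] the mode of that part (Q/H/S), str[2] the mode of the whole
   value (H/S/D) and str[3] the value of all other parts ('0' or 'F'
   for all ones). */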
2337 if (str[0] == 'x')
2338 part_goal = -1;
2339 else
2340 part_goal = str[0] - '0';
2342 switch (str[1])
2344 case 'Q':
2345 part_mode = QImode;
2346 break;
2347 case 'H':
2348 part_mode = HImode;
2349 break;
2350 case 'S':
2351 part_mode = SImode;
2352 break;
2353 default:
2354 return 0;
2357 switch (str[2])
2359 case 'H':
2360 mode = HImode;
2361 break;
2362 case 'S':
2363 mode = SImode;
2364 break;
2365 case 'D':
2366 mode = DImode;
2367 break;
2368 default:
2369 return 0;
2372 switch (str[3])
2374 case '0':
2375 def = 0;
2376 break;
2377 case 'F':
2378 def = -1;
2379 break;
2380 default:
2381 return 0;
2384 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2385 return 0;
2387 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2388 if (part < 0)
2389 return 0;
2390 if (part_goal != -1 && part_goal != part)
2391 return 0;
2393 return 1;
2397 /* Returns true if the input parameter VALUE is a float zero. */
2400 s390_float_const_zero_p (rtx value)
2402 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2403 && value == CONST0_RTX (GET_MODE (value)));
2406 /* Implement TARGET_REGISTER_MOVE_COST. */
2408 static int
2409 s390_register_move_cost (machine_mode mode,
2410 reg_class_t from, reg_class_t to)
2412 /* On s390, copying between fprs and gprs is expensive. */
2414 /* It becomes somewhat faster with ldgr/lgdr. */
2415 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
2417 /* ldgr is single cycle. */
2418 if (reg_classes_intersect_p (from, GENERAL_REGS)
2419 && reg_classes_intersect_p (to, FP_REGS))
2420 return 1;
2421 /* lgdr needs 3 cycles. */
2422 if (reg_classes_intersect_p (to, GENERAL_REGS)
2423 && reg_classes_intersect_p (from, FP_REGS))
2424 return 3;
2427 /* Otherwise copying is done via memory. */
2428 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2429 && reg_classes_intersect_p (to, FP_REGS))
2430 || (reg_classes_intersect_p (from, FP_REGS)
2431 && reg_classes_intersect_p (to, GENERAL_REGS)))
2432 return 10;
2434 return 1;
2437 /* Implement TARGET_MEMORY_MOVE_COST. */
2439 static int
2440 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2441 reg_class_t rclass ATTRIBUTE_UNUSED,
2442 bool in ATTRIBUTE_UNUSED)
2444 return 2;
2447 /* Compute a (partial) cost for rtx X. Return true if the complete
2448 cost has been computed, and false if subexpressions should be
2449 scanned. In either case, *TOTAL contains the cost result.
2450 CODE contains GET_CODE (x), OUTER_CODE contains the code
2451 of the superexpression of x. */
2453 static bool
2454 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2455 int *total, bool speed ATTRIBUTE_UNUSED)
2457 switch (code)
2459 case CONST:
2460 case CONST_INT:
2461 case LABEL_REF:
2462 case SYMBOL_REF:
2463 case CONST_DOUBLE:
2464 case MEM:
2465 *total = 0;
2466 return true;
2468 case ASHIFT:
2469 case ASHIFTRT:
2470 case LSHIFTRT:
2471 case ROTATE:
2472 case ROTATERT:
2473 case AND:
2474 case IOR:
2475 case XOR:
2476 case NEG:
2477 case NOT:
2478 *total = COSTS_N_INSNS (1);
2479 return false;
2481 case PLUS:
2482 case MINUS:
2483 *total = COSTS_N_INSNS (1);
2484 return false;
2486 case MULT:
2487 switch (GET_MODE (x))
2489 case SImode:
2491 rtx left = XEXP (x, 0);
2492 rtx right = XEXP (x, 1);
2493 if (GET_CODE (right) == CONST_INT
2494 && CONST_OK_FOR_K (INTVAL (right)))
2495 *total = s390_cost->mhi;
2496 else if (GET_CODE (left) == SIGN_EXTEND)
2497 *total = s390_cost->mh;
2498 else
2499 *total = s390_cost->ms; /* msr, ms, msy */
2500 break;
2502 case DImode:
2504 rtx left = XEXP (x, 0);
2505 rtx right = XEXP (x, 1);
2506 if (TARGET_ZARCH)
2508 if (GET_CODE (right) == CONST_INT
2509 && CONST_OK_FOR_K (INTVAL (right)))
2510 *total = s390_cost->mghi;
2511 else if (GET_CODE (left) == SIGN_EXTEND)
2512 *total = s390_cost->msgf;
2513 else
2514 *total = s390_cost->msg; /* msgr, msg */
2516 else /* TARGET_31BIT */
2518 if (GET_CODE (left) == SIGN_EXTEND
2519 && GET_CODE (right) == SIGN_EXTEND)
2520 /* mulsidi case: mr, m */
2521 *total = s390_cost->m;
2522 else if (GET_CODE (left) == ZERO_EXTEND
2523 && GET_CODE (right) == ZERO_EXTEND
2524 && TARGET_CPU_ZARCH)
2525 /* umulsidi case: ml, mlr */
2526 *total = s390_cost->ml;
2527 else
2528 /* Complex calculation is required. */
2529 *total = COSTS_N_INSNS (40);
2531 break;
2533 case SFmode:
2534 case DFmode:
2535 *total = s390_cost->mult_df;
2536 break;
2537 case TFmode:
2538 *total = s390_cost->mxbr;
2539 break;
2540 default:
2541 return false;
2543 return false;
2545 case FMA:
2546 switch (GET_MODE (x))
2548 case DFmode:
2549 *total = s390_cost->madbr;
2550 break;
2551 case SFmode:
2552 *total = s390_cost->maebr;
2553 break;
2554 default:
2555 return false;
2557 /* Negating the third argument is free: FMSUB. */
2558 if (GET_CODE (XEXP (x, 2)) == NEG)
2560 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2561 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2562 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2563 return true;
2565 return false;
2567 case UDIV:
2568 case UMOD:
2569 if (GET_MODE (x) == TImode) /* 128 bit division */
2570 *total = s390_cost->dlgr;
2571 else if (GET_MODE (x) == DImode)
2573 rtx right = XEXP (x, 1);
2574 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2575 *total = s390_cost->dlr;
2576 else /* 64 by 64 bit division */
2577 *total = s390_cost->dlgr;
2579 else if (GET_MODE (x) == SImode) /* 32 bit division */
2580 *total = s390_cost->dlr;
2581 return false;
2583 case DIV:
2584 case MOD:
2585 if (GET_MODE (x) == DImode)
2587 rtx right = XEXP (x, 1);
2588 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2589 if (TARGET_ZARCH)
2590 *total = s390_cost->dsgfr;
2591 else
2592 *total = s390_cost->dr;
2593 else /* 64 by 64 bit division */
2594 *total = s390_cost->dsgr;
2596 else if (GET_MODE (x) == SImode) /* 32 bit division */
2597 *total = s390_cost->dlr;
2598 else if (GET_MODE (x) == SFmode)
2600 *total = s390_cost->debr;
2602 else if (GET_MODE (x) == DFmode)
2604 *total = s390_cost->ddbr;
2606 else if (GET_MODE (x) == TFmode)
2608 *total = s390_cost->dxbr;
2610 return false;
2612 case SQRT:
2613 if (GET_MODE (x) == SFmode)
2614 *total = s390_cost->sqebr;
2615 else if (GET_MODE (x) == DFmode)
2616 *total = s390_cost->sqdbr;
2617 else /* TFmode */
2618 *total = s390_cost->sqxbr;
2619 return false;
2621 case SIGN_EXTEND:
2622 case ZERO_EXTEND:
2623 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2624 || outer_code == PLUS || outer_code == MINUS
2625 || outer_code == COMPARE)
2626 *total = 0;
2627 return false;
2629 case COMPARE:
2630 *total = COSTS_N_INSNS (1);
2631 if (GET_CODE (XEXP (x, 0)) == AND
2632 && GET_CODE (XEXP (x, 1)) == CONST_INT
2633 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2635 rtx op0 = XEXP (XEXP (x, 0), 0);
2636 rtx op1 = XEXP (XEXP (x, 0), 1);
2637 rtx op2 = XEXP (x, 1);
2639 if (memory_operand (op0, GET_MODE (op0))
2640 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2641 return true;
2642 if (register_operand (op0, GET_MODE (op0))
2643 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2644 return true;
2646 return false;
2648 default:
2649 return false;
2653 /* Return the cost of an address rtx ADDR. */
2655 static int
2656 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2657 addr_space_t as ATTRIBUTE_UNUSED,
2658 bool speed ATTRIBUTE_UNUSED)
2660 struct s390_address ad;
2661 if (!s390_decompose_address (addr, &ad))
2662 return 1000;
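/* An address using base + index + displacement is slightly more
   expensive than one using just base + displacement. */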
2664 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2667 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2668 otherwise return 0. */
2671 tls_symbolic_operand (rtx op)
2673 if (GET_CODE (op) != SYMBOL_REF)
2674 return 0;
2675 return SYMBOL_REF_TLS_MODEL (op);
2678 /* Split DImode access register reference REG (on 64-bit) into its constituent
2679 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2680 gen_highpart cannot be used as they assume all registers are word-sized,
2681 while our access registers have only half that size. */
2683 void
2684 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2686 gcc_assert (TARGET_64BIT);
2687 gcc_assert (ACCESS_REG_P (reg));
2688 gcc_assert (GET_MODE (reg) == DImode);
2689 gcc_assert (!(REGNO (reg) & 1));
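/* The even register of the pair holds the high part, the following
   odd register the low part. */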
2691 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2692 *hi = gen_rtx_REG (SImode, REGNO (reg));
2695 /* Return true if OP contains a symbol reference. */
2697 bool
2698 symbolic_reference_mentioned_p (rtx op)
2700 const char *fmt;
2701 int i;
2703 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2704 return 1;
2706 fmt = GET_RTX_FORMAT (GET_CODE (op));
2707 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2709 if (fmt[i] == 'E')
2711 int j;
2713 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2714 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2715 return 1;
2718 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2719 return 1;
2722 return 0;
2725 /* Return true if OP contains a reference to a thread-local symbol. */
2727 bool
2728 tls_symbolic_reference_mentioned_p (rtx op)
2730 const char *fmt;
2731 int i;
2733 if (GET_CODE (op) == SYMBOL_REF)
2734 return tls_symbolic_operand (op);
2736 fmt = GET_RTX_FORMAT (GET_CODE (op));
2737 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2739 if (fmt[i] == 'E')
2741 int j;
2743 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2744 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2745 return true;
2748 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2749 return true;
2752 return false;
2756 /* Return true if OP is a legitimate general operand when
2757 generating PIC code. It is given that flag_pic is on
2758 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2761 legitimate_pic_operand_p (rtx op)
2763 /* Accept all non-symbolic constants. */
2764 if (!SYMBOLIC_CONST (op))
2765 return 1;
2767 /* Reject everything else; must be handled
2768 via emit_symbolic_move. */
2769 return 0;
2772 /* Returns true if the constant value OP is a legitimate general operand.
2773 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2775 static bool
2776 s390_legitimate_constant_p (machine_mode mode, rtx op)
2778 /* Accept all non-symbolic constants. */
2779 if (!SYMBOLIC_CONST (op))
2780 return 1;
2782 /* Accept immediate LARL operands. */
2783 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2784 return 1;
2786 /* Thread-local symbols are never legal constants. This is
2787 so that emit_call knows that computing such addresses
2788 might require a function call. */
2789 if (TLS_SYMBOLIC_CONST (op))
2790 return 0;
2792 /* In the PIC case, symbolic constants must *not* be
2793 forced into the literal pool. We accept them here,
2794 so that they will be handled by emit_symbolic_move. */
2795 if (flag_pic)
2796 return 1;
2798 /* All remaining non-PIC symbolic constants are
2799 forced into the literal pool. */
2800 return 0;
2803 /* Determine if it's legal to put X into the constant pool. This
2804 is not possible if X contains the address of a symbol that is
2805 not constant (TLS) or not known at final link time (PIC). */
2807 static bool
2808 s390_cannot_force_const_mem (machine_mode mode, rtx x)
2810 switch (GET_CODE (x))
2812 case CONST_INT:
2813 case CONST_DOUBLE:
2814 /* Accept all non-symbolic constants. */
2815 return false;
2817 case LABEL_REF:
2818 /* Labels are OK iff we are non-PIC. */
2819 return flag_pic != 0;
2821 case SYMBOL_REF:
2822 /* 'Naked' TLS symbol references are never OK,
2823 non-TLS symbols are OK iff we are non-PIC. */
2824 if (tls_symbolic_operand (x))
2825 return true;
2826 else
2827 return flag_pic != 0;
2829 case CONST:
2830 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2831 case PLUS:
2832 case MINUS:
2833 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2834 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2836 case UNSPEC:
2837 switch (XINT (x, 1))
2839 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2840 case UNSPEC_LTREL_OFFSET:
2841 case UNSPEC_GOT:
2842 case UNSPEC_GOTOFF:
2843 case UNSPEC_PLTOFF:
2844 case UNSPEC_TLSGD:
2845 case UNSPEC_TLSLDM:
2846 case UNSPEC_NTPOFF:
2847 case UNSPEC_DTPOFF:
2848 case UNSPEC_GOTNTPOFF:
2849 case UNSPEC_INDNTPOFF:
2850 return false;
2852 /* If the literal pool shares the code section, we put
2853 execute template placeholders into the pool as well. */
2854 case UNSPEC_INSN:
2855 return TARGET_CPU_ZARCH;
2857 default:
2858 return true;
2860 break;
2862 default:
2863 gcc_unreachable ();
2867 /* Returns true if the constant value OP is a legitimate general
2868 operand during and after reload. The difference from
2869 legitimate_constant_p is that this function will not accept
2870 a constant that would need to be forced to the literal pool
2871 before it can be used as an operand.
2872 This function accepts all constants which can be loaded directly
2873 into a GPR. */
2875 bool
2876 legitimate_reload_constant_p (rtx op)
2878 /* Accept la(y) operands. */
2879 if (GET_CODE (op) == CONST_INT
2880 && DISP_IN_RANGE (INTVAL (op)))
2881 return true;
2883 /* Accept l(g)hi/l(g)fi operands. */
2884 if (GET_CODE (op) == CONST_INT
2885 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2886 return true;
2888 /* Accept lliXX operands. */
2889 if (TARGET_ZARCH
2890 && GET_CODE (op) == CONST_INT
2891 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2892 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2893 return true;
2895 if (TARGET_EXTIMM
2896 && GET_CODE (op) == CONST_INT
2897 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2898 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2899 return true;
2901 /* Accept larl operands. */
2902 if (TARGET_CPU_ZARCH
2903 && larl_operand (op, VOIDmode))
2904 return true;
2906 /* Accept floating-point zero operands that fit into a single GPR. */
2907 if (GET_CODE (op) == CONST_DOUBLE
2908 && s390_float_const_zero_p (op)
2909 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2910 return true;
2912 /* Accept double-word operands that can be split. */
2913 if (GET_CODE (op) == CONST_INT
2914 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2916 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2917 rtx hi = operand_subword (op, 0, 0, dword_mode);
2918 rtx lo = operand_subword (op, 1, 0, dword_mode);
2919 return legitimate_reload_constant_p (hi)
2920 && legitimate_reload_constant_p (lo);
2923 /* Everything else cannot be handled without reload. */
2924 return false;
2927 /* Returns true if the constant value OP is a legitimate fp operand
2928 during and after reload.
2929 This function accepts all constants which can be loaded directly
2930 into an FPR. */
2932 static bool
2933 legitimate_reload_fp_constant_p (rtx op)
2935 /* Accept floating-point zero operands if the load zero instruction
2936 can be used. Prior to z196 the load fp zero instruction caused a
2937 performance penalty if the result is used as a BFP number. */
2938 if (TARGET_Z196
2939 && GET_CODE (op) == CONST_DOUBLE
2940 && s390_float_const_zero_p (op))
2941 return true;
2943 return false;
2946 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2947 return the class of reg to actually use. */
2949 static reg_class_t
2950 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2952 switch (GET_CODE (op))
2954 /* Constants we cannot reload into general registers
2955 must be forced into the literal pool. */
2956 case CONST_DOUBLE:
2957 case CONST_INT:
2958 if (reg_class_subset_p (GENERAL_REGS, rclass)
2959 && legitimate_reload_constant_p (op))
2960 return GENERAL_REGS;
2961 else if (reg_class_subset_p (ADDR_REGS, rclass)
2962 && legitimate_reload_constant_p (op))
2963 return ADDR_REGS;
2964 else if (reg_class_subset_p (FP_REGS, rclass)
2965 && legitimate_reload_fp_constant_p (op))
2966 return FP_REGS;
2967 return NO_REGS;
2969 /* If a symbolic constant or a PLUS is reloaded,
2970 it is most likely being used as an address, so
2971 prefer ADDR_REGS. If 'class' is not a superset
2972 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2973 case CONST:
2974 /* Symrefs cannot be pushed into the literal pool with -fPIC
2975 so we *MUST NOT* return NO_REGS for these cases
2976 (s390_cannot_force_const_mem will return true).
2978 On the other hand we MUST return NO_REGS for symrefs with an
2979 invalid addend which might have been pushed to the literal
2980 pool (no -fPIC). Usually we would expect them to be
2981 handled via secondary reload, but this does not happen if
2982 they are used as a literal pool slot replacement in reload
2983 inheritance (see emit_input_reload_insns). */
2984 if (TARGET_CPU_ZARCH
2985 && GET_CODE (XEXP (op, 0)) == PLUS
2986 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2987 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2989 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
2990 return ADDR_REGS;
2991 else
2992 return NO_REGS;
2994 /* fallthrough */
2995 case LABEL_REF:
2996 case SYMBOL_REF:
2997 if (!legitimate_reload_constant_p (op))
2998 return NO_REGS;
2999 /* fallthrough */
3000 case PLUS:
3001 /* load address will be used. */
3002 if (reg_class_subset_p (ADDR_REGS, rclass))
3003 return ADDR_REGS;
3004 else
3005 return NO_REGS;
3007 default:
3008 break;
3011 return rclass;
3014 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3015 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3016 aligned. */
3018 bool
3019 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3021 HOST_WIDE_INT addend;
3022 rtx symref;
3024 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3025 return false;
3027 if (addend & (alignment - 1))
3028 return false;
3030 if (GET_CODE (symref) == SYMBOL_REF
3031 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3032 return true;
3034 if (GET_CODE (symref) == UNSPEC
3035 && alignment <= UNITS_PER_LONG)
3036 return true;
3038 return false;
3041 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3042 operand, SCRATCH is used to reload the even part of the address,
3043 and one is then added. */
3045 void
3046 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3048 HOST_WIDE_INT addend;
3049 rtx symref;
3051 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3052 gcc_unreachable ();
3054 if (!(addend & 1))
3055 /* Easy case. The addend is even so larl will do fine. */
3056 emit_move_insn (reg, addr);
3057 else
3059 /* We can leave the scratch register untouched if the target
3060 register is a valid base register. */
3061 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3062 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3063 scratch = reg;
3065 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3066 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3068 if (addend != 1)
3069 emit_move_insn (scratch,
3070 gen_rtx_CONST (Pmode,
3071 gen_rtx_PLUS (Pmode, symref,
3072 GEN_INT (addend - 1))));
3073 else
3074 emit_move_insn (scratch, symref);
3076 /* Increment the address using la in order to avoid clobbering cc. */
3077 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3081 /* Generate what is necessary to move between REG and MEM using
3082 SCRATCH. The direction is given by TOMEM. */
3084 void
3085 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3087 /* Reload might have pulled a constant out of the literal pool.
3088 Force it back in. */
3089 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3090 || GET_CODE (mem) == CONST)
3091 mem = force_const_mem (GET_MODE (reg), mem);
3093 gcc_assert (MEM_P (mem));
3095 /* For a load from memory we can leave the scratch register
3096 untouched if the target register is a valid base register. */
3097 if (!tomem
3098 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3099 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3100 && GET_MODE (reg) == GET_MODE (scratch))
3101 scratch = reg;
3103 /* Load address into scratch register. Since we can't have a
3104 secondary reload for a secondary reload we have to cover the case
3105 where larl would need a secondary reload here as well. */
3106 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3108 /* Now we can use a standard load/store to do the move. */
3109 if (tomem)
3110 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3111 else
3112 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3115 /* Inform reload about cases where moving X with a mode MODE to a register in
3116 RCLASS requires an extra scratch or immediate register. Return the class
3117 needed for the immediate register. */
3119 static reg_class_t
3120 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3121 machine_mode mode, secondary_reload_info *sri)
3123 enum reg_class rclass = (enum reg_class) rclass_i;
3125 /* Intermediate register needed. */
3126 if (reg_classes_intersect_p (CC_REGS, rclass))
3127 return GENERAL_REGS;
3129 if (TARGET_Z10)
3131 HOST_WIDE_INT offset;
3132 rtx symref;
3134 /* On z10 several optimizer steps may generate larl operands with
3135 an odd addend. */
3136 if (in_p
3137 && s390_loadrelative_operand_p (x, &symref, &offset)
3138 && mode == Pmode
3139 && !SYMBOL_REF_ALIGN1_P (symref)
3140 && (offset & 1) == 1)
3141 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3142 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3144 /* On z10 we need a scratch register when moving QI, TI or floating
3145 point mode values from or to a memory location with a SYMBOL_REF
3146 or if the symref addend of a SI or DI move is not aligned to the
3147 width of the access. */
3148 if (MEM_P (x)
3149 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3150 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3151 || (!TARGET_ZARCH && mode == DImode)
3152 || ((mode == HImode || mode == SImode || mode == DImode)
3153 && (!s390_check_symref_alignment (XEXP (x, 0),
3154 GET_MODE_SIZE (mode))))))
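/* Select the reload pattern matching the mode of X; the DImode
   address variants are used for 64 bit, the SImode ones otherwise. */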
3156 #define __SECONDARY_RELOAD_CASE(M,m) \
3157 case M##mode: \
3158 if (TARGET_64BIT) \
3159 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3160 CODE_FOR_reload##m##di_tomem_z10; \
3161 else \
3162 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3163 CODE_FOR_reload##m##si_tomem_z10; \
3164 break;
3166 switch (GET_MODE (x))
3168 __SECONDARY_RELOAD_CASE (QI, qi);
3169 __SECONDARY_RELOAD_CASE (HI, hi);
3170 __SECONDARY_RELOAD_CASE (SI, si);
3171 __SECONDARY_RELOAD_CASE (DI, di);
3172 __SECONDARY_RELOAD_CASE (TI, ti);
3173 __SECONDARY_RELOAD_CASE (SF, sf);
3174 __SECONDARY_RELOAD_CASE (DF, df);
3175 __SECONDARY_RELOAD_CASE (TF, tf);
3176 __SECONDARY_RELOAD_CASE (SD, sd);
3177 __SECONDARY_RELOAD_CASE (DD, dd);
3178 __SECONDARY_RELOAD_CASE (TD, td);
3180 default:
3181 gcc_unreachable ();
3183 #undef __SECONDARY_RELOAD_CASE
3187 /* We need a scratch register when loading a PLUS expression which
3188 is not a legitimate operand of the LOAD ADDRESS instruction. */
3189 /* LRA can deal with the transformation of a PLUS operand very well,
3190 so we don't need to prompt LRA in this case. */
3191 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
3192 sri->icode = (TARGET_64BIT ?
3193 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3195 /* When performing a multiword move from or to memory we have to make
3196 sure the second chunk in memory is addressable without causing a
3197 displacement overflow. If that would be the case we calculate the
3198 address in a scratch register. */
3199 if (MEM_P (x)
3200 && GET_CODE (XEXP (x, 0)) == PLUS
3201 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3202 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3203 + GET_MODE_SIZE (mode) - 1))
3205 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3206 in an s_operand address, since we may fall back to lm/stm. So we only
3207 have to care about overflows in the b+i+d case. */
3208 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3209 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3211 /* For FP_REGS no lm/stm is available so this check is triggered
3212 for displacement overflows in b+i+d and b+d like addresses. */
3213 || (reg_classes_intersect_p (FP_REGS, rclass)
3214 && s390_class_max_nregs (FP_REGS, mode) > 1))
3216 if (in_p)
3217 sri->icode = (TARGET_64BIT ?
3218 CODE_FOR_reloaddi_nonoffmem_in :
3219 CODE_FOR_reloadsi_nonoffmem_in);
3220 else
3221 sri->icode = (TARGET_64BIT ?
3222 CODE_FOR_reloaddi_nonoffmem_out :
3223 CODE_FOR_reloadsi_nonoffmem_out);
3227 /* A scratch address register is needed when a symbolic constant is
3228 copied to r0 while compiling with -fPIC. In other cases the target
3229 register might be used as a temporary (see legitimize_pic_address). */
3230 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3231 sri->icode = (TARGET_64BIT ?
3232 CODE_FOR_reloaddi_PIC_addr :
3233 CODE_FOR_reloadsi_PIC_addr);
3235 /* Either scratch or no register needed. */
3236 return NO_REGS;
3239 /* Generate code to load SRC, which is a PLUS that is not a
3240 legitimate operand for the LA instruction, into TARGET.
3241 SCRATCH may be used as scratch register. */
3243 void
3244 s390_expand_plus_operand (rtx target, rtx src,
3245 rtx scratch)
3247 rtx sum1, sum2;
3248 struct s390_address ad;
3250 /* src must be a PLUS; get its two operands. */
3251 gcc_assert (GET_CODE (src) == PLUS);
3252 gcc_assert (GET_MODE (src) == Pmode);
3254 /* Check if either of the two operands is already scheduled
3255 for replacement by reload. This can happen e.g. when
3256 float registers occur in an address. */
3257 sum1 = find_replacement (&XEXP (src, 0));
3258 sum2 = find_replacement (&XEXP (src, 1));
3259 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3261 /* If the address is already strictly valid, there's nothing to do. */
3262 if (!s390_decompose_address (src, &ad)
3263 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3264 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3266 /* Otherwise, one of the operands cannot be an address register;
3267 we reload its value into the scratch register. */
3268 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3270 emit_move_insn (scratch, sum1);
3271 sum1 = scratch;
3273 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3275 emit_move_insn (scratch, sum2);
3276 sum2 = scratch;
3279 /* According to the way these invalid addresses are generated
3280 in reload.c, it should never happen (at least on s390) that
3281 *neither* of the PLUS components, after find_replacements
3282 was applied, is an address register. */
3283 if (sum1 == scratch && sum2 == scratch)
3285 debug_rtx (src);
3286 gcc_unreachable ();
3289 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3292 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3293 is only ever performed on addresses, so we can mark the
3294 sum as legitimate for LA in any case. */
3295 s390_load_address (target, src);
3299 /* Return true if ADDR is a valid memory address.
3300 STRICT specifies whether strict register checking applies. */
3302 static bool
3303 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
3305 struct s390_address ad;
3307 if (TARGET_Z10
3308 && larl_operand (addr, VOIDmode)
3309 && (mode == VOIDmode
3310 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3311 return true;
3313 if (!s390_decompose_address (addr, &ad))
3314 return false;
3316 if (strict)
3318 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3319 return false;
3321 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3322 return false;
3324 else
3326 if (ad.base
3327 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3328 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3329 return false;
3331 if (ad.indx
3332 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3333 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3334 return false;
3336 return true;
3339 /* Return true if OP is a valid operand for the LA instruction.
3340 In 31-bit, we need to prove that the result is used as an
3341 address, as LA performs only a 31-bit addition. */
3343 bool
3344 legitimate_la_operand_p (rtx op)
3346 struct s390_address addr;
3347 if (!s390_decompose_address (op, &addr))
3348 return false;
3350 return (TARGET_64BIT || addr.pointer);
3353 /* Return true if it is valid *and* preferable to use LA to
3354 compute the sum of OP1 and OP2. */
3356 bool
3357 preferred_la_operand_p (rtx op1, rtx op2)
3359 struct s390_address addr;
3361 if (op2 != const0_rtx)
3362 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3364 if (!s390_decompose_address (op1, &addr))
3365 return false;
3366 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3367 return false;
3368 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3369 return false;
3371 /* Avoid LA instructions with index register on z196; it is
3372 preferable to use regular add instructions when possible.
3373 Starting with zEC12 the la with index register is "uncracked"
3374 again. */
3375 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3376 return false;
3378 if (!TARGET_64BIT && !addr.pointer)
3379 return false;
3381 if (addr.pointer)
3382 return true;
3384 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3385 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3386 return true;
3388 return false;
3391 /* Emit a forced load-address operation to load SRC into DST.
3392 This will use the LOAD ADDRESS instruction even in situations
3393 where legitimate_la_operand_p (SRC) returns false. */
3395 void
3396 s390_load_address (rtx dst, rtx src)
3398 if (TARGET_64BIT)
3399 emit_move_insn (dst, src);
3400 else
3401 emit_insn (gen_force_la_31 (dst, src));
3404 /* Return a legitimate reference for ORIG (an address) using the
3405 register REG. If REG is 0, a new pseudo is generated.
3407 There are two types of references that must be handled:
3409 1. Global data references must load the address from the GOT, via
3410 the PIC reg. An insn is emitted to do this load, and the reg is
3411 returned.
3413 2. Static data references, constant pool addresses, and code labels
3414 compute the address as an offset from the GOT, whose base is in
3415 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3416 differentiate them from global data objects. The returned
3417 address is the PIC reg + an unspec constant.
3419 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3420 reg also appears in the address. */
3423 legitimize_pic_address (rtx orig, rtx reg)
3425 rtx addr = orig;
3426 rtx addend = const0_rtx;
3427 rtx new_rtx = orig;
3429 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3431 if (GET_CODE (addr) == CONST)
3432 addr = XEXP (addr, 0);
3434 if (GET_CODE (addr) == PLUS)
3436 addend = XEXP (addr, 1);
3437 addr = XEXP (addr, 0);
3440 if ((GET_CODE (addr) == LABEL_REF
3441 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3442 || (GET_CODE (addr) == UNSPEC &&
3443 (XINT (addr, 1) == UNSPEC_GOTENT
3444 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3445 && GET_CODE (addend) == CONST_INT)
3447 /* This can be locally addressed. */
3449 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3450 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3451 gen_rtx_CONST (Pmode, addr) : addr);
3453 if (TARGET_CPU_ZARCH
3454 && larl_operand (const_addr, VOIDmode)
3455 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3456 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3458 if (INTVAL (addend) & 1)
3460 /* LARL can't handle odd offsets, so emit a pair of LARL
3461 and LA. */
3462 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3464 if (!DISP_IN_RANGE (INTVAL (addend)))
3466 HOST_WIDE_INT even = INTVAL (addend) - 1;
3467 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3468 addr = gen_rtx_CONST (Pmode, addr);
3469 addend = const1_rtx;
3472 emit_move_insn (temp, addr);
3473 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3475 if (reg != 0)
3477 s390_load_address (reg, new_rtx);
3478 new_rtx = reg;
3481 else
3483 /* If the offset is even, we can just use LARL. This
3484 will happen automatically. */
3487 else
3489 /* No larl - Access local symbols relative to the GOT. */
3491 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3493 if (reload_in_progress || reload_completed)
3494 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3496 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3497 if (addend != const0_rtx)
3498 addr = gen_rtx_PLUS (Pmode, addr, addend);
3499 addr = gen_rtx_CONST (Pmode, addr);
3500 addr = force_const_mem (Pmode, addr);
3501 emit_move_insn (temp, addr);
3503 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3504 if (reg != 0)
3506 s390_load_address (reg, new_rtx);
3507 new_rtx = reg;
3511 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3513 /* A non-local symbol reference without addend.
3515 The symbol ref is wrapped into an UNSPEC to make sure the
3516 proper operand modifier (@GOT or @GOTENT) will be emitted.
3517 This will tell the linker to put the symbol into the GOT.
3519 Additionally the code dereferencing the GOT slot is emitted here.
3521 An addend to the symref needs to be added afterwards.
3522 legitimize_pic_address calls itself recursively to handle
3523 that case. So no need to do it here. */
3525 if (reg == 0)
3526 reg = gen_reg_rtx (Pmode);
3528 if (TARGET_Z10)
3530 /* Use load relative if possible.
3531 lgrl <target>, sym@GOTENT */
3532 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3533 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3534 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3536 emit_move_insn (reg, new_rtx);
3537 new_rtx = reg;
3539 else if (flag_pic == 1)
3541 /* Assume GOT offset is a valid displacement operand (< 4k
3542 or < 512k with z990). This is handled the same way in
3543 both 31- and 64-bit code (@GOT).
3544 lg <target>, sym@GOT(r12) */
3546 if (reload_in_progress || reload_completed)
3547 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3549 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3550 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3551 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3552 new_rtx = gen_const_mem (Pmode, new_rtx);
3553 emit_move_insn (reg, new_rtx);
3554 new_rtx = reg;
3556 else if (TARGET_CPU_ZARCH)
3558 /* If the GOT offset might be >= 4k, we determine the position
3559 of the GOT entry via a PC-relative LARL (@GOTENT).
3560 larl temp, sym@GOTENT
3561 lg <target>, 0(temp) */
3563 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3565 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3566 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3568 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3569 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3570 emit_move_insn (temp, new_rtx);
3572 new_rtx = gen_const_mem (Pmode, temp);
3573 emit_move_insn (reg, new_rtx);
3575 new_rtx = reg;
3577 else
3579 /* If the GOT offset might be >= 4k, we have to load it
3580 from the literal pool (@GOT).
3582 lg temp, lit-litbase(r13)
3583 lg <target>, 0(temp)
3584 lit: .long sym@GOT */
3586 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3588 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3589 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3591 if (reload_in_progress || reload_completed)
3592 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3594 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3595 addr = gen_rtx_CONST (Pmode, addr);
3596 addr = force_const_mem (Pmode, addr);
3597 emit_move_insn (temp, addr);
3599 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3600 new_rtx = gen_const_mem (Pmode, new_rtx);
3601 emit_move_insn (reg, new_rtx);
3602 new_rtx = reg;
3605 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3607 gcc_assert (XVECLEN (addr, 0) == 1);
3608 switch (XINT (addr, 1))
3610 /* These address symbols (or PLT slots) relative to the GOT
3611 (not GOT slots!). In general this will exceed the
3612 displacement range, so these values belong in the literal
3613 pool. */
3614 case UNSPEC_GOTOFF:
3615 case UNSPEC_PLTOFF:
3616 new_rtx = force_const_mem (Pmode, orig);
3617 break;
3619 /* For -fPIC the GOT size might exceed the displacement
3620 range so make sure the value is in the literal pool. */
3621 case UNSPEC_GOT:
3622 if (flag_pic == 2)
3623 new_rtx = force_const_mem (Pmode, orig);
3624 break;
3626 /* For @GOTENT larl is used. This is handled like local
3627 symbol refs. */
3628 case UNSPEC_GOTENT:
3629 gcc_unreachable ();
3630 break;
3632 /* @PLT is OK as-is on 64-bit but must be converted to
3633 GOT-relative @PLTOFF on 31-bit. */
3634 case UNSPEC_PLT:
3635 if (!TARGET_CPU_ZARCH)
3637 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3639 if (reload_in_progress || reload_completed)
3640 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3642 addr = XVECEXP (addr, 0, 0);
3643 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3644 UNSPEC_PLTOFF);
3645 if (addend != const0_rtx)
3646 addr = gen_rtx_PLUS (Pmode, addr, addend);
3647 addr = gen_rtx_CONST (Pmode, addr);
3648 addr = force_const_mem (Pmode, addr);
3649 emit_move_insn (temp, addr);
3651 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3652 if (reg != 0)
3654 s390_load_address (reg, new_rtx);
3655 new_rtx = reg;
3658 else
3659 /* On 64 bit larl can be used. This case is handled like
3660 local symbol refs. */
3661 gcc_unreachable ();
3662 break;
3664 /* Everything else cannot happen. */
3665 default:
3666 gcc_unreachable ();
3669 else if (addend != const0_rtx)
3671 /* Otherwise, compute the sum. */
3673 rtx base = legitimize_pic_address (addr, reg);
3674 new_rtx = legitimize_pic_address (addend,
3675 base == reg ? NULL_RTX : reg);
3676 if (GET_CODE (new_rtx) == CONST_INT)
3677 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3678 else
3680 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3682 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3683 new_rtx = XEXP (new_rtx, 1);
3685 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3688 if (GET_CODE (new_rtx) == CONST)
3689 new_rtx = XEXP (new_rtx, 0);
3690 new_rtx = force_operand (new_rtx, 0);
3693 return new_rtx;
3696 /* Load the thread pointer into a register. */
3699 s390_get_thread_pointer (void)
3701 rtx tp = gen_reg_rtx (Pmode);
3703 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3704 mark_reg_pointer (tp, BITS_PER_WORD);
3706 return tp;
3709 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3710 in s390_tls_symbol, which always refers to __tls_get_offset.
3711 The returned offset is written to RESULT_REG and a USE rtx is
3712 generated for TLS_CALL. */
3714 static GTY(()) rtx s390_tls_symbol;
3716 static void
3717 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3719 rtx insn;
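/* The call to __tls_get_offset needs the GOT pointer; non-PIC code
   does not set it up by itself, so load it explicitly here. */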
3721 if (!flag_pic)
3722 emit_insn (s390_load_got ());
3724 if (!s390_tls_symbol)
3725 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3727 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3728 gen_rtx_REG (Pmode, RETURN_REGNUM));
3730 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3731 RTL_CONST_CALL_P (insn) = 1;
3734 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3735 this (thread-local) address. REG may be used as a temporary. */
3737 static rtx
3738 legitimize_tls_address (rtx addr, rtx reg)
3740 rtx new_rtx, tls_call, temp, base, r2, insn;
3742 if (GET_CODE (addr) == SYMBOL_REF)
3743 switch (tls_symbolic_operand (addr))
3745 case TLS_MODEL_GLOBAL_DYNAMIC:
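/* Global dynamic model: load the @TLSGD offset of the symbol into r2,
   call __tls_get_offset and add the thread pointer to the returned
   offset. */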
3746 start_sequence ();
3747 r2 = gen_rtx_REG (Pmode, 2);
3748 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3749 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3750 new_rtx = force_const_mem (Pmode, new_rtx);
3751 emit_move_insn (r2, new_rtx);
3752 s390_emit_tls_call_insn (r2, tls_call);
3753 insn = get_insns ();
3754 end_sequence ();
3756 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3757 temp = gen_reg_rtx (Pmode);
3758 emit_libcall_block (insn, temp, r2, new_rtx);
3760 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3761 if (reg != 0)
3763 s390_load_address (reg, new_rtx);
3764 new_rtx = reg;
3766 break;
3768 case TLS_MODEL_LOCAL_DYNAMIC:
3769 start_sequence ();
3770 r2 = gen_rtx_REG (Pmode, 2);
3771 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3772 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3773 new_rtx = force_const_mem (Pmode, new_rtx);
3774 emit_move_insn (r2, new_rtx);
3775 s390_emit_tls_call_insn (r2, tls_call);
3776 insn = get_insns ();
3777 end_sequence ();
3779 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3780 temp = gen_reg_rtx (Pmode);
3781 emit_libcall_block (insn, temp, r2, new_rtx);
3783 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3784 base = gen_reg_rtx (Pmode);
3785 s390_load_address (base, new_rtx);
3787 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3788 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3789 new_rtx = force_const_mem (Pmode, new_rtx);
3790 temp = gen_reg_rtx (Pmode);
3791 emit_move_insn (temp, new_rtx);
3793 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3794 if (reg != 0)
3796 s390_load_address (reg, new_rtx);
3797 new_rtx = reg;
3799 break;
3801 case TLS_MODEL_INITIAL_EXEC:
3802 if (flag_pic == 1)
3804 /* Assume GOT offset < 4k. This is handled the same way
3805 in both 31- and 64-bit code. */
3807 if (reload_in_progress || reload_completed)
3808 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3810 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3811 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3812 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3813 new_rtx = gen_const_mem (Pmode, new_rtx);
3814 temp = gen_reg_rtx (Pmode);
3815 emit_move_insn (temp, new_rtx);
3817 else if (TARGET_CPU_ZARCH)
3819 /* If the GOT offset might be >= 4k, we determine the position
3820 of the GOT entry via a PC-relative LARL. */
3822 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824 temp = gen_reg_rtx (Pmode);
3825 emit_move_insn (temp, new_rtx);
3827 new_rtx = gen_const_mem (Pmode, temp);
3828 temp = gen_reg_rtx (Pmode);
3829 emit_move_insn (temp, new_rtx);
3831 else if (flag_pic)
3833 /* If the GOT offset might be >= 4k, we have to load it
3834 from the literal pool. */
3836 if (reload_in_progress || reload_completed)
3837 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3839 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3840 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3841 new_rtx = force_const_mem (Pmode, new_rtx);
3842 temp = gen_reg_rtx (Pmode);
3843 emit_move_insn (temp, new_rtx);
3845 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3846 new_rtx = gen_const_mem (Pmode, new_rtx);
3848 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3849 temp = gen_reg_rtx (Pmode);
3850 emit_insn (gen_rtx_SET (temp, new_rtx));
3852 else
3854 /* In position-dependent code, load the absolute address of
3855 the GOT entry from the literal pool. */
3857 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3858 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3859 new_rtx = force_const_mem (Pmode, new_rtx);
3860 temp = gen_reg_rtx (Pmode);
3861 emit_move_insn (temp, new_rtx);
3863 new_rtx = temp;
3864 new_rtx = gen_const_mem (Pmode, new_rtx);
3865 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3866 temp = gen_reg_rtx (Pmode);
3867 emit_insn (gen_rtx_SET (temp, new_rtx));
3870 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3871 if (reg != 0)
3873 s390_load_address (reg, new_rtx);
3874 new_rtx = reg;
3876 break;
3878 case TLS_MODEL_LOCAL_EXEC:
3879 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3880 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3881 new_rtx = force_const_mem (Pmode, new_rtx);
3882 temp = gen_reg_rtx (Pmode);
3883 emit_move_insn (temp, new_rtx);
3885 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3886 if (reg != 0)
3888 s390_load_address (reg, new_rtx);
3889 new_rtx = reg;
3891 break;
3893 default:
3894 gcc_unreachable ();
3897 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3899 switch (XINT (XEXP (addr, 0), 1))
3901 case UNSPEC_INDNTPOFF:
3902 gcc_assert (TARGET_CPU_ZARCH);
3903 new_rtx = addr;
3904 break;
3906 default:
3907 gcc_unreachable ();
3911 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3912 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3914 new_rtx = XEXP (XEXP (addr, 0), 0);
3915 if (GET_CODE (new_rtx) != SYMBOL_REF)
3916 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3918 new_rtx = legitimize_tls_address (new_rtx, reg);
3919 new_rtx = plus_constant (Pmode, new_rtx,
3920 INTVAL (XEXP (XEXP (addr, 0), 1)));
3921 new_rtx = force_operand (new_rtx, 0);
3924 else
3925 gcc_unreachable (); /* for now ... */
3927 return new_rtx;
3930 /* Emit insns making the address in operands[1] valid for a standard
3931 move to operands[0]. operands[1] is replaced by an address which
3932 should be used instead of the former RTX to emit the move
3933 pattern. */
3935 void
3936 emit_symbolic_move (rtx *operands)
3938 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3940 if (GET_CODE (operands[0]) == MEM)
3941 operands[1] = force_reg (Pmode, operands[1]);
3942 else if (TLS_SYMBOLIC_CONST (operands[1]))
3943 operands[1] = legitimize_tls_address (operands[1], temp);
3944 else if (flag_pic)
3945 operands[1] = legitimize_pic_address (operands[1], temp);
3948 /* Try machine-dependent ways of modifying an illegitimate address X
3949 to be legitimate. If we find one, return the new, valid address.
3951 OLDX is the address as it was before break_out_memory_refs was called.
3952 In some cases it is useful to look at this to decide what needs to be done.
3954 MODE is the mode of the operand pointed to by X.
3956 When -fpic is used, special handling is needed for symbolic references.
3957 See comments by legitimize_pic_address for details. */
3959 static rtx
3960 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3961 machine_mode mode ATTRIBUTE_UNUSED)
3963 rtx constant_term = const0_rtx;
3965 if (TLS_SYMBOLIC_CONST (x))
3967 x = legitimize_tls_address (x, 0);
3969 if (s390_legitimate_address_p (mode, x, FALSE))
3970 return x;
3972 else if (GET_CODE (x) == PLUS
3973 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3974 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3976 return x;
3978 else if (flag_pic)
3980 if (SYMBOLIC_CONST (x)
3981 || (GET_CODE (x) == PLUS
3982 && (SYMBOLIC_CONST (XEXP (x, 0))
3983 || SYMBOLIC_CONST (XEXP (x, 1)))))
3984 x = legitimize_pic_address (x, 0);
3986 if (s390_legitimate_address_p (mode, x, FALSE))
3987 return x;
3990 x = eliminate_constant_term (x, &constant_term);
3992 /* Optimize loading of large displacements by splitting them
3993 into the multiple of 4K and the rest; this allows the
3994 former to be CSE'd if possible.
3996 Don't do this if the displacement is added to a register
3997 pointing into the stack frame, as the offsets will
3998 change later anyway. */
4000 if (GET_CODE (constant_term) == CONST_INT
4001 && !TARGET_LONG_DISPLACEMENT
4002 && !DISP_IN_RANGE (INTVAL (constant_term))
4003 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4005 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4006 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4008 rtx temp = gen_reg_rtx (Pmode);
4009 rtx val = force_operand (GEN_INT (upper), temp);
4010 if (val != temp)
4011 emit_move_insn (temp, val);
4013 x = gen_rtx_PLUS (Pmode, x, temp);
4014 constant_term = GEN_INT (lower);
4017 if (GET_CODE (x) == PLUS)
4019 if (GET_CODE (XEXP (x, 0)) == REG)
4021 rtx temp = gen_reg_rtx (Pmode);
4022 rtx val = force_operand (XEXP (x, 1), temp);
4023 if (val != temp)
4024 emit_move_insn (temp, val);
4026 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4029 else if (GET_CODE (XEXP (x, 1)) == REG)
4031 rtx temp = gen_reg_rtx (Pmode);
4032 rtx val = force_operand (XEXP (x, 0), temp);
4033 if (val != temp)
4034 emit_move_insn (temp, val);
4036 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4040 if (constant_term != const0_rtx)
4041 x = gen_rtx_PLUS (Pmode, x, constant_term);
4043 return x;
4046 /* Try a machine-dependent way of reloading an illegitimate address AD
4047 operand. If we find one, push the reload and return the new address.
4049 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4050 and TYPE is the reload type of the current reload. */
4053 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
4054 int opnum, int type)
4056 if (!optimize || TARGET_LONG_DISPLACEMENT)
4057 return NULL_RTX;
4059 if (GET_CODE (ad) == PLUS)
4061 rtx tem = simplify_binary_operation (PLUS, Pmode,
4062 XEXP (ad, 0), XEXP (ad, 1));
4063 if (tem)
4064 ad = tem;
4067 if (GET_CODE (ad) == PLUS
4068 && GET_CODE (XEXP (ad, 0)) == REG
4069 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4070 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
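/* Split the constant into a 12-bit displacement that fits the
   instruction and the remaining upper part; the upper part is
   reloaded into an address register. */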
4072 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4073 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4074 rtx cst, tem, new_rtx;
4076 cst = GEN_INT (upper);
4077 if (!legitimate_reload_constant_p (cst))
4078 cst = force_const_mem (Pmode, cst);
4080 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4081 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4083 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4084 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4085 opnum, (enum reload_type) type);
4086 return new_rtx;
4089 return NULL_RTX;
4092 /* Emit code to move LEN bytes from SRC to DST. */
4094 bool
4095 s390_expand_movmem (rtx dst, rtx src, rtx len)
4097 /* When tuning for z10 or higher we rely on the Glibc functions to
4098 do the right thing. Inline code is generated only for constant
4099 lengths of at most 64k. */
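     /* For instance (illustrative): a memcpy of a 200-byte constant length is
        expanded below into a single MVC-style movmem_short insn, while a
        non-constant length on z10 or newer makes us return false here so the
        middle end falls back to calling the library memcpy.  */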
4100 if (s390_tune >= PROCESSOR_2097_Z10
4101 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4102 return false;
4104 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4106 if (INTVAL (len) > 0)
4107 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4110 else if (TARGET_MVCLE)
4112 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4115 else
4117 rtx dst_addr, src_addr, count, blocks, temp;
4118 rtx_code_label *loop_start_label = gen_label_rtx ();
4119 rtx_code_label *loop_end_label = gen_label_rtx ();
4120 rtx_code_label *end_label = gen_label_rtx ();
4121 machine_mode mode;
4123 mode = GET_MODE (len);
4124 if (mode == VOIDmode)
4125 mode = Pmode;
4127 dst_addr = gen_reg_rtx (Pmode);
4128 src_addr = gen_reg_rtx (Pmode);
4129 count = gen_reg_rtx (mode);
4130 blocks = gen_reg_rtx (mode);
4132 convert_move (count, len, 1);
4133 emit_cmp_and_jump_insns (count, const0_rtx,
4134 EQ, NULL_RTX, mode, 1, end_label);
4136 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4137 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4138 dst = change_address (dst, VOIDmode, dst_addr);
4139 src = change_address (src, VOIDmode, src_addr);
4141 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4142 OPTAB_DIRECT);
4143 if (temp != count)
4144 emit_move_insn (count, temp);
4146 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4147 OPTAB_DIRECT);
4148 if (temp != blocks)
4149 emit_move_insn (blocks, temp);
4151 emit_cmp_and_jump_insns (blocks, const0_rtx,
4152 EQ, NULL_RTX, mode, 1, loop_end_label);
4154 emit_label (loop_start_label);
4156 if (TARGET_Z10
4157 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4159 rtx prefetch;
4161 /* Issue a read prefetch for the +3 cache line. */
4162 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4163 const0_rtx, const0_rtx);
4164 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4165 emit_insn (prefetch);
4167 /* Issue a write prefetch for the +3 cache line. */
4168 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4169 const1_rtx, const0_rtx);
4170 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4171 emit_insn (prefetch);
4174 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4175 s390_load_address (dst_addr,
4176 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4177 s390_load_address (src_addr,
4178 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4180 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4181 OPTAB_DIRECT);
4182 if (temp != blocks)
4183 emit_move_insn (blocks, temp);
4185 emit_cmp_and_jump_insns (blocks, const0_rtx,
4186 EQ, NULL_RTX, mode, 1, loop_end_label);
4188 emit_jump (loop_start_label);
4189 emit_label (loop_end_label);
4191 emit_insn (gen_movmem_short (dst, src,
4192 convert_to_mode (Pmode, count, 1)));
4193 emit_label (end_label);
4195 return true;
4198 /* Emit code to set LEN bytes at DST to VAL.
4199 Make use of clrmem if VAL is zero. */
4201 void
4202 s390_expand_setmem (rtx dst, rtx len, rtx val)
4204 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4205 return;
4207 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4209 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4211 if (val == const0_rtx && INTVAL (len) <= 256)
4212 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4213 else
4215 /* Initialize memory by storing the first byte. */
4216 emit_move_insn (adjust_address (dst, QImode, 0), val);
4218 if (INTVAL (len) > 1)
4220 /* Initiate a 1-byte overlapping move.
4221 The first byte of DST is propagated through DSTP1.
4222 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4223 DST is set to size 1 so the rest of the memory location
4224 does not count as a source operand. */
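     /* Worked example (length hypothetical): to fill five bytes at DST with
        the byte in VAL, the code stores VAL at DST[0] and then emits a
        movmem_short covering DST+1 = DST with length 4; since MVC copies
        left to right, DST[0] is propagated into DST[1] through DST[4].  */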
4225 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4226 set_mem_size (dst, 1);
4228 emit_insn (gen_movmem_short (dstp1, dst,
4229 GEN_INT (INTVAL (len) - 2)));
4234 else if (TARGET_MVCLE)
4236 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4237 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4240 else
4242 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4243 rtx_code_label *loop_start_label = gen_label_rtx ();
4244 rtx_code_label *loop_end_label = gen_label_rtx ();
4245 rtx_code_label *end_label = gen_label_rtx ();
4246 machine_mode mode;
4248 mode = GET_MODE (len);
4249 if (mode == VOIDmode)
4250 mode = Pmode;
4252 dst_addr = gen_reg_rtx (Pmode);
4253 count = gen_reg_rtx (mode);
4254 blocks = gen_reg_rtx (mode);
4256 convert_move (count, len, 1);
4257 emit_cmp_and_jump_insns (count, const0_rtx,
4258 EQ, NULL_RTX, mode, 1, end_label);
4260 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4261 dst = change_address (dst, VOIDmode, dst_addr);
4263 if (val == const0_rtx)
4264 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4265 OPTAB_DIRECT);
4266 else
4268 dstp1 = adjust_address (dst, VOIDmode, 1);
4269 set_mem_size (dst, 1);
4271 /* Initialize memory by storing the first byte. */
4272 emit_move_insn (adjust_address (dst, QImode, 0), val);
4274 /* If count is 1 we are done. */
4275 emit_cmp_and_jump_insns (count, const1_rtx,
4276 EQ, NULL_RTX, mode, 1, end_label);
4278 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4279 OPTAB_DIRECT);
4281 if (temp != count)
4282 emit_move_insn (count, temp);
4284 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4285 OPTAB_DIRECT);
4286 if (temp != blocks)
4287 emit_move_insn (blocks, temp);
4289 emit_cmp_and_jump_insns (blocks, const0_rtx,
4290 EQ, NULL_RTX, mode, 1, loop_end_label);
4292 emit_label (loop_start_label);
4294 if (TARGET_Z10
4295 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4297 /* Issue a write prefetch for the +4 cache line. */
4298 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4299 GEN_INT (1024)),
4300 const1_rtx, const0_rtx);
4301 emit_insn (prefetch);
4302 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4305 if (val == const0_rtx)
4306 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4307 else
4308 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4309 s390_load_address (dst_addr,
4310 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4312 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4313 OPTAB_DIRECT);
4314 if (temp != blocks)
4315 emit_move_insn (blocks, temp);
4317 emit_cmp_and_jump_insns (blocks, const0_rtx,
4318 EQ, NULL_RTX, mode, 1, loop_end_label);
4320 emit_jump (loop_start_label);
4321 emit_label (loop_end_label);
4323 if (val == const0_rtx)
4324 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4325 else
4326 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4327 emit_label (end_label);
4331 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4332 and return the result in TARGET. */
4334 bool
4335 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4337 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4338 rtx tmp;
4340 /* When tuning for z10 or higher we rely on the Glibc functions to
4341 do the right thing. Inline code is generated only for constant
4342 lengths of at most 64k. */
4343 if (s390_tune >= PROCESSOR_2097_Z10
4344 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4345 return false;
4347 /* As the result of CMPINT is inverted compared to what we need,
4348 we have to swap the operands. */
4349 tmp = op0; op0 = op1; op1 = tmp;
4351 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4353 if (INTVAL (len) > 0)
4355 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4356 emit_insn (gen_cmpint (target, ccreg));
4358 else
4359 emit_move_insn (target, const0_rtx);
4361 else if (TARGET_MVCLE)
4363 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4364 emit_insn (gen_cmpint (target, ccreg));
4366 else
4368 rtx addr0, addr1, count, blocks, temp;
4369 rtx_code_label *loop_start_label = gen_label_rtx ();
4370 rtx_code_label *loop_end_label = gen_label_rtx ();
4371 rtx_code_label *end_label = gen_label_rtx ();
4372 machine_mode mode;
4374 mode = GET_MODE (len);
4375 if (mode == VOIDmode)
4376 mode = Pmode;
4378 addr0 = gen_reg_rtx (Pmode);
4379 addr1 = gen_reg_rtx (Pmode);
4380 count = gen_reg_rtx (mode);
4381 blocks = gen_reg_rtx (mode);
4383 convert_move (count, len, 1);
4384 emit_cmp_and_jump_insns (count, const0_rtx,
4385 EQ, NULL_RTX, mode, 1, end_label);
4387 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4388 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4389 op0 = change_address (op0, VOIDmode, addr0);
4390 op1 = change_address (op1, VOIDmode, addr1);
4392 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4393 OPTAB_DIRECT);
4394 if (temp != count)
4395 emit_move_insn (count, temp);
4397 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4398 OPTAB_DIRECT);
4399 if (temp != blocks)
4400 emit_move_insn (blocks, temp);
4402 emit_cmp_and_jump_insns (blocks, const0_rtx,
4403 EQ, NULL_RTX, mode, 1, loop_end_label);
4405 emit_label (loop_start_label);
4407 if (TARGET_Z10
4408 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4410 rtx prefetch;
4412 /* Issue a read prefetch for the +2 cache line of operand 1. */
4413 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4414 const0_rtx, const0_rtx);
4415 emit_insn (prefetch);
4416 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4418 /* Issue a read prefetch for the +2 cache line of operand 2. */
4419 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4420 const0_rtx, const0_rtx);
4421 emit_insn (prefetch);
4422 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4425 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4426 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4427 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4428 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4429 temp = gen_rtx_SET (pc_rtx, temp);
4430 emit_jump_insn (temp);
4432 s390_load_address (addr0,
4433 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4434 s390_load_address (addr1,
4435 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4437 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4438 OPTAB_DIRECT);
4439 if (temp != blocks)
4440 emit_move_insn (blocks, temp);
4442 emit_cmp_and_jump_insns (blocks, const0_rtx,
4443 EQ, NULL_RTX, mode, 1, loop_end_label);
4445 emit_jump (loop_start_label);
4446 emit_label (loop_end_label);
4448 emit_insn (gen_cmpmem_short (op0, op1,
4449 convert_to_mode (Pmode, count, 1)));
4450 emit_label (end_label);
4452 emit_insn (gen_cmpint (target, ccreg));
4454 return true;
4458 /* Expand conditional increment or decrement using alc/slb instructions.
4459 Should generate code setting DST to either SRC or SRC + INCREMENT,
4460 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4461 Returns true if successful, false otherwise.
4463 That makes it possible to implement some if-constructs without jumps e.g.:
4464 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4465 unsigned int a, b, c;
4466 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4467 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4468 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4469 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4471 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4472 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4473 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4474 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4475 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
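   /* Rough sketch (modes and operands illustrative): for "if (a < b) c++;"
      the code below first emits the comparison
        (set (reg:CCU CC) (compare:CCU (reg b) (reg a)))
      and then the ALC pattern
        (parallel [(set (reg c)
                        (plus (plus (gtu (reg:CCU CC) (const_int 0))
                                    (reg c))
                              (const_int 0)))
                   (clobber (reg:CC CC))])  */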
4477 bool
4478 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4479 rtx dst, rtx src, rtx increment)
4481 machine_mode cmp_mode;
4482 machine_mode cc_mode;
4483 rtx op_res;
4484 rtx insn;
4485 rtvec p;
4486 int ret;
4488 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4489 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4490 cmp_mode = SImode;
4491 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4492 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4493 cmp_mode = DImode;
4494 else
4495 return false;
4497 /* Try ADD LOGICAL WITH CARRY. */
4498 if (increment == const1_rtx)
4500 /* Determine CC mode to use. */
4501 if (cmp_code == EQ || cmp_code == NE)
4503 if (cmp_op1 != const0_rtx)
4505 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4506 NULL_RTX, 0, OPTAB_WIDEN);
4507 cmp_op1 = const0_rtx;
4510 cmp_code = cmp_code == EQ ? LEU : GTU;
4513 if (cmp_code == LTU || cmp_code == LEU)
4515 rtx tem = cmp_op0;
4516 cmp_op0 = cmp_op1;
4517 cmp_op1 = tem;
4518 cmp_code = swap_condition (cmp_code);
4521 switch (cmp_code)
4523 case GTU:
4524 cc_mode = CCUmode;
4525 break;
4527 case GEU:
4528 cc_mode = CCL3mode;
4529 break;
4531 default:
4532 return false;
4535 /* Emit comparison instruction pattern. */
4536 if (!register_operand (cmp_op0, cmp_mode))
4537 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4539 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
4540 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4541 /* We use insn_invalid_p here to add clobbers if required. */
4542 ret = insn_invalid_p (emit_insn (insn), false);
4543 gcc_assert (!ret);
4545 /* Emit ALC instruction pattern. */
4546 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4547 gen_rtx_REG (cc_mode, CC_REGNUM),
4548 const0_rtx);
4550 if (src != const0_rtx)
4552 if (!register_operand (src, GET_MODE (dst)))
4553 src = force_reg (GET_MODE (dst), src);
4555 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4556 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4559 p = rtvec_alloc (2);
4560 RTVEC_ELT (p, 0) =
4561 gen_rtx_SET (dst, op_res);
4562 RTVEC_ELT (p, 1) =
4563 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4564 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4566 return true;
4569 /* Try SUBTRACT LOGICAL WITH BORROW. */
4570 if (increment == constm1_rtx)
4572 /* Determine CC mode to use. */
4573 if (cmp_code == EQ || cmp_code == NE)
4575 if (cmp_op1 != const0_rtx)
4577 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4578 NULL_RTX, 0, OPTAB_WIDEN);
4579 cmp_op1 = const0_rtx;
4582 cmp_code = cmp_code == EQ ? LEU : GTU;
4585 if (cmp_code == GTU || cmp_code == GEU)
4587 rtx tem = cmp_op0;
4588 cmp_op0 = cmp_op1;
4589 cmp_op1 = tem;
4590 cmp_code = swap_condition (cmp_code);
4593 switch (cmp_code)
4595 case LEU:
4596 cc_mode = CCUmode;
4597 break;
4599 case LTU:
4600 cc_mode = CCL3mode;
4601 break;
4603 default:
4604 return false;
4607 /* Emit comparison instruction pattern. */
4608 if (!register_operand (cmp_op0, cmp_mode))
4609 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4611 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
4612 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4613 /* We use insn_invalid_p here to add clobbers if required. */
4614 ret = insn_invalid_p (emit_insn (insn), false);
4615 gcc_assert (!ret);
4617 /* Emit SLB instruction pattern. */
4618 if (!register_operand (src, GET_MODE (dst)))
4619 src = force_reg (GET_MODE (dst), src);
4621 op_res = gen_rtx_MINUS (GET_MODE (dst),
4622 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4623 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4624 gen_rtx_REG (cc_mode, CC_REGNUM),
4625 const0_rtx));
4626 p = rtvec_alloc (2);
4627 RTVEC_ELT (p, 0) =
4628 gen_rtx_SET (dst, op_res);
4629 RTVEC_ELT (p, 1) =
4630 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4631 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4633 return true;
4636 return false;
4639 /* Expand code for the insv template. Return true if successful. */
4641 bool
4642 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4644 int bitsize = INTVAL (op1);
4645 int bitpos = INTVAL (op2);
4646 machine_mode mode = GET_MODE (dest);
4647 machine_mode smode;
4648 int smode_bsize, mode_bsize;
4649 rtx op, clobber;
4651 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
4652 return false;
4654 /* Generate INSERT IMMEDIATE (IILL et al). */
4655 /* (set (ze (reg)) (const_int)). */
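   /* Illustrative case (values hypothetical): inserting the constant
      0x1234abcd into the low 32 bits of a 64-bit register (bitpos 32,
      bitsize 32) becomes a single SImode insert (IILF 0x1234abcd) when
      TARGET_EXTIMM is available, and otherwise two HImode inserts
      (IILL 0xabcd and IILH 0x1234).  */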
4656 if (TARGET_ZARCH
4657 && register_operand (dest, word_mode)
4658 && (bitpos % 16) == 0
4659 && (bitsize % 16) == 0
4660 && const_int_operand (src, VOIDmode))
4662 HOST_WIDE_INT val = INTVAL (src);
4663 int regpos = bitpos + bitsize;
4665 while (regpos > bitpos)
4667 machine_mode putmode;
4668 int putsize;
4670 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4671 putmode = SImode;
4672 else
4673 putmode = HImode;
4675 putsize = GET_MODE_BITSIZE (putmode);
4676 regpos -= putsize;
4677 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4678 GEN_INT (putsize),
4679 GEN_INT (regpos)),
4680 gen_int_mode (val, putmode));
4681 val >>= putsize;
4683 gcc_assert (regpos == bitpos);
4684 return true;
4687 smode = smallest_mode_for_size (bitsize, MODE_INT);
4688 smode_bsize = GET_MODE_BITSIZE (smode);
4689 mode_bsize = GET_MODE_BITSIZE (mode);
4691 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4692 if (bitpos == 0
4693 && (bitsize % BITS_PER_UNIT) == 0
4694 && MEM_P (dest)
4695 && (register_operand (src, word_mode)
4696 || const_int_operand (src, VOIDmode)))
4698 /* Emit standard pattern if possible. */
4699 if (smode_bsize == bitsize)
4701 emit_move_insn (adjust_address (dest, smode, 0),
4702 gen_lowpart (smode, src));
4703 return true;
4706 /* (set (ze (mem)) (const_int)). */
4707 else if (const_int_operand (src, VOIDmode))
4709 int size = bitsize / BITS_PER_UNIT;
4710 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4711 BLKmode,
4712 UNITS_PER_WORD - size);
4714 dest = adjust_address (dest, BLKmode, 0);
4715 set_mem_size (dest, size);
4716 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4717 return true;
4720 /* (set (ze (mem)) (reg)). */
4721 else if (register_operand (src, word_mode))
4723 if (bitsize <= 32)
4724 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4725 const0_rtx), src);
4726 else
4728 /* Emit st,stcmh sequence. */
4729 int stcmh_width = bitsize - 32;
4730 int size = stcmh_width / BITS_PER_UNIT;
4732 emit_move_insn (adjust_address (dest, SImode, size),
4733 gen_lowpart (SImode, src));
4734 set_mem_size (dest, size);
4735 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4736 GEN_INT (stcmh_width),
4737 const0_rtx),
4738 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4740 return true;
4744 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4745 if ((bitpos % BITS_PER_UNIT) == 0
4746 && (bitsize % BITS_PER_UNIT) == 0
4747 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4748 && MEM_P (src)
4749 && (mode == DImode || mode == SImode)
4750 && register_operand (dest, mode))
4752 /* Emit a strict_low_part pattern if possible. */
4753 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4755 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4756 op = gen_rtx_SET (op, gen_lowpart (smode, src));
4757 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4758 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4759 return true;
4762 /* ??? There are more powerful versions of ICM that are not
4763 completely represented in the md file. */
4766 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4767 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4769 machine_mode mode_s = GET_MODE (src);
4771 if (mode_s == VOIDmode)
4773 /* Assume const_int etc already in the proper mode. */
4774 src = force_reg (mode, src);
4776 else if (mode_s != mode)
4778 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4779 src = force_reg (mode_s, src);
4780 src = gen_lowpart (mode, src);
4783 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4784 op = gen_rtx_SET (op, src);
4786 if (!TARGET_ZEC12)
4788 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4789 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4791 emit_insn (op);
4793 return true;
4796 return false;
4799 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4800 register that holds VAL of mode MODE shifted by COUNT bits. */
4802 static inline rtx
4803 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
4805 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4806 NULL_RTX, 1, OPTAB_DIRECT);
4807 return expand_simple_binop (SImode, ASHIFT, val, count,
4808 NULL_RTX, 1, OPTAB_DIRECT);
4811 /* Structure to hold the initial parameters for a compare_and_swap operation
4812 in HImode and QImode. */
4814 struct alignment_context
4816 rtx memsi; /* SI aligned memory location. */
4817 rtx shift; /* Bit offset with regard to lsb. */
4818 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4819 rtx modemaski; /* ~modemask */
4820 bool aligned; /* True if memory is aligned, false otherwise. */
4823 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4824 the structure AC for transparent simplification if the memory alignment
4825 is known to be at least 32 bits. MEM is the memory location for the actual operation
4826 and MODE its mode. */
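   /* Worked example (address hypothetical): for a QImode operation on the
      byte at address 0x1003 with unknown alignment, MEMSI covers the
      SImode word at 0x1000, BYTEOFFSET is 3, and SHIFT ends up as
      (3 - 3) * 8 = 0 bits, i.e. the byte sits in the least significant
      byte of the loaded word.  */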
4828 static void
4829 init_alignment_context (struct alignment_context *ac, rtx mem,
4830 machine_mode mode)
4832 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4833 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4835 if (ac->aligned)
4836 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4837 else
4839 /* Alignment is unknown. */
4840 rtx byteoffset, addr, align;
4842 /* Force the address into a register. */
4843 addr = force_reg (Pmode, XEXP (mem, 0));
4845 /* Align it to SImode. */
4846 align = expand_simple_binop (Pmode, AND, addr,
4847 GEN_INT (-GET_MODE_SIZE (SImode)),
4848 NULL_RTX, 1, OPTAB_DIRECT);
4849 /* Generate MEM. */
4850 ac->memsi = gen_rtx_MEM (SImode, align);
4851 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4852 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4853 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4855 /* Calculate shiftcount. */
4856 byteoffset = expand_simple_binop (Pmode, AND, addr,
4857 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4858 NULL_RTX, 1, OPTAB_DIRECT);
4859 /* As we already have some offset, evaluate the remaining distance. */
4860 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4861 NULL_RTX, 1, OPTAB_DIRECT);
4864 /* Shift is the byte count, but we need the bitcount. */
4865 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4866 NULL_RTX, 1, OPTAB_DIRECT);
4868 /* Calculate masks. */
4869 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4870 GEN_INT (GET_MODE_MASK (mode)),
4871 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4872 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4873 NULL_RTX, 1);
4876 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4877 emit a single insv insn into SEQ2. Otherwise, put the prep insns in SEQ1 and
4878 perform the merge in SEQ2. */
4880 static rtx
4881 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4882 machine_mode mode, rtx val, rtx ins)
4884 rtx tmp;
4886 if (ac->aligned)
4888 start_sequence ();
4889 tmp = copy_to_mode_reg (SImode, val);
4890 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4891 const0_rtx, ins))
4893 *seq1 = NULL;
4894 *seq2 = get_insns ();
4895 end_sequence ();
4896 return tmp;
4898 end_sequence ();
4901 /* Failed to use insv. Generate a two part shift and mask. */
4902 start_sequence ();
4903 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4904 *seq1 = get_insns ();
4905 end_sequence ();
4907 start_sequence ();
4908 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4909 *seq2 = get_insns ();
4910 end_sequence ();
4912 return tmp;
4915 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4916 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4917 value to set if CMP == MEM. */
4919 void
4920 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4921 rtx cmp, rtx new_rtx, bool is_weak)
4923 struct alignment_context ac;
4924 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4925 rtx res = gen_reg_rtx (SImode);
4926 rtx_code_label *csloop = NULL, *csend = NULL;
4928 gcc_assert (MEM_P (mem));
4930 init_alignment_context (&ac, mem, mode);
4932 /* Load full word. Subsequent loads are performed by CS. */
4933 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4934 NULL_RTX, 1, OPTAB_DIRECT);
4936 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4937 possible, we try to use insv to make this happen efficiently. If
4938 that fails we'll generate code both inside and outside the loop. */
4939 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4940 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4942 if (seq0)
4943 emit_insn (seq0);
4944 if (seq1)
4945 emit_insn (seq1);
4947 /* Start CS loop. */
4948 if (!is_weak)
4950 /* Begin assuming success. */
4951 emit_move_insn (btarget, const1_rtx);
4953 csloop = gen_label_rtx ();
4954 csend = gen_label_rtx ();
4955 emit_label (csloop);
4958 /* val = "<mem>00..0<mem>"
4959 * cmp = "00..0<cmp>00..0"
4960 * new = "00..0<new>00..0"
4963 emit_insn (seq2);
4964 emit_insn (seq3);
4966 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4967 if (is_weak)
4968 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4969 else
4971 rtx tmp;
4973 /* Jump to end if we're done (likely?). */
4974 s390_emit_jump (csend, cc);
4976 /* Check for changes outside mode, and loop internally if so.
4977 Arrange the moves so that the compare is adjacent to the
4978 branch so that we can generate CRJ. */
4979 tmp = copy_to_reg (val);
4980 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4981 1, OPTAB_DIRECT);
4982 cc = s390_emit_compare (NE, val, tmp);
4983 s390_emit_jump (csloop, cc);
4985 /* Failed. */
4986 emit_move_insn (btarget, const0_rtx);
4987 emit_label (csend);
4990 /* Return the correct part of the bitfield. */
4991 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4992 NULL_RTX, 1, OPTAB_DIRECT), 1);
4995 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4996 and VAL the value to play with. If AFTER is true then store the value
4997 MEM holds after the operation, if AFTER is false then store the value MEM
4998 holds before the operation. If TARGET is zero then discard that value, else
4999 store it to TARGET. */
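   /* Rough sketch (operands illustrative): for a QImode fetch-and-add, the
      loop below loads the containing SImode word, adds the shifted VAL,
      masks the result back into the byte's position, and retries the
      compare-and-swap until the word has not changed concurrently; TARGET
      then receives either the old or the new byte, depending on AFTER.  */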
5001 void
5002 s390_expand_atomic (machine_mode mode, enum rtx_code code,
5003 rtx target, rtx mem, rtx val, bool after)
5005 struct alignment_context ac;
5006 rtx cmp;
5007 rtx new_rtx = gen_reg_rtx (SImode);
5008 rtx orig = gen_reg_rtx (SImode);
5009 rtx_code_label *csloop = gen_label_rtx ();
5011 gcc_assert (!target || register_operand (target, VOIDmode));
5012 gcc_assert (MEM_P (mem));
5014 init_alignment_context (&ac, mem, mode);
5016 /* Shift val to the correct bit positions.
5017 Preserve "icm", but prevent "ex icm". */
5018 if (!(ac.aligned && code == SET && MEM_P (val)))
5019 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5021 /* Further preparation insns. */
5022 if (code == PLUS || code == MINUS)
5023 emit_move_insn (orig, val);
5024 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5025 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5026 NULL_RTX, 1, OPTAB_DIRECT);
5028 /* Load full word. Subsequent loads are performed by CS. */
5029 cmp = force_reg (SImode, ac.memsi);
5031 /* Start CS loop. */
5032 emit_label (csloop);
5033 emit_move_insn (new_rtx, cmp);
5035 /* Patch new with val at correct position. */
5036 switch (code)
5038 case PLUS:
5039 case MINUS:
5040 val = expand_simple_binop (SImode, code, new_rtx, orig,
5041 NULL_RTX, 1, OPTAB_DIRECT);
5042 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5043 NULL_RTX, 1, OPTAB_DIRECT);
5044 /* FALLTHRU */
5045 case SET:
5046 if (ac.aligned && MEM_P (val))
5047 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5048 0, 0, SImode, val);
5049 else
5051 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5052 NULL_RTX, 1, OPTAB_DIRECT);
5053 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5054 NULL_RTX, 1, OPTAB_DIRECT);
5056 break;
5057 case AND:
5058 case IOR:
5059 case XOR:
5060 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5062 break;
5063 case MULT: /* NAND */
5064 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5065 NULL_RTX, 1, OPTAB_DIRECT);
5066 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5067 NULL_RTX, 1, OPTAB_DIRECT);
5068 break;
5069 default:
5070 gcc_unreachable ();
5073 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5074 ac.memsi, cmp, new_rtx));
5076 /* Return the correct part of the bitfield. */
5077 if (target)
5078 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5079 after ? new_rtx : cmp, ac.shift,
5080 NULL_RTX, 1, OPTAB_DIRECT), 1);
5083 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5084 We need to emit DTP-relative relocations. */
5086 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5088 static void
5089 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5091 switch (size)
5093 case 4:
5094 fputs ("\t.long\t", file);
5095 break;
5096 case 8:
5097 fputs ("\t.quad\t", file);
5098 break;
5099 default:
5100 gcc_unreachable ();
5102 output_addr_const (file, x);
5103 fputs ("@DTPOFF", file);
5106 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5107 /* Implement TARGET_MANGLE_TYPE. */
5109 static const char *
5110 s390_mangle_type (const_tree type)
5112 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5113 && TARGET_LONG_DOUBLE_128)
5114 return "g";
5116 /* For all other types, use normal C++ mangling. */
5117 return NULL;
5119 #endif
5121 /* In the name of slightly smaller debug output, and to cater to
5122 general assembler lossage, recognize various UNSPEC sequences
5123 and turn them back into a direct symbol reference. */
5125 static rtx
5126 s390_delegitimize_address (rtx orig_x)
5128 rtx x, y;
5130 orig_x = delegitimize_mem_from_attrs (orig_x);
5131 x = orig_x;
5133 /* Extract the symbol ref from:
5134 (plus:SI (reg:SI 12 %r12)
5135 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5136 UNSPEC_GOTOFF/PLTOFF)))
5138 (plus:SI (reg:SI 12 %r12)
5139 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5140 UNSPEC_GOTOFF/PLTOFF)
5141 (const_int 4 [0x4])))) */
5142 if (GET_CODE (x) == PLUS
5143 && REG_P (XEXP (x, 0))
5144 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5145 && GET_CODE (XEXP (x, 1)) == CONST)
5147 HOST_WIDE_INT offset = 0;
5149 /* The const operand. */
5150 y = XEXP (XEXP (x, 1), 0);
5152 if (GET_CODE (y) == PLUS
5153 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5155 offset = INTVAL (XEXP (y, 1));
5156 y = XEXP (y, 0);
5159 if (GET_CODE (y) == UNSPEC
5160 && (XINT (y, 1) == UNSPEC_GOTOFF
5161 || XINT (y, 1) == UNSPEC_PLTOFF))
5162 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5165 if (GET_CODE (x) != MEM)
5166 return orig_x;
5168 x = XEXP (x, 0);
5169 if (GET_CODE (x) == PLUS
5170 && GET_CODE (XEXP (x, 1)) == CONST
5171 && GET_CODE (XEXP (x, 0)) == REG
5172 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5174 y = XEXP (XEXP (x, 1), 0);
5175 if (GET_CODE (y) == UNSPEC
5176 && XINT (y, 1) == UNSPEC_GOT)
5177 y = XVECEXP (y, 0, 0);
5178 else
5179 return orig_x;
5181 else if (GET_CODE (x) == CONST)
5183 /* Extract the symbol ref from:
5184 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5185 UNSPEC_PLT/GOTENT))) */
5187 y = XEXP (x, 0);
5188 if (GET_CODE (y) == UNSPEC
5189 && (XINT (y, 1) == UNSPEC_GOTENT
5190 || XINT (y, 1) == UNSPEC_PLT))
5191 y = XVECEXP (y, 0, 0);
5192 else
5193 return orig_x;
5195 else
5196 return orig_x;
5198 if (GET_MODE (orig_x) != Pmode)
5200 if (GET_MODE (orig_x) == BLKmode)
5201 return orig_x;
5202 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5203 if (y == NULL_RTX)
5204 return orig_x;
5206 return y;
5209 /* Output operand OP to stdio stream FILE.
5210 OP is an address (register + offset) which is not used to address data;
5211 instead the rightmost bits are interpreted as the value. */
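   /* Example (operands hypothetical): for OP = (plus (reg %r2)
      (const_int 4100)) this prints "4(%r2)", because only the low
      twelve bits of the offset (4100 & 0xfff == 4) are significant.  */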
5213 static void
5214 print_shift_count_operand (FILE *file, rtx op)
5216 HOST_WIDE_INT offset;
5217 rtx base;
5219 /* Extract base register and offset. */
5220 if (!s390_decompose_shift_count (op, &base, &offset))
5221 gcc_unreachable ();
5223 /* Sanity check. */
5224 if (base)
5226 gcc_assert (GET_CODE (base) == REG);
5227 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5228 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5231 /* Offsets are restricted to twelve bits. */
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5233 if (base)
5234 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5237 /* Assigns the number of NOP halfwords to be emitted before and after the
5238 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
5239 If hotpatching is disabled for the function, the values are set to zero.
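   /* Example of the attribute form handled here (illustrative):
        void f (void) __attribute__ ((hotpatch (1, 2)));
      requests one halfword of NOPs before and two halfwords after the
      label of f, overriding any -mhotpatch= command line setting.  */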
5242 static void
5243 s390_function_num_hotpatch_hw (tree decl,
5244 int *hw_before,
5245 int *hw_after)
5247 tree attr;
5249 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
5251 /* Handle the arguments of the hotpatch attribute. The values
5252 specified via attribute might override the cmdline argument
5253 values. */
5254 if (attr)
5256 tree args = TREE_VALUE (attr);
5258 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
5259 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
5261 else
5263 /* Use the values specified by the cmdline arguments. */
5264 *hw_before = s390_hotpatch_hw_before_label;
5265 *hw_after = s390_hotpatch_hw_after_label;
5269 /* Write the extra assembler code needed to declare a function properly. */
5271 void
5272 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
5273 tree decl)
5275 int hw_before, hw_after;
5277 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
5278 if (hw_before > 0)
5280 unsigned int function_alignment;
5281 int i;
5283 /* Add a trampoline code area before the function label and initialize it
5284 with two-byte nop instructions. This area can be overwritten with code
5285 that jumps to a patched version of the function. */
5286 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
5287 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
5288 hw_before);
5289 for (i = 1; i < hw_before; i++)
5290 fputs ("\tnopr\t%r7\n", asm_out_file);
5292 /* Note: The function label must be aligned so that (a) the bytes of the
5293 following nop do not cross a cacheline boundary, and (b) a jump address
5294 (8 bytes for 64-bit targets, 4 bytes for 32-bit targets) can be
5295 stored directly before the label without crossing a cacheline
5296 boundary. All this is necessary to make sure the trampoline code can
5297 be changed atomically.
5298 This alignment is done automatically using the FUNCTION_BOUNDARY, but
5299 if there are NOPs before the function label, the alignment is placed
5300 before them. So it is necessary to duplicate the alignment after the
5301 NOPs. */
5302 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
5303 if (! DECL_USER_ALIGN (decl))
5304 function_alignment = MAX (function_alignment,
5305 (unsigned int) align_functions);
5306 fputs ("\t# alignment for hotpatch\n", asm_out_file);
5307 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
5310 ASM_OUTPUT_LABEL (asm_out_file, fname);
5311 if (hw_after > 0)
5312 asm_fprintf (asm_out_file,
5313 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
5314 hw_after);
5317 /* Output machine-dependent UNSPECs occurring in address constant X
5318 in assembler syntax to stdio stream FILE. Returns true if the
5319 constant X could be recognized, false otherwise. */
5321 static bool
5322 s390_output_addr_const_extra (FILE *file, rtx x)
5324 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5325 switch (XINT (x, 1))
5327 case UNSPEC_GOTENT:
5328 output_addr_const (file, XVECEXP (x, 0, 0));
5329 fprintf (file, "@GOTENT");
5330 return true;
5331 case UNSPEC_GOT:
5332 output_addr_const (file, XVECEXP (x, 0, 0));
5333 fprintf (file, "@GOT");
5334 return true;
5335 case UNSPEC_GOTOFF:
5336 output_addr_const (file, XVECEXP (x, 0, 0));
5337 fprintf (file, "@GOTOFF");
5338 return true;
5339 case UNSPEC_PLT:
5340 output_addr_const (file, XVECEXP (x, 0, 0));
5341 fprintf (file, "@PLT");
5342 return true;
5343 case UNSPEC_PLTOFF:
5344 output_addr_const (file, XVECEXP (x, 0, 0));
5345 fprintf (file, "@PLTOFF");
5346 return true;
5347 case UNSPEC_TLSGD:
5348 output_addr_const (file, XVECEXP (x, 0, 0));
5349 fprintf (file, "@TLSGD");
5350 return true;
5351 case UNSPEC_TLSLDM:
5352 assemble_name (file, get_some_local_dynamic_name ());
5353 fprintf (file, "@TLSLDM");
5354 return true;
5355 case UNSPEC_DTPOFF:
5356 output_addr_const (file, XVECEXP (x, 0, 0));
5357 fprintf (file, "@DTPOFF");
5358 return true;
5359 case UNSPEC_NTPOFF:
5360 output_addr_const (file, XVECEXP (x, 0, 0));
5361 fprintf (file, "@NTPOFF");
5362 return true;
5363 case UNSPEC_GOTNTPOFF:
5364 output_addr_const (file, XVECEXP (x, 0, 0));
5365 fprintf (file, "@GOTNTPOFF");
5366 return true;
5367 case UNSPEC_INDNTPOFF:
5368 output_addr_const (file, XVECEXP (x, 0, 0));
5369 fprintf (file, "@INDNTPOFF");
5370 return true;
5373 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5374 switch (XINT (x, 1))
5376 case UNSPEC_POOL_OFFSET:
5377 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5378 output_addr_const (file, x);
5379 return true;
5381 return false;
5384 /* Output address operand ADDR in assembler syntax to
5385 stdio stream FILE. */
5387 void
5388 print_operand_address (FILE *file, rtx addr)
5390 struct s390_address ad;
5392 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5394 if (!TARGET_Z10)
5396 output_operand_lossage ("symbolic memory references are "
5397 "only supported on z10 or later");
5398 return;
5400 output_addr_const (file, addr);
5401 return;
5404 if (!s390_decompose_address (addr, &ad)
5405 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5406 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5407 output_operand_lossage ("cannot decompose address");
5409 if (ad.disp)
5410 output_addr_const (file, ad.disp);
5411 else
5412 fprintf (file, "0");
5414 if (ad.base && ad.indx)
5415 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5416 reg_names[REGNO (ad.base)]);
5417 else if (ad.base)
5418 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5421 /* Output operand X in assembler syntax to stdio stream FILE.
5422 CODE specifies the format flag. The following format flags
5423 are recognized:
5425 'C': print opcode suffix for branch condition.
5426 'D': print opcode suffix for inverse branch condition.
5427 'E': print opcode suffix for branch on index instruction.
5428 'G': print the size of the operand in bytes.
5429 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5430 'M': print the second word of a TImode operand.
5431 'N': print the second word of a DImode operand.
5432 'O': print only the displacement of a memory reference.
5433 'R': print only the base register of a memory reference.
5434 'S': print S-type memory reference (base+displacement).
5435 'Y': print shift count operand.
5437 'b': print integer X as if it's an unsigned byte.
5438 'c': print integer X as if it's a signed byte.
5439 'e': "end" of DImode contiguous bitmask X.
5440 'f': "end" of SImode contiguous bitmask X.
5441 'h': print integer X as if it's a signed halfword.
5442 'i': print the first nonzero HImode part of X.
5443 'j': print the first HImode part of X that is not equal to -1.
5444 'k': print the first nonzero SImode part of X.
5445 'm': print the first SImode part of X that is not equal to -1.
5446 'o': print integer X as if it's an unsigned 32bit word.
5447 's': "start" of DImode contiguous bitmask X.
5448 't': "start" of SImode contiguous bitmask X.
5449 'x': print integer X as if it's an unsigned halfword.
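   A few illustrative cases (X hypothetical): with X = (const_int -1),
   '%x' prints 65535 (unsigned halfword), '%h' prints -1 (signed
   halfword), and '%b' prints 255 (unsigned byte).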
5452 void
5453 print_operand (FILE *file, rtx x, int code)
5455 HOST_WIDE_INT ival;
5457 switch (code)
5459 case 'C':
5460 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5461 return;
5463 case 'D':
5464 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5465 return;
5467 case 'E':
5468 if (GET_CODE (x) == LE)
5469 fprintf (file, "l");
5470 else if (GET_CODE (x) == GT)
5471 fprintf (file, "h");
5472 else
5473 output_operand_lossage ("invalid comparison operator "
5474 "for 'E' output modifier");
5475 return;
5477 case 'J':
5478 if (GET_CODE (x) == SYMBOL_REF)
5480 fprintf (file, "%s", ":tls_load:");
5481 output_addr_const (file, x);
5483 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5485 fprintf (file, "%s", ":tls_gdcall:");
5486 output_addr_const (file, XVECEXP (x, 0, 0));
5488 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5490 fprintf (file, "%s", ":tls_ldcall:");
5491 const char *name = get_some_local_dynamic_name ();
5492 gcc_assert (name);
5493 assemble_name (file, name);
5495 else
5496 output_operand_lossage ("invalid reference for 'J' output modifier");
5497 return;
5499 case 'G':
5500 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5501 return;
5503 case 'O':
5505 struct s390_address ad;
5506 int ret;
5508 if (!MEM_P (x))
5510 output_operand_lossage ("memory reference expected for "
5511 "'O' output modifier");
5512 return;
5515 ret = s390_decompose_address (XEXP (x, 0), &ad);
5517 if (!ret
5518 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5519 || ad.indx)
5521 output_operand_lossage ("invalid address for 'O' output modifier");
5522 return;
5525 if (ad.disp)
5526 output_addr_const (file, ad.disp);
5527 else
5528 fprintf (file, "0");
5530 return;
5532 case 'R':
5534 struct s390_address ad;
5535 int ret;
5537 if (!MEM_P (x))
5539 output_operand_lossage ("memory reference expected for "
5540 "'R' output modifier");
5541 return;
5544 ret = s390_decompose_address (XEXP (x, 0), &ad);
5546 if (!ret
5547 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5548 || ad.indx)
5550 output_operand_lossage ("invalid address for 'R' output modifier");
5551 return;
5554 if (ad.base)
5555 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5556 else
5557 fprintf (file, "0");
5559 return;
5561 case 'S':
5563 struct s390_address ad;
5564 int ret;
5566 if (!MEM_P (x))
5568 output_operand_lossage ("memory reference expected for "
5569 "'S' output modifier");
5570 return;
5572 ret = s390_decompose_address (XEXP (x, 0), &ad);
5574 if (!ret
5575 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5576 || ad.indx)
5578 output_operand_lossage ("invalid address for 'S' output modifier");
5579 return;
5582 if (ad.disp)
5583 output_addr_const (file, ad.disp);
5584 else
5585 fprintf (file, "0");
5587 if (ad.base)
5588 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5590 return;
5592 case 'N':
5593 if (GET_CODE (x) == REG)
5594 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5595 else if (GET_CODE (x) == MEM)
5596 x = change_address (x, VOIDmode,
5597 plus_constant (Pmode, XEXP (x, 0), 4));
5598 else
5599 output_operand_lossage ("register or memory expression expected "
5600 "for 'N' output modifier");
5601 break;
5603 case 'M':
5604 if (GET_CODE (x) == REG)
5605 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5606 else if (GET_CODE (x) == MEM)
5607 x = change_address (x, VOIDmode,
5608 plus_constant (Pmode, XEXP (x, 0), 8));
5609 else
5610 output_operand_lossage ("register or memory expression expected "
5611 "for 'M' output modifier");
5612 break;
5614 case 'Y':
5615 print_shift_count_operand (file, x);
5616 return;
5619 switch (GET_CODE (x))
5621 case REG:
5622 fprintf (file, "%s", reg_names[REGNO (x)]);
5623 break;
5625 case MEM:
5626 output_address (XEXP (x, 0));
5627 break;
5629 case CONST:
5630 case CODE_LABEL:
5631 case LABEL_REF:
5632 case SYMBOL_REF:
5633 output_addr_const (file, x);
5634 break;
5636 case CONST_INT:
5637 ival = INTVAL (x);
5638 switch (code)
5640 case 0:
5641 break;
5642 case 'b':
5643 ival &= 0xff;
5644 break;
5645 case 'c':
5646 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5647 break;
5648 case 'x':
5649 ival &= 0xffff;
5650 break;
5651 case 'h':
5652 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5653 break;
5654 case 'i':
5655 ival = s390_extract_part (x, HImode, 0);
5656 break;
5657 case 'j':
5658 ival = s390_extract_part (x, HImode, -1);
5659 break;
5660 case 'k':
5661 ival = s390_extract_part (x, SImode, 0);
5662 break;
5663 case 'm':
5664 ival = s390_extract_part (x, SImode, -1);
5665 break;
5666 case 'o':
5667 ival &= 0xffffffff;
5668 break;
5669 case 'e': case 'f':
5670 case 's': case 't':
5672 int pos, len;
5673 bool ok;
5675 len = (code == 's' || code == 'e' ? 64 : 32);
5676 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5677 gcc_assert (ok);
5678 if (code == 's' || code == 't')
5679 ival = 64 - pos - len;
5680 else
5681 ival = 64 - 1 - pos;
5683 break;
5684 default:
5685 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5687 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5688 break;
5690 case CONST_DOUBLE:
5691 gcc_assert (GET_MODE (x) == VOIDmode);
5692 if (code == 'b')
5693 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5694 else if (code == 'x')
5695 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5696 else if (code == 'h')
5697 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5698 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5699 else
5701 if (code == 0)
5702 output_operand_lossage ("invalid constant - try using "
5703 "an output modifier");
5704 else
5705 output_operand_lossage ("invalid constant for output modifier '%c'",
5706 code);
5708 break;
5710 default:
5711 if (code == 0)
5712 output_operand_lossage ("invalid expression - try using "
5713 "an output modifier");
5714 else
5715 output_operand_lossage ("invalid expression for output "
5716 "modifier '%c'", code);
5717 break;
5721 /* Target hook for assembling integer objects. We need to define it
5722 here to work around a bug in some versions of GAS, which couldn't
5723 handle values smaller than INT_MIN when printed in decimal. */
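   /* For instance, a DImode constant whose value is -2^63 (well below
      INT_MIN) is emitted in hexadecimal as "\t.quad\t0x8000000000000000"
      instead of in decimal.  */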
5725 static bool
5726 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5728 if (size == 8 && aligned_p
5729 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5731 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5732 INTVAL (x));
5733 return true;
5735 return default_assemble_integer (x, size, aligned_p);
5738 /* Returns true if register REGNO is used for forming
5739 a memory address in expression X. */
5741 static bool
5742 reg_used_in_mem_p (int regno, rtx x)
5744 enum rtx_code code = GET_CODE (x);
5745 int i, j;
5746 const char *fmt;
5748 if (code == MEM)
5750 if (refers_to_regno_p (regno, XEXP (x, 0)))
5751 return true;
5753 else if (code == SET
5754 && GET_CODE (SET_DEST (x)) == PC)
5756 if (refers_to_regno_p (regno, SET_SRC (x)))
5757 return true;
5760 fmt = GET_RTX_FORMAT (code);
5761 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5763 if (fmt[i] == 'e'
5764 && reg_used_in_mem_p (regno, XEXP (x, i)))
5765 return true;
5767 else if (fmt[i] == 'E')
5768 for (j = 0; j < XVECLEN (x, i); j++)
5769 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5770 return true;
5772 return false;
5775 /* Returns true if expression DEP_RTX sets an address register
5776 used by instruction INSN to address memory. */
5778 static bool
5779 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
5781 rtx target, pat;
5783 if (NONJUMP_INSN_P (dep_rtx))
5784 dep_rtx = PATTERN (dep_rtx);
5786 if (GET_CODE (dep_rtx) == SET)
5788 target = SET_DEST (dep_rtx);
5789 if (GET_CODE (target) == STRICT_LOW_PART)
5790 target = XEXP (target, 0);
5791 while (GET_CODE (target) == SUBREG)
5792 target = SUBREG_REG (target);
5794 if (GET_CODE (target) == REG)
5796 int regno = REGNO (target);
5798 if (s390_safe_attr_type (insn) == TYPE_LA)
5800 pat = PATTERN (insn);
5801 if (GET_CODE (pat) == PARALLEL)
5803 gcc_assert (XVECLEN (pat, 0) == 2);
5804 pat = XVECEXP (pat, 0, 0);
5806 gcc_assert (GET_CODE (pat) == SET);
5807 return refers_to_regno_p (regno, SET_SRC (pat));
5809 else if (get_attr_atype (insn) == ATYPE_AGEN)
5810 return reg_used_in_mem_p (regno, PATTERN (insn));
5813 return false;
5816 /* Return 1 if dep_insn sets a register that insn uses in the agen unit. */
5819 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
5821 rtx dep_rtx = PATTERN (dep_insn);
5822 int i;
5824 if (GET_CODE (dep_rtx) == SET
5825 && addr_generation_dependency_p (dep_rtx, insn))
5826 return 1;
5827 else if (GET_CODE (dep_rtx) == PARALLEL)
5829 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5831 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5832 return 1;
5835 return 0;
5839 /* A C statement (sans semicolon) to update the integer scheduling priority
5840 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5841 reduce the priority to execute INSN later. Do not define this macro if
5842 you do not need to adjust the scheduling priorities of insns.
5844 A STD instruction should be scheduled earlier,
5845 in order to use the bypass. */
5846 static int
5847 s390_adjust_priority (rtx_insn *insn, int priority)
5849 if (! INSN_P (insn))
5850 return priority;
5852 if (s390_tune != PROCESSOR_2084_Z990
5853 && s390_tune != PROCESSOR_2094_Z9_109
5854 && s390_tune != PROCESSOR_2097_Z10
5855 && s390_tune != PROCESSOR_2817_Z196
5856 && s390_tune != PROCESSOR_2827_ZEC12)
5857 return priority;
5859 switch (s390_safe_attr_type (insn))
5861 case TYPE_FSTOREDF:
5862 case TYPE_FSTORESF:
5863 priority = priority << 3;
5864 break;
5865 case TYPE_STORE:
5866 case TYPE_STM:
5867 priority = priority << 1;
5868 break;
5869 default:
5870 break;
5872 return priority;
5876 /* The number of instructions that can be issued per cycle. */
5878 static int
5879 s390_issue_rate (void)
5881 switch (s390_tune)
5883 case PROCESSOR_2084_Z990:
5884 case PROCESSOR_2094_Z9_109:
5885 case PROCESSOR_2817_Z196:
5886 return 3;
5887 case PROCESSOR_2097_Z10:
5888 case PROCESSOR_2827_ZEC12:
5889 return 2;
5890 default:
5891 return 1;
5895 static int
5896 s390_first_cycle_multipass_dfa_lookahead (void)
5898 return 4;
5901 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5902 Fix up MEMs as required. */
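   /* Schematically (symbol name hypothetical), a literal pool access
        (mem (symbol_ref ".LC5"))
      is rewritten into
        (mem (unspec [(symbol_ref ".LC5") (reg base)] UNSPEC_LTREF))
      so that later passes can see which base register the access needs.  */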
5904 static void
5905 annotate_constant_pool_refs (rtx *x)
5907 int i, j;
5908 const char *fmt;
5910 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5911 || !CONSTANT_POOL_ADDRESS_P (*x));
5913 /* Literal pool references can only occur inside a MEM ... */
5914 if (GET_CODE (*x) == MEM)
5916 rtx memref = XEXP (*x, 0);
5918 if (GET_CODE (memref) == SYMBOL_REF
5919 && CONSTANT_POOL_ADDRESS_P (memref))
5921 rtx base = cfun->machine->base_reg;
5922 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5923 UNSPEC_LTREF);
5925 *x = replace_equiv_address (*x, addr);
5926 return;
5929 if (GET_CODE (memref) == CONST
5930 && GET_CODE (XEXP (memref, 0)) == PLUS
5931 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5932 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5933 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5935 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5936 rtx sym = XEXP (XEXP (memref, 0), 0);
5937 rtx base = cfun->machine->base_reg;
5938 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5939 UNSPEC_LTREF);
5941 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5942 return;
5946 /* ... or a load-address type pattern. */
5947 if (GET_CODE (*x) == SET)
5949 rtx addrref = SET_SRC (*x);
5951 if (GET_CODE (addrref) == SYMBOL_REF
5952 && CONSTANT_POOL_ADDRESS_P (addrref))
5954 rtx base = cfun->machine->base_reg;
5955 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5956 UNSPEC_LTREF);
5958 SET_SRC (*x) = addr;
5959 return;
5962 if (GET_CODE (addrref) == CONST
5963 && GET_CODE (XEXP (addrref, 0)) == PLUS
5964 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5965 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5966 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5968 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5969 rtx sym = XEXP (XEXP (addrref, 0), 0);
5970 rtx base = cfun->machine->base_reg;
5971 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5972 UNSPEC_LTREF);
5974 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5975 return;
5979 /* Annotate LTREL_BASE as well. */
5980 if (GET_CODE (*x) == UNSPEC
5981 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5983 rtx base = cfun->machine->base_reg;
5984 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5985 UNSPEC_LTREL_BASE);
5986 return;
5989 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5990 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5992 if (fmt[i] == 'e')
5994 annotate_constant_pool_refs (&XEXP (*x, i));
5996 else if (fmt[i] == 'E')
5998 for (j = 0; j < XVECLEN (*x, i); j++)
5999 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
6004 /* Split all branches that exceed the maximum distance.
6005 Returns true if this created a new literal pool entry. */
6007 static int
6008 s390_split_branches (void)
6010 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
6011 int new_literal = 0, ret;
6012 rtx_insn *insn;
6013 rtx pat, target;
6014 rtx *label;
6016 /* We need correct insn addresses. */
6018 shorten_branches (get_insns ());
6020 /* Find all branches that exceed 64KB, and split them. */
6022 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6024 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
6025 continue;
6027 pat = PATTERN (insn);
6028 if (GET_CODE (pat) == PARALLEL)
6029 pat = XVECEXP (pat, 0, 0);
6030 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
6031 continue;
6033 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
6035 label = &SET_SRC (pat);
6037 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
6039 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
6040 label = &XEXP (SET_SRC (pat), 1);
6041 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
6042 label = &XEXP (SET_SRC (pat), 2);
6043 else
6044 continue;
6046 else
6047 continue;
6049 if (get_attr_length (insn) <= 4)
6050 continue;
6052 /* We are going to use the return register as a scratch register;
6053 make sure it will be saved/restored by the prologue/epilogue. */
6054 cfun_frame_layout.save_return_addr_p = 1;
6056 if (!flag_pic)
6058 new_literal = 1;
6059 rtx mem = force_const_mem (Pmode, *label);
6060 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
6061 insn);
6062 INSN_ADDRESSES_NEW (set_insn, -1);
6063 annotate_constant_pool_refs (&PATTERN (set_insn));
6065 target = temp_reg;
6067 else
6069 new_literal = 1;
6070 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6071 UNSPEC_LTREL_OFFSET);
6072 target = gen_rtx_CONST (Pmode, target);
6073 target = force_const_mem (Pmode, target);
6074 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
6075 insn);
6076 INSN_ADDRESSES_NEW (set_insn, -1);
6077 annotate_constant_pool_refs (&PATTERN (set_insn));
6079 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6080 cfun->machine->base_reg),
6081 UNSPEC_LTREL_BASE);
6082 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6085 ret = validate_change (insn, label, target, 0);
6086 gcc_assert (ret);
6089 return new_literal;
6093 /* Find an annotated literal pool symbol referenced in RTX X,
6094 and store it at REF. Will abort if X contains references to
6095 more than one such pool symbol; multiple references to the same
6096 symbol are allowed, however.
6098 The rtx pointed to by REF must be initialized to NULL_RTX
6099 by the caller before calling this routine. */
6101 static void
6102 find_constant_pool_ref (rtx x, rtx *ref)
6104 int i, j;
6105 const char *fmt;
6107 /* Ignore LTREL_BASE references. */
6108 if (GET_CODE (x) == UNSPEC
6109 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6110 return;
6111 /* Likewise POOL_ENTRY insns. */
6112 if (GET_CODE (x) == UNSPEC_VOLATILE
6113 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6114 return;
6116 gcc_assert (GET_CODE (x) != SYMBOL_REF
6117 || !CONSTANT_POOL_ADDRESS_P (x));
6119 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6121 rtx sym = XVECEXP (x, 0, 0);
6122 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6123 && CONSTANT_POOL_ADDRESS_P (sym));
6125 if (*ref == NULL_RTX)
6126 *ref = sym;
6127 else
6128 gcc_assert (*ref == sym);
6130 return;
6133 fmt = GET_RTX_FORMAT (GET_CODE (x));
6134 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6136 if (fmt[i] == 'e')
6138 find_constant_pool_ref (XEXP (x, i), ref);
6140 else if (fmt[i] == 'E')
6142 for (j = 0; j < XVECLEN (x, i); j++)
6143 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6148 /* Replace every reference to the annotated literal pool
6149 symbol REF in X by its base plus OFFSET. */
6151 static void
6152 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6154 int i, j;
6155 const char *fmt;
6157 gcc_assert (*x != ref);
6159 if (GET_CODE (*x) == UNSPEC
6160 && XINT (*x, 1) == UNSPEC_LTREF
6161 && XVECEXP (*x, 0, 0) == ref)
6163 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6164 return;
6167 if (GET_CODE (*x) == PLUS
6168 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6169 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6170 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6171 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6173 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6174 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6175 return;
6178 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6179 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6181 if (fmt[i] == 'e')
6183 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6185 else if (fmt[i] == 'E')
6187 for (j = 0; j < XVECLEN (*x, i); j++)
6188 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
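/* To sketch the rewrite performed above: a reference of the form
   (unspec [SYM BASE] UNSPEC_LTREF) becomes (plus BASE OFFSET), where the
   second unspec operand is expected to be the pool base register added
   by annotate_constant_pool_refs; if a CONST_INT addend had been folded
   into such an address, it is re-applied via plus_constant so the
   original displacement is preserved.  */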
6193 /* Check whether X contains an UNSPEC_LTREL_BASE.
6194 Return its constant pool symbol if found, NULL_RTX otherwise. */
6196 static rtx
6197 find_ltrel_base (rtx x)
6199 int i, j;
6200 const char *fmt;
6202 if (GET_CODE (x) == UNSPEC
6203 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6204 return XVECEXP (x, 0, 0);
6206 fmt = GET_RTX_FORMAT (GET_CODE (x));
6207 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6209 if (fmt[i] == 'e')
6211 rtx fnd = find_ltrel_base (XEXP (x, i));
6212 if (fnd)
6213 return fnd;
6215 else if (fmt[i] == 'E')
6217 for (j = 0; j < XVECLEN (x, i); j++)
6219 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6220 if (fnd)
6221 return fnd;
6226 return NULL_RTX;
6229 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6231 static void
6232 replace_ltrel_base (rtx *x)
6234 int i, j;
6235 const char *fmt;
6237 if (GET_CODE (*x) == UNSPEC
6238 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6240 *x = XVECEXP (*x, 0, 1);
6241 return;
6244 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6245 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6247 if (fmt[i] == 'e')
6249 replace_ltrel_base (&XEXP (*x, i));
6251 else if (fmt[i] == 'E')
6253 for (j = 0; j < XVECLEN (*x, i); j++)
6254 replace_ltrel_base (&XVECEXP (*x, i, j));
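/* Note that the UNSPEC_LTREL_BASE marker carries the base register as
   its second operand (see annotate_constant_pool_refs above), so the
   replacement here simply substitutes that register for the unspec.  */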
6260 /* We keep a list of constants which we have to add to internal
6261 constant tables in the middle of large functions. */
6263 #define NR_C_MODES 11
6264 machine_mode constant_modes[NR_C_MODES] =
6266 TFmode, TImode, TDmode,
6267 DFmode, DImode, DDmode,
6268 SFmode, SImode, SDmode,
6269 HImode,
6270 QImode
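/* The modes above are deliberately listed from largest to smallest; the
   constants array of a pool is indexed by this table, and s390_dump_pool
   below walks it in order so that entries come out in decreasing size
   (and thus alignment) order.  */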
6273 struct constant
6275 struct constant *next;
6276 rtx value;
6277 rtx_code_label *label;
6280 struct constant_pool
6282 struct constant_pool *next;
6283 rtx_insn *first_insn;
6284 rtx_insn *pool_insn;
6285 bitmap insns;
6286 rtx_insn *emit_pool_after;
6288 struct constant *constants[NR_C_MODES];
6289 struct constant *execute;
6290 rtx_code_label *label;
6291 int size;
6294 /* Allocate new constant_pool structure. */
6296 static struct constant_pool *
6297 s390_alloc_pool (void)
6299 struct constant_pool *pool;
6300 int i;
6302 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6303 pool->next = NULL;
6304 for (i = 0; i < NR_C_MODES; i++)
6305 pool->constants[i] = NULL;
6307 pool->execute = NULL;
6308 pool->label = gen_label_rtx ();
6309 pool->first_insn = NULL;
6310 pool->pool_insn = NULL;
6311 pool->insns = BITMAP_ALLOC (NULL);
6312 pool->size = 0;
6313 pool->emit_pool_after = NULL;
6315 return pool;
6318 /* Create new constant pool covering instructions starting at INSN
6319 and chain it to the end of POOL_LIST. */
6321 static struct constant_pool *
6322 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
6324 struct constant_pool *pool, **prev;
6326 pool = s390_alloc_pool ();
6327 pool->first_insn = insn;
6329 for (prev = pool_list; *prev; prev = &(*prev)->next)
6331 *prev = pool;
6333 return pool;
6336 /* End range of instructions covered by POOL at INSN and emit
6337 placeholder insn representing the pool. */
6339 static void
6340 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
6342 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6344 if (!insn)
6345 insn = get_last_insn ();
6347 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6348 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6351 /* Add INSN to the list of insns covered by POOL. */
6353 static void
6354 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6356 bitmap_set_bit (pool->insns, INSN_UID (insn));
6359 /* Return pool out of POOL_LIST that covers INSN. */
6361 static struct constant_pool *
6362 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6364 struct constant_pool *pool;
6366 for (pool = pool_list; pool; pool = pool->next)
6367 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6368 break;
6370 return pool;
6373 /* Add constant VAL of mode MODE to the constant pool POOL. */
6375 static void
6376 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
6378 struct constant *c;
6379 int i;
6381 for (i = 0; i < NR_C_MODES; i++)
6382 if (constant_modes[i] == mode)
6383 break;
6384 gcc_assert (i != NR_C_MODES);
6386 for (c = pool->constants[i]; c != NULL; c = c->next)
6387 if (rtx_equal_p (val, c->value))
6388 break;
6390 if (c == NULL)
6392 c = (struct constant *) xmalloc (sizeof *c);
6393 c->value = val;
6394 c->label = gen_label_rtx ();
6395 c->next = pool->constants[i];
6396 pool->constants[i] = c;
6397 pool->size += GET_MODE_SIZE (mode);
6401 /* Return an rtx that represents the offset of X from the start of
6402 pool POOL. */
6404 static rtx
6405 s390_pool_offset (struct constant_pool *pool, rtx x)
6407 rtx label;
6409 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6410 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6411 UNSPEC_POOL_OFFSET);
6412 return gen_rtx_CONST (GET_MODE (x), x);
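/* The UNSPEC_POOL_OFFSET wrapper built here stands for the distance of X
   from the pool's base label; it stays symbolic until the pool has
   actually been placed and the final output is generated.  */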
6415 /* Find constant VAL of mode MODE in the constant pool POOL.
6416 Return an RTX describing the distance from the start of
6417 the pool to the location of the new constant. */
6419 static rtx
6420 s390_find_constant (struct constant_pool *pool, rtx val,
6421 machine_mode mode)
6423 struct constant *c;
6424 int i;
6426 for (i = 0; i < NR_C_MODES; i++)
6427 if (constant_modes[i] == mode)
6428 break;
6429 gcc_assert (i != NR_C_MODES);
6431 for (c = pool->constants[i]; c != NULL; c = c->next)
6432 if (rtx_equal_p (val, c->value))
6433 break;
6435 gcc_assert (c);
6437 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6440 /* Check whether INSN is an execute. Return the label_ref to its
6441 execute target template if so, NULL_RTX otherwise. */
6443 static rtx
6444 s390_execute_label (rtx insn)
6446 if (NONJUMP_INSN_P (insn)
6447 && GET_CODE (PATTERN (insn)) == PARALLEL
6448 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6449 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6450 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6452 return NULL_RTX;
6455 /* Add execute target for INSN to the constant pool POOL. */
6457 static void
6458 s390_add_execute (struct constant_pool *pool, rtx insn)
6460 struct constant *c;
6462 for (c = pool->execute; c != NULL; c = c->next)
6463 if (INSN_UID (insn) == INSN_UID (c->value))
6464 break;
6466 if (c == NULL)
6468 c = (struct constant *) xmalloc (sizeof *c);
6469 c->value = insn;
6470 c->label = gen_label_rtx ();
6471 c->next = pool->execute;
6472 pool->execute = c;
6473 pool->size += 6;
6477 /* Find execute target for INSN in the constant pool POOL.
6478 Return an RTX describing the distance from the start of
6479 the pool to the location of the execute target. */
6481 static rtx
6482 s390_find_execute (struct constant_pool *pool, rtx insn)
6484 struct constant *c;
6486 for (c = pool->execute; c != NULL; c = c->next)
6487 if (INSN_UID (insn) == INSN_UID (c->value))
6488 break;
6490 gcc_assert (c);
6492 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6495 /* For an execute INSN, extract the execute target template. */
6497 static rtx
6498 s390_execute_target (rtx insn)
6500 rtx pattern = PATTERN (insn);
6501 gcc_assert (s390_execute_label (insn));
6503 if (XVECLEN (pattern, 0) == 2)
6505 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6507 else
6509 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6510 int i;
6512 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6513 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6515 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6518 return pattern;
6521 /* Indicate that INSN cannot be duplicated. This is the case for
6522 execute insns that carry a unique label. */
6524 static bool
6525 s390_cannot_copy_insn_p (rtx_insn *insn)
6527 rtx label = s390_execute_label (insn);
6528 return label && label != const0_rtx;
6531 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6532 do not emit the pool base label. */
6534 static void
6535 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6537 struct constant *c;
6538 rtx_insn *insn = pool->pool_insn;
6539 int i;
6541 /* Switch to rodata section. */
6542 if (TARGET_CPU_ZARCH)
6544 insn = emit_insn_after (gen_pool_section_start (), insn);
6545 INSN_ADDRESSES_NEW (insn, -1);
6548 /* Ensure minimum pool alignment. */
6549 if (TARGET_CPU_ZARCH)
6550 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6551 else
6552 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6553 INSN_ADDRESSES_NEW (insn, -1);
6555 /* Emit pool base label. */
6556 if (!remote_label)
6558 insn = emit_label_after (pool->label, insn);
6559 INSN_ADDRESSES_NEW (insn, -1);
6562 /* Dump constants in descending alignment requirement order,
6563 ensuring proper alignment for every constant. */
6564 for (i = 0; i < NR_C_MODES; i++)
6565 for (c = pool->constants[i]; c; c = c->next)
6567 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6568 rtx value = copy_rtx (c->value);
6569 if (GET_CODE (value) == CONST
6570 && GET_CODE (XEXP (value, 0)) == UNSPEC
6571 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6572 && XVECLEN (XEXP (value, 0), 0) == 1)
6573 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6575 insn = emit_label_after (c->label, insn);
6576 INSN_ADDRESSES_NEW (insn, -1);
6578 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6579 gen_rtvec (1, value),
6580 UNSPECV_POOL_ENTRY);
6581 insn = emit_insn_after (value, insn);
6582 INSN_ADDRESSES_NEW (insn, -1);
6585 /* Ensure minimum alignment for instructions. */
6586 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6587 INSN_ADDRESSES_NEW (insn, -1);
6589 /* Output in-pool execute template insns. */
6590 for (c = pool->execute; c; c = c->next)
6592 insn = emit_label_after (c->label, insn);
6593 INSN_ADDRESSES_NEW (insn, -1);
6595 insn = emit_insn_after (s390_execute_target (c->value), insn);
6596 INSN_ADDRESSES_NEW (insn, -1);
6599 /* Switch back to previous section. */
6600 if (TARGET_CPU_ZARCH)
6602 insn = emit_insn_after (gen_pool_section_end (), insn);
6603 INSN_ADDRESSES_NEW (insn, -1);
6606 insn = emit_barrier_after (insn);
6607 INSN_ADDRESSES_NEW (insn, -1);
6609 /* Remove placeholder insn. */
6610 remove_insn (pool->pool_insn);
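/* Summarizing the layout emitted above: on TARGET_CPU_ZARCH the pool is
   bracketed by pool_section_start/end and aligned to 8 bytes, otherwise
   it stays in the text section with 4-byte alignment; then come the base
   label (unless REMOTE_LABEL), the constants in decreasing size order,
   each wrapped in UNSPECV_POOL_ENTRY, a 2-byte re-alignment, any execute
   target templates, and finally a barrier.  The original placeholder
   insn is removed once the real pool has been emitted.  */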
6613 /* Free all memory used by POOL. */
6615 static void
6616 s390_free_pool (struct constant_pool *pool)
6618 struct constant *c, *next;
6619 int i;
6621 for (i = 0; i < NR_C_MODES; i++)
6622 for (c = pool->constants[i]; c; c = next)
6624 next = c->next;
6625 free (c);
6628 for (c = pool->execute; c; c = next)
6630 next = c->next;
6631 free (c);
6634 BITMAP_FREE (pool->insns);
6635 free (pool);
6639 /* Collect main literal pool. Return NULL on overflow. */
6641 static struct constant_pool *
6642 s390_mainpool_start (void)
6644 struct constant_pool *pool;
6645 rtx_insn *insn;
6647 pool = s390_alloc_pool ();
6649 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6651 if (NONJUMP_INSN_P (insn)
6652 && GET_CODE (PATTERN (insn)) == SET
6653 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6654 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6656 /* There might be two main_pool instructions if base_reg
6657 is call-clobbered; one for shrink-wrapped code and one
6658 for the rest. We want to keep the first. */
6659 if (pool->pool_insn)
6661 insn = PREV_INSN (insn);
6662 delete_insn (NEXT_INSN (insn));
6663 continue;
6665 pool->pool_insn = insn;
6668 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6670 s390_add_execute (pool, insn);
6672 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6674 rtx pool_ref = NULL_RTX;
6675 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6676 if (pool_ref)
6678 rtx constant = get_pool_constant (pool_ref);
6679 machine_mode mode = get_pool_mode (pool_ref);
6680 s390_add_constant (pool, constant, mode);
6684 /* If hot/cold partitioning is enabled we have to make sure that
6685 the literal pool is emitted in the same section where the
6686 initialization of the literal pool base pointer takes place.
6687 emit_pool_after is only used in the non-overflow case on
6688 non-Z CPUs where we can emit the literal pool at the end of the
6689 function body within the text section. */
6690 if (NOTE_P (insn)
6691 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6692 && !pool->emit_pool_after)
6693 pool->emit_pool_after = PREV_INSN (insn);
6696 gcc_assert (pool->pool_insn || pool->size == 0);
6698 if (pool->size >= 4096)
6700 /* We're going to chunkify the pool, so remove the main
6701 pool placeholder insn. */
6702 remove_insn (pool->pool_insn);
6704 s390_free_pool (pool);
6705 pool = NULL;
6708 /* If the function ends with the section where the literal pool
6709 should be emitted, set the marker to its end. */
6710 if (pool && !pool->emit_pool_after)
6711 pool->emit_pool_after = get_last_insn ();
6713 return pool;
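/* The 4096-byte limit above presumably reflects the addressing mode used
   for pool accesses: a base register plus a 12-bit unsigned displacement
   only reaches offsets 0..4095, so anything larger has to fall back to
   the chunked pools built by s390_chunkify_start.  */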
6716 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6717 Modify the current function to output the pool constants as well as
6718 the pool register setup instruction. */
6720 static void
6721 s390_mainpool_finish (struct constant_pool *pool)
6723 rtx base_reg = cfun->machine->base_reg;
6725 /* If the pool is empty, we're done. */
6726 if (pool->size == 0)
6728 /* We don't actually need a base register after all. */
6729 cfun->machine->base_reg = NULL_RTX;
6731 if (pool->pool_insn)
6732 remove_insn (pool->pool_insn);
6733 s390_free_pool (pool);
6734 return;
6737 /* We need correct insn addresses. */
6738 shorten_branches (get_insns ());
6740 /* On zSeries, we use a LARL to load the pool register. The pool is
6741 located in the .rodata section, so we emit it after the function. */
6742 if (TARGET_CPU_ZARCH)
6744 rtx set = gen_main_base_64 (base_reg, pool->label);
6745 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
6746 INSN_ADDRESSES_NEW (insn, -1);
6747 remove_insn (pool->pool_insn);
6749 insn = get_last_insn ();
6750 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6751 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6753 s390_dump_pool (pool, 0);
6756 /* On S/390, if the total size of the function's code plus literal pool
6757 does not exceed 4096 bytes, we use BASR to set up a function base
6758 pointer, and emit the literal pool at the end of the function. */
6759 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6760 + pool->size + 8 /* alignment slop */ < 4096)
6762 rtx set = gen_main_base_31_small (base_reg, pool->label);
6763 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
6764 INSN_ADDRESSES_NEW (insn, -1);
6765 remove_insn (pool->pool_insn);
6767 insn = emit_label_after (pool->label, insn);
6768 INSN_ADDRESSES_NEW (insn, -1);
6770 /* emit_pool_after will be set by s390_mainpool_start to the
6771 last insn of the section where the literal pool should be
6772 emitted. */
6773 insn = pool->emit_pool_after;
6775 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6776 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6778 s390_dump_pool (pool, 1);
6781 /* Otherwise, we emit an inline literal pool and use BASR to branch
6782 over it, setting up the pool register at the same time. */
6783 else
6785 rtx_code_label *pool_end = gen_label_rtx ();
6787 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
6788 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
6789 JUMP_LABEL (insn) = pool_end;
6790 INSN_ADDRESSES_NEW (insn, -1);
6791 remove_insn (pool->pool_insn);
6793 insn = emit_label_after (pool->label, insn);
6794 INSN_ADDRESSES_NEW (insn, -1);
6796 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6797 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6799 insn = emit_label_after (pool_end, pool->pool_insn);
6800 INSN_ADDRESSES_NEW (insn, -1);
6802 s390_dump_pool (pool, 1);
6806 /* Replace all literal pool references. */
6808 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
6810 if (INSN_P (insn))
6811 replace_ltrel_base (&PATTERN (insn));
6813 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6815 rtx addr, pool_ref = NULL_RTX;
6816 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6817 if (pool_ref)
6819 if (s390_execute_label (insn))
6820 addr = s390_find_execute (pool, insn);
6821 else
6822 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6823 get_pool_mode (pool_ref));
6825 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6826 INSN_CODE (insn) = -1;
6832 /* Free the pool. */
6833 s390_free_pool (pool);
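/* A rough sketch of the three strategies above (the exact assembly is
   determined by the main_base_* patterns in s390.md):

     zarch:          larl  %rB,.LTpool       # pool emitted in .rodata
     31-bit, small:  basr  %rB,%r0           # .LTpool right after the basr,
                                             # constants at end of section
     31-bit, large:  basr-based setup that branches around an inline pool

   where %rB stands for cfun->machine->base_reg.  */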
6836 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6837 We have decided we cannot use this pool, so revert all changes
6838 to the current function that were done by s390_mainpool_start. */
6839 static void
6840 s390_mainpool_cancel (struct constant_pool *pool)
6842 /* We didn't actually change the instruction stream, so simply
6843 free the pool memory. */
6844 s390_free_pool (pool);
6848 /* Chunkify the literal pool. */
6850 #define S390_POOL_CHUNK_MIN 0xc00
6851 #define S390_POOL_CHUNK_MAX 0xe00
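/* Both thresholds stay well below the 4096-byte displacement range.  On
   TARGET_CPU_ZARCH a chunk is simply closed once it reaches
   S390_POOL_CHUNK_MAX.  On older CPUs a chunk below S390_POOL_CHUNK_MIN
   is never ended early, between the two limits it is ended at the next
   natural BARRIER, and beyond S390_POOL_CHUNK_MAX (or at a section
   switch) an explicit jump and barrier are generated to close it.  The
   slack up to 4096 presumably leaves room for the base-register reload
   insns and alignment slop accounted for below.  */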
6853 static struct constant_pool *
6854 s390_chunkify_start (void)
6856 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6857 int extra_size = 0;
6858 bitmap far_labels;
6859 rtx pending_ltrel = NULL_RTX;
6860 rtx_insn *insn;
6862 rtx (*gen_reload_base) (rtx, rtx) =
6863 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6866 /* We need correct insn addresses. */
6868 shorten_branches (get_insns ());
6870 /* Scan all insns and move literals to pool chunks. */
6872 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6874 bool section_switch_p = false;
6876 /* Check for pending LTREL_BASE. */
6877 if (INSN_P (insn))
6879 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6880 if (ltrel_base)
6882 gcc_assert (ltrel_base == pending_ltrel);
6883 pending_ltrel = NULL_RTX;
6887 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6889 if (!curr_pool)
6890 curr_pool = s390_start_pool (&pool_list, insn);
6892 s390_add_execute (curr_pool, insn);
6893 s390_add_pool_insn (curr_pool, insn);
6895 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6897 rtx pool_ref = NULL_RTX;
6898 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6899 if (pool_ref)
6901 rtx constant = get_pool_constant (pool_ref);
6902 machine_mode mode = get_pool_mode (pool_ref);
6904 if (!curr_pool)
6905 curr_pool = s390_start_pool (&pool_list, insn);
6907 s390_add_constant (curr_pool, constant, mode);
6908 s390_add_pool_insn (curr_pool, insn);
6910 /* Don't split the pool chunk between an LTREL_OFFSET load
6911 and the corresponding LTREL_BASE. */
6912 if (GET_CODE (constant) == CONST
6913 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6914 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6916 gcc_assert (!pending_ltrel);
6917 pending_ltrel = pool_ref;
6922 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6924 if (curr_pool)
6925 s390_add_pool_insn (curr_pool, insn);
6926 /* An LTREL_BASE must follow within the same basic block. */
6927 gcc_assert (!pending_ltrel);
6930 if (NOTE_P (insn))
6931 switch (NOTE_KIND (insn))
6933 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6934 section_switch_p = true;
6935 break;
6936 case NOTE_INSN_VAR_LOCATION:
6937 case NOTE_INSN_CALL_ARG_LOCATION:
6938 continue;
6939 default:
6940 break;
6943 if (!curr_pool
6944 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6945 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6946 continue;
6948 if (TARGET_CPU_ZARCH)
6950 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6951 continue;
6953 s390_end_pool (curr_pool, NULL);
6954 curr_pool = NULL;
6956 else
6958 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6959 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6960 + extra_size;
6962 /* We will later have to insert base register reload insns.
6963 Those will have an effect on code size, which we need to
6964 consider here. This calculation makes rather pessimistic
6965 worst-case assumptions. */
6966 if (LABEL_P (insn))
6967 extra_size += 6;
6969 if (chunk_size < S390_POOL_CHUNK_MIN
6970 && curr_pool->size < S390_POOL_CHUNK_MIN
6971 && !section_switch_p)
6972 continue;
6974 /* Pool chunks can only be inserted after BARRIERs ... */
6975 if (BARRIER_P (insn))
6977 s390_end_pool (curr_pool, insn);
6978 curr_pool = NULL;
6979 extra_size = 0;
6982 /* ... so if we don't find one in time, create one. */
6983 else if (chunk_size > S390_POOL_CHUNK_MAX
6984 || curr_pool->size > S390_POOL_CHUNK_MAX
6985 || section_switch_p)
6987 rtx_insn *label, *jump, *barrier, *next, *prev;
6989 if (!section_switch_p)
6991 /* We can insert the barrier only after a 'real' insn. */
6992 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
6993 continue;
6994 if (get_attr_length (insn) == 0)
6995 continue;
6996 /* Don't separate LTREL_BASE from the corresponding
6997 LTREL_OFFSET load. */
6998 if (pending_ltrel)
6999 continue;
7000 next = insn;
7003 insn = next;
7004 next = NEXT_INSN (insn);
7006 while (next
7007 && NOTE_P (next)
7008 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
7009 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
7011 else
7013 gcc_assert (!pending_ltrel);
7015 /* The old pool has to end before the section switch
7016 note in order to make it part of the current
7017 section. */
7018 insn = PREV_INSN (insn);
7021 label = gen_label_rtx ();
7022 prev = insn;
7023 if (prev && NOTE_P (prev))
7024 prev = prev_nonnote_insn (prev);
7025 if (prev)
7026 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
7027 INSN_LOCATION (prev));
7028 else
7029 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
7030 barrier = emit_barrier_after (jump);
7031 insn = emit_label_after (label, barrier);
7032 JUMP_LABEL (jump) = label;
7033 LABEL_NUSES (label) = 1;
7035 INSN_ADDRESSES_NEW (jump, -1);
7036 INSN_ADDRESSES_NEW (barrier, -1);
7037 INSN_ADDRESSES_NEW (insn, -1);
7039 s390_end_pool (curr_pool, barrier);
7040 curr_pool = NULL;
7041 extra_size = 0;
7046 if (curr_pool)
7047 s390_end_pool (curr_pool, NULL);
7048 gcc_assert (!pending_ltrel);
7050 /* Find all labels that are branched into
7051 from an insn belonging to a different chunk. */
7053 far_labels = BITMAP_ALLOC (NULL);
7055 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7057 rtx_jump_table_data *table;
7059 /* Labels marked with LABEL_PRESERVE_P can be the target
7060 of non-local jumps, so we have to mark them.
7061 The same holds for named labels.
7063 Don't do that, however, if it is the label before
7064 a jump table. */
7066 if (LABEL_P (insn)
7067 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7069 rtx_insn *vec_insn = NEXT_INSN (insn);
7070 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7071 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7073 /* Check potential targets in a table jump (casesi_jump). */
7074 else if (tablejump_p (insn, NULL, &table))
7076 rtx vec_pat = PATTERN (table);
7077 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7079 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7081 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7083 if (s390_find_pool (pool_list, label)
7084 != s390_find_pool (pool_list, insn))
7085 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7088 /* If we have a direct jump (conditional or unconditional),
7089 check all potential targets. */
7090 else if (JUMP_P (insn))
7092 rtx pat = PATTERN (insn);
7094 if (GET_CODE (pat) == PARALLEL)
7095 pat = XVECEXP (pat, 0, 0);
7097 if (GET_CODE (pat) == SET)
7099 rtx label = JUMP_LABEL (insn);
7100 if (label && !ANY_RETURN_P (label))
7102 if (s390_find_pool (pool_list, label)
7103 != s390_find_pool (pool_list, insn))
7104 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7110 /* Insert base register reload insns before every pool. */
7112 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7114 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7115 curr_pool->label);
7116 rtx_insn *insn = curr_pool->first_insn;
7117 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7120 /* Insert base register reload insns at every far label. */
7122 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7123 if (LABEL_P (insn)
7124 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7126 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7127 if (pool)
7129 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7130 pool->label);
7131 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7136 BITMAP_FREE (far_labels);
7139 /* Recompute insn addresses. */
7141 init_insn_lengths ();
7142 shorten_branches (get_insns ());
7144 return pool_list;
7147 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7148 After we have decided to use this list, finish implementing
7149 all changes to the current function as required. */
7151 static void
7152 s390_chunkify_finish (struct constant_pool *pool_list)
7154 struct constant_pool *curr_pool = NULL;
7155 rtx_insn *insn;
7158 /* Replace all literal pool references. */
7160 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7162 if (INSN_P (insn))
7163 replace_ltrel_base (&PATTERN (insn));
7165 curr_pool = s390_find_pool (pool_list, insn);
7166 if (!curr_pool)
7167 continue;
7169 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7171 rtx addr, pool_ref = NULL_RTX;
7172 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7173 if (pool_ref)
7175 if (s390_execute_label (insn))
7176 addr = s390_find_execute (curr_pool, insn);
7177 else
7178 addr = s390_find_constant (curr_pool,
7179 get_pool_constant (pool_ref),
7180 get_pool_mode (pool_ref));
7182 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7183 INSN_CODE (insn) = -1;
7188 /* Dump out all literal pools. */
7190 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7191 s390_dump_pool (curr_pool, 0);
7193 /* Free pool list. */
7195 while (pool_list)
7197 struct constant_pool *next = pool_list->next;
7198 s390_free_pool (pool_list);
7199 pool_list = next;
7203 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7204 We have decided we cannot use this list, so revert all changes
7205 to the current function that were done by s390_chunkify_start. */
7207 static void
7208 s390_chunkify_cancel (struct constant_pool *pool_list)
7210 struct constant_pool *curr_pool = NULL;
7211 rtx_insn *insn;
7213 /* Remove all pool placeholder insns. */
7215 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7217 /* Did we insert an extra barrier? Remove it. */
7218 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
7219 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
7220 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
7222 if (jump && JUMP_P (jump)
7223 && barrier && BARRIER_P (barrier)
7224 && label && LABEL_P (label)
7225 && GET_CODE (PATTERN (jump)) == SET
7226 && SET_DEST (PATTERN (jump)) == pc_rtx
7227 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7228 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7230 remove_insn (jump);
7231 remove_insn (barrier);
7232 remove_insn (label);
7235 remove_insn (curr_pool->pool_insn);
7238 /* Remove all base register reload insns. */
7240 for (insn = get_insns (); insn; )
7242 rtx_insn *next_insn = NEXT_INSN (insn);
7244 if (NONJUMP_INSN_P (insn)
7245 && GET_CODE (PATTERN (insn)) == SET
7246 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7247 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7248 remove_insn (insn);
7250 insn = next_insn;
7253 /* Free pool list. */
7255 while (pool_list)
7257 struct constant_pool *next = pool_list->next;
7258 s390_free_pool (pool_list);
7259 pool_list = next;
7263 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7265 void
7266 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
7268 REAL_VALUE_TYPE r;
7270 switch (GET_MODE_CLASS (mode))
7272 case MODE_FLOAT:
7273 case MODE_DECIMAL_FLOAT:
7274 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7276 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7277 assemble_real (r, mode, align);
7278 break;
7280 case MODE_INT:
7281 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7282 mark_symbol_refs_as_used (exp);
7283 break;
7285 default:
7286 gcc_unreachable ();
7291 /* Return an RTL expression representing the value of the return address
7292 for the frame COUNT steps up from the current frame. FRAME is the
7293 frame pointer of that frame. */
7296 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7298 int offset;
7299 rtx addr;
7301 /* Without backchain, we fail for all but the current frame. */
7303 if (!TARGET_BACKCHAIN && count > 0)
7304 return NULL_RTX;
7306 /* For the current frame, we need to make sure the initial
7307 value of RETURN_REGNUM is actually saved. */
7309 if (count == 0)
7311 /* On non-z architectures branch splitting could overwrite r14. */
7312 if (TARGET_CPU_ZARCH)
7313 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7314 else
7316 cfun_frame_layout.save_return_addr_p = true;
7317 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7321 if (TARGET_PACKED_STACK)
7322 offset = -2 * UNITS_PER_LONG;
7323 else
7324 offset = RETURN_REGNUM * UNITS_PER_LONG;
7326 addr = plus_constant (Pmode, frame, offset);
7327 addr = memory_address (Pmode, addr);
7328 return gen_rtx_MEM (Pmode, addr);
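/* For the count > 0 case this amounts to simple arithmetic on the saved
   register area: with the packed stack layout the return address is read
   from FRAME - 2 * UNITS_PER_LONG, otherwise from
   FRAME + RETURN_REGNUM * UNITS_PER_LONG, i.e. (assuming RETURN_REGNUM
   is %r14) offset 112 in 64-bit mode and 56 in 31-bit mode.  */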
7331 /* Return an RTL expression representing the back chain stored in
7332 the current stack frame. */
7335 s390_back_chain_rtx (void)
7337 rtx chain;
7339 gcc_assert (TARGET_BACKCHAIN);
7341 if (TARGET_PACKED_STACK)
7342 chain = plus_constant (Pmode, stack_pointer_rtx,
7343 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7344 else
7345 chain = stack_pointer_rtx;
7347 chain = gen_rtx_MEM (Pmode, chain);
7348 return chain;
7351 /* Find the first call-clobbered register unused in a function.
7352 This could be used as base register in a leaf function
7353 or for holding the return address before epilogue. */
7355 static int
7356 find_unused_clobbered_reg (void)
7358 int i;
7359 for (i = 0; i < 6; i++)
7360 if (!df_regs_ever_live_p (i))
7361 return i;
7362 return 0;
7366 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7367 clobbered hard regs in SETREG. */
7369 static void
7370 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7372 char *regs_ever_clobbered = (char *)data;
7373 unsigned int i, regno;
7374 machine_mode mode = GET_MODE (setreg);
7376 if (GET_CODE (setreg) == SUBREG)
7378 rtx inner = SUBREG_REG (setreg);
7379 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
7380 return;
7381 regno = subreg_regno (setreg);
7383 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
7384 regno = REGNO (setreg);
7385 else
7386 return;
7388 for (i = regno;
7389 i < regno + HARD_REGNO_NREGS (regno, mode);
7390 i++)
7391 regs_ever_clobbered[i] = 1;
7394 /* Walks through all basic blocks of the current function looking
7395 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7396 of the passed char array REGS_EVER_CLOBBERED are set to one for
7397 each of those regs. */
7399 static void
7400 s390_regs_ever_clobbered (char regs_ever_clobbered[])
7402 basic_block cur_bb;
7403 rtx_insn *cur_insn;
7404 unsigned int i;
7406 memset (regs_ever_clobbered, 0, 32);
7408 /* For non-leaf functions we have to consider all call-clobbered regs to be
7409 clobbered. */
7410 if (!crtl->is_leaf)
7412 for (i = 0; i < 32; i++)
7413 regs_ever_clobbered[i] = call_really_used_regs[i];
7416 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7417 this work is done by liveness analysis (mark_regs_live_at_end).
7418 Special care is needed for functions containing landing pads. Landing pads
7419 may use the eh registers, but the code which sets these registers is not
7420 contained in that function. Hence s390_regs_ever_clobbered is not able to
7421 deal with this automatically. */
7422 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7423 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7424 if (crtl->calls_eh_return
7425 || (cfun->machine->has_landing_pad_p
7426 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7427 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7429 /* For nonlocal gotos all call-saved registers have to be saved.
7430 This flag is also set for the unwinding code in libgcc.
7431 See expand_builtin_unwind_init. For regs_ever_live this is done by
7432 reload. */
7433 if (crtl->saves_all_registers)
7434 for (i = 0; i < 32; i++)
7435 if (!call_really_used_regs[i])
7436 regs_ever_clobbered[i] = 1;
7438 FOR_EACH_BB_FN (cur_bb, cfun)
7440 FOR_BB_INSNS (cur_bb, cur_insn)
7442 rtx pat;
7444 if (!INSN_P (cur_insn))
7445 continue;
7447 pat = PATTERN (cur_insn);
7449 /* Ignore GPR restore insns. */
7450 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
7452 if (GET_CODE (pat) == SET
7453 && GENERAL_REG_P (SET_DEST (pat)))
7455 /* lgdr */
7456 if (GET_MODE (SET_SRC (pat)) == DImode
7457 && FP_REG_P (SET_SRC (pat)))
7458 continue;
7460 /* l / lg */
7461 if (GET_CODE (SET_SRC (pat)) == MEM)
7462 continue;
7465 /* lm / lmg */
7466 if (GET_CODE (pat) == PARALLEL
7467 && load_multiple_operation (pat, VOIDmode))
7468 continue;
7471 note_stores (pat,
7472 s390_reg_clobbered_rtx,
7473 regs_ever_clobbered);
7478 /* Determine the frame area which actually has to be accessed
7479 in the function epilogue. The values are stored at the
7480 given pointers AREA_BOTTOM (address of the lowest used stack
7481 address) and AREA_TOP (address of the first item which does
7482 not belong to the stack frame). */
7484 static void
7485 s390_frame_area (int *area_bottom, int *area_top)
7487 int b, t;
7489 b = INT_MAX;
7490 t = INT_MIN;
7492 if (cfun_frame_layout.first_restore_gpr != -1)
7494 b = (cfun_frame_layout.gprs_offset
7495 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7496 t = b + (cfun_frame_layout.last_restore_gpr
7497 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7500 if (TARGET_64BIT && cfun_save_high_fprs_p)
7502 b = MIN (b, cfun_frame_layout.f8_offset);
7503 t = MAX (t, (cfun_frame_layout.f8_offset
7504 + cfun_frame_layout.high_fprs * 8));
7507 if (!TARGET_64BIT)
7509 if (cfun_fpr_save_p (FPR4_REGNUM))
7511 b = MIN (b, cfun_frame_layout.f4_offset);
7512 t = MAX (t, cfun_frame_layout.f4_offset + 8);
7514 if (cfun_fpr_save_p (FPR6_REGNUM))
7516 b = MIN (b, cfun_frame_layout.f4_offset + 8);
7517 t = MAX (t, cfun_frame_layout.f4_offset + 16);
7520 *area_bottom = b;
7521 *area_top = t;
7523 /* Update gpr_save_slots in the frame layout trying to make use of
7524 FPRs as GPR save slots.
7525 This is a helper routine of s390_register_info. */
7527 static void
7528 s390_register_info_gprtofpr ()
7530 int save_reg_slot = FPR0_REGNUM;
7531 int i, j;
7533 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
7534 return;
7536 for (i = 15; i >= 6; i--)
7538 if (cfun_gpr_save_slot (i) == 0)
7539 continue;
7541 /* Advance to the next FP register which can be used as a
7542 GPR save slot. */
7543 while ((!call_really_used_regs[save_reg_slot]
7544 || df_regs_ever_live_p (save_reg_slot)
7545 || cfun_fpr_save_p (save_reg_slot))
7546 && FP_REGNO_P (save_reg_slot))
7547 save_reg_slot++;
7548 if (!FP_REGNO_P (save_reg_slot))
7550 /* We only want to use ldgr/lgdr if we can get rid of
7551 stm/lm entirely. So undo the gpr slot allocation in
7552 case we ran out of FPR save slots. */
7553 for (j = 6; j <= 15; j++)
7554 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
7555 cfun_gpr_save_slot (j) = -1;
7556 break;
7558 cfun_gpr_save_slot (i) = save_reg_slot++;
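/* The idea above: on TARGET_Z10 and newer, ldgr/lgdr can move a 64-bit
   GPR into an FPR and back, so in a leaf function a call-clobbered FPR
   that is otherwise unused can serve as the save slot for a call-saved
   GPR, avoiding the memory accesses of stm/lm.  If not every GPR gets an
   FPR slot the allocation is undone, since mixing the two schemes would
   not eliminate stm/lm anyway.  */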
7562 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
7563 stdarg.
7564 This is a helper routine for s390_register_info. */
7566 static void
7567 s390_register_info_stdarg_fpr ()
7569 int i;
7570 int min_fpr;
7571 int max_fpr;
7573 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
7574 f0-f4 for 64 bit. */
7575 if (!cfun->stdarg
7576 || !TARGET_HARD_FLOAT
7577 || !cfun->va_list_fpr_size
7578 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
7579 return;
7581 min_fpr = crtl->args.info.fprs;
7582 max_fpr = min_fpr + cfun->va_list_fpr_size;
7583 if (max_fpr > FP_ARG_NUM_REG)
7584 max_fpr = FP_ARG_NUM_REG;
7586 for (i = min_fpr; i < max_fpr; i++)
7587 cfun_set_fpr_save (i + FPR0_REGNUM);
7590 /* Reserve the GPR save slots for GPRs which need to be saved due to
7591 stdarg.
7592 This is a helper routine for s390_register_info. */
7594 static void
7595 s390_register_info_stdarg_gpr ()
7597 int i;
7598 int min_gpr;
7599 int max_gpr;
7601 if (!cfun->stdarg
7602 || !cfun->va_list_gpr_size
7603 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
7604 return;
7606 min_gpr = crtl->args.info.gprs;
7607 max_gpr = min_gpr + cfun->va_list_gpr_size;
7608 if (max_gpr > GP_ARG_NUM_REG)
7609 max_gpr = GP_ARG_NUM_REG;
7611 for (i = min_gpr; i < max_gpr; i++)
7612 cfun_gpr_save_slot (2 + i) = -1;
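/* As used by the routines above and below, the gpr_save_slot entries
   encode the save decision per GPR: 0 means no save slot is needed, -1
   means the register goes into the regular stack save area, and an FPR
   register number means it is saved in that FPR via ldgr/lgdr.  */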
7615 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
7616 for registers which need to be saved in function prologue.
7617 This function can be used until the insns emitted for save/restore
7618 of the regs are visible in the RTL stream. */
7620 static void
7621 s390_register_info ()
7623 int i, j;
7624 char clobbered_regs[32];
7626 gcc_assert (!epilogue_completed);
7628 if (reload_completed)
7629 /* After reload we rely on our own routine to determine which
7630 registers need saving. */
7631 s390_regs_ever_clobbered (clobbered_regs);
7632 else
7633 /* During reload we use regs_ever_live as a base since reload
7634 makes changes there which we otherwise would not be aware
7635 of. */
7636 for (i = 0; i < 32; i++)
7637 clobbered_regs[i] = df_regs_ever_live_p (i);
7639 for (i = 0; i < 32; i++)
7640 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
7642 /* Mark the call-saved FPRs which need to be saved.
7643 This needs to be done before checking the special GPRs since the
7644 stack pointer usage depends on whether high FPRs have to be saved
7645 or not. */
7646 cfun_frame_layout.fpr_bitmap = 0;
7647 cfun_frame_layout.high_fprs = 0;
7648 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
7649 if (clobbered_regs[i] && !call_really_used_regs[i])
7651 cfun_set_fpr_save (i);
7652 if (i >= FPR8_REGNUM)
7653 cfun_frame_layout.high_fprs++;
7656 if (flag_pic)
7657 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7658 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7660 clobbered_regs[BASE_REGNUM]
7661 |= (cfun->machine->base_reg
7662 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7664 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
7665 |= !!frame_pointer_needed;
7667 /* On pre-z900 machines this might take until machine-dependent
7668 reorg to decide.
7669 save_return_addr_p will only be set on non-zarch machines so
7670 there is no risk that r14 goes into an FPR instead of a stack
7671 slot. */
7672 clobbered_regs[RETURN_REGNUM]
7673 |= (!crtl->is_leaf
7674 || TARGET_TPF_PROFILING
7675 || cfun->machine->split_branches_pending_p
7676 || cfun_frame_layout.save_return_addr_p
7677 || crtl->calls_eh_return);
7679 clobbered_regs[STACK_POINTER_REGNUM]
7680 |= (!crtl->is_leaf
7681 || TARGET_TPF_PROFILING
7682 || cfun_save_high_fprs_p
7683 || get_frame_size () > 0
7684 || (reload_completed && cfun_frame_layout.frame_size > 0)
7685 || cfun->calls_alloca);
7687 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
7689 for (i = 6; i < 16; i++)
7690 if (clobbered_regs[i])
7691 cfun_gpr_save_slot (i) = -1;
7693 s390_register_info_stdarg_fpr ();
7694 s390_register_info_gprtofpr ();
7696 /* First find the range of GPRs to be restored. Vararg regs don't
7697 need to be restored so we do it before assigning slots to the
7698 vararg GPRs. */
7699 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7700 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7701 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
7702 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
7704 /* stdarg functions might need to save GPRs 2 to 6. This might
7705 override the GPR->FPR save decision made above for r6 since
7706 vararg regs must go to the stack. */
7707 s390_register_info_stdarg_gpr ();
7709 /* Now the range of GPRs which need saving. */
7710 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7711 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7712 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
7713 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
7716 /* This function is called by s390_optimize_prologue in order to get
7717 rid of unnecessary GPR save/restore instructions. The register info
7718 for the GPRs is re-computed and the ranges are re-calculated. */
7720 static void
7721 s390_optimize_register_info ()
7723 char clobbered_regs[32];
7724 int i, j;
7726 gcc_assert (epilogue_completed);
7727 gcc_assert (!cfun->machine->split_branches_pending_p);
7729 s390_regs_ever_clobbered (clobbered_regs);
7731 for (i = 0; i < 32; i++)
7732 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
7734 /* There is still special treatment needed for cases invisible to
7735 s390_regs_ever_clobbered. */
7736 clobbered_regs[RETURN_REGNUM]
7737 |= (TARGET_TPF_PROFILING
7738 /* When expanding builtin_return_addr in ESA mode we do not
7739 know whether r14 will later be needed as scratch reg when
7740 doing branch splitting. So the builtin always accesses the
7741 r14 save slot and we need to stick to the save/restore
7742 decision for r14 even if it turns out that it didn't get
7743 clobbered. */
7744 || cfun_frame_layout.save_return_addr_p
7745 || crtl->calls_eh_return);
7747 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
7749 for (i = 6; i < 16; i++)
7750 if (!clobbered_regs[i])
7751 cfun_gpr_save_slot (i) = 0;
7753 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7754 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7755 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
7756 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
7758 s390_register_info_stdarg_gpr ();
7760 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7761 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7762 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
7763 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
7766 /* Fill cfun->machine with info about the frame of the current function. */
7768 static void
7769 s390_frame_info (void)
7771 HOST_WIDE_INT lowest_offset;
7773 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
7774 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
7776 /* The va_arg builtin uses a constant distance of 16 *
7777 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
7778 pointer. So even if we are going to save the stack pointer in an
7779 FPR we need the stack space in order to keep the offsets
7780 correct. */
7781 if (cfun->stdarg && cfun_save_arg_fprs_p)
7783 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
7785 if (cfun_frame_layout.first_save_gpr_slot == -1)
7786 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
7789 cfun_frame_layout.frame_size = get_frame_size ();
7790 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7791 fatal_error (input_location,
7792 "total size of local variables exceeds architecture limit");
7794 if (!TARGET_PACKED_STACK)
7796 /* Fixed stack layout. */
7797 cfun_frame_layout.backchain_offset = 0;
7798 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7799 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7800 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7801 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7802 * UNITS_PER_LONG);
7804 else if (TARGET_BACKCHAIN)
7806 /* Kernel stack layout - packed stack, backchain, no float */
7807 gcc_assert (TARGET_SOFT_FLOAT);
7808 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7809 - UNITS_PER_LONG);
7811 /* The distance between the backchain and the return address
7812 save slot must not change. So we always need a slot for the
7813 stack pointer which resides in between. */
7814 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
7816 cfun_frame_layout.gprs_offset
7817 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
7819 /* FPRs will not be saved. Nevertheless pick sane values to
7820 keep area calculations valid. */
7821 cfun_frame_layout.f0_offset =
7822 cfun_frame_layout.f4_offset =
7823 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
7825 else
7827 int num_fprs;
7829 /* Packed stack layout without backchain. */
7831 /* With stdarg FPRs need their dedicated slots. */
7832 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
7833 : (cfun_fpr_save_p (FPR4_REGNUM) +
7834 cfun_fpr_save_p (FPR6_REGNUM)));
7835 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
7837 num_fprs = (cfun->stdarg ? 2
7838 : (cfun_fpr_save_p (FPR0_REGNUM)
7839 + cfun_fpr_save_p (FPR2_REGNUM)));
7840 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
7842 cfun_frame_layout.gprs_offset
7843 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7845 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
7846 - cfun_frame_layout.high_fprs * 8);
7849 if (cfun_save_high_fprs_p)
7850 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7852 if (!crtl->is_leaf)
7853 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7855 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
7856 sized area at the bottom of the stack. This is required also for
7857 leaf functions. When GCC generates a local stack reference it
7858 will always add STACK_POINTER_OFFSET to all these references. */
7859 if (crtl->is_leaf
7860 && !TARGET_TPF_PROFILING
7861 && cfun_frame_layout.frame_size == 0
7862 && !cfun->calls_alloca)
7863 return;
7865 /* Calculate the number of bytes we have used in our own register
7866 save area. With the packed stack layout we can re-use the
7867 remaining bytes for normal stack elements. */
7869 if (TARGET_PACKED_STACK)
7870 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
7871 cfun_frame_layout.f4_offset),
7872 cfun_frame_layout.gprs_offset);
7873 else
7874 lowest_offset = 0;
7876 if (TARGET_BACKCHAIN)
7877 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
7879 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
7881 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
7882 adjust the frame size to sustain 8 byte alignment of stack
7883 frames. */
7884 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7885 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7886 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
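/* The final statement rounds the frame size up to the stack boundary,
   e.g. with an 8-byte stack boundary a raw size of 52 becomes 56 while
   56 and 64 stay unchanged, which keeps 31-bit frames 8-byte aligned
   even when an odd number of 4-byte GPR slots was allocated.  */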
7889 /* Generate frame layout. Fills in register and frame data for the current
7890 function in cfun->machine. This routine can be called multiple times;
7891 it will re-do the complete frame layout every time. */
7893 static void
7894 s390_init_frame_layout (void)
7896 HOST_WIDE_INT frame_size;
7897 int base_used;
7899 gcc_assert (!reload_completed);
7901 /* On S/390 machines, we may need to perform branch splitting, which
7902 will require both base and return address register. We have no
7903 choice but to assume we're going to need them until right at the
7904 end of the machine-dependent reorg phase. */
7905 if (!TARGET_CPU_ZARCH)
7906 cfun->machine->split_branches_pending_p = true;
7910 frame_size = cfun_frame_layout.frame_size;
7912 /* Try to predict whether we'll need the base register. */
7913 base_used = cfun->machine->split_branches_pending_p
7914 || crtl->uses_const_pool
7915 || (!DISP_IN_RANGE (frame_size)
7916 && !CONST_OK_FOR_K (frame_size));
7918 /* Decide which register to use as literal pool base. In small
7919 leaf functions, try to use an unused call-clobbered register
7920 as base register to avoid save/restore overhead. */
7921 if (!base_used)
7922 cfun->machine->base_reg = NULL_RTX;
7923 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7924 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7925 else
7926 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7928 s390_register_info ();
7929 s390_frame_info ();
7931 while (frame_size != cfun_frame_layout.frame_size);
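/* The do/while loop above iterates to a fixed point: deciding whether a
   base register is needed depends on the frame size, but reserving the
   base register changes the register info and hence the frame size
   again, so the layout is recomputed until frame_size stops changing.  */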
7934 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
7935 the TX is nonescaping. A transaction is considered escaping if
7936 there is at least one path from tbegin returning CC0 to the
7937 function exit block without a tend.
7939 The check so far has some limitations:
7940 - only single tbegin/tend BBs are supported
7941 - the first cond jump after tbegin must separate the CC0 path from ~CC0
7942 - when CC is copied to a GPR and the CC0 check is done with the GPR
7943 this is not supported
7946 static void
7947 s390_optimize_nonescaping_tx (void)
7949 const unsigned int CC0 = 1 << 3;
7950 basic_block tbegin_bb = NULL;
7951 basic_block tend_bb = NULL;
7952 basic_block bb;
7953 rtx_insn *insn;
7954 bool result = true;
7955 int bb_index;
7956 rtx_insn *tbegin_insn = NULL;
7958 if (!cfun->machine->tbegin_p)
7959 return;
7961 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
7963 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
7965 if (!bb)
7966 continue;
7968 FOR_BB_INSNS (bb, insn)
7970 rtx ite, cc, pat, target;
7971 unsigned HOST_WIDE_INT mask;
7973 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
7974 continue;
7976 pat = PATTERN (insn);
7978 if (GET_CODE (pat) == PARALLEL)
7979 pat = XVECEXP (pat, 0, 0);
7981 if (GET_CODE (pat) != SET
7982 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
7983 continue;
7985 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
7987 rtx_insn *tmp;
7989 tbegin_insn = insn;
7991 /* Just return if the tbegin doesn't have clobbers. */
7992 if (GET_CODE (PATTERN (insn)) != PARALLEL)
7993 return;
7995 if (tbegin_bb != NULL)
7996 return;
7998 /* Find the next conditional jump. */
7999 for (tmp = NEXT_INSN (insn);
8000 tmp != NULL_RTX;
8001 tmp = NEXT_INSN (tmp))
8003 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
8004 return;
8005 if (!JUMP_P (tmp))
8006 continue;
8008 ite = SET_SRC (PATTERN (tmp));
8009 if (GET_CODE (ite) != IF_THEN_ELSE)
8010 continue;
8012 cc = XEXP (XEXP (ite, 0), 0);
8013 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
8014 || GET_MODE (cc) != CCRAWmode
8015 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
8016 return;
8018 if (bb->succs->length () != 2)
8019 return;
8021 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
8022 if (GET_CODE (XEXP (ite, 0)) == NE)
8023 mask ^= 0xf;
8025 if (mask == CC0)
8026 target = XEXP (ite, 1);
8027 else if (mask == (CC0 ^ 0xf))
8028 target = XEXP (ite, 2);
8029 else
8030 return;
8033 edge_iterator ei;
8034 edge e1, e2;
8036 ei = ei_start (bb->succs);
8037 e1 = ei_safe_edge (ei);
8038 ei_next (&ei);
8039 e2 = ei_safe_edge (ei);
8041 if (e2->flags & EDGE_FALLTHRU)
8043 e2 = e1;
8044 e1 = ei_safe_edge (ei);
8047 if (!(e1->flags & EDGE_FALLTHRU))
8048 return;
8050 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
8052 if (tmp == BB_END (bb))
8053 break;
8057 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
8059 if (tend_bb != NULL)
8060 return;
8061 tend_bb = bb;
8066 /* Either we successfully remove the FPR clobbers here or we are not
8067 able to do anything for this TX. Neither case qualifies for
8068 another look. */
8069 cfun->machine->tbegin_p = false;
8071 if (tbegin_bb == NULL || tend_bb == NULL)
8072 return;
8074 calculate_dominance_info (CDI_POST_DOMINATORS);
8075 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
8076 free_dominance_info (CDI_POST_DOMINATORS);
8078 if (!result)
8079 return;
8081 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
8082 gen_rtvec (2,
8083 XVECEXP (PATTERN (tbegin_insn), 0, 0),
8084 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
8085 INSN_CODE (tbegin_insn) = -1;
8086 df_insn_rescan (tbegin_insn);
8088 return;
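/* When the transaction is post-dominated by a tend, the PARALLEL of the
   tbegin insn is rebuilt from just its first two elements, which drops
   the remaining (FPR clobber) elements; presumably elements 0 and 1 are
   the tbegin operation itself and its CC effect.  The insn is then
   re-recognized and rescanned so the dataflow info reflects the smaller
   clobber set.  */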
8091 /* Return true if it is legal to put a value with MODE into REGNO. */
8093 bool
8094 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
8096 switch (REGNO_REG_CLASS (regno))
8098 case FP_REGS:
8099 if (REGNO_PAIR_OK (regno, mode))
8101 if (mode == SImode || mode == DImode)
8102 return true;
8104 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
8105 return true;
8107 break;
8108 case ADDR_REGS:
8109 if (FRAME_REGNO_P (regno) && mode == Pmode)
8110 return true;
8112 /* fallthrough */
8113 case GENERAL_REGS:
8114 if (REGNO_PAIR_OK (regno, mode))
8116 if (TARGET_ZARCH
8117 || (mode != TFmode && mode != TCmode && mode != TDmode))
8118 return true;
8120 break;
8121 case CC_REGS:
8122 if (GET_MODE_CLASS (mode) == MODE_CC)
8123 return true;
8124 break;
8125 case ACCESS_REGS:
8126 if (REGNO_PAIR_OK (regno, mode))
8128 if (mode == SImode || mode == Pmode)
8129 return true;
8131 break;
8132 default:
8133 return false;
8136 return false;
8139 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
8141 bool
8142 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
8144 /* Once we've decided upon a register to use as base register, it must
8145 no longer be used for any other purpose. */
8146 if (cfun->machine->base_reg)
8147 if (REGNO (cfun->machine->base_reg) == old_reg
8148 || REGNO (cfun->machine->base_reg) == new_reg)
8149 return false;
8151 /* Prevent regrename from using call-saved regs which haven't
8152 actually been saved. This is necessary since regrename assumes
8153 the backend save/restore decisions are based on
8154 df_regs_ever_live. Since we have our own routine we have to tell
8155 regrename manually about it. */
8156 if (GENERAL_REGNO_P (new_reg)
8157 && !call_really_used_regs[new_reg]
8158 && cfun_gpr_save_slot (new_reg) == 0)
8159 return false;
8161 return true;
8164 /* Return nonzero if register REGNO can be used as a scratch register
8165 in peephole2. */
8167 static bool
8168 s390_hard_regno_scratch_ok (unsigned int regno)
8170 /* See s390_hard_regno_rename_ok. */
8171 if (GENERAL_REGNO_P (regno)
8172 && !call_really_used_regs[regno]
8173 && cfun_gpr_save_slot (regno) == 0)
8174 return false;
8176 return true;
8179 /* Maximum number of registers to represent a value of mode MODE
8180 in a register of class RCLASS. */
8183 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
8185 switch (rclass)
8187 case FP_REGS:
8188 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8189 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
8190 else
8191 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
8192 case ACCESS_REGS:
8193 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
8194 default:
8195 break;
8197 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
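/* A couple of worked examples for the rules above: a TFmode value
   (16 bytes) in FP_REGS needs (16 + 7) / 8 = 2 registers, a complex
   TCmode value (32 bytes) needs 2 * ((32 / 2 + 7) / 8) = 4, and in the
   default case a DImode value needs one register on 64-bit targets but
   (8 + 3) / 4 = 2 on 31-bit targets where UNITS_PER_WORD is 4.  */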
8200 /* Return true if we use LRA instead of the reload pass. */
8201 static bool
8202 s390_lra_p (void)
8204 return s390_lra_flag;
8207 /* Return true if register FROM can be eliminated via register TO. */
8209 static bool
8210 s390_can_eliminate (const int from, const int to)
8212 /* On zSeries machines, we have not marked the base register as fixed.
8213 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
8214 If a function requires the base register, we say here that this
8215 elimination cannot be performed. This will cause reload to free
8216 up the base register (as if it were fixed). On the other hand,
8217 if the current function does *not* require the base register, we
8218 say here the elimination succeeds, which in turn allows reload
8219 to allocate the base register for any other purpose. */
8220 if (from == BASE_REGNUM && to == BASE_REGNUM)
8222 if (TARGET_CPU_ZARCH)
8224 s390_init_frame_layout ();
8225 return cfun->machine->base_reg == NULL_RTX;
8228 return false;
8231 /* Everything else must point into the stack frame. */
8232 gcc_assert (to == STACK_POINTER_REGNUM
8233 || to == HARD_FRAME_POINTER_REGNUM);
8235 gcc_assert (from == FRAME_POINTER_REGNUM
8236 || from == ARG_POINTER_REGNUM
8237 || from == RETURN_ADDRESS_POINTER_REGNUM);
8239 /* Make sure we actually saved the return address. */
8240 if (from == RETURN_ADDRESS_POINTER_REGNUM)
8241 if (!crtl->calls_eh_return
8242 && !cfun->stdarg
8243 && !cfun_frame_layout.save_return_addr_p)
8244 return false;
8246 return true;
8249 /* Return offset between register FROM and TO initially after prolog. */
8251 HOST_WIDE_INT
8252 s390_initial_elimination_offset (int from, int to)
8254 HOST_WIDE_INT offset;
8256 /* ??? Why are we called for non-eliminable pairs? */
8257 if (!s390_can_eliminate (from, to))
8258 return 0;
8260 switch (from)
8262 case FRAME_POINTER_REGNUM:
8263 offset = (get_frame_size()
8264 + STACK_POINTER_OFFSET
8265 + crtl->outgoing_args_size);
8266 break;
8268 case ARG_POINTER_REGNUM:
8269 s390_init_frame_layout ();
8270 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
8271 break;
8273 case RETURN_ADDRESS_POINTER_REGNUM:
8274 s390_init_frame_layout ();
8276 if (cfun_frame_layout.first_save_gpr_slot == -1)
8278 /* If it turns out that for stdarg nothing went into the reg
8279 save area we also do not need the return address
8280 pointer. */
8281 if (cfun->stdarg && !cfun_save_arg_fprs_p)
8282 return 0;
8284 gcc_unreachable ();
8287 /* In order to make the following work it is not necessary for
8288 r14 to have a save slot. It is sufficient if one other GPR
8289 got one. Since the GPRs are always stored without gaps we
8290 are able to calculate where the r14 save slot would
8291 reside. */
8292 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
8293 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
8294 UNITS_PER_LONG);
8295 break;
8297 case BASE_REGNUM:
8298 offset = 0;
8299 break;
8301 default:
8302 gcc_unreachable ();
8305 return offset;
8308 /* Emit insn to save fpr REGNUM at offset OFFSET relative
8309 to register BASE. Return generated insn. */
8311 static rtx
8312 save_fpr (rtx base, int offset, int regnum)
8314 rtx addr;
8315 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8317 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
8318 set_mem_alias_set (addr, get_varargs_alias_set ());
8319 else
8320 set_mem_alias_set (addr, get_frame_alias_set ());
8322 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
8325 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
8326 to register BASE. Return generated insn. */
8328 static rtx
8329 restore_fpr (rtx base, int offset, int regnum)
8331 rtx addr;
8332 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8333 set_mem_alias_set (addr, get_frame_alias_set ());
8335 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
8338 /* Return true if REGNO is a global register, but not one
8339 of the special ones that need to be saved/restored anyway. */
8341 static inline bool
8342 global_not_special_regno_p (int regno)
8344 return (global_regs[regno]
8345 /* These registers are special and need to be
8346 restored in any case. */
8347 && !(regno == STACK_POINTER_REGNUM
8348 || regno == RETURN_REGNUM
8349 || regno == BASE_REGNUM
8350 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8353 /* Generate insn to save registers FIRST to LAST into
8354 the register save area located at offset OFFSET
8355 relative to register BASE. */
8357 static rtx
8358 save_gprs (rtx base, int offset, int first, int last)
8360 rtx addr, insn, note;
8361 int i;
8363 addr = plus_constant (Pmode, base, offset);
8364 addr = gen_rtx_MEM (Pmode, addr);
8366 set_mem_alias_set (addr, get_frame_alias_set ());
8368 /* Special-case single register. */
8369 if (first == last)
8371 if (TARGET_64BIT)
8372 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8373 else
8374 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8376 if (!global_not_special_regno_p (first))
8377 RTX_FRAME_RELATED_P (insn) = 1;
8378 return insn;
8382 insn = gen_store_multiple (addr,
8383 gen_rtx_REG (Pmode, first),
8384 GEN_INT (last - first + 1));
8386 if (first <= 6 && cfun->stdarg)
8387 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8389 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8391 if (first + i <= 6)
8392 set_mem_alias_set (mem, get_varargs_alias_set ());
8395 /* We need to set the FRAME_RELATED flag on all SETs
8396 inside the store-multiple pattern.
8398 However, we must not emit DWARF records for registers 2..5
8399 if they are stored for use by variable arguments ...
8401 ??? Unfortunately, it is not enough to simply not set the
8402 FRAME_RELATED flags for those SETs, because the first SET
8403 of the PARALLEL is always treated as if it had the flag
8404 set, even if it does not. Therefore we emit a new pattern
8405 without those registers as REG_FRAME_RELATED_EXPR note. */
8407 if (first >= 6 && !global_not_special_regno_p (first))
8409 rtx pat = PATTERN (insn);
8411 for (i = 0; i < XVECLEN (pat, 0); i++)
8412 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8413 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8414 0, i)))))
8415 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8417 RTX_FRAME_RELATED_P (insn) = 1;
8419 else if (last >= 6)
8421 int start;
8423 for (start = first >= 6 ? first : 6; start <= last; start++)
8424 if (!global_not_special_regno_p (start))
8425 break;
8427 if (start > last)
8428 return insn;
8430 addr = plus_constant (Pmode, base,
8431 offset + (start - first) * UNITS_PER_LONG);
8433 if (start == last)
8435 if (TARGET_64BIT)
8436 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
8437 gen_rtx_REG (Pmode, start));
8438 else
8439 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
8440 gen_rtx_REG (Pmode, start));
8441 note = PATTERN (note);
8443 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8444 RTX_FRAME_RELATED_P (insn) = 1;
8446 return insn;
8449 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8450 gen_rtx_REG (Pmode, start),
8451 GEN_INT (last - start + 1));
8452 note = PATTERN (note);
8454 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8456 for (i = 0; i < XVECLEN (note, 0); i++)
8457 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8458 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8459 0, i)))))
8460 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8462 RTX_FRAME_RELATED_P (insn) = 1;
8465 return insn;
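/* Illustrative example (an assumption about a typical case, not a comment
   taken from the original source): for a stdarg function that stores
   r2..r15 with a single store-multiple, the stores of the argument
   registers are given the varargs alias set, and the
   REG_FRAME_RELATED_EXPR note attached above describes only the saves of
   r6..r15, so no DWARF records are emitted for the stores of r2..r5.  */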
8468 /* Generate insn to restore registers FIRST to LAST from
8469 the register save area located at offset OFFSET
8470 relative to register BASE. */
8472 static rtx
8473 restore_gprs (rtx base, int offset, int first, int last)
8475 rtx addr, insn;
8477 addr = plus_constant (Pmode, base, offset);
8478 addr = gen_rtx_MEM (Pmode, addr);
8479 set_mem_alias_set (addr, get_frame_alias_set ());
8481 /* Special-case single register. */
8482 if (first == last)
8484 if (TARGET_64BIT)
8485 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8486 else
8487 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8489 RTX_FRAME_RELATED_P (insn) = 1;
8490 return insn;
8493 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8494 addr,
8495 GEN_INT (last - first + 1));
8496 RTX_FRAME_RELATED_P (insn) = 1;
8497 return insn;
8500 /* Return insn sequence to load the GOT register. */
8502 static GTY(()) rtx got_symbol;
8503 rtx_insn *
8504 s390_load_got (void)
8506 rtx_insn *insns;
8508 /* We cannot use pic_offset_table_rtx here since we use this
8509 function also for non-pic if __tls_get_offset is called and in
8510 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8511 aren't usable. */
8512 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8514 if (!got_symbol)
8516 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8517 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8520 start_sequence ();
8522 if (TARGET_CPU_ZARCH)
8524 emit_move_insn (got_rtx, got_symbol);
8526 else
8528 rtx offset;
8530 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8531 UNSPEC_LTREL_OFFSET);
8532 offset = gen_rtx_CONST (Pmode, offset);
8533 offset = force_const_mem (Pmode, offset);
8535 emit_move_insn (got_rtx, offset);
8537 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8538 UNSPEC_LTREL_BASE);
8539 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8541 emit_move_insn (got_rtx, offset);
8544 insns = get_insns ();
8545 end_sequence ();
8546 return insns;
8549 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8550 and the change to the stack pointer. */
8552 static void
8553 s390_emit_stack_tie (void)
8555 rtx mem = gen_frame_mem (BLKmode,
8556 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8558 emit_insn (gen_stack_tie (mem));
8561 /* Copy GPRS into FPR save slots. */
8563 static void
8564 s390_save_gprs_to_fprs (void)
8566 int i;
8568 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8569 return;
8571 for (i = 6; i < 16; i++)
8573 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
8575 rtx_insn *insn =
8576 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
8577 gen_rtx_REG (DImode, i));
8578 RTX_FRAME_RELATED_P (insn) = 1;
8583 /* Restore GPRs from FPR save slots. */
8585 static void
8586 s390_restore_gprs_from_fprs (void)
8588 int i;
8590 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8591 return;
8593 for (i = 6; i < 16; i++)
8595 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
8597 rtx_insn *insn =
8598 emit_move_insn (gen_rtx_REG (DImode, i),
8599 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
8600 df_set_regs_ever_live (i, true);
8601 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
8602 if (i == STACK_POINTER_REGNUM)
8603 add_reg_note (insn, REG_CFA_DEF_CFA,
8604 plus_constant (Pmode, stack_pointer_rtx,
8605 STACK_POINTER_OFFSET));
8606 RTX_FRAME_RELATED_P (insn) = 1;
8612 /* A pass run immediately before shrink-wrapping and prologue and epilogue
8613 generation. */
8615 namespace {
8617 const pass_data pass_data_s390_early_mach =
8619 RTL_PASS, /* type */
8620 "early_mach", /* name */
8621 OPTGROUP_NONE, /* optinfo_flags */
8622 TV_MACH_DEP, /* tv_id */
8623 0, /* properties_required */
8624 0, /* properties_provided */
8625 0, /* properties_destroyed */
8626 0, /* todo_flags_start */
8627 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
8630 class pass_s390_early_mach : public rtl_opt_pass
8632 public:
8633 pass_s390_early_mach (gcc::context *ctxt)
8634 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
8637 /* opt_pass methods: */
8638 virtual unsigned int execute (function *);
8640 }; // class pass_s390_early_mach
8642 unsigned int
8643 pass_s390_early_mach::execute (function *fun)
8645 rtx_insn *insn;
8647 /* Try to get rid of the FPR clobbers. */
8648 s390_optimize_nonescaping_tx ();
8650 /* Re-compute register info. */
8651 s390_register_info ();
8653 /* If we're using a base register, ensure that it is always valid for
8654 the first non-prologue instruction. */
8655 if (fun->machine->base_reg)
8656 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
8658 /* Annotate all constant pool references to let the scheduler know
8659 they implicitly use the base register. */
8660 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8661 if (INSN_P (insn))
8663 annotate_constant_pool_refs (&PATTERN (insn));
8664 df_insn_rescan (insn);
8666 return 0;
8669 } // anon namespace
8671 /* Expand the prologue into a bunch of separate insns. */
8673 void
8674 s390_emit_prologue (void)
8676 rtx insn, addr;
8677 rtx temp_reg;
8678 int i;
8679 int offset;
8680 int next_fpr = 0;
8682 /* Choose best register to use for temp use within prologue.
8683 See below for why TPF must use register 1. */
8685 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8686 && !crtl->is_leaf
8687 && !TARGET_TPF_PROFILING)
8688 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8689 else
8690 temp_reg = gen_rtx_REG (Pmode, 1);
8692 s390_save_gprs_to_fprs ();
8694 /* Save call saved gprs. */
8695 if (cfun_frame_layout.first_save_gpr != -1)
8697 insn = save_gprs (stack_pointer_rtx,
8698 cfun_frame_layout.gprs_offset +
8699 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8700 - cfun_frame_layout.first_save_gpr_slot),
8701 cfun_frame_layout.first_save_gpr,
8702 cfun_frame_layout.last_save_gpr);
8703 emit_insn (insn);
8706 /* Dummy insn to mark literal pool slot. */
8708 if (cfun->machine->base_reg)
8709 emit_insn (gen_main_pool (cfun->machine->base_reg));
8711 offset = cfun_frame_layout.f0_offset;
8713 /* Save f0 and f2. */
8714 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
8716 if (cfun_fpr_save_p (i))
8718 save_fpr (stack_pointer_rtx, offset, i);
8719 offset += 8;
8721 else if (!TARGET_PACKED_STACK || cfun->stdarg)
8722 offset += 8;
8725 /* Save f4 and f6. */
8726 offset = cfun_frame_layout.f4_offset;
8727 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
8729 if (cfun_fpr_save_p (i))
8731 insn = save_fpr (stack_pointer_rtx, offset, i);
8732 offset += 8;
8734 /* If f4 and f6 are call clobbered they are saved due to
8735 stdargs and therefore are not frame related. */
8736 if (!call_really_used_regs[i])
8737 RTX_FRAME_RELATED_P (insn) = 1;
8739 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
8740 offset += 8;
8743 if (TARGET_PACKED_STACK
8744 && cfun_save_high_fprs_p
8745 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8747 offset = (cfun_frame_layout.f8_offset
8748 + (cfun_frame_layout.high_fprs - 1) * 8);
8750 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
8751 if (cfun_fpr_save_p (i))
8753 insn = save_fpr (stack_pointer_rtx, offset, i);
8755 RTX_FRAME_RELATED_P (insn) = 1;
8756 offset -= 8;
8758 if (offset >= cfun_frame_layout.f8_offset)
8759 next_fpr = i;
8762 if (!TARGET_PACKED_STACK)
8763 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
8765 if (flag_stack_usage_info)
8766 current_function_static_stack_size = cfun_frame_layout.frame_size;
8768 /* Decrement stack pointer. */
8770 if (cfun_frame_layout.frame_size > 0)
8772 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8773 rtx real_frame_off;
8775 if (s390_stack_size)
8777 HOST_WIDE_INT stack_guard;
8779 if (s390_stack_guard)
8780 stack_guard = s390_stack_guard;
8781 else
8783 /* If no stack guard value is provided, the smallest power of 2
8784 that is at least as large as the current frame size is chosen. */
8785 stack_guard = 1;
8786 while (stack_guard < cfun_frame_layout.frame_size)
8787 stack_guard <<= 1;
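/* Illustrative example (not part of the original source): a frame size
   of 5000 bytes yields stack_guard = 8192, the smallest power of 2 that
   is not smaller than the frame size.  */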
8790 if (cfun_frame_layout.frame_size >= s390_stack_size)
8792 warning (0, "frame size of function %qs is %wd"
8793 " bytes exceeding user provided stack limit of "
8794 "%d bytes. "
8795 "An unconditional trap is added.",
8796 current_function_name(), cfun_frame_layout.frame_size,
8797 s390_stack_size);
8798 emit_insn (gen_trap ());
8800 else
8802 /* stack_guard has to be smaller than s390_stack_size.
8803 Otherwise we would emit an AND with zero which would
8804 not match the test under mask pattern. */
8805 if (stack_guard >= s390_stack_size)
8807 warning (0, "frame size of function %qs is %wd"
8808 " bytes which is more than half the stack size. "
8809 "The dynamic check would not be reliable. "
8810 "No check emitted for this function.",
8811 current_function_name(),
8812 cfun_frame_layout.frame_size);
8814 else
8816 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8817 & ~(stack_guard - 1));
8819 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8820 GEN_INT (stack_check_mask));
8821 if (TARGET_64BIT)
8822 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8823 t, const0_rtx),
8824 t, const0_rtx, const0_rtx));
8825 else
8826 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8827 t, const0_rtx),
8828 t, const0_rtx, const0_rtx));
8833 if (s390_warn_framesize > 0
8834 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8835 warning (0, "frame size of %qs is %wd bytes",
8836 current_function_name (), cfun_frame_layout.frame_size);
8838 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8839 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8841 /* Save incoming stack pointer into temp reg. */
8842 if (TARGET_BACKCHAIN || next_fpr)
8843 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8845 /* Subtract frame size from stack pointer. */
8847 if (DISP_IN_RANGE (INTVAL (frame_off)))
8849 insn = gen_rtx_SET (stack_pointer_rtx,
8850 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8851 frame_off));
8852 insn = emit_insn (insn);
8854 else
8856 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8857 frame_off = force_const_mem (Pmode, frame_off);
8859 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8860 annotate_constant_pool_refs (&PATTERN (insn));
8863 RTX_FRAME_RELATED_P (insn) = 1;
8864 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8865 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8866 gen_rtx_SET (stack_pointer_rtx,
8867 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8868 real_frame_off)));
8870 /* Set backchain. */
8872 if (TARGET_BACKCHAIN)
8874 if (cfun_frame_layout.backchain_offset)
8875 addr = gen_rtx_MEM (Pmode,
8876 plus_constant (Pmode, stack_pointer_rtx,
8877 cfun_frame_layout.backchain_offset));
8878 else
8879 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8880 set_mem_alias_set (addr, get_frame_alias_set ());
8881 insn = emit_insn (gen_move_insn (addr, temp_reg));
8884 /* If we support non-call exceptions (e.g. for Java),
8885 we need to make sure the backchain pointer is set up
8886 before any possibly trapping memory access. */
8887 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8889 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8890 emit_clobber (addr);
8894 /* Save fprs 8 - 15 (64 bit ABI). */
8896 if (cfun_save_high_fprs_p && next_fpr)
8898 /* If the stack might be accessed through a different register
8899 we have to make sure that the stack pointer decrement is not
8900 moved below the use of the stack slots. */
8901 s390_emit_stack_tie ();
8903 insn = emit_insn (gen_add2_insn (temp_reg,
8904 GEN_INT (cfun_frame_layout.f8_offset)));
8906 offset = 0;
8908 for (i = FPR8_REGNUM; i <= next_fpr; i++)
8909 if (cfun_fpr_save_p (i))
8911 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8912 cfun_frame_layout.frame_size
8913 + cfun_frame_layout.f8_offset
8914 + offset);
8916 insn = save_fpr (temp_reg, offset, i);
8917 offset += 8;
8918 RTX_FRAME_RELATED_P (insn) = 1;
8919 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8920 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
8921 gen_rtx_REG (DFmode, i)));
8925 /* Set frame pointer, if needed. */
8927 if (frame_pointer_needed)
8929 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8930 RTX_FRAME_RELATED_P (insn) = 1;
8933 /* Set up got pointer, if needed. */
8935 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8937 rtx_insn *insns = s390_load_got ();
8939 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
8940 annotate_constant_pool_refs (&PATTERN (insn));
8942 emit_insn (insns);
8945 if (TARGET_TPF_PROFILING)
8947 /* Generate a BAS instruction to serve as a function
8948 entry intercept to facilitate the use of tracing
8949 algorithms located at the branch target. */
8950 emit_insn (gen_prologue_tpf ());
8952 /* Emit a blockage here so that all code
8953 lies between the profiling mechanisms. */
8954 emit_insn (gen_blockage ());
8958 /* Expand the epilogue into a bunch of separate insns. */
8960 void
8961 s390_emit_epilogue (bool sibcall)
8963 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8964 int area_bottom, area_top, offset = 0;
8965 int next_offset;
8966 rtvec p;
8967 int i;
8969 if (TARGET_TPF_PROFILING)
8972 /* Generate a BAS instruction to serve as a function
8973 exit intercept to facilitate the use of tracing
8974 algorithms located at the branch target. */
8976 /* Emit a blockage here so that all code
8977 lies between the profiling mechanisms. */
8978 emit_insn (gen_blockage ());
8980 emit_insn (gen_epilogue_tpf ());
8983 /* Check whether to use frame or stack pointer for restore. */
8985 frame_pointer = (frame_pointer_needed
8986 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8988 s390_frame_area (&area_bottom, &area_top);
8990 /* Check whether we can access the register save area.
8991 If not, increment the frame pointer as required. */
8993 if (area_top <= area_bottom)
8995 /* Nothing to restore. */
8997 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8998 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
9000 /* Area is in range. */
9001 offset = cfun_frame_layout.frame_size;
9003 else
9005 rtx insn, frame_off, cfa;
9007 offset = area_bottom < 0 ? -area_bottom : 0;
9008 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
9010 cfa = gen_rtx_SET (frame_pointer,
9011 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
9012 if (DISP_IN_RANGE (INTVAL (frame_off)))
9014 insn = gen_rtx_SET (frame_pointer,
9015 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
9016 insn = emit_insn (insn);
9018 else
9020 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
9021 frame_off = force_const_mem (Pmode, frame_off);
9023 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
9024 annotate_constant_pool_refs (&PATTERN (insn));
9026 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
9027 RTX_FRAME_RELATED_P (insn) = 1;
9030 /* Restore call saved fprs. */
9032 if (TARGET_64BIT)
9034 if (cfun_save_high_fprs_p)
9036 next_offset = cfun_frame_layout.f8_offset;
9037 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
9039 if (cfun_fpr_save_p (i))
9041 restore_fpr (frame_pointer,
9042 offset + next_offset, i);
9043 cfa_restores
9044 = alloc_reg_note (REG_CFA_RESTORE,
9045 gen_rtx_REG (DFmode, i), cfa_restores);
9046 next_offset += 8;
9052 else
9054 next_offset = cfun_frame_layout.f4_offset;
9055 /* f4, f6 */
9056 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
9058 if (cfun_fpr_save_p (i))
9060 restore_fpr (frame_pointer,
9061 offset + next_offset, i);
9062 cfa_restores
9063 = alloc_reg_note (REG_CFA_RESTORE,
9064 gen_rtx_REG (DFmode, i), cfa_restores);
9065 next_offset += 8;
9067 else if (!TARGET_PACKED_STACK)
9068 next_offset += 8;
9073 /* Return register. */
9075 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
9077 /* Restore call saved gprs. */
9079 if (cfun_frame_layout.first_restore_gpr != -1)
9081 rtx insn, addr;
9082 int i;
9084 /* Check for global registers and save them
9085 to the stack location from where they get restored. */
9087 for (i = cfun_frame_layout.first_restore_gpr;
9088 i <= cfun_frame_layout.last_restore_gpr;
9089 i++)
9091 if (global_not_special_regno_p (i))
9093 addr = plus_constant (Pmode, frame_pointer,
9094 offset + cfun_frame_layout.gprs_offset
9095 + (i - cfun_frame_layout.first_save_gpr_slot)
9096 * UNITS_PER_LONG);
9097 addr = gen_rtx_MEM (Pmode, addr);
9098 set_mem_alias_set (addr, get_frame_alias_set ());
9099 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
9101 else
9102 cfa_restores
9103 = alloc_reg_note (REG_CFA_RESTORE,
9104 gen_rtx_REG (Pmode, i), cfa_restores);
9107 if (! sibcall)
9109 /* Fetch return address from stack before load multiple;
9110 this helps scheduling.
9112 Only do this if we already decided that r14 needs to be
9113 saved to a stack slot. (And not just because r14 happens to
9114 be in between two GPRs which need saving.) Otherwise it
9115 would be difficult to take that decision back in
9116 s390_optimize_prologue. */
9117 if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
9119 int return_regnum = find_unused_clobbered_reg();
9120 if (!return_regnum)
9121 return_regnum = 4;
9122 return_reg = gen_rtx_REG (Pmode, return_regnum);
9124 addr = plus_constant (Pmode, frame_pointer,
9125 offset + cfun_frame_layout.gprs_offset
9126 + (RETURN_REGNUM
9127 - cfun_frame_layout.first_save_gpr_slot)
9128 * UNITS_PER_LONG);
9129 addr = gen_rtx_MEM (Pmode, addr);
9130 set_mem_alias_set (addr, get_frame_alias_set ());
9131 emit_move_insn (return_reg, addr);
9133 /* Once we have done that optimization we have to make sure
9134 s390_optimize_prologue does not try to remove the
9135 store of r14 since we will not be able to find the
9136 load issued here. */
9137 cfun_frame_layout.save_return_addr_p = true;
9141 insn = restore_gprs (frame_pointer,
9142 offset + cfun_frame_layout.gprs_offset
9143 + (cfun_frame_layout.first_restore_gpr
9144 - cfun_frame_layout.first_save_gpr_slot)
9145 * UNITS_PER_LONG,
9146 cfun_frame_layout.first_restore_gpr,
9147 cfun_frame_layout.last_restore_gpr);
9148 insn = emit_insn (insn);
9149 REG_NOTES (insn) = cfa_restores;
9150 add_reg_note (insn, REG_CFA_DEF_CFA,
9151 plus_constant (Pmode, stack_pointer_rtx,
9152 STACK_POINTER_OFFSET));
9153 RTX_FRAME_RELATED_P (insn) = 1;
9156 s390_restore_gprs_from_fprs ();
9158 if (! sibcall)
9161 /* Return to caller. */
9163 p = rtvec_alloc (2);
9165 RTVEC_ELT (p, 0) = ret_rtx;
9166 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
9167 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
9171 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
9173 static void
9174 s300_set_up_by_prologue (hard_reg_set_container *regs)
9176 if (cfun->machine->base_reg
9177 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
9178 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
9181 /* Return true if the function can use simple_return to return outside
9182 of a shrink-wrapped region. At present shrink-wrapping is supported
9183 in all cases. */
9185 bool
9186 s390_can_use_simple_return_insn (void)
9188 return true;
9191 /* Return true if the epilogue is guaranteed to contain only a return
9192 instruction and if a direct return can therefore be used instead.
9193 One of the main advantages of using direct return instructions
9194 is that we can then use conditional returns. */
9196 bool
9197 s390_can_use_return_insn (void)
9199 int i;
9201 if (!reload_completed)
9202 return false;
9204 if (crtl->profile)
9205 return false;
9207 if (TARGET_TPF_PROFILING)
9208 return false;
9210 for (i = 0; i < 16; i++)
9211 if (cfun_gpr_save_slot (i))
9212 return false;
9214 /* For 31 bit this is not covered by the frame_size check below
9215 since f4, f6 are saved in the register save area without needing
9216 additional stack space. */
9217 if (!TARGET_64BIT
9218 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
9219 return false;
9221 if (cfun->machine->base_reg
9222 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
9223 return false;
9225 return cfun_frame_layout.frame_size == 0;
9228 /* Return the size in bytes of a function argument of
9229 type TYPE and/or mode MODE. At least one of TYPE or
9230 MODE must be specified. */
9232 static int
9233 s390_function_arg_size (machine_mode mode, const_tree type)
9235 if (type)
9236 return int_size_in_bytes (type);
9238 /* No type info available for some library calls ... */
9239 if (mode != BLKmode)
9240 return GET_MODE_SIZE (mode);
9242 /* If we have neither type nor mode, abort */
9243 gcc_unreachable ();
9246 /* Return true if a function argument of type TYPE and mode MODE
9247 is to be passed in a floating-point register, if available. */
9249 static bool
9250 s390_function_arg_float (machine_mode mode, const_tree type)
9252 int size = s390_function_arg_size (mode, type);
9253 if (size > 8)
9254 return false;
9256 /* Soft-float changes the ABI: no floating-point registers are used. */
9257 if (TARGET_SOFT_FLOAT)
9258 return false;
9260 /* No type info available for some library calls ... */
9261 if (!type)
9262 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
9264 /* The ABI says that record types with a single member are treated
9265 just like that member would be. */
9266 while (TREE_CODE (type) == RECORD_TYPE)
9268 tree field, single = NULL_TREE;
9270 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
9272 if (TREE_CODE (field) != FIELD_DECL)
9273 continue;
9275 if (single == NULL_TREE)
9276 single = TREE_TYPE (field);
9277 else
9278 return false;
9281 if (single == NULL_TREE)
9282 return false;
9283 else
9284 type = single;
9287 return TREE_CODE (type) == REAL_TYPE;
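/* Illustrative examples (assumptions about typical types, not code from
   this file): given the rules above, "struct { double d; }" is passed
   like a plain double, i.e. in an FPR, and a struct whose single member
   is itself such a struct unwraps the same way.  A struct with two
   members, e.g. "struct { float a; float b; }", is not passed in an FPR
   and falls back to the integer/aggregate rules.  Under -msoft-float no
   FPRs are used for argument passing at all.  */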
9290 /* Return true if a function argument of type TYPE and mode MODE
9291 is to be passed in an integer register, or a pair of integer
9292 registers, if available. */
9294 static bool
9295 s390_function_arg_integer (machine_mode mode, const_tree type)
9297 int size = s390_function_arg_size (mode, type);
9298 if (size > 8)
9299 return false;
9301 /* No type info available for some library calls ... */
9302 if (!type)
9303 return GET_MODE_CLASS (mode) == MODE_INT
9304 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
9306 /* We accept small integral (and similar) types. */
9307 if (INTEGRAL_TYPE_P (type)
9308 || POINTER_TYPE_P (type)
9309 || TREE_CODE (type) == NULLPTR_TYPE
9310 || TREE_CODE (type) == OFFSET_TYPE
9311 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
9312 return true;
9314 /* We also accept structs of size 1, 2, 4, 8 that are not
9315 passed in floating-point registers. */
9316 if (AGGREGATE_TYPE_P (type)
9317 && exact_log2 (size) >= 0
9318 && !s390_function_arg_float (mode, type))
9319 return true;
9321 return false;
9324 /* Return 1 if a function argument of type TYPE and mode MODE
9325 is to be passed by reference. The ABI specifies that only
9326 structures of size 1, 2, 4, or 8 bytes are passed by value,
9327 all other structures (and complex numbers) are passed by
9328 reference. */
9330 static bool
9331 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
9332 machine_mode mode, const_tree type,
9333 bool named ATTRIBUTE_UNUSED)
9335 int size = s390_function_arg_size (mode, type);
9336 if (size > 8)
9337 return true;
9339 if (type)
9341 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
9342 return 1;
9344 if (TREE_CODE (type) == COMPLEX_TYPE
9345 || TREE_CODE (type) == VECTOR_TYPE)
9346 return 1;
9349 return 0;
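/* Illustrative examples (assumptions, not code from this file): a
   "struct { long l; }" (size 8) and a "struct { char c[4]; }" (size 4)
   are passed by value, while "struct { char c[3]; }" (size 3, not a
   power of 2), any struct larger than 8 bytes, "_Complex double" and
   vector types are passed by reference.  */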
9352 /* Update the data in CUM to advance over an argument of mode MODE and
9353 data type TYPE. (TYPE is null for libcalls where that information
9354 may not be available.). The boolean NAMED specifies whether the
9355 argument is a named argument (as opposed to an unnamed argument
9356 matching an ellipsis). */
9358 static void
9359 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9360 const_tree type, bool named ATTRIBUTE_UNUSED)
9362 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9364 if (s390_function_arg_float (mode, type))
9366 cum->fprs += 1;
9368 else if (s390_function_arg_integer (mode, type))
9370 int size = s390_function_arg_size (mode, type);
9371 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
9373 else
9374 gcc_unreachable ();
9377 /* Define where to put the arguments to a function.
9378 Value is zero to push the argument on the stack,
9379 or a hard register in which to store the argument.
9381 MODE is the argument's machine mode.
9382 TYPE is the data type of the argument (as a tree).
9383 This is null for libcalls where that information may
9384 not be available.
9385 CUM is a variable of type CUMULATIVE_ARGS which gives info about
9386 the preceding args and about the function being called.
9387 NAMED is nonzero if this argument is a named parameter
9388 (otherwise it is an extra parameter matching an ellipsis).
9390 On S/390, we use general purpose registers 2 through 6 to
9391 pass integer, pointer, and certain structure arguments, and
9392 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
9393 to pass floating point arguments. All remaining arguments
9394 are pushed to the stack. */
9396 static rtx
9397 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
9398 const_tree type, bool named ATTRIBUTE_UNUSED)
9400 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9402 if (s390_function_arg_float (mode, type))
9404 if (cum->fprs + 1 > FP_ARG_NUM_REG)
9405 return 0;
9406 else
9407 return gen_rtx_REG (mode, cum->fprs + 16);
9409 else if (s390_function_arg_integer (mode, type))
9411 int size = s390_function_arg_size (mode, type);
9412 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9414 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
9415 return 0;
9416 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
9417 return gen_rtx_REG (mode, cum->gprs + 2);
9418 else if (n_gprs == 2)
9420 rtvec p = rtvec_alloc (2);
9422 RTVEC_ELT (p, 0)
9423 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
9424 const0_rtx);
9425 RTVEC_ELT (p, 1)
9426 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
9427 GEN_INT (4));
9429 return gen_rtx_PARALLEL (mode, p);
9433 /* After the real arguments, expand_call calls us once again
9434 with a void_type_node type. Whatever we return here is
9435 passed as operand 2 to the call expanders.
9437 We don't need this feature ... */
9438 else if (type == void_type_node)
9439 return const0_rtx;
9441 gcc_unreachable ();
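/* Illustrative example (an assumption about a typical 64-bit call, not
   code from this file): for "void f (int a, double b, long c, double d)"
   the arguments are assigned a -> r2, b -> f0, c -> r3, d -> f2.  Once
   the argument GPRs r2..r6 or the argument FPRs (f0, f2, f4, f6 on
   64-bit) are used up, the routine above returns 0 for the remaining
   arguments and they are pushed on the stack by the caller.  */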
9444 /* Return true if return values of type TYPE should be returned
9445 in a memory buffer whose address is passed by the caller as
9446 hidden first argument. */
9448 static bool
9449 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
9451 /* We accept small integral (and similar) types. */
9452 if (INTEGRAL_TYPE_P (type)
9453 || POINTER_TYPE_P (type)
9454 || TREE_CODE (type) == OFFSET_TYPE
9455 || TREE_CODE (type) == REAL_TYPE)
9456 return int_size_in_bytes (type) > 8;
9458 /* Aggregates and similar constructs are always returned
9459 in memory. */
9460 if (AGGREGATE_TYPE_P (type)
9461 || TREE_CODE (type) == COMPLEX_TYPE
9462 || TREE_CODE (type) == VECTOR_TYPE)
9463 return true;
9465 /* ??? We get called on all sorts of random stuff from
9466 aggregate_value_p. We can't abort, but it's not clear
9467 what's safe to return. Pretend it's a struct I guess. */
9468 return true;
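/* Illustrative examples (assumptions, not code from this file): scalars
   of up to 8 bytes such as int, long, pointers and double are returned
   in registers, whereas any struct or union, complex values and 16-byte
   integers (__int128) are returned through a hidden pointer supplied by
   the caller.  */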
9471 /* Function arguments and return values are promoted to word size. */
9473 static machine_mode
9474 s390_promote_function_mode (const_tree type, machine_mode mode,
9475 int *punsignedp,
9476 const_tree fntype ATTRIBUTE_UNUSED,
9477 int for_return ATTRIBUTE_UNUSED)
9479 if (INTEGRAL_MODE_P (mode)
9480 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
9482 if (type != NULL_TREE && POINTER_TYPE_P (type))
9483 *punsignedp = POINTERS_EXTEND_UNSIGNED;
9484 return Pmode;
9487 return mode;
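/* Illustrative example (not taken from the original source): on a
   64-bit target a "short" or "char" argument or return value is
   promoted to DImode (Pmode), so callers and callees always operate on
   full 64-bit registers for small integers.  */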
9490 /* Define where to return a (scalar) value of type RET_TYPE.
9491 If RET_TYPE is null, define where to return a (scalar)
9492 value of mode MODE from a libcall. */
9494 static rtx
9495 s390_function_and_libcall_value (machine_mode mode,
9496 const_tree ret_type,
9497 const_tree fntype_or_decl,
9498 bool outgoing ATTRIBUTE_UNUSED)
9500 /* For normal functions perform the promotion as
9501 promote_function_mode would do. */
9502 if (ret_type)
9504 int unsignedp = TYPE_UNSIGNED (ret_type);
9505 mode = promote_function_mode (ret_type, mode, &unsignedp,
9506 fntype_or_decl, 1);
9509 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
9510 gcc_assert (GET_MODE_SIZE (mode) <= 8);
9512 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
9513 return gen_rtx_REG (mode, 16);
9514 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
9515 || UNITS_PER_LONG == UNITS_PER_WORD)
9516 return gen_rtx_REG (mode, 2);
9517 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
9519 /* This case is triggered when returning a 64 bit value with
9520 -m31 -mzarch. Although the value would fit into a single
9521 register it has to be forced into a 32 bit register pair in
9522 order to match the ABI. */
9523 rtvec p = rtvec_alloc (2);
9525 RTVEC_ELT (p, 0)
9526 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
9527 RTVEC_ELT (p, 1)
9528 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9530 return gen_rtx_PARALLEL (mode, p);
9533 gcc_unreachable ();
9536 /* Define where to return a scalar return value of type RET_TYPE. */
9538 static rtx
9539 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9540 bool outgoing)
9542 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9543 fn_decl_or_type, outgoing);
9546 /* Define where to return a scalar libcall return value of mode
9547 MODE. */
9549 static rtx
9550 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9552 return s390_function_and_libcall_value (mode, NULL_TREE,
9553 NULL_TREE, true);
9557 /* Create and return the va_list datatype.
9559 On S/390, va_list is an array type equivalent to
9561 typedef struct __va_list_tag
9563 long __gpr;
9564 long __fpr;
9565 void *__overflow_arg_area;
9566 void *__reg_save_area;
9567 } va_list[1];
9569 where __gpr and __fpr hold the number of general purpose
9570 or floating point arguments used up to now, respectively,
9571 __overflow_arg_area points to the stack location of the
9572 next argument passed on the stack, and __reg_save_area
9573 always points to the start of the register area in the
9574 call frame of the current function. The function prologue
9575 saves all registers used for argument passing into this
9576 area if the function uses variable arguments. */
9578 static tree
9579 s390_build_builtin_va_list (void)
9581 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9583 record = lang_hooks.types.make_type (RECORD_TYPE);
9585 type_decl =
9586 build_decl (BUILTINS_LOCATION,
9587 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9589 f_gpr = build_decl (BUILTINS_LOCATION,
9590 FIELD_DECL, get_identifier ("__gpr"),
9591 long_integer_type_node);
9592 f_fpr = build_decl (BUILTINS_LOCATION,
9593 FIELD_DECL, get_identifier ("__fpr"),
9594 long_integer_type_node);
9595 f_ovf = build_decl (BUILTINS_LOCATION,
9596 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9597 ptr_type_node);
9598 f_sav = build_decl (BUILTINS_LOCATION,
9599 FIELD_DECL, get_identifier ("__reg_save_area"),
9600 ptr_type_node);
9602 va_list_gpr_counter_field = f_gpr;
9603 va_list_fpr_counter_field = f_fpr;
9605 DECL_FIELD_CONTEXT (f_gpr) = record;
9606 DECL_FIELD_CONTEXT (f_fpr) = record;
9607 DECL_FIELD_CONTEXT (f_ovf) = record;
9608 DECL_FIELD_CONTEXT (f_sav) = record;
9610 TYPE_STUB_DECL (record) = type_decl;
9611 TYPE_NAME (record) = type_decl;
9612 TYPE_FIELDS (record) = f_gpr;
9613 DECL_CHAIN (f_gpr) = f_fpr;
9614 DECL_CHAIN (f_fpr) = f_ovf;
9615 DECL_CHAIN (f_ovf) = f_sav;
9617 layout_type (record);
9619 /* The correct type is an array type of one element. */
9620 return build_array_type (record, build_index_type (size_zero_node));
9623 /* Implement va_start by filling the va_list structure VALIST.
9624 STDARG_P is always true, and ignored.
9625 NEXTARG points to the first anonymous stack argument.
9627 The following global variables are used to initialize
9628 the va_list structure:
9630 crtl->args.info:
9631 holds number of gprs and fprs used for named arguments.
9632 crtl->args.arg_offset_rtx:
9633 holds the offset of the first anonymous stack argument
9634 (relative to the virtual arg pointer). */
9636 static void
9637 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9639 HOST_WIDE_INT n_gpr, n_fpr;
9640 int off;
9641 tree f_gpr, f_fpr, f_ovf, f_sav;
9642 tree gpr, fpr, ovf, sav, t;
9644 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9645 f_fpr = DECL_CHAIN (f_gpr);
9646 f_ovf = DECL_CHAIN (f_fpr);
9647 f_sav = DECL_CHAIN (f_ovf);
9649 valist = build_simple_mem_ref (valist);
9650 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9651 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9652 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9653 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9655 /* Count number of gp and fp argument registers used. */
9657 n_gpr = crtl->args.info.gprs;
9658 n_fpr = crtl->args.info.fprs;
9660 if (cfun->va_list_gpr_size)
9662 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9663 build_int_cst (NULL_TREE, n_gpr));
9664 TREE_SIDE_EFFECTS (t) = 1;
9665 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9668 if (cfun->va_list_fpr_size)
9670 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9671 build_int_cst (NULL_TREE, n_fpr));
9672 TREE_SIDE_EFFECTS (t) = 1;
9673 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9676 /* Find the overflow area. */
9677 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9678 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9680 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9682 off = INTVAL (crtl->args.arg_offset_rtx);
9683 off = off < 0 ? 0 : off;
9684 if (TARGET_DEBUG_ARG)
9685 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9686 (int)n_gpr, (int)n_fpr, off);
9688 t = fold_build_pointer_plus_hwi (t, off);
9690 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9691 TREE_SIDE_EFFECTS (t) = 1;
9692 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9695 /* Find the register save area. */
9696 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9697 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9699 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9700 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9702 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9703 TREE_SIDE_EFFECTS (t) = 1;
9704 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9708 /* Implement va_arg by updating the va_list structure
9709 VALIST as required to retrieve an argument of type
9710 TYPE, and returning that argument.
9712 Generates code equivalent to:
9714 if (integral value) {
9715 if (size <= 4 && args.gpr < 5 ||
9716 size > 4 && args.gpr < 4 )
9717 ret = args.reg_save_area[args.gpr+8]
9718 else
9719 ret = *args.overflow_arg_area++;
9720 } else if (float value) {
9721 if (args.fgpr < 2)
9722 ret = args.reg_save_area[args.fpr+64]
9723 else
9724 ret = *args.overflow_arg_area++;
9725 } else if (aggregate value) {
9726 if (args.gpr < 5)
9727 ret = *args.reg_save_area[args.gpr]
9728 else
9729 ret = **args.overflow_arg_area++;
9730 } */
9732 static tree
9733 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9734 gimple_seq *post_p ATTRIBUTE_UNUSED)
9736 tree f_gpr, f_fpr, f_ovf, f_sav;
9737 tree gpr, fpr, ovf, sav, reg, t, u;
9738 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9739 tree lab_false, lab_over, addr;
9741 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9742 f_fpr = DECL_CHAIN (f_gpr);
9743 f_ovf = DECL_CHAIN (f_fpr);
9744 f_sav = DECL_CHAIN (f_ovf);
9746 valist = build_va_arg_indirect_ref (valist);
9747 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9748 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9749 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9751 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9752 both appear on a lhs. */
9753 valist = unshare_expr (valist);
9754 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9756 size = int_size_in_bytes (type);
9758 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9760 if (TARGET_DEBUG_ARG)
9762 fprintf (stderr, "va_arg: aggregate type");
9763 debug_tree (type);
9766 /* Aggregates are passed by reference. */
9767 indirect_p = 1;
9768 reg = gpr;
9769 n_reg = 1;
9771 /* Kernel stack layout on 31 bit: It is assumed here that no padding
9772 will be added by s390_frame_info because for va_args an even
9773 number of gprs always has to be saved (r15-r2 = 14 regs). */
9774 sav_ofs = 2 * UNITS_PER_LONG;
9775 sav_scale = UNITS_PER_LONG;
9776 size = UNITS_PER_LONG;
9777 max_reg = GP_ARG_NUM_REG - n_reg;
9779 else if (s390_function_arg_float (TYPE_MODE (type), type))
9781 if (TARGET_DEBUG_ARG)
9783 fprintf (stderr, "va_arg: float type");
9784 debug_tree (type);
9787 /* FP args go in FP registers, if present. */
9788 indirect_p = 0;
9789 reg = fpr;
9790 n_reg = 1;
9791 sav_ofs = 16 * UNITS_PER_LONG;
9792 sav_scale = 8;
9793 max_reg = FP_ARG_NUM_REG - n_reg;
9795 else
9797 if (TARGET_DEBUG_ARG)
9799 fprintf (stderr, "va_arg: other type");
9800 debug_tree (type);
9803 /* Otherwise into GP registers. */
9804 indirect_p = 0;
9805 reg = gpr;
9806 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9808 /* Kernel stack layout on 31 bit: It is assumed here that no padding
9809 will be added by s390_frame_info because for va_args an even
9810 number of gprs always has to be saved (r15-r2 = 14 regs). */
9811 sav_ofs = 2 * UNITS_PER_LONG;
9813 if (size < UNITS_PER_LONG)
9814 sav_ofs += UNITS_PER_LONG - size;
9816 sav_scale = UNITS_PER_LONG;
9817 max_reg = GP_ARG_NUM_REG - n_reg;
9820 /* Pull the value out of the saved registers ... */
9822 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9823 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9824 addr = create_tmp_var (ptr_type_node, "addr");
9826 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9827 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9828 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9829 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9830 gimplify_and_add (t, pre_p);
9832 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9833 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9834 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9835 t = fold_build_pointer_plus (t, u);
9837 gimplify_assign (addr, t, pre_p);
9839 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9841 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9844 /* ... Otherwise out of the overflow area. */
9846 t = ovf;
9847 if (size < UNITS_PER_LONG)
9848 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9850 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9852 gimplify_assign (addr, t, pre_p);
9854 t = fold_build_pointer_plus_hwi (t, size);
9855 gimplify_assign (ovf, t, pre_p);
9857 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9860 /* Increment register save count. */
9862 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9863 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9864 gimplify_and_add (u, pre_p);
9866 if (indirect_p)
9868 t = build_pointer_type_for_mode (build_pointer_type (type),
9869 ptr_mode, true);
9870 addr = fold_convert (t, addr);
9871 addr = build_va_arg_indirect_ref (addr);
9873 else
9875 t = build_pointer_type_for_mode (type, ptr_mode, true);
9876 addr = fold_convert (t, addr);
9879 return build_va_arg_indirect_ref (addr);
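/* Illustrative example (an assumption about the 64-bit case with
   UNITS_PER_LONG == 8, not code from this file): for va_arg of a
   double, the code generated above compares __fpr against
   FP_ARG_NUM_REG - 1; while the count is in range the value is read
   from __reg_save_area + 16*8 + __fpr*8, otherwise it is taken from
   __overflow_arg_area, which is then advanced by 8 bytes.  In either
   case the register counter __fpr is incremented afterwards.  */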
9882 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
9883 expanders.
9884 DEST - Register location where CC will be stored.
9885 TDB - Pointer to a 256 byte area where to store the transaction
9886 diagnostic block. NULL if TDB is not needed.
9887 RETRY - Retry count value. If non-NULL a retry loop for CC2
9888 is emitted.
9889 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
9890 of the tbegin instruction pattern. */
9892 void
9893 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
9895 rtx retry_plus_two = gen_reg_rtx (SImode);
9896 rtx retry_reg = gen_reg_rtx (SImode);
9897 rtx_code_label *retry_label = NULL;
9899 if (retry != NULL_RTX)
9901 emit_move_insn (retry_reg, retry);
9902 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
9903 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
9904 retry_label = gen_label_rtx ();
9905 emit_label (retry_label);
9908 if (clobber_fprs_p)
9909 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK), tdb));
9910 else
9911 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
9912 tdb));
9914 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
9915 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
9916 CC_REGNUM)),
9917 UNSPEC_CC_TO_INT));
9918 if (retry != NULL_RTX)
9920 const int CC0 = 1 << 3;
9921 const int CC1 = 1 << 2;
9922 const int CC3 = 1 << 0;
9923 rtx jump;
9924 rtx count = gen_reg_rtx (SImode);
9925 rtx_code_label *leave_label = gen_label_rtx ();
9927 /* Exit for success and permanent failures. */
9928 jump = s390_emit_jump (leave_label,
9929 gen_rtx_EQ (VOIDmode,
9930 gen_rtx_REG (CCRAWmode, CC_REGNUM),
9931 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
9932 LABEL_NUSES (leave_label) = 1;
9934 /* CC2 - transient failure. Perform retry with ppa. */
9935 emit_move_insn (count, retry_plus_two);
9936 emit_insn (gen_subsi3 (count, count, retry_reg));
9937 emit_insn (gen_tx_assist (count));
9938 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
9939 retry_reg,
9940 retry_reg));
9941 JUMP_LABEL (jump) = retry_label;
9942 LABEL_NUSES (retry_label) = 1;
9943 emit_label (leave_label);
9947 /* Builtins. */
9949 enum s390_builtin
9951 S390_BUILTIN_TBEGIN,
9952 S390_BUILTIN_TBEGIN_NOFLOAT,
9953 S390_BUILTIN_TBEGIN_RETRY,
9954 S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
9955 S390_BUILTIN_TBEGINC,
9956 S390_BUILTIN_TEND,
9957 S390_BUILTIN_TABORT,
9958 S390_BUILTIN_NON_TX_STORE,
9959 S390_BUILTIN_TX_NESTING_DEPTH,
9960 S390_BUILTIN_TX_ASSIST,
9962 S390_BUILTIN_S390_SFPC,
9963 S390_BUILTIN_S390_EFPC,
9965 S390_BUILTIN_MAX
9968 tree s390_builtin_decls[S390_BUILTIN_MAX];
9970 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX] = {
9971 CODE_FOR_tbegin,
9972 CODE_FOR_tbegin_nofloat,
9973 CODE_FOR_tbegin_retry,
9974 CODE_FOR_tbegin_retry_nofloat,
9975 CODE_FOR_tbeginc,
9976 CODE_FOR_tend,
9977 CODE_FOR_tabort,
9978 CODE_FOR_ntstg,
9979 CODE_FOR_etnd,
9980 CODE_FOR_tx_assist,
9982 CODE_FOR_s390_sfpc,
9983 CODE_FOR_s390_efpc
9986 static void
9987 s390_init_builtins (void)
9989 tree ftype, uint64_type;
9990 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
9991 NULL, NULL);
9992 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
9994 /* void foo (void) */
9995 ftype = build_function_type_list (void_type_node, NULL_TREE);
9996 s390_builtin_decls[S390_BUILTIN_TBEGINC] =
9997 add_builtin_function ("__builtin_tbeginc", ftype, S390_BUILTIN_TBEGINC,
9998 BUILT_IN_MD, NULL, NULL_TREE);
10000 /* void foo (int) */
10001 ftype = build_function_type_list (void_type_node, integer_type_node,
10002 NULL_TREE);
10003 s390_builtin_decls[S390_BUILTIN_TABORT] =
10004 add_builtin_function ("__builtin_tabort", ftype,
10005 S390_BUILTIN_TABORT, BUILT_IN_MD, NULL,
10006 noreturn_attr);
10007 s390_builtin_decls[S390_BUILTIN_TX_ASSIST] =
10008 add_builtin_function ("__builtin_tx_assist", ftype,
10009 S390_BUILTIN_TX_ASSIST, BUILT_IN_MD, NULL, NULL_TREE);
10011 /* void foo (unsigned) */
10012 ftype = build_function_type_list (void_type_node, unsigned_type_node,
10013 NULL_TREE);
10014 s390_builtin_decls[S390_BUILTIN_S390_SFPC] =
10015 add_builtin_function ("__builtin_s390_sfpc", ftype,
10016 S390_BUILTIN_S390_SFPC, BUILT_IN_MD, NULL, NULL_TREE);
10018 /* int foo (void *) */
10019 ftype = build_function_type_list (integer_type_node, ptr_type_node,
10020 NULL_TREE);
10021 s390_builtin_decls[S390_BUILTIN_TBEGIN] =
10022 add_builtin_function ("__builtin_tbegin", ftype, S390_BUILTIN_TBEGIN,
10023 BUILT_IN_MD, NULL, returns_twice_attr);
10024 s390_builtin_decls[S390_BUILTIN_TBEGIN_NOFLOAT] =
10025 add_builtin_function ("__builtin_tbegin_nofloat", ftype,
10026 S390_BUILTIN_TBEGIN_NOFLOAT,
10027 BUILT_IN_MD, NULL, returns_twice_attr);
10029 /* int foo (void *, int) */
10030 ftype = build_function_type_list (integer_type_node, ptr_type_node,
10031 integer_type_node, NULL_TREE);
10032 s390_builtin_decls[S390_BUILTIN_TBEGIN_RETRY] =
10033 add_builtin_function ("__builtin_tbegin_retry", ftype,
10034 S390_BUILTIN_TBEGIN_RETRY,
10035 BUILT_IN_MD,
10036 NULL, returns_twice_attr);
10037 s390_builtin_decls[S390_BUILTIN_TBEGIN_RETRY_NOFLOAT] =
10038 add_builtin_function ("__builtin_tbegin_retry_nofloat", ftype,
10039 S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
10040 BUILT_IN_MD,
10041 NULL, returns_twice_attr);
10043 /* int foo (void) */
10044 ftype = build_function_type_list (integer_type_node, NULL_TREE);
10045 s390_builtin_decls[S390_BUILTIN_TX_NESTING_DEPTH] =
10046 add_builtin_function ("__builtin_tx_nesting_depth", ftype,
10047 S390_BUILTIN_TX_NESTING_DEPTH,
10048 BUILT_IN_MD, NULL, NULL_TREE);
10049 s390_builtin_decls[S390_BUILTIN_TEND] =
10050 add_builtin_function ("__builtin_tend", ftype,
10051 S390_BUILTIN_TEND, BUILT_IN_MD, NULL, NULL_TREE);
10053 /* unsigned foo (void) */
10054 ftype = build_function_type_list (unsigned_type_node, NULL_TREE);
10055 s390_builtin_decls[S390_BUILTIN_S390_EFPC] =
10056 add_builtin_function ("__builtin_s390_efpc", ftype,
10057 S390_BUILTIN_S390_EFPC, BUILT_IN_MD, NULL, NULL_TREE);
10059 /* void foo (uint64_t *, uint64_t) */
10060 if (TARGET_64BIT)
10061 uint64_type = long_unsigned_type_node;
10062 else
10063 uint64_type = long_long_unsigned_type_node;
10065 ftype = build_function_type_list (void_type_node,
10066 build_pointer_type (uint64_type),
10067 uint64_type, NULL_TREE);
10068 s390_builtin_decls[S390_BUILTIN_NON_TX_STORE] =
10069 add_builtin_function ("__builtin_non_tx_store", ftype,
10070 S390_BUILTIN_NON_TX_STORE,
10071 BUILT_IN_MD, NULL, NULL_TREE);
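/* Illustrative usage sketch (not part of this file; assumes user code
   compiled with -mhtm): the builtins registered above are typically
   driven like this, with condition code 0 meaning the transaction has
   started.  */
#if 0
static int
example_transactional_increment (int *counter)
{
  if (__builtin_tbegin ((void *) 0) == 0)
    {
      ++*counter;               /* Executed transactionally.  */
      __builtin_tend ();
      return 0;
    }
  return -1;                    /* Aborted or could not start.  */
}
#endif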
10074 /* Expand an expression EXP that calls a built-in function,
10075 with result going to TARGET if that's convenient
10076 (and in mode MODE if that's convenient).
10077 SUBTARGET may be used as the target for computing one of EXP's operands.
10078 IGNORE is nonzero if the value is to be ignored. */
10080 static rtx
10081 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10082 machine_mode mode ATTRIBUTE_UNUSED,
10083 int ignore ATTRIBUTE_UNUSED)
10085 #define MAX_ARGS 2
10087 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10088 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10089 enum insn_code icode;
10090 rtx op[MAX_ARGS], pat;
10091 int arity;
10092 bool nonvoid;
10093 tree arg;
10094 call_expr_arg_iterator iter;
10096 if (fcode >= S390_BUILTIN_MAX)
10097 internal_error ("bad builtin fcode");
10098 icode = code_for_builtin[fcode];
10099 if (icode == 0)
10100 internal_error ("bad builtin fcode");
10102 if (!TARGET_HTM && fcode <= S390_BUILTIN_TX_ASSIST)
10103 error ("Transactional execution builtins not enabled (-mhtm)\n");
10105 /* Set a flag in the machine specific cfun part in order to support
10106 saving/restoring of FPRs. */
10107 if (fcode == S390_BUILTIN_TBEGIN || fcode == S390_BUILTIN_TBEGIN_RETRY)
10108 cfun->machine->tbegin_p = true;
10110 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
10112 arity = 0;
10113 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
10115 const struct insn_operand_data *insn_op;
10117 if (arg == error_mark_node)
10118 return NULL_RTX;
10119 if (arity >= MAX_ARGS)
10120 return NULL_RTX;
10122 insn_op = &insn_data[icode].operand[arity + nonvoid];
10124 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
10126 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
10128 if (insn_op->predicate == memory_operand)
10130 /* Don't move a NULL pointer into a register. Otherwise
10131 we have to rely on combine being able to move it back
10132 in order to get an immediate 0 in the instruction. */
10133 if (op[arity] != const0_rtx)
10134 op[arity] = copy_to_mode_reg (Pmode, op[arity]);
10135 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
10137 else
10138 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
10141 arity++;
10144 if (nonvoid)
10146 machine_mode tmode = insn_data[icode].operand[0].mode;
10147 if (!target
10148 || GET_MODE (target) != tmode
10149 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
10150 target = gen_reg_rtx (tmode);
10153 switch (arity)
10155 case 0:
10156 pat = GEN_FCN (icode) (target);
10157 break;
10158 case 1:
10159 if (nonvoid)
10160 pat = GEN_FCN (icode) (target, op[0]);
10161 else
10162 pat = GEN_FCN (icode) (op[0]);
10163 break;
10164 case 2:
10165 if (nonvoid)
10166 pat = GEN_FCN (icode) (target, op[0], op[1]);
10167 else
10168 pat = GEN_FCN (icode) (op[0], op[1]);
10169 break;
10170 default:
10171 gcc_unreachable ();
10173 if (!pat)
10174 return NULL_RTX;
10175 emit_insn (pat);
10177 if (nonvoid)
10178 return target;
10179 else
10180 return const0_rtx;
10183 /* Return the decl for the target specific builtin with the function
10184 code FCODE. */
10186 static tree
10187 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
10189 if (fcode >= S390_BUILTIN_MAX)
10190 return error_mark_node;
10192 return s390_builtin_decls[fcode];
10195 /* We call mcount before the function prologue. So a profiled leaf
10196 function should stay a leaf function. */
10198 static bool
10199 s390_keep_leaf_when_profiled ()
10201 return true;
10204 /* Output assembly code for the trampoline template to
10205 stdio stream FILE.
10207 On S/390, we use gpr 1 internally in the trampoline code;
10208 gpr 0 is used to hold the static chain. */
10210 static void
10211 s390_asm_trampoline_template (FILE *file)
10213 rtx op[2];
10214 op[0] = gen_rtx_REG (Pmode, 0);
10215 op[1] = gen_rtx_REG (Pmode, 1);
10217 if (TARGET_64BIT)
10219 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
10220 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
10221 output_asm_insn ("br\t%1", op); /* 2 byte */
10222 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
10224 else
10226 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
10227 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
10228 output_asm_insn ("br\t%1", op); /* 2 byte */
10229 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
10233 /* Emit RTL insns to initialize the variable parts of a trampoline.
10234 FNADDR is an RTX for the address of the function's pure code.
10235 CXT is an RTX for the static chain value for the function. */
10237 static void
10238 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10240 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10241 rtx mem;
10243 emit_block_move (m_tramp, assemble_trampoline_template (),
10244 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
10246 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
10247 emit_move_insn (mem, cxt);
10248 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
10249 emit_move_insn (mem, fnaddr);
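/* For illustration (assuming the 64-bit layout implied by the code
   above, i.e. UNITS_PER_LONG == 8), the initialized trampoline looks
   like

     offset  0: basr  %r1,0              ; %r1 = trampoline address + 2
     offset  2: lmg   %r0,%r1,14(%r1)    ; loads offsets 16 and 24
     offset  8: br    %r1
     offset 16: static chain value (CXT)
     offset 24: target function address (FNADDR)

   The displacement 14(%r1) works out because basr leaves %r1 pointing
   at offset 2, so 2 + 14 == 16, the first slot filled in by
   s390_trampoline_init.  The 31-bit variant uses the same scheme with
   4-byte slots at offsets 8 and 12.  */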
10252 /* Output assembler code to FILE to increment profiler label # LABELNO
10253 for profiling a function entry. */
10255 void
10256 s390_function_profiler (FILE *file, int labelno)
10258 rtx op[7];
10260 char label[128];
10261 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
10263 fprintf (file, "# function profiler \n");
10265 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
10266 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
10267 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
10269 op[2] = gen_rtx_REG (Pmode, 1);
10270 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
10271 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
10273 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
10274 if (flag_pic)
10276 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
10277 op[4] = gen_rtx_CONST (Pmode, op[4]);
10280 if (TARGET_64BIT)
10282 output_asm_insn ("stg\t%0,%1", op);
10283 output_asm_insn ("larl\t%2,%3", op);
10284 output_asm_insn ("brasl\t%0,%4", op);
10285 output_asm_insn ("lg\t%0,%1", op);
10287 else if (!flag_pic)
10289 op[6] = gen_label_rtx ();
10291 output_asm_insn ("st\t%0,%1", op);
10292 output_asm_insn ("bras\t%2,%l6", op);
10293 output_asm_insn (".long\t%4", op);
10294 output_asm_insn (".long\t%3", op);
10295 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
10296 output_asm_insn ("l\t%0,0(%2)", op);
10297 output_asm_insn ("l\t%2,4(%2)", op);
10298 output_asm_insn ("basr\t%0,%0", op);
10299 output_asm_insn ("l\t%0,%1", op);
10301 else
10303 op[5] = gen_label_rtx ();
10304 op[6] = gen_label_rtx ();
10306 output_asm_insn ("st\t%0,%1", op);
10307 output_asm_insn ("bras\t%2,%l6", op);
10308 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
10309 output_asm_insn (".long\t%4-%l5", op);
10310 output_asm_insn (".long\t%3-%l5", op);
10311 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
10312 output_asm_insn ("lr\t%0,%2", op);
10313 output_asm_insn ("a\t%0,0(%2)", op);
10314 output_asm_insn ("a\t%2,4(%2)", op);
10315 output_asm_insn ("basr\t%0,%0", op);
10316 output_asm_insn ("l\t%0,%1", op);
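/* As a sketch (assuming RETURN_REGNUM is %r14 and UNITS_PER_LONG is 8,
   as in the 64-bit ABI), the TARGET_64BIT branch above emits roughly

     stg    %r14,8(%r15)     # save return address
     larl   %r1,.LP0         # address of the profile label
     brasl  %r14,_mcount     # _mcount@PLT when compiling PIC
     lg     %r14,8(%r15)     # restore return address

   so _mcount sees the counter label in %r1 and the caller's return
   address is preserved across the call.  */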
10320 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
10321 into its SYMBOL_REF_FLAGS. */
10323 static void
10324 s390_encode_section_info (tree decl, rtx rtl, int first)
10326 default_encode_section_info (decl, rtl, first);
10328 if (TREE_CODE (decl) == VAR_DECL)
10330 /* If a variable has a forced alignment to < 2 bytes, mark it
10331 	 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
10332 operand. */
10333 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
10334 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
10335 if (!DECL_SIZE (decl)
10336 || !DECL_ALIGN (decl)
10337 || !tree_fits_shwi_p (DECL_SIZE (decl))
10338 || (DECL_ALIGN (decl) <= 64
10339 && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
10340 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
10343 /* Literal pool references don't have a decl so they are handled
10344 differently here. We rely on the information in the MEM_ALIGN
10345 entry to decide upon natural alignment. */
10346 if (MEM_P (rtl)
10347 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
10348 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
10349 && (MEM_ALIGN (rtl) == 0
10350 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
10351 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
10352 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
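/* Hypothetical illustration: a global whose alignment the user forced
   below 2 bytes, for instance something along the lines of

     struct __attribute__ ((packed, aligned (1))) odd { short x; };
     struct odd odd_var;

   ends up with DECL_USER_ALIGN set and DECL_ALIGN < 16 bits, so its
   SYMBOL_REF is flagged SYMBOL_FLAG_ALIGN1 above and is never used as
   a LARL operand, LARL being able to form only even addresses.  */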
10355 /* Output thunk to FILE that implements a C++ virtual function call (with
10356 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
10357 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
10358 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
10359 relative to the resulting this pointer. */
10361 static void
10362 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10363 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10364 tree function)
10366 rtx op[10];
10367 int nonlocal = 0;
10369 /* Make sure unwind info is emitted for the thunk if needed. */
10370 final_start_function (emit_barrier (), file, 1);
10372 /* Operand 0 is the target function. */
10373 op[0] = XEXP (DECL_RTL (function), 0);
10374 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
10376 nonlocal = 1;
10377 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
10378 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
10379 op[0] = gen_rtx_CONST (Pmode, op[0]);
10382 /* Operand 1 is the 'this' pointer. */
10383 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10384 op[1] = gen_rtx_REG (Pmode, 3);
10385 else
10386 op[1] = gen_rtx_REG (Pmode, 2);
10388 /* Operand 2 is the delta. */
10389 op[2] = GEN_INT (delta);
10391 /* Operand 3 is the vcall_offset. */
10392 op[3] = GEN_INT (vcall_offset);
10394 /* Operand 4 is the temporary register. */
10395 op[4] = gen_rtx_REG (Pmode, 1);
10397 /* Operands 5 to 8 can be used as labels. */
10398 op[5] = NULL_RTX;
10399 op[6] = NULL_RTX;
10400 op[7] = NULL_RTX;
10401 op[8] = NULL_RTX;
10403 /* Operand 9 can be used for temporary register. */
10404 op[9] = NULL_RTX;
10406 /* Generate code. */
10407 if (TARGET_64BIT)
10409       /* Set up the literal pool pointer if required.  */
10410 if ((!DISP_IN_RANGE (delta)
10411 && !CONST_OK_FOR_K (delta)
10412 && !CONST_OK_FOR_Os (delta))
10413 || (!DISP_IN_RANGE (vcall_offset)
10414 && !CONST_OK_FOR_K (vcall_offset)
10415 && !CONST_OK_FOR_Os (vcall_offset)))
10417 op[5] = gen_label_rtx ();
10418 output_asm_insn ("larl\t%4,%5", op);
10421 /* Add DELTA to this pointer. */
10422 if (delta)
10424 if (CONST_OK_FOR_J (delta))
10425 output_asm_insn ("la\t%1,%2(%1)", op);
10426 else if (DISP_IN_RANGE (delta))
10427 output_asm_insn ("lay\t%1,%2(%1)", op);
10428 else if (CONST_OK_FOR_K (delta))
10429 output_asm_insn ("aghi\t%1,%2", op);
10430 else if (CONST_OK_FOR_Os (delta))
10431 output_asm_insn ("agfi\t%1,%2", op);
10432 else
10434 op[6] = gen_label_rtx ();
10435 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
10439 /* Perform vcall adjustment. */
10440 if (vcall_offset)
10442 if (DISP_IN_RANGE (vcall_offset))
10444 output_asm_insn ("lg\t%4,0(%1)", op);
10445 output_asm_insn ("ag\t%1,%3(%4)", op);
10447 else if (CONST_OK_FOR_K (vcall_offset))
10449 output_asm_insn ("lghi\t%4,%3", op);
10450 output_asm_insn ("ag\t%4,0(%1)", op);
10451 output_asm_insn ("ag\t%1,0(%4)", op);
10453 else if (CONST_OK_FOR_Os (vcall_offset))
10455 output_asm_insn ("lgfi\t%4,%3", op);
10456 output_asm_insn ("ag\t%4,0(%1)", op);
10457 output_asm_insn ("ag\t%1,0(%4)", op);
10459 else
10461 op[7] = gen_label_rtx ();
10462 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
10463 output_asm_insn ("ag\t%4,0(%1)", op);
10464 output_asm_insn ("ag\t%1,0(%4)", op);
10468 /* Jump to target. */
10469 output_asm_insn ("jg\t%0", op);
10471 /* Output literal pool if required. */
10472 if (op[5])
10474 output_asm_insn (".align\t4", op);
10475 targetm.asm_out.internal_label (file, "L",
10476 CODE_LABEL_NUMBER (op[5]));
10478 if (op[6])
10480 targetm.asm_out.internal_label (file, "L",
10481 CODE_LABEL_NUMBER (op[6]));
10482 output_asm_insn (".long\t%2", op);
10484 if (op[7])
10486 targetm.asm_out.internal_label (file, "L",
10487 CODE_LABEL_NUMBER (op[7]));
10488 output_asm_insn (".long\t%3", op);
10491 else
10493       /* Set up the base pointer if required.  */
10494 if (!vcall_offset
10495 || (!DISP_IN_RANGE (delta)
10496 && !CONST_OK_FOR_K (delta)
10497 && !CONST_OK_FOR_Os (delta))
10498 || (!DISP_IN_RANGE (delta)
10499 && !CONST_OK_FOR_K (vcall_offset)
10500 && !CONST_OK_FOR_Os (vcall_offset)))
10502 op[5] = gen_label_rtx ();
10503 output_asm_insn ("basr\t%4,0", op);
10504 targetm.asm_out.internal_label (file, "L",
10505 CODE_LABEL_NUMBER (op[5]));
10508 /* Add DELTA to this pointer. */
10509 if (delta)
10511 if (CONST_OK_FOR_J (delta))
10512 output_asm_insn ("la\t%1,%2(%1)", op);
10513 else if (DISP_IN_RANGE (delta))
10514 output_asm_insn ("lay\t%1,%2(%1)", op);
10515 else if (CONST_OK_FOR_K (delta))
10516 output_asm_insn ("ahi\t%1,%2", op);
10517 else if (CONST_OK_FOR_Os (delta))
10518 output_asm_insn ("afi\t%1,%2", op);
10519 else
10521 op[6] = gen_label_rtx ();
10522 output_asm_insn ("a\t%1,%6-%5(%4)", op);
10526 /* Perform vcall adjustment. */
10527 if (vcall_offset)
10529 if (CONST_OK_FOR_J (vcall_offset))
10531 output_asm_insn ("l\t%4,0(%1)", op);
10532 output_asm_insn ("a\t%1,%3(%4)", op);
10534 else if (DISP_IN_RANGE (vcall_offset))
10536 output_asm_insn ("l\t%4,0(%1)", op);
10537 output_asm_insn ("ay\t%1,%3(%4)", op);
10539 else if (CONST_OK_FOR_K (vcall_offset))
10541 output_asm_insn ("lhi\t%4,%3", op);
10542 output_asm_insn ("a\t%4,0(%1)", op);
10543 output_asm_insn ("a\t%1,0(%4)", op);
10545 else if (CONST_OK_FOR_Os (vcall_offset))
10547 output_asm_insn ("iilf\t%4,%3", op);
10548 output_asm_insn ("a\t%4,0(%1)", op);
10549 output_asm_insn ("a\t%1,0(%4)", op);
10551 else
10553 op[7] = gen_label_rtx ();
10554 output_asm_insn ("l\t%4,%7-%5(%4)", op);
10555 output_asm_insn ("a\t%4,0(%1)", op);
10556 output_asm_insn ("a\t%1,0(%4)", op);
10559 /* We had to clobber the base pointer register.
10560 Re-setup the base pointer (with a different base). */
10561 op[5] = gen_label_rtx ();
10562 output_asm_insn ("basr\t%4,0", op);
10563 targetm.asm_out.internal_label (file, "L",
10564 CODE_LABEL_NUMBER (op[5]));
10567 /* Jump to target. */
10568 op[8] = gen_label_rtx ();
10570 if (!flag_pic)
10571 output_asm_insn ("l\t%4,%8-%5(%4)", op);
10572 else if (!nonlocal)
10573 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10574 /* We cannot call through .plt, since .plt requires %r12 loaded. */
10575 else if (flag_pic == 1)
10577 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10578 output_asm_insn ("l\t%4,%0(%4)", op);
10580 else if (flag_pic == 2)
10582 op[9] = gen_rtx_REG (Pmode, 0);
10583 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
10584 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10585 output_asm_insn ("ar\t%4,%9", op);
10586 output_asm_insn ("l\t%4,0(%4)", op);
10589 output_asm_insn ("br\t%4", op);
10591 /* Output literal pool. */
10592 output_asm_insn (".align\t4", op);
10594 if (nonlocal && flag_pic == 2)
10595 output_asm_insn (".long\t%0", op);
10596 if (nonlocal)
10598 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10599 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
10602 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
10603 if (!flag_pic)
10604 output_asm_insn (".long\t%0", op);
10605 else
10606 output_asm_insn (".long\t%0-%5", op);
10608 if (op[6])
10610 targetm.asm_out.internal_label (file, "L",
10611 CODE_LABEL_NUMBER (op[6]));
10612 output_asm_insn (".long\t%2", op);
10614 if (op[7])
10616 targetm.asm_out.internal_label (file, "L",
10617 CODE_LABEL_NUMBER (op[7]));
10618 output_asm_insn (".long\t%3", op);
10621 final_end_function ();
10624 static bool
10625 s390_valid_pointer_mode (machine_mode mode)
10627 return (mode == SImode || (TARGET_64BIT && mode == DImode));
10630 /* Checks whether the given CALL_EXPR would use a caller
10631 saved register. This is used to decide whether sibling call
10632 optimization could be performed on the respective function
10633 call. */
10635 static bool
10636 s390_call_saved_register_used (tree call_expr)
10638 CUMULATIVE_ARGS cum_v;
10639 cumulative_args_t cum;
10640 tree parameter;
10641 machine_mode mode;
10642 tree type;
10643 rtx parm_rtx;
10644 int reg, i;
10646 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
10647 cum = pack_cumulative_args (&cum_v);
10649 for (i = 0; i < call_expr_nargs (call_expr); i++)
10651 parameter = CALL_EXPR_ARG (call_expr, i);
10652 gcc_assert (parameter);
10654 /* For an undeclared variable passed as parameter we will get
10655 an ERROR_MARK node here. */
10656 if (TREE_CODE (parameter) == ERROR_MARK)
10657 return true;
10659 type = TREE_TYPE (parameter);
10660 gcc_assert (type);
10662 mode = TYPE_MODE (type);
10663 gcc_assert (mode);
10665 if (pass_by_reference (&cum_v, mode, type, true))
10667 mode = Pmode;
10668 type = build_pointer_type (type);
10671 parm_rtx = s390_function_arg (cum, mode, type, 0);
10673 s390_function_arg_advance (cum, mode, type, 0);
10675 if (!parm_rtx)
10676 continue;
10678 if (REG_P (parm_rtx))
10680 for (reg = 0;
10681 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
10682 reg++)
10683 if (!call_used_regs[reg + REGNO (parm_rtx)])
10684 return true;
10687 if (GET_CODE (parm_rtx) == PARALLEL)
10689 int i;
10691 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
10693 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
10695 gcc_assert (REG_P (r));
10697 for (reg = 0;
10698 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
10699 reg++)
10700 if (!call_used_regs[reg + REGNO (r)])
10701 return true;
10706 return false;
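/* Worked example (an illustration, assuming the standard s390 ELF ABI
   where integer arguments are passed in %r2..%r6 and %r6 is
   call-saved): for a call such as

     extern int callee (int, int, int, int, int);
     ... return callee (a, b, c, d, e);    // fifth argument lands in %r6

   the loop above finds %r6 among the argument registers, sees that
   call_used_regs[6] is zero and returns true, so the sibcall
   optimization is rejected for this call expression.  */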
10709 /* Return true if the given call expression can be
10710 turned into a sibling call.
10711 DECL holds the declaration of the function to be called whereas
10712 EXP is the call expression itself. */
10714 static bool
10715 s390_function_ok_for_sibcall (tree decl, tree exp)
10717 /* The TPF epilogue uses register 1. */
10718 if (TARGET_TPF_PROFILING)
10719 return false;
10721   /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
10722 which would have to be restored before the sibcall. */
10723 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
10724 return false;
10726 /* Register 6 on s390 is available as an argument register but unfortunately
10727 "caller saved". This makes functions needing this register for arguments
10728 not suitable for sibcalls. */
10729 return !s390_call_saved_register_used (exp);
10732 /* Return the fixed registers used for condition codes. */
10734 static bool
10735 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
10737 *p1 = CC_REGNUM;
10738 *p2 = INVALID_REGNUM;
10740 return true;
10743 /* This function is used by the call expanders of the machine description.
10744 It emits the call insn itself together with the necessary operations
10745 to adjust the target address and returns the emitted insn.
10746 ADDR_LOCATION is the target address rtx
10747 TLS_CALL the location of the thread-local symbol
10748 RESULT_REG the register where the result of the call should be stored
10749 RETADDR_REG the register where the return address should be stored
10750 If this parameter is NULL_RTX the call is considered
10751 to be a sibling call. */
10753 rtx_insn *
10754 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
10755 rtx retaddr_reg)
10757 bool plt_call = false;
10758 rtx_insn *insn;
10759 rtx call;
10760 rtx clobber;
10761 rtvec vec;
10763 /* Direct function calls need special treatment. */
10764 if (GET_CODE (addr_location) == SYMBOL_REF)
10766 /* When calling a global routine in PIC mode, we must
10767 replace the symbol itself with the PLT stub. */
10768 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
10770 if (retaddr_reg != NULL_RTX)
10772 addr_location = gen_rtx_UNSPEC (Pmode,
10773 gen_rtvec (1, addr_location),
10774 UNSPEC_PLT);
10775 addr_location = gen_rtx_CONST (Pmode, addr_location);
10776 plt_call = true;
10778 else
10779 /* For -fpic code the PLT entries might use r12 which is
10780 call-saved. Therefore we cannot do a sibcall when
10781 calling directly using a symbol ref. When reaching
10782 this point we decided (in s390_function_ok_for_sibcall)
10783 to do a sibcall for a function pointer but one of the
10784 optimizers was able to get rid of the function pointer
10785 by propagating the symbol ref into the call. This
10786 optimization is illegal for S/390 so we turn the direct
10787 	     call into an indirect call again. */
10788 addr_location = force_reg (Pmode, addr_location);
10791 /* Unless we can use the bras(l) insn, force the
10792 routine address into a register. */
10793 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
10795 if (flag_pic)
10796 addr_location = legitimize_pic_address (addr_location, 0);
10797 else
10798 addr_location = force_reg (Pmode, addr_location);
10802 /* If it is already an indirect call or the code above moved the
10803 SYMBOL_REF to somewhere else make sure the address can be found in
10804 register 1. */
10805 if (retaddr_reg == NULL_RTX
10806 && GET_CODE (addr_location) != SYMBOL_REF
10807 && !plt_call)
10809 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
10810 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
10813 addr_location = gen_rtx_MEM (QImode, addr_location);
10814 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
10816 if (result_reg != NULL_RTX)
10817 call = gen_rtx_SET (result_reg, call);
10819 if (retaddr_reg != NULL_RTX)
10821 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
10823 if (tls_call != NULL_RTX)
10824 vec = gen_rtvec (3, call, clobber,
10825 gen_rtx_USE (VOIDmode, tls_call));
10826 else
10827 vec = gen_rtvec (2, call, clobber);
10829 call = gen_rtx_PARALLEL (VOIDmode, vec);
10832 insn = emit_call_insn (call);
10834 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
10835 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
10837 /* s390_function_ok_for_sibcall should
10838 have denied sibcalls in this case. */
10839 gcc_assert (retaddr_reg != NULL_RTX);
10840 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10842 return insn;
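/* As an illustration, for a normal (non-sibling) call with a return
   value and a TLS symbol the code above builds a pattern of roughly
   this shape before emitting it:

     (parallel [(set (reg RESULT_REG)
                     (call (mem:QI ADDR_LOCATION) (const_int 0)))
                (clobber (reg RETADDR_REG))
                (use TLS_CALL)])

   For a sibling call RETADDR_REG is NULL_RTX, so neither the clobber
   nor the parallel is created and the bare call (possibly wrapped in a
   set) is emitted instead.  */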
10845 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10847 static void
10848 s390_conditional_register_usage (void)
10850 int i;
10852 if (flag_pic)
10854 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10855 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10857 if (TARGET_CPU_ZARCH)
10859 fixed_regs[BASE_REGNUM] = 0;
10860 call_used_regs[BASE_REGNUM] = 0;
10861 fixed_regs[RETURN_REGNUM] = 0;
10862 call_used_regs[RETURN_REGNUM] = 0;
10864 if (TARGET_64BIT)
10866 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10867 call_used_regs[i] = call_really_used_regs[i] = 0;
10869 else
10871 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
10872 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
10875 if (TARGET_SOFT_FLOAT)
10877 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10878 call_used_regs[i] = fixed_regs[i] = 1;
10882 /* Corresponding function to eh_return expander. */
10884 static GTY(()) rtx s390_tpf_eh_return_symbol;
10885 void
10886 s390_emit_tpf_eh_return (rtx target)
10888 rtx_insn *insn;
10889 rtx reg, orig_ra;
10891 if (!s390_tpf_eh_return_symbol)
10892 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10894 reg = gen_rtx_REG (Pmode, 2);
10895 orig_ra = gen_rtx_REG (Pmode, 3);
10897 emit_move_insn (reg, target);
10898 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
10899 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10900 gen_rtx_REG (Pmode, RETURN_REGNUM));
10901 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10902 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
10904 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10907 /* Rework the prologue/epilogue to avoid saving/restoring
10908 registers unnecessarily. */
10910 static void
10911 s390_optimize_prologue (void)
10913 rtx_insn *insn, *new_insn, *next_insn;
10915 /* Do a final recompute of the frame-related data. */
10916 s390_optimize_register_info ();
10918 /* If all special registers are in fact used, there's nothing we
10919 can do, so no point in walking the insn list. */
10921 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10922 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10923 && (TARGET_CPU_ZARCH
10924 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10925 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10926 return;
10928 /* Search for prologue/epilogue insns and replace them. */
10930 for (insn = get_insns (); insn; insn = next_insn)
10932 int first, last, off;
10933 rtx set, base, offset;
10934 rtx pat;
10936 next_insn = NEXT_INSN (insn);
10938 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10939 continue;
10941 pat = PATTERN (insn);
10943       /* Remove ldgr/lgdr instructions used for saving and restoring
10944 GPRs if possible. */
10945 if (TARGET_Z10
10946 && GET_CODE (pat) == SET
10947 && GET_MODE (SET_SRC (pat)) == DImode
10948 && REG_P (SET_SRC (pat))
10949 && REG_P (SET_DEST (pat)))
10951 int src_regno = REGNO (SET_SRC (pat));
10952 int dest_regno = REGNO (SET_DEST (pat));
10953 int gpr_regno;
10954 int fpr_regno;
10956 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
10957 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
10958 continue;
10960 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
10961 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
10963 /* GPR must be call-saved, FPR must be call-clobbered. */
10964 if (!call_really_used_regs[fpr_regno]
10965 || call_really_used_regs[gpr_regno])
10966 continue;
10968 /* It must not happen that what we once saved in an FPR now
10969 needs a stack slot. */
10970 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
10972 if (cfun_gpr_save_slot (gpr_regno) == 0)
10974 remove_insn (insn);
10975 continue;
10979 if (GET_CODE (pat) == PARALLEL
10980 && store_multiple_operation (pat, VOIDmode))
10982 set = XVECEXP (pat, 0, 0);
10983 first = REGNO (SET_SRC (set));
10984 last = first + XVECLEN (pat, 0) - 1;
10985 offset = const0_rtx;
10986 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10987 off = INTVAL (offset);
10989 if (GET_CODE (base) != REG || off < 0)
10990 continue;
10991 if (cfun_frame_layout.first_save_gpr != -1
10992 && (cfun_frame_layout.first_save_gpr < first
10993 || cfun_frame_layout.last_save_gpr > last))
10994 continue;
10995 if (REGNO (base) != STACK_POINTER_REGNUM
10996 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10997 continue;
10998 if (first > BASE_REGNUM || last < BASE_REGNUM)
10999 continue;
11001 if (cfun_frame_layout.first_save_gpr != -1)
11003 rtx s_pat = save_gprs (base,
11004 off + (cfun_frame_layout.first_save_gpr
11005 - first) * UNITS_PER_LONG,
11006 cfun_frame_layout.first_save_gpr,
11007 cfun_frame_layout.last_save_gpr);
11008 new_insn = emit_insn_before (s_pat, insn);
11009 INSN_ADDRESSES_NEW (new_insn, -1);
11012 remove_insn (insn);
11013 continue;
11016 if (cfun_frame_layout.first_save_gpr == -1
11017 && GET_CODE (pat) == SET
11018 && GENERAL_REG_P (SET_SRC (pat))
11019 && GET_CODE (SET_DEST (pat)) == MEM)
11021 set = pat;
11022 first = REGNO (SET_SRC (set));
11023 offset = const0_rtx;
11024 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
11025 off = INTVAL (offset);
11027 if (GET_CODE (base) != REG || off < 0)
11028 continue;
11029 if (REGNO (base) != STACK_POINTER_REGNUM
11030 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11031 continue;
11033 remove_insn (insn);
11034 continue;
11037 if (GET_CODE (pat) == PARALLEL
11038 && load_multiple_operation (pat, VOIDmode))
11040 set = XVECEXP (pat, 0, 0);
11041 first = REGNO (SET_DEST (set));
11042 last = first + XVECLEN (pat, 0) - 1;
11043 offset = const0_rtx;
11044 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
11045 off = INTVAL (offset);
11047 if (GET_CODE (base) != REG || off < 0)
11048 continue;
11050 if (cfun_frame_layout.first_restore_gpr != -1
11051 && (cfun_frame_layout.first_restore_gpr < first
11052 || cfun_frame_layout.last_restore_gpr > last))
11053 continue;
11054 if (REGNO (base) != STACK_POINTER_REGNUM
11055 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11056 continue;
11057 if (first > BASE_REGNUM || last < BASE_REGNUM)
11058 continue;
11060 if (cfun_frame_layout.first_restore_gpr != -1)
11062 rtx rpat = restore_gprs (base,
11063 off + (cfun_frame_layout.first_restore_gpr
11064 - first) * UNITS_PER_LONG,
11065 cfun_frame_layout.first_restore_gpr,
11066 cfun_frame_layout.last_restore_gpr);
11068 /* Remove REG_CFA_RESTOREs for registers that we no
11069 longer need to save. */
11070 REG_NOTES (rpat) = REG_NOTES (insn);
11071 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
11072 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
11073 && ((int) REGNO (XEXP (*ptr, 0))
11074 < cfun_frame_layout.first_restore_gpr))
11075 *ptr = XEXP (*ptr, 1);
11076 else
11077 ptr = &XEXP (*ptr, 1);
11078 new_insn = emit_insn_before (rpat, insn);
11079 RTX_FRAME_RELATED_P (new_insn) = 1;
11080 INSN_ADDRESSES_NEW (new_insn, -1);
11083 remove_insn (insn);
11084 continue;
11087 if (cfun_frame_layout.first_restore_gpr == -1
11088 && GET_CODE (pat) == SET
11089 && GENERAL_REG_P (SET_DEST (pat))
11090 && GET_CODE (SET_SRC (pat)) == MEM)
11092 set = pat;
11093 first = REGNO (SET_DEST (set));
11094 offset = const0_rtx;
11095 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
11096 off = INTVAL (offset);
11098 if (GET_CODE (base) != REG || off < 0)
11099 continue;
11101 if (REGNO (base) != STACK_POINTER_REGNUM
11102 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11103 continue;
11105 remove_insn (insn);
11106 continue;
11111 /* On z10 and later the dynamic branch prediction must see the
11112    backward jump within a certain window.  If not, it falls back to
11113    the static prediction.  This function rearranges the loop's backward
11114    branch in a way that makes the static prediction always correct.
11115 The function returns true if it added an instruction. */
11116 static bool
11117 s390_fix_long_loop_prediction (rtx_insn *insn)
11119 rtx set = single_set (insn);
11120 rtx code_label, label_ref, new_label;
11121 rtx_insn *uncond_jump;
11122 rtx_insn *cur_insn;
11123 rtx tmp;
11124 int distance;
11126 /* This will exclude branch on count and branch on index patterns
11127 since these are correctly statically predicted. */
11128 if (!set
11129 || SET_DEST (set) != pc_rtx
11130 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
11131 return false;
11133 /* Skip conditional returns. */
11134 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
11135 && XEXP (SET_SRC (set), 2) == pc_rtx)
11136 return false;
11138 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
11139 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
11141 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
11143 code_label = XEXP (label_ref, 0);
11145 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
11146 || INSN_ADDRESSES (INSN_UID (insn)) == -1
11147 || (INSN_ADDRESSES (INSN_UID (insn))
11148 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
11149 return false;
11151 for (distance = 0, cur_insn = PREV_INSN (insn);
11152 distance < PREDICT_DISTANCE - 6;
11153 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
11154 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
11155 return false;
11157 new_label = gen_label_rtx ();
11158 uncond_jump = emit_jump_insn_after (
11159 gen_rtx_SET (pc_rtx,
11160 gen_rtx_LABEL_REF (VOIDmode, code_label)),
11161 insn);
11162 emit_label_after (new_label, uncond_jump);
11164 tmp = XEXP (SET_SRC (set), 1);
11165 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
11166 XEXP (SET_SRC (set), 2) = tmp;
11167 INSN_CODE (insn) = -1;
11169 XEXP (label_ref, 0) = new_label;
11170 JUMP_LABEL (insn) = new_label;
11171 JUMP_LABEL (uncond_jump) = code_label;
11173 return true;
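/* Sketch of the transformation performed above (illustration only):

     before:                          after:
       .Lloop:                          .Lloop:
         ...                              ...
         jCC   .Lloop   ; backward,       jNCC  .Lnew    ; short forward
                        ; farther than    j     .Lloop   ; unconditional
                        ; PREDICT_       .Lnew:
                        ; DISTANCE

   The branch that actually loops back is now unconditional and thus
   trivially predicted, while the remaining conditional branch is a
   short forward branch that is rarely taken, so the static prediction
   is correct in the common case.  */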
11176 /* Returns 1 if INSN reads the value of REG for purposes not related
11177 to addressing of memory, and 0 otherwise. */
11178 static int
11179 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
11181 return reg_referenced_p (reg, PATTERN (insn))
11182 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
11185 /* Starting from INSN find_cond_jump looks downwards in the insn
11186 stream for a single jump insn which is the last user of the
11187 condition code set in INSN. */
11188 static rtx_insn *
11189 find_cond_jump (rtx_insn *insn)
11191 for (; insn; insn = NEXT_INSN (insn))
11193 rtx ite, cc;
11195 if (LABEL_P (insn))
11196 break;
11198 if (!JUMP_P (insn))
11200 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
11201 break;
11202 continue;
11205 /* This will be triggered by a return. */
11206 if (GET_CODE (PATTERN (insn)) != SET)
11207 break;
11209 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
11210 ite = SET_SRC (PATTERN (insn));
11212 if (GET_CODE (ite) != IF_THEN_ELSE)
11213 break;
11215 cc = XEXP (XEXP (ite, 0), 0);
11216 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
11217 break;
11219 if (find_reg_note (insn, REG_DEAD, cc))
11220 return insn;
11221 break;
11224 return NULL;
11227 /* Swap the condition in COND and the operands in OP0 and OP1 so that
11228 the semantics does not change. If NULL_RTX is passed as COND the
11229 function tries to find the conditional jump starting with INSN. */
11230 static void
11231 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
11233 rtx tmp = *op0;
11235 if (cond == NULL_RTX)
11237 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
11238 rtx set = jump ? single_set (jump) : NULL_RTX;
11240 if (set == NULL_RTX)
11241 return;
11243 cond = XEXP (SET_SRC (set), 0);
11246 *op0 = *op1;
11247 *op1 = tmp;
11248 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
11251 /* On z10, instructions of the compare-and-branch family have the
11252    property of accessing the register occurring as the second operand with
11253 its bits complemented. If such a compare is grouped with a second
11254 instruction that accesses the same register non-complemented, and
11255 if that register's value is delivered via a bypass, then the
11256 pipeline recycles, thereby causing significant performance decline.
11257 This function locates such situations and exchanges the two
11258    operands of the compare.  The function returns true whenever it
11259 added an insn. */
11260 static bool
11261 s390_z10_optimize_cmp (rtx_insn *insn)
11263 rtx_insn *prev_insn, *next_insn;
11264 bool insn_added_p = false;
11265 rtx cond, *op0, *op1;
11267 if (GET_CODE (PATTERN (insn)) == PARALLEL)
11269 /* Handle compare and branch and branch on count
11270 instructions. */
11271 rtx pattern = single_set (insn);
11273 if (!pattern
11274 || SET_DEST (pattern) != pc_rtx
11275 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
11276 return false;
11278 cond = XEXP (SET_SRC (pattern), 0);
11279 op0 = &XEXP (cond, 0);
11280 op1 = &XEXP (cond, 1);
11282 else if (GET_CODE (PATTERN (insn)) == SET)
11284 rtx src, dest;
11286 /* Handle normal compare instructions. */
11287 src = SET_SRC (PATTERN (insn));
11288 dest = SET_DEST (PATTERN (insn));
11290 if (!REG_P (dest)
11291 || !CC_REGNO_P (REGNO (dest))
11292 || GET_CODE (src) != COMPARE)
11293 return false;
11295 /* s390_swap_cmp will try to find the conditional
11296 jump when passing NULL_RTX as condition. */
11297 cond = NULL_RTX;
11298 op0 = &XEXP (src, 0);
11299 op1 = &XEXP (src, 1);
11301 else
11302 return false;
11304 if (!REG_P (*op0) || !REG_P (*op1))
11305 return false;
11307 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
11308 return false;
11310 /* Swap the COMPARE arguments and its mask if there is a
11311 conflicting access in the previous insn. */
11312 prev_insn = prev_active_insn (insn);
11313 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11314 && reg_referenced_p (*op1, PATTERN (prev_insn)))
11315 s390_swap_cmp (cond, op0, op1, insn);
11317 /* Check if there is a conflict with the next insn. If there
11318 was no conflict with the previous insn, then swap the
11319 COMPARE arguments and its mask. If we already swapped
11320 the operands, or if swapping them would cause a conflict
11321 with the previous insn, issue a NOP after the COMPARE in
11322      order to separate the two instructions.  */
11323 next_insn = next_active_insn (insn);
11324 if (next_insn != NULL_RTX && INSN_P (next_insn)
11325 && s390_non_addr_reg_read_p (*op1, next_insn))
11327 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11328 && s390_non_addr_reg_read_p (*op0, prev_insn))
11330 if (REGNO (*op1) == 0)
11331 emit_insn_after (gen_nop1 (), insn);
11332 else
11333 emit_insn_after (gen_nop (), insn);
11334 insn_added_p = true;
11336 else
11337 s390_swap_cmp (cond, op0, op1, insn);
11339 return insn_added_p;
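/* Example of the situation handled above (illustration): on z10 a
   compare-and-branch such as

     crj  %r1,%r2,cond,label

   reads %r2 with its bits complemented.  If a neighbouring instruction
   in the same group also reads %r2 non-complemented and the value
   arrives via a bypass, the pipeline recycles.  Depending on where the
   conflict sits, the code above either rewrites the compare with the
   operands (and the condition) swapped or separates the two
   instructions with a NOP.  */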
11342 /* Perform machine-dependent processing. */
11344 static void
11345 s390_reorg (void)
11347 bool pool_overflow = false;
11348 int hw_before, hw_after;
11350 /* Make sure all splits have been performed; splits after
11351 machine_dependent_reorg might confuse insn length counts. */
11352 split_all_insns_noflow ();
11354 /* Install the main literal pool and the associated base
11355 register load insns.
11357 In addition, there are two problematic situations we need
11358 to correct:
11360 - the literal pool might be > 4096 bytes in size, so that
11361 some of its elements cannot be directly accessed
11363 - a branch target might be > 64K away from the branch, so that
11364 it is not possible to use a PC-relative instruction.
11366 To fix those, we split the single literal pool into multiple
11367 pool chunks, reloading the pool base register at various
11368 points throughout the function to ensure it always points to
11369 the pool chunk the following code expects, and / or replace
11370 PC-relative branches by absolute branches.
11372 However, the two problems are interdependent: splitting the
11373 literal pool can move a branch further away from its target,
11374 causing the 64K limit to overflow, and on the other hand,
11375 replacing a PC-relative branch by an absolute branch means
11376 we need to put the branch target address into the literal
11377 pool, possibly causing it to overflow.
11379 So, we loop trying to fix up both problems until we manage
11380 to satisfy both conditions at the same time. Note that the
11381 loop is guaranteed to terminate as every pass of the loop
11382 strictly decreases the total number of PC-relative branches
11383 in the function. (This is not completely true as there
11384 might be branch-over-pool insns introduced by chunkify_start.
11385 Those never need to be split however.) */
11387 for (;;)
11389 struct constant_pool *pool = NULL;
11391 /* Collect the literal pool. */
11392 if (!pool_overflow)
11394 pool = s390_mainpool_start ();
11395 if (!pool)
11396 pool_overflow = true;
11399 /* If literal pool overflowed, start to chunkify it. */
11400 if (pool_overflow)
11401 pool = s390_chunkify_start ();
11403 /* Split out-of-range branches. If this has created new
11404 literal pool entries, cancel current chunk list and
11405 recompute it. zSeries machines have large branch
11406 instructions, so we never need to split a branch. */
11407 if (!TARGET_CPU_ZARCH && s390_split_branches ())
11409 if (pool_overflow)
11410 s390_chunkify_cancel (pool);
11411 else
11412 s390_mainpool_cancel (pool);
11414 continue;
11417 /* If we made it up to here, both conditions are satisfied.
11418 Finish up literal pool related changes. */
11419 if (pool_overflow)
11420 s390_chunkify_finish (pool);
11421 else
11422 s390_mainpool_finish (pool);
11424 /* We're done splitting branches. */
11425 cfun->machine->split_branches_pending_p = false;
11426 break;
11429 /* Generate out-of-pool execute target insns. */
11430 if (TARGET_CPU_ZARCH)
11432 rtx_insn *insn, *target;
11433 rtx label;
11435 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11437 label = s390_execute_label (insn);
11438 if (!label)
11439 continue;
11441 gcc_assert (label != const0_rtx);
11443 target = emit_label (XEXP (label, 0));
11444 INSN_ADDRESSES_NEW (target, -1);
11446 target = emit_insn (s390_execute_target (insn));
11447 INSN_ADDRESSES_NEW (target, -1);
11451 /* Try to optimize prologue and epilogue further. */
11452 s390_optimize_prologue ();
11454 /* Walk over the insns and do some >=z10 specific changes. */
11455 if (s390_tune == PROCESSOR_2097_Z10
11456 || s390_tune == PROCESSOR_2817_Z196
11457 || s390_tune == PROCESSOR_2827_ZEC12)
11459 rtx_insn *insn;
11460 bool insn_added_p = false;
11462 /* The insn lengths and addresses have to be up to date for the
11463 following manipulations. */
11464 shorten_branches (get_insns ());
11466 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11468 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
11469 continue;
11471 if (JUMP_P (insn))
11472 insn_added_p |= s390_fix_long_loop_prediction (insn);
11474 if ((GET_CODE (PATTERN (insn)) == PARALLEL
11475 || GET_CODE (PATTERN (insn)) == SET)
11476 && s390_tune == PROCESSOR_2097_Z10)
11477 insn_added_p |= s390_z10_optimize_cmp (insn);
11480 /* Adjust branches if we added new instructions. */
11481 if (insn_added_p)
11482 shorten_branches (get_insns ());
11485 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
11486 if (hw_after > 0)
11488 rtx_insn *insn;
11490 /* Insert NOPs for hotpatching. */
11491 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11493 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
11494 break;
11496 gcc_assert (insn);
11497 /* Output a series of NOPs after the NOTE_INSN_FUNCTION_BEG. */
11498 while (hw_after > 0)
11500 if (hw_after >= 3 && TARGET_CPU_ZARCH)
11502 insn = emit_insn_after (gen_nop_6_byte (), insn);
11503 hw_after -= 3;
11505 else if (hw_after >= 2)
11507 insn = emit_insn_after (gen_nop_4_byte (), insn);
11508 hw_after -= 2;
11510 else
11512 insn = emit_insn_after (gen_nop_2_byte (), insn);
11513 hw_after -= 1;
11516 gcc_assert (hw_after == 0);
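/* For instance (illustration, assuming a zArch CPU so that 6-byte NOPs
   are available): if the halfword count requested via -mhotpatch=n,m or
   the hotpatch attribute yields hw_after == 5, the loop above emits one
   6-byte NOP (3 halfwords) followed by one 4-byte NOP (2 halfwords)
   right after NOTE_INSN_FUNCTION_BEG, always covering the requested
   area with the largest NOPs first.  */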
11520 /* Return true if INSN is a fp load insn writing register REGNO. */
11521 static inline bool
11522 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
11524 rtx set;
11525 enum attr_type flag = s390_safe_attr_type (insn);
11527 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
11528 return false;
11530 set = single_set (insn);
11532 if (set == NULL_RTX)
11533 return false;
11535 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
11536 return false;
11538 if (REGNO (SET_DEST (set)) != regno)
11539 return false;
11541 return true;
11544 /* This value describes the distance to be avoided between an
11545    arithmetic fp instruction and an fp load writing the same register.
11546    Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
11547    fine, but the exact value has to be avoided.  Otherwise the FP
11548 pipeline will throw an exception causing a major penalty. */
11549 #define Z10_EARLYLOAD_DISTANCE 7
11551 /* Rearrange the ready list in order to avoid the situation described
11552 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
11553 moved to the very end of the ready list. */
11554 static void
11555 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
11557 unsigned int regno;
11558 int nready = *nready_p;
11559 rtx_insn *tmp;
11560 int i;
11561 rtx_insn *insn;
11562 rtx set;
11563 enum attr_type flag;
11564 int distance;
11566 /* Skip DISTANCE - 1 active insns. */
11567 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
11568 distance > 0 && insn != NULL_RTX;
11569 distance--, insn = prev_active_insn (insn))
11570 if (CALL_P (insn) || JUMP_P (insn))
11571 return;
11573 if (insn == NULL_RTX)
11574 return;
11576 set = single_set (insn);
11578 if (set == NULL_RTX || !REG_P (SET_DEST (set))
11579 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
11580 return;
11582 flag = s390_safe_attr_type (insn);
11584 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
11585 return;
11587 regno = REGNO (SET_DEST (set));
11588 i = nready - 1;
11590 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
11591 i--;
11593 if (!i)
11594 return;
11596 tmp = ready[i];
11597 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
11598 ready[0] = tmp;
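/* Example (illustration): assume the active insn found
   Z10_EARLYLOAD_DISTANCE - 1 steps back was an fp multiply writing
   %f4.  If the ready list now has "ld %f4,..." as its best candidate,
   issuing it immediately would place the load at the problematic
   distance from the arithmetic instruction, so the code above moves
   that load to ready[0], the lowest-priority slot, and something else
   is issued first.  */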
11602 /* The s390_sched_state variable tracks the state of the current or
11603 the last instruction group.
11605 0,1,2 number of instructions scheduled in the current group
11606 3 the last group is complete - normal insns
11607 4 the last group was a cracked/expanded insn */
11609 static int s390_sched_state;
11611 #define S390_OOO_SCHED_STATE_NORMAL 3
11612 #define S390_OOO_SCHED_STATE_CRACKED 4
11614 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
11615 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
11616 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
11617 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
11619 static unsigned int
11620 s390_get_sched_attrmask (rtx_insn *insn)
11622 unsigned int mask = 0;
11624 if (get_attr_ooo_cracked (insn))
11625 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
11626 if (get_attr_ooo_expanded (insn))
11627 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
11628 if (get_attr_ooo_endgroup (insn))
11629 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
11630 if (get_attr_ooo_groupalone (insn))
11631 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
11632 return mask;
11635 /* Return the scheduling score for INSN. The higher the score the
11636 better. The score is calculated from the OOO scheduling attributes
11637 of INSN and the scheduling state s390_sched_state. */
11638 static int
11639 s390_sched_score (rtx_insn *insn)
11641 unsigned int mask = s390_get_sched_attrmask (insn);
11642 int score = 0;
11644 switch (s390_sched_state)
11646 case 0:
11647 /* Try to put insns into the first slot which would otherwise
11648 break a group. */
11649 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
11650 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
11651 score += 5;
11652 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
11653 score += 10;
11654 case 1:
11655 /* Prefer not cracked insns while trying to put together a
11656 group. */
11657 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
11658 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
11659 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
11660 score += 10;
11661 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
11662 score += 5;
11663 break;
11664 case 2:
11665 /* Prefer not cracked insns while trying to put together a
11666 group. */
11667 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
11668 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
11669 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
11670 score += 10;
11671 /* Prefer endgroup insns in the last slot. */
11672 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
11673 score += 10;
11674 break;
11675 case S390_OOO_SCHED_STATE_NORMAL:
11676 /* Prefer not cracked insns if the last was not cracked. */
11677 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
11678 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
11679 score += 5;
11680 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
11681 score += 10;
11682 break;
11683 case S390_OOO_SCHED_STATE_CRACKED:
11684 /* Try to keep cracked insns together to prevent them from
11685 interrupting groups. */
11686 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
11687 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
11688 score += 5;
11689 break;
11691 return score;
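/* Scoring example (derived from the code above): with two insns
   already in the current group (s390_sched_state == 2), an insn whose
   only OOO attribute is ooo_endgroup scores 10 + 10 = 20, while a
   cracked insn scores 0.  The reorder hook below therefore prefers to
   close the group cleanly rather than break it.  */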
11694 /* This function is called via hook TARGET_SCHED_REORDER before
11695 issuing one insn from list READY which contains *NREADYP entries.
11696 For target z10 it reorders load instructions to avoid early load
11697    conflicts in the floating point pipeline.  */
11698 static int
11699 s390_sched_reorder (FILE *file, int verbose,
11700 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
11702 if (s390_tune == PROCESSOR_2097_Z10)
11703 if (reload_completed && *nreadyp > 1)
11704 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
11706 if (s390_tune == PROCESSOR_2827_ZEC12
11707 && reload_completed
11708 && *nreadyp > 1)
11710 int i;
11711 int last_index = *nreadyp - 1;
11712 int max_index = -1;
11713 int max_score = -1;
11714 rtx_insn *tmp;
11716 /* Just move the insn with the highest score to the top (the
11717 end) of the list. A full sort is not needed since a conflict
11718 in the hazard recognition cannot happen. So the top insn in
11719 the ready list will always be taken. */
11720 for (i = last_index; i >= 0; i--)
11722 int score;
11724 if (recog_memoized (ready[i]) < 0)
11725 continue;
11727 score = s390_sched_score (ready[i]);
11728 if (score > max_score)
11730 max_score = score;
11731 max_index = i;
11735 if (max_index != -1)
11737 if (max_index != last_index)
11739 tmp = ready[max_index];
11740 ready[max_index] = ready[last_index];
11741 ready[last_index] = tmp;
11743 if (verbose > 5)
11744 fprintf (file,
11745 "move insn %d to the top of list\n",
11746 INSN_UID (ready[last_index]));
11748 else if (verbose > 5)
11749 fprintf (file,
11750 "best insn %d already on top\n",
11751 INSN_UID (ready[last_index]));
11754 if (verbose > 5)
11756 fprintf (file, "ready list ooo attributes - sched state: %d\n",
11757 s390_sched_state);
11759 for (i = last_index; i >= 0; i--)
11761 if (recog_memoized (ready[i]) < 0)
11762 continue;
11763 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
11764 s390_sched_score (ready[i]));
11765 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
11766 PRINT_OOO_ATTR (ooo_cracked);
11767 PRINT_OOO_ATTR (ooo_expanded);
11768 PRINT_OOO_ATTR (ooo_endgroup);
11769 PRINT_OOO_ATTR (ooo_groupalone);
11770 #undef PRINT_OOO_ATTR
11771 fprintf (file, "\n");
11776 return s390_issue_rate ();
11780 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
11781 the scheduler has issued INSN. It stores the last issued insn into
11782 last_scheduled_insn in order to make it available for
11783 s390_sched_reorder. */
11784 static int
11785 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
11787 last_scheduled_insn = insn;
11789 if (s390_tune == PROCESSOR_2827_ZEC12
11790 && reload_completed
11791 && recog_memoized (insn) >= 0)
11793 unsigned int mask = s390_get_sched_attrmask (insn);
11795 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
11796 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
11797 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
11798 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
11799 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
11800 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
11801 else
11803 /* Only normal insns are left (mask == 0). */
11804 switch (s390_sched_state)
11806 case 0:
11807 case 1:
11808 case 2:
11809 case S390_OOO_SCHED_STATE_NORMAL:
11810 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
11811 s390_sched_state = 1;
11812 else
11813 s390_sched_state++;
11815 break;
11816 case S390_OOO_SCHED_STATE_CRACKED:
11817 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
11818 break;
11821 if (verbose > 5)
11823 fprintf (file, "insn %d: ", INSN_UID (insn));
11824 #define PRINT_OOO_ATTR(ATTR) \
11825 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
11826 PRINT_OOO_ATTR (ooo_cracked);
11827 PRINT_OOO_ATTR (ooo_expanded);
11828 PRINT_OOO_ATTR (ooo_endgroup);
11829 PRINT_OOO_ATTR (ooo_groupalone);
11830 #undef PRINT_OOO_ATTR
11831 fprintf (file, "\n");
11832 fprintf (file, "sched state: %d\n", s390_sched_state);
11836 if (GET_CODE (PATTERN (insn)) != USE
11837 && GET_CODE (PATTERN (insn)) != CLOBBER)
11838 return more - 1;
11839 else
11840 return more;
11843 static void
11844 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
11845 int verbose ATTRIBUTE_UNUSED,
11846 int max_ready ATTRIBUTE_UNUSED)
11848 last_scheduled_insn = NULL;
11849 s390_sched_state = 0;
11852 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
11853    a new number of times struct loop *loop should be unrolled if tuned for cpus
11854    with a built-in stride prefetcher.
11855    The loop is analyzed for memory accesses by walking over
11856    each rtx of the loop.  Depending on the loop_depth and the number of
11857    memory accesses a new number <= nunroll is returned to improve the
11858 behaviour of the hardware prefetch unit. */
11859 static unsigned
11860 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
11862 basic_block *bbs;
11863 rtx_insn *insn;
11864 unsigned i;
11865 unsigned mem_count = 0;
11867 if (s390_tune != PROCESSOR_2097_Z10
11868 && s390_tune != PROCESSOR_2817_Z196
11869 && s390_tune != PROCESSOR_2827_ZEC12)
11870 return nunroll;
11872 /* Count the number of memory references within the loop body. */
11873 bbs = get_loop_body (loop);
11874 subrtx_iterator::array_type array;
11875 for (i = 0; i < loop->num_nodes; i++)
11876 FOR_BB_INSNS (bbs[i], insn)
11877 if (INSN_P (insn) && INSN_CODE (insn) != -1)
11878 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
11879 if (MEM_P (*iter))
11880 mem_count += 1;
11881 free (bbs);
11883 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
11884 if (mem_count == 0)
11885 return nunroll;
11887 switch (loop_depth(loop))
11889 case 1:
11890 return MIN (nunroll, 28 / mem_count);
11891 case 2:
11892 return MIN (nunroll, 22 / mem_count);
11893 default:
11894 return MIN (nunroll, 16 / mem_count);
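/* Worked example (illustration): for a depth-1 loop containing 4
   memory references and nunroll == 8, the code above returns
   MIN (8, 28 / 4) == 7, so the loop is unrolled at most 7 times to
   avoid overrunning the hardware prefetch unit.  */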
11898 static void
11899 s390_option_override (void)
11901 unsigned int i;
11902 cl_deferred_option *opt;
11903 vec<cl_deferred_option> *v =
11904 (vec<cl_deferred_option> *) s390_deferred_options;
11906 if (v)
11907 FOR_EACH_VEC_ELT (*v, i, opt)
11909 switch (opt->opt_index)
11911 case OPT_mhotpatch_:
11913 int val1;
11914 int val2;
11915 char s[256];
11916 char *t;
11918 strncpy (s, opt->arg, 256);
11919 s[255] = 0;
11920 t = strchr (s, ',');
11921 if (t != NULL)
11923 *t = 0;
11924 t++;
11925 val1 = integral_argument (s);
11926 val2 = integral_argument (t);
11928 else
11930 val1 = -1;
11931 val2 = -1;
11933 if (val1 == -1 || val2 == -1)
11935 /* argument is not a plain number */
11936 error ("arguments to %qs should be non-negative integers",
11937 "-mhotpatch=n,m");
11938 break;
11940 else if (val1 > s390_hotpatch_hw_max
11941 || val2 > s390_hotpatch_hw_max)
11943 error ("argument to %qs is too large (max. %d)",
11944 "-mhotpatch=n,m", s390_hotpatch_hw_max);
11945 break;
11947 s390_hotpatch_hw_before_label = val1;
11948 s390_hotpatch_hw_after_label = val2;
11949 break;
11951 default:
11952 gcc_unreachable ();
11956 /* Set up function hooks. */
11957 init_machine_status = s390_init_machine_status;
11959 /* Architecture mode defaults according to ABI. */
11960 if (!(target_flags_explicit & MASK_ZARCH))
11962 if (TARGET_64BIT)
11963 target_flags |= MASK_ZARCH;
11964 else
11965 target_flags &= ~MASK_ZARCH;
11968 /* Set the march default in case it hasn't been specified on
11969 cmdline. */
11970 if (s390_arch == PROCESSOR_max)
11972       s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
11973 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
11974 s390_arch_flags = processor_flags_table[(int)s390_arch];
11977 /* Determine processor to tune for. */
11978 if (s390_tune == PROCESSOR_max)
11980 s390_tune = s390_arch;
11981 s390_tune_flags = s390_arch_flags;
11984 /* Sanity checks. */
11985 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
11986 error ("z/Architecture mode not supported on %s", s390_arch_string);
11987 if (TARGET_64BIT && !TARGET_ZARCH)
11988 error ("64-bit ABI not supported in ESA/390 mode");
11990 /* Use hardware DFP if available and not explicitly disabled by
11991 user. E.g. with -m31 -march=z10 -mzarch */
11992 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
11993 target_flags |= MASK_HARD_DFP;
11995 /* Enable hardware transactions if available and not explicitly
11996 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
11997 if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
11998 target_flags |= MASK_OPT_HTM;
12000 if (TARGET_HARD_DFP && !TARGET_DFP)
12002 if (target_flags_explicit & MASK_HARD_DFP)
12004 if (!TARGET_CPU_DFP)
12005 error ("hardware decimal floating point instructions"
12006 " not available on %s", s390_arch_string);
12007 if (!TARGET_ZARCH)
12008 error ("hardware decimal floating point instructions"
12009 " not available in ESA/390 mode");
12011 else
12012 target_flags &= ~MASK_HARD_DFP;
12015 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
12017 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
12018 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
12020 target_flags &= ~MASK_HARD_DFP;
12023 /* Set processor cost function. */
12024 switch (s390_tune)
12026 case PROCESSOR_2084_Z990:
12027 s390_cost = &z990_cost;
12028 break;
12029 case PROCESSOR_2094_Z9_109:
12030 s390_cost = &z9_109_cost;
12031 break;
12032 case PROCESSOR_2097_Z10:
12033 s390_cost = &z10_cost;
12034 break;
12035 case PROCESSOR_2817_Z196:
12036 s390_cost = &z196_cost;
12037 break;
12038 case PROCESSOR_2827_ZEC12:
12039 s390_cost = &zEC12_cost;
12040 break;
12041 default:
12042 s390_cost = &z900_cost;
12045 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
12046 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
12047 "in combination");
12049 if (s390_stack_size)
12051 if (s390_stack_guard >= s390_stack_size)
12052 error ("stack size must be greater than the stack guard value");
12053 else if (s390_stack_size > 1 << 16)
12054 error ("stack size must not be greater than 64k");
12056 else if (s390_stack_guard)
12057 error ("-mstack-guard implies use of -mstack-size");
12059 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
12060 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
12061 target_flags |= MASK_LONG_DOUBLE_128;
12062 #endif
12064 if (s390_tune == PROCESSOR_2097_Z10
12065 || s390_tune == PROCESSOR_2817_Z196
12066 || s390_tune == PROCESSOR_2827_ZEC12)
12068 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
12069 global_options.x_param_values,
12070 global_options_set.x_param_values);
12071 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
12072 global_options.x_param_values,
12073 global_options_set.x_param_values);
12074 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
12075 global_options.x_param_values,
12076 global_options_set.x_param_values);
12077 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
12078 global_options.x_param_values,
12079 global_options_set.x_param_values);
12082 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
12083 global_options.x_param_values,
12084 global_options_set.x_param_values);
12085   /* Values for loop prefetching.  */
12086 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
12087 global_options.x_param_values,
12088 global_options_set.x_param_values);
12089 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
12090 global_options.x_param_values,
12091 global_options_set.x_param_values);
12092   /* s390 has more than 2 cache levels and the sizes are much larger.  Since
12093      we are always running virtualized, assume that we only get a small
12094      part of the caches above L1.  */
12095 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
12096 global_options.x_param_values,
12097 global_options_set.x_param_values);
12098 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
12099 global_options.x_param_values,
12100 global_options_set.x_param_values);
12101 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
12102 global_options.x_param_values,
12103 global_options_set.x_param_values);
12105 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
12106 requires the arch flags to be evaluated already. Since prefetching
12107 is beneficial on s390, we enable it if available. */
12108 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
12109 flag_prefetch_loop_arrays = 1;
12111 /* Use the alternative scheduling-pressure algorithm by default. */
12112 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
12113 global_options.x_param_values,
12114 global_options_set.x_param_values);
12116 if (TARGET_TPF)
12118 /* Don't emit DWARF3/4 unless specifically selected. The TPF
12119 debuggers do not yet support DWARF 3/4. */
12120 if (!global_options_set.x_dwarf_strict)
12121 dwarf_strict = 1;
12122 if (!global_options_set.x_dwarf_version)
12123 dwarf_version = 2;
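/* Roughly equivalent to the user passing -gstrict-dwarf and -gdwarf-2 on
   TPF, except that explicit -gdwarf-<n> or -gno-strict-dwarf options still
   take precedence (the global_options_set checks above).  */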
12126 /* Register a target-specific optimization-and-lowering pass
12127 to run immediately before prologue and epilogue generation.
12129 Registering the pass must be done at startup.  It's
12130 convenient to do it here. */
12131 opt_pass *new_pass = new pass_s390_early_mach (g);
12132 struct register_pass_info insert_pass_s390_early_mach =
12134 new_pass, /* pass */
12135 "pro_and_epilogue", /* reference_pass_name */
12136 1, /* ref_pass_instance_number */
12137 PASS_POS_INSERT_BEFORE /* pos_op */
12139 register_pass (&insert_pass_s390_early_mach);
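/* A minimal sketch of the resulting pass ordering (assuming the default RTL
   pass list): ... -> <s390 early-mach pass> -> pro_and_epilogue -> ..., i.e.
   the new pass is inserted before instance 1 of the pass named
   "pro_and_epilogue", as requested by the register_pass_info above.  */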
12142 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
12144 static bool
12145 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
12146 unsigned int align ATTRIBUTE_UNUSED,
12147 enum by_pieces_operation op ATTRIBUTE_UNUSED,
12148 bool speed_p ATTRIBUTE_UNUSED)
12150 return (size == 1 || size == 2
12151 || size == 4 || (TARGET_ZARCH && size == 8));
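/* Illustration (a sketch, not taken from the original sources): with this
   hook, a copy such as

     struct { char b[8]; } d, s;
     __builtin_memcpy (&d, &s, 8);

   may be expanded inline by the by-pieces framework on TARGET_ZARCH (size 8
   is accepted above), while sizes other than 1, 2, 4 and 8 fall back to the
   normal block-move expansion or a library call.  */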
12154 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
12156 static void
12157 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
12159 tree sfpc = s390_builtin_decls[S390_BUILTIN_S390_SFPC];
12160 tree efpc = s390_builtin_decls[S390_BUILTIN_S390_EFPC];
12161 tree call_efpc = build_call_expr (efpc, 0);
12162 tree fenv_var = create_tmp_var (unsigned_type_node);
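/* The masks below carve up the 32-bit FPC (floating-point control) register:
   roughly, the IEEE trap-enable (exception mask) bits live in the top byte,
   the IEEE exception flags in the second byte, and the data exception code
   (DXC) in the third byte; the *_SHIFT values give the corresponding byte
   positions.  Consult the z/Architecture Principles of Operation for the
   exact layout.  */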
12164 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
12165 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
12166 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
12167 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
12168 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
12169 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
12171 /* Generates the equivalent of feholdexcept (&fenv_var)
12173 fenv_var = __builtin_s390_efpc ();
12174 __builtin_s390_sfpc (fenv_var & mask) */
12175 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
12176 tree new_fpc =
12177 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
12178 build_int_cst (unsigned_type_node,
12179 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
12180 FPC_EXCEPTION_MASK)));
12181 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
12182 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
12184 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
12186 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
12187 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
12188 build_int_cst (unsigned_type_node,
12189 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
12190 *clear = build_call_expr (sfpc, 1, new_fpc);
12192 /* Generates the equivalent of feupdateenv (fenv_var)
12194 old_fpc = __builtin_s390_efpc ();
12195 __builtin_s390_sfpc (fenv_var);
12196 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
12198 old_fpc = create_tmp_var (unsigned_type_node);
12199 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
12200 old_fpc, call_efpc);
12202 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
12204 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
12205 build_int_cst (unsigned_type_node,
12206 FPC_FLAGS_MASK));
12207 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
12208 build_int_cst (unsigned_type_node,
12209 FPC_FLAGS_SHIFT));
12210 tree atomic_feraiseexcept
12211 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
12212 raise_old_except = build_call_expr (atomic_feraiseexcept,
12213 1, raise_old_except);
12215 *update = build2 (COMPOUND_EXPR, void_type_node,
12216 build2 (COMPOUND_EXPR, void_type_node,
12217 store_old_fpc, set_new_fpc),
12218 raise_old_except);
12220 #undef FPC_EXCEPTION_MASK
12221 #undef FPC_FLAGS_MASK
12222 #undef FPC_DXC_MASK
12223 #undef FPC_EXCEPTION_MASK_SHIFT
12224 #undef FPC_FLAGS_SHIFT
12225 #undef FPC_DXC_SHIFT
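/* Usage sketch (assuming the generic C front-end expansion of atomic
   compound assignment): for

     _Atomic double d;
     ...
     d += 1.0;

   the front end wraps its compare-and-swap loop with the trees built above:
   *hold before entering the loop, *clear when a CAS attempt fails and the
   iteration is retried, and *update once the store has succeeded, so that
   only the exceptions of the final, successful iteration are raised.  */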
12228 /* Initialize GCC target structure. */
12230 #undef TARGET_ASM_ALIGNED_HI_OP
12231 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
12232 #undef TARGET_ASM_ALIGNED_DI_OP
12233 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
12234 #undef TARGET_ASM_INTEGER
12235 #define TARGET_ASM_INTEGER s390_assemble_integer
12237 #undef TARGET_ASM_OPEN_PAREN
12238 #define TARGET_ASM_OPEN_PAREN ""
12240 #undef TARGET_ASM_CLOSE_PAREN
12241 #define TARGET_ASM_CLOSE_PAREN ""
12243 #undef TARGET_OPTION_OVERRIDE
12244 #define TARGET_OPTION_OVERRIDE s390_option_override
12246 #undef TARGET_ENCODE_SECTION_INFO
12247 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
12249 #undef TARGET_SCALAR_MODE_SUPPORTED_P
12250 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
12252 #ifdef HAVE_AS_TLS
12253 #undef TARGET_HAVE_TLS
12254 #define TARGET_HAVE_TLS true
12255 #endif
12256 #undef TARGET_CANNOT_FORCE_CONST_MEM
12257 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
12259 #undef TARGET_DELEGITIMIZE_ADDRESS
12260 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
12262 #undef TARGET_LEGITIMIZE_ADDRESS
12263 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
12265 #undef TARGET_RETURN_IN_MEMORY
12266 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
12268 #undef TARGET_INIT_BUILTINS
12269 #define TARGET_INIT_BUILTINS s390_init_builtins
12270 #undef TARGET_EXPAND_BUILTIN
12271 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
12272 #undef TARGET_BUILTIN_DECL
12273 #define TARGET_BUILTIN_DECL s390_builtin_decl
12275 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
12276 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
12278 #undef TARGET_ASM_OUTPUT_MI_THUNK
12279 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
12280 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
12281 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
12283 #undef TARGET_SCHED_ADJUST_PRIORITY
12284 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
12285 #undef TARGET_SCHED_ISSUE_RATE
12286 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
12287 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
12288 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
12290 #undef TARGET_SCHED_VARIABLE_ISSUE
12291 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
12292 #undef TARGET_SCHED_REORDER
12293 #define TARGET_SCHED_REORDER s390_sched_reorder
12294 #undef TARGET_SCHED_INIT
12295 #define TARGET_SCHED_INIT s390_sched_init
12297 #undef TARGET_CANNOT_COPY_INSN_P
12298 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
12299 #undef TARGET_RTX_COSTS
12300 #define TARGET_RTX_COSTS s390_rtx_costs
12301 #undef TARGET_ADDRESS_COST
12302 #define TARGET_ADDRESS_COST s390_address_cost
12303 #undef TARGET_REGISTER_MOVE_COST
12304 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
12305 #undef TARGET_MEMORY_MOVE_COST
12306 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
12308 #undef TARGET_MACHINE_DEPENDENT_REORG
12309 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
12311 #undef TARGET_VALID_POINTER_MODE
12312 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
12314 #undef TARGET_BUILD_BUILTIN_VA_LIST
12315 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
12316 #undef TARGET_EXPAND_BUILTIN_VA_START
12317 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
12318 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
12319 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
12321 #undef TARGET_PROMOTE_FUNCTION_MODE
12322 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
12323 #undef TARGET_PASS_BY_REFERENCE
12324 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
12326 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
12327 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
12328 #undef TARGET_FUNCTION_ARG
12329 #define TARGET_FUNCTION_ARG s390_function_arg
12330 #undef TARGET_FUNCTION_ARG_ADVANCE
12331 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
12332 #undef TARGET_FUNCTION_VALUE
12333 #define TARGET_FUNCTION_VALUE s390_function_value
12334 #undef TARGET_LIBCALL_VALUE
12335 #define TARGET_LIBCALL_VALUE s390_libcall_value
12337 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
12338 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
12340 #undef TARGET_FIXED_CONDITION_CODE_REGS
12341 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
12343 #undef TARGET_CC_MODES_COMPATIBLE
12344 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
12346 #undef TARGET_INVALID_WITHIN_DOLOOP
12347 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
12349 #ifdef HAVE_AS_TLS
12350 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12351 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
12352 #endif
12354 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
12355 #undef TARGET_MANGLE_TYPE
12356 #define TARGET_MANGLE_TYPE s390_mangle_type
12357 #endif
12362 #undef TARGET_PREFERRED_RELOAD_CLASS
12363 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
12365 #undef TARGET_SECONDARY_RELOAD
12366 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
12368 #undef TARGET_LIBGCC_CMP_RETURN_MODE
12369 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
12371 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
12372 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
12374 #undef TARGET_LEGITIMATE_ADDRESS_P
12375 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
12377 #undef TARGET_LEGITIMATE_CONSTANT_P
12378 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
12380 #undef TARGET_LRA_P
12381 #define TARGET_LRA_P s390_lra_p
12383 #undef TARGET_CAN_ELIMINATE
12384 #define TARGET_CAN_ELIMINATE s390_can_eliminate
12386 #undef TARGET_CONDITIONAL_REGISTER_USAGE
12387 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
12389 #undef TARGET_LOOP_UNROLL_ADJUST
12390 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
12392 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
12393 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
12394 #undef TARGET_TRAMPOLINE_INIT
12395 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
12397 #undef TARGET_UNWIND_WORD_MODE
12398 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
12400 #undef TARGET_CANONICALIZE_COMPARISON
12401 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
12403 #undef TARGET_HARD_REGNO_SCRATCH_OK
12404 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
12406 #undef TARGET_ATTRIBUTE_TABLE
12407 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
12409 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
12410 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
12412 #undef TARGET_SET_UP_BY_PROLOGUE
12413 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
12415 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
12416 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
12417 s390_use_by_pieces_infrastructure_p
12419 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
12420 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
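/* A brief note on the pattern above (a hedged sketch of the target-macro
   mechanism): target-def.h supplies a default definition for every TARGET_*
   hook, the #undef/#define pairs in this file override the ones s390
   implements, and TARGET_INITIALIZER below collects them all into the
   targetm structure that the middle end consults.  */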
12422 struct gcc_target targetm = TARGET_INITIALIZER;
12424 #include "gt-s390.h"