1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "hash-set.h"
29 #include "machmode.h"
30 #include "vec.h"
31 #include "double-int.h"
32 #include "input.h"
33 #include "alias.h"
34 #include "symtab.h"
35 #include "wide-int.h"
36 #include "inchash.h"
37 #include "tree.h"
38 #include "fold-const.h"
39 #include "print-tree.h"
40 #include "stringpool.h"
41 #include "stor-layout.h"
42 #include "varasm.h"
43 #include "calls.h"
44 #include "tm_p.h"
45 #include "regs.h"
46 #include "hard-reg-set.h"
47 #include "insn-config.h"
48 #include "conditions.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "flags.h"
52 #include "except.h"
53 #include "function.h"
54 #include "recog.h"
55 #include "hashtab.h"
56 #include "statistics.h"
57 #include "real.h"
58 #include "fixed-value.h"
59 #include "expmed.h"
60 #include "dojump.h"
61 #include "explow.h"
62 #include "emit-rtl.h"
63 #include "stmt.h"
64 #include "expr.h"
65 #include "reload.h"
66 #include "diagnostic-core.h"
67 #include "predict.h"
68 #include "dominance.h"
69 #include "cfg.h"
70 #include "cfgrtl.h"
71 #include "cfganal.h"
72 #include "lcm.h"
73 #include "cfgbuild.h"
74 #include "cfgcleanup.h"
75 #include "basic-block.h"
76 #include "ggc.h"
77 #include "target.h"
78 #include "target-def.h"
79 #include "debug.h"
80 #include "langhooks.h"
81 #include "insn-codes.h"
82 #include "optabs.h"
83 #include "hash-table.h"
84 #include "tree-ssa-alias.h"
85 #include "internal-fn.h"
86 #include "gimple-fold.h"
87 #include "tree-eh.h"
88 #include "gimple-expr.h"
89 #include "is-a.h"
90 #include "gimple.h"
91 #include "gimplify.h"
92 #include "df.h"
93 #include "params.h"
94 #include "cfgloop.h"
95 #include "opts.h"
96 #include "tree-pass.h"
97 #include "context.h"
98 #include "builtins.h"
99 #include "rtl-iter.h"
101 /* Define the specific costs for a given cpu. */
103 struct processor_costs
105 /* multiplication */
106 const int m; /* cost of an M instruction. */
107 const int mghi; /* cost of an MGHI instruction. */
108 const int mh; /* cost of an MH instruction. */
109 const int mhi; /* cost of an MHI instruction. */
110 const int ml; /* cost of an ML instruction. */
111 const int mr; /* cost of an MR instruction. */
112 const int ms; /* cost of an MS instruction. */
113 const int msg; /* cost of an MSG instruction. */
114 const int msgf; /* cost of an MSGF instruction. */
115 const int msgfr; /* cost of an MSGFR instruction. */
116 const int msgr; /* cost of an MSGR instruction. */
117 const int msr; /* cost of an MSR instruction. */
118 const int mult_df; /* cost of multiplication in DFmode. */
119 const int mxbr;
120 /* square root */
121 const int sqxbr; /* cost of square root in TFmode. */
122 const int sqdbr; /* cost of square root in DFmode. */
123 const int sqebr; /* cost of square root in SFmode. */
124 /* multiply and add */
125 const int madbr; /* cost of multiply and add in DFmode. */
126 const int maebr; /* cost of multiply and add in SFmode. */
127 /* division */
128 const int dxbr;
129 const int ddbr;
130 const int debr;
131 const int dlgr;
132 const int dlr;
133 const int dr;
134 const int dsgfr;
135 const int dsgr;
138 const struct processor_costs *s390_cost;
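/* A sketch of how these tables are meant to be read (added for clarity, not
   from the original file): s390_cost is pointed at one of the per-CPU tables
   below when the command-line options are processed, so when tuning for z10,
   for example, an unsigned 64-bit division is assumed to cost
   s390_cost->dlgr == COSTS_N_INSNS (160) in the rtx cost hooks.  */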
140 static const
141 struct processor_costs z900_cost =
143 COSTS_N_INSNS (5), /* M */
144 COSTS_N_INSNS (10), /* MGHI */
145 COSTS_N_INSNS (5), /* MH */
146 COSTS_N_INSNS (4), /* MHI */
147 COSTS_N_INSNS (5), /* ML */
148 COSTS_N_INSNS (5), /* MR */
149 COSTS_N_INSNS (4), /* MS */
150 COSTS_N_INSNS (15), /* MSG */
151 COSTS_N_INSNS (7), /* MSGF */
152 COSTS_N_INSNS (7), /* MSGFR */
153 COSTS_N_INSNS (10), /* MSGR */
154 COSTS_N_INSNS (4), /* MSR */
155 COSTS_N_INSNS (7), /* multiplication in DFmode */
156 COSTS_N_INSNS (13), /* MXBR */
157 COSTS_N_INSNS (136), /* SQXBR */
158 COSTS_N_INSNS (44), /* SQDBR */
159 COSTS_N_INSNS (35), /* SQEBR */
160 COSTS_N_INSNS (18), /* MADBR */
161 COSTS_N_INSNS (13), /* MAEBR */
162 COSTS_N_INSNS (134), /* DXBR */
163 COSTS_N_INSNS (30), /* DDBR */
164 COSTS_N_INSNS (27), /* DEBR */
165 COSTS_N_INSNS (220), /* DLGR */
166 COSTS_N_INSNS (34), /* DLR */
167 COSTS_N_INSNS (34), /* DR */
168 COSTS_N_INSNS (32), /* DSGFR */
169 COSTS_N_INSNS (32), /* DSGR */
172 static const
173 struct processor_costs z990_cost =
175 COSTS_N_INSNS (4), /* M */
176 COSTS_N_INSNS (2), /* MGHI */
177 COSTS_N_INSNS (2), /* MH */
178 COSTS_N_INSNS (2), /* MHI */
179 COSTS_N_INSNS (4), /* ML */
180 COSTS_N_INSNS (4), /* MR */
181 COSTS_N_INSNS (5), /* MS */
182 COSTS_N_INSNS (6), /* MSG */
183 COSTS_N_INSNS (4), /* MSGF */
184 COSTS_N_INSNS (4), /* MSGFR */
185 COSTS_N_INSNS (4), /* MSGR */
186 COSTS_N_INSNS (4), /* MSR */
187 COSTS_N_INSNS (1), /* multiplication in DFmode */
188 COSTS_N_INSNS (28), /* MXBR */
189 COSTS_N_INSNS (130), /* SQXBR */
190 COSTS_N_INSNS (66), /* SQDBR */
191 COSTS_N_INSNS (38), /* SQEBR */
192 COSTS_N_INSNS (1), /* MADBR */
193 COSTS_N_INSNS (1), /* MAEBR */
194 COSTS_N_INSNS (60), /* DXBR */
195 COSTS_N_INSNS (40), /* DDBR */
196 COSTS_N_INSNS (26), /* DEBR */
197 COSTS_N_INSNS (176), /* DLGR */
198 COSTS_N_INSNS (31), /* DLR */
199 COSTS_N_INSNS (31), /* DR */
200 COSTS_N_INSNS (31), /* DSGFR */
201 COSTS_N_INSNS (31), /* DSGR */
204 static const
205 struct processor_costs z9_109_cost =
207 COSTS_N_INSNS (4), /* M */
208 COSTS_N_INSNS (2), /* MGHI */
209 COSTS_N_INSNS (2), /* MH */
210 COSTS_N_INSNS (2), /* MHI */
211 COSTS_N_INSNS (4), /* ML */
212 COSTS_N_INSNS (4), /* MR */
213 COSTS_N_INSNS (5), /* MS */
214 COSTS_N_INSNS (6), /* MSG */
215 COSTS_N_INSNS (4), /* MSGF */
216 COSTS_N_INSNS (4), /* MSGFR */
217 COSTS_N_INSNS (4), /* MSGR */
218 COSTS_N_INSNS (4), /* MSR */
219 COSTS_N_INSNS (1), /* multiplication in DFmode */
220 COSTS_N_INSNS (28), /* MXBR */
221 COSTS_N_INSNS (130), /* SQXBR */
222 COSTS_N_INSNS (66), /* SQDBR */
223 COSTS_N_INSNS (38), /* SQEBR */
224 COSTS_N_INSNS (1), /* MADBR */
225 COSTS_N_INSNS (1), /* MAEBR */
226 COSTS_N_INSNS (60), /* DXBR */
227 COSTS_N_INSNS (40), /* DDBR */
228 COSTS_N_INSNS (26), /* DEBR */
229 COSTS_N_INSNS (30), /* DLGR */
230 COSTS_N_INSNS (23), /* DLR */
231 COSTS_N_INSNS (23), /* DR */
232 COSTS_N_INSNS (24), /* DSGFR */
233 COSTS_N_INSNS (24), /* DSGR */
236 static const
237 struct processor_costs z10_cost =
239 COSTS_N_INSNS (10), /* M */
240 COSTS_N_INSNS (10), /* MGHI */
241 COSTS_N_INSNS (10), /* MH */
242 COSTS_N_INSNS (10), /* MHI */
243 COSTS_N_INSNS (10), /* ML */
244 COSTS_N_INSNS (10), /* MR */
245 COSTS_N_INSNS (10), /* MS */
246 COSTS_N_INSNS (10), /* MSG */
247 COSTS_N_INSNS (10), /* MSGF */
248 COSTS_N_INSNS (10), /* MSGFR */
249 COSTS_N_INSNS (10), /* MSGR */
250 COSTS_N_INSNS (10), /* MSR */
251 COSTS_N_INSNS (1) , /* multiplication in DFmode */
252 COSTS_N_INSNS (50), /* MXBR */
253 COSTS_N_INSNS (120), /* SQXBR */
254 COSTS_N_INSNS (52), /* SQDBR */
255 COSTS_N_INSNS (38), /* SQEBR */
256 COSTS_N_INSNS (1), /* MADBR */
257 COSTS_N_INSNS (1), /* MAEBR */
258 COSTS_N_INSNS (111), /* DXBR */
259 COSTS_N_INSNS (39), /* DDBR */
260 COSTS_N_INSNS (32), /* DEBR */
261 COSTS_N_INSNS (160), /* DLGR */
262 COSTS_N_INSNS (71), /* DLR */
263 COSTS_N_INSNS (71), /* DR */
264 COSTS_N_INSNS (71), /* DSGFR */
265 COSTS_N_INSNS (71), /* DSGR */
268 static const
269 struct processor_costs z196_cost =
271 COSTS_N_INSNS (7), /* M */
272 COSTS_N_INSNS (5), /* MGHI */
273 COSTS_N_INSNS (5), /* MH */
274 COSTS_N_INSNS (5), /* MHI */
275 COSTS_N_INSNS (7), /* ML */
276 COSTS_N_INSNS (7), /* MR */
277 COSTS_N_INSNS (6), /* MS */
278 COSTS_N_INSNS (8), /* MSG */
279 COSTS_N_INSNS (6), /* MSGF */
280 COSTS_N_INSNS (6), /* MSGFR */
281 COSTS_N_INSNS (8), /* MSGR */
282 COSTS_N_INSNS (6), /* MSR */
283 COSTS_N_INSNS (1) , /* multiplication in DFmode */
284 COSTS_N_INSNS (40), /* MXBR B+40 */
285 COSTS_N_INSNS (100), /* SQXBR B+100 */
286 COSTS_N_INSNS (42), /* SQDBR B+42 */
287 COSTS_N_INSNS (28), /* SQEBR B+28 */
288 COSTS_N_INSNS (1), /* MADBR B */
289 COSTS_N_INSNS (1), /* MAEBR B */
290 COSTS_N_INSNS (101), /* DXBR B+101 */
291 COSTS_N_INSNS (29), /* DDBR */
292 COSTS_N_INSNS (22), /* DEBR */
293 COSTS_N_INSNS (160), /* DLGR cracked */
294 COSTS_N_INSNS (160), /* DLR cracked */
295 COSTS_N_INSNS (160), /* DR expanded */
296 COSTS_N_INSNS (160), /* DSGFR cracked */
297 COSTS_N_INSNS (160), /* DSGR cracked */
300 static const
301 struct processor_costs zEC12_cost =
303 COSTS_N_INSNS (7), /* M */
304 COSTS_N_INSNS (5), /* MGHI */
305 COSTS_N_INSNS (5), /* MH */
306 COSTS_N_INSNS (5), /* MHI */
307 COSTS_N_INSNS (7), /* ML */
308 COSTS_N_INSNS (7), /* MR */
309 COSTS_N_INSNS (6), /* MS */
310 COSTS_N_INSNS (8), /* MSG */
311 COSTS_N_INSNS (6), /* MSGF */
312 COSTS_N_INSNS (6), /* MSGFR */
313 COSTS_N_INSNS (8), /* MSGR */
314 COSTS_N_INSNS (6), /* MSR */
315 COSTS_N_INSNS (1) , /* multiplication in DFmode */
316 COSTS_N_INSNS (40), /* MXBR B+40 */
317 COSTS_N_INSNS (100), /* SQXBR B+100 */
318 COSTS_N_INSNS (42), /* SQDBR B+42 */
319 COSTS_N_INSNS (28), /* SQEBR B+28 */
320 COSTS_N_INSNS (1), /* MADBR B */
321 COSTS_N_INSNS (1), /* MAEBR B */
322 COSTS_N_INSNS (131), /* DXBR B+131 */
323 COSTS_N_INSNS (29), /* DDBR */
324 COSTS_N_INSNS (22), /* DEBR */
325 COSTS_N_INSNS (160), /* DLGR cracked */
326 COSTS_N_INSNS (160), /* DLR cracked */
327 COSTS_N_INSNS (160), /* DR expanded */
328 COSTS_N_INSNS (160), /* DSGFR cracked */
329 COSTS_N_INSNS (160), /* DSGR cracked */
332 extern int reload_completed;
334 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
335 static rtx_insn *last_scheduled_insn;
337 /* Structure used to hold the components of a S/390 memory
338 address. A legitimate address on S/390 is of the general
339 form
340 base + index + displacement
341 where any of the components is optional.
343 base and index are registers of the class ADDR_REGS,
344 displacement is an unsigned 12-bit immediate constant. */
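/* For illustration (example added, registers are arbitrary): the address
   (plus (reg %r2) (const_int 100)) decomposes into base %r2, no index and
   displacement 100, while (plus (plus (reg %r1) (reg %r2)) (const_int 8))
   uses one of the inner registers as base, the other as index, with
   displacement 8.  */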
346 struct s390_address
348 rtx base;
349 rtx indx;
350 rtx disp;
351 bool pointer;
352 bool literal_pool;
355 /* The following structure is embedded in the machine
356 specific part of struct function. */
358 struct GTY (()) s390_frame_layout
360 /* Offset within stack frame. */
361 HOST_WIDE_INT gprs_offset;
362 HOST_WIDE_INT f0_offset;
363 HOST_WIDE_INT f4_offset;
364 HOST_WIDE_INT f8_offset;
365 HOST_WIDE_INT backchain_offset;
367 /* Number of first and last gpr where slots in the register
368 save area are reserved for. */
369 int first_save_gpr_slot;
370 int last_save_gpr_slot;
372 /* Location (FP register number) where GPRs (r0-r15) should
373 be saved to.
374 0 - does not need to be saved at all
375 -1 - stack slot */
376 signed char gpr_save_slots[16];
378 /* Number of first and last gpr to be saved, restored. */
379 int first_save_gpr;
380 int first_restore_gpr;
381 int last_save_gpr;
382 int last_restore_gpr;
384 /* Bits standing for floating point registers. Set, if the
385 respective register has to be saved. Starting with reg 16 (f0)
386 at the rightmost bit.
387 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
388 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
389 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
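/* Worked example (added for clarity): if only f0 and f4 have to be saved,
   bits 0 and 2 are set and fpr_bitmap == 0x5; the high FPRs f8-f15 together
   occupy bits 8-15.  */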
390 unsigned int fpr_bitmap;
392 /* Number of floating point registers f8-f15 which must be saved. */
393 int high_fprs;
395 /* Set if return address needs to be saved.
396 This flag is set by s390_return_addr_rtx if it could not use
397 the initial value of r14 and therefore depends on r14 saved
398 to the stack. */
399 bool save_return_addr_p;
401 /* Size of stack frame. */
402 HOST_WIDE_INT frame_size;
405 /* Define the structure for the machine field in struct function. */
407 struct GTY(()) machine_function
409 struct s390_frame_layout frame_layout;
411 /* Literal pool base register. */
412 rtx base_reg;
414 /* True if we may need to perform branch splitting. */
415 bool split_branches_pending_p;
417 bool has_landing_pad_p;
419 /* True if the current function may contain a tbegin clobbering
420 FPRs. */
421 bool tbegin_p;
424 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
426 #define cfun_frame_layout (cfun->machine->frame_layout)
427 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
428 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
429 ? cfun_frame_layout.fpr_bitmap & 0x0f \
430 : cfun_frame_layout.fpr_bitmap & 0x03))
431 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
432 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
433 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
434 (1 << (REGNO - FPR0_REGNUM)))
435 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
436 (1 << (REGNO - FPR0_REGNUM))))
437 #define cfun_gpr_save_slot(REGNO) \
438 cfun->machine->frame_layout.gpr_save_slots[REGNO]
440 /* Number of GPRs and FPRs used for argument passing. */
441 #define GP_ARG_NUM_REG 5
442 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
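/* In other words (assuming the usual S/390 ELF ABI register assignment):
   integer arguments are passed in r2-r6, floating point arguments in
   f0, f2, f4, f6 on 64-bit targets and in f0, f2 on 31-bit targets.  */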
444 /* A couple of shortcuts. */
445 #define CONST_OK_FOR_J(x) \
446 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
447 #define CONST_OK_FOR_K(x) \
448 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
449 #define CONST_OK_FOR_Os(x) \
450 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
451 #define CONST_OK_FOR_Op(x) \
452 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
453 #define CONST_OK_FOR_On(x) \
454 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
456 #define REGNO_PAIR_OK(REGNO, MODE) \
457 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
459 /* That's the read ahead of the dynamic branch prediction unit in
460 bytes on a z10 (or higher) CPU. */
461 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
463 static const int s390_hotpatch_hw_max = 1000000;
464 static int s390_hotpatch_hw_before_label = 0;
465 static int s390_hotpatch_hw_after_label = 0;
467 /* Check whether the hotpatch attribute is applied to a function and, if it has
468 an argument, the argument is valid. */
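/* A hedged usage sketch (the declaration below is hypothetical):

     void foo (void) __attribute__ ((hotpatch (1, 2)));

   The two constants request one halfword of padding before foo's label and
   two halfwords after it, each checked against the s390_hotpatch_hw_max
   limit above.  */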
470 static tree
471 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
472 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
474 tree expr;
475 tree expr2;
476 int err;
478 if (TREE_CODE (*node) != FUNCTION_DECL)
480 warning (OPT_Wattributes, "%qE attribute only applies to functions",
481 name);
482 *no_add_attrs = true;
484 if (args != NULL && TREE_CHAIN (args) != NULL)
486 expr = TREE_VALUE (args);
487 expr2 = TREE_VALUE (TREE_CHAIN (args));
489 if (args == NULL || TREE_CHAIN (args) == NULL)
490 err = 1;
491 else if (TREE_CODE (expr) != INTEGER_CST
492 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
493 || wi::gtu_p (expr, s390_hotpatch_hw_max))
494 err = 1;
495 else if (TREE_CODE (expr2) != INTEGER_CST
496 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
497 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
498 err = 1;
499 else
500 err = 0;
501 if (err)
503 error ("requested %qE attribute is not a comma separated pair of"
504 " non-negative integer constants or too large (max. %d)", name,
505 s390_hotpatch_hw_max);
506 *no_add_attrs = true;
509 return NULL_TREE;
512 static const struct attribute_spec s390_attribute_table[] = {
513 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false
515 /* End element. */
516 { NULL, 0, 0, false, false, false, NULL, false }
519 /* Return the alignment for LABEL. We default to the -falign-labels
520 value except for the literal pool base label. */
522 s390_label_align (rtx label)
524 rtx_insn *prev_insn = prev_active_insn (label);
525 rtx set, src;
527 if (prev_insn == NULL_RTX)
528 goto old;
530 set = single_set (prev_insn);
532 if (set == NULL_RTX)
533 goto old;
535 src = SET_SRC (set);
537 /* Don't align literal pool base labels. */
538 if (GET_CODE (src) == UNSPEC
539 && XINT (src, 1) == UNSPEC_MAIN_BASE)
540 return 0;
542 old:
543 return align_labels_log;
546 static machine_mode
547 s390_libgcc_cmp_return_mode (void)
549 return TARGET_64BIT ? DImode : SImode;
552 static machine_mode
553 s390_libgcc_shift_count_mode (void)
555 return TARGET_64BIT ? DImode : SImode;
558 static machine_mode
559 s390_unwind_word_mode (void)
561 return TARGET_64BIT ? DImode : SImode;
564 /* Return true if the back end supports mode MODE. */
565 static bool
566 s390_scalar_mode_supported_p (machine_mode mode)
568 /* In contrast to the default implementation, reject TImode constants on
569 31-bit TARGET_ZARCH for ABI compliance. */
570 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
571 return false;
573 if (DECIMAL_FLOAT_MODE_P (mode))
574 return default_decimal_float_supported_p ();
576 return default_scalar_mode_supported_p (mode);
579 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
581 void
582 s390_set_has_landing_pad_p (bool value)
584 cfun->machine->has_landing_pad_p = value;
587 /* If two condition code modes are compatible, return a condition code
588 mode which is compatible with both. Otherwise, return
589 VOIDmode. */
591 static machine_mode
592 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
594 if (m1 == m2)
595 return m1;
597 switch (m1)
599 case CCZmode:
600 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
601 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
602 return m2;
603 return VOIDmode;
605 case CCSmode:
606 case CCUmode:
607 case CCTmode:
608 case CCSRmode:
609 case CCURmode:
610 case CCZ1mode:
611 if (m2 == CCZmode)
612 return m1;
614 return VOIDmode;
616 default:
617 return VOIDmode;
619 return VOIDmode;
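/* For instance, s390_cc_modes_compatible (CCZmode, CCUmode) returns CCUmode:
   a CCZmode user only asks for equal/not-equal, which a CCUmode result can
   answer as well.  */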
622 /* Return true if SET either doesn't set the CC register, or else
623 the source and destination have matching CC modes and that
624 CC mode is at least as constrained as REQ_MODE. */
626 static bool
627 s390_match_ccmode_set (rtx set, machine_mode req_mode)
629 machine_mode set_mode;
631 gcc_assert (GET_CODE (set) == SET);
633 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
634 return 1;
636 set_mode = GET_MODE (SET_DEST (set));
637 switch (set_mode)
639 case CCSmode:
640 case CCSRmode:
641 case CCUmode:
642 case CCURmode:
643 case CCLmode:
644 case CCL1mode:
645 case CCL2mode:
646 case CCL3mode:
647 case CCT1mode:
648 case CCT2mode:
649 case CCT3mode:
650 if (req_mode != set_mode)
651 return 0;
652 break;
654 case CCZmode:
655 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
656 && req_mode != CCSRmode && req_mode != CCURmode)
657 return 0;
658 break;
660 case CCAPmode:
661 case CCANmode:
662 if (req_mode != CCAmode)
663 return 0;
664 break;
666 default:
667 gcc_unreachable ();
670 return (GET_MODE (SET_SRC (set)) == set_mode);
673 /* Return true if every SET in INSN that sets the CC register
674 has source and destination with matching CC modes and that
675 CC mode is at least as constrained as REQ_MODE.
676 If REQ_MODE is VOIDmode, always return false. */
678 bool
679 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
681 int i;
683 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
684 if (req_mode == VOIDmode)
685 return false;
687 if (GET_CODE (PATTERN (insn)) == SET)
688 return s390_match_ccmode_set (PATTERN (insn), req_mode);
690 if (GET_CODE (PATTERN (insn)) == PARALLEL)
691 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
693 rtx set = XVECEXP (PATTERN (insn), 0, i);
694 if (GET_CODE (set) == SET)
695 if (!s390_match_ccmode_set (set, req_mode))
696 return false;
699 return true;
702 /* If a test-under-mask instruction can be used to implement
703 (compare (and ... OP1) OP2), return the CC mode required
704 to do that. Otherwise, return VOIDmode.
705 MIXED is true if the instruction can distinguish between
706 CC1 and CC2 for mixed selected bits (TMxx), it is false
707 if the instruction cannot (TM). */
709 machine_mode
710 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
712 int bit0, bit1;
714 /* ??? Fixme: should work on CONST_DOUBLE as well. */
715 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
716 return VOIDmode;
718 /* Selected bits all zero: CC0.
719 e.g.: int a; if ((a & (16 + 128)) == 0) */
720 if (INTVAL (op2) == 0)
721 return CCTmode;
723 /* Selected bits all one: CC3.
724 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
725 if (INTVAL (op2) == INTVAL (op1))
726 return CCT3mode;
728 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
729 int a;
730 if ((a & (16 + 128)) == 16) -> CCT1
731 if ((a & (16 + 128)) == 128) -> CCT2 */
732 if (mixed)
734 bit1 = exact_log2 (INTVAL (op2));
735 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
736 if (bit0 != -1 && bit1 != -1)
737 return bit0 > bit1 ? CCT1mode : CCT2mode;
740 return VOIDmode;
743 /* Given a comparison code OP (EQ, NE, etc.) and the operands
744 OP0 and OP1 of a COMPARE, return the mode to be used for the
745 comparison. */
747 machine_mode
748 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
750 switch (code)
752 case EQ:
753 case NE:
754 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
755 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
756 return CCAPmode;
757 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
758 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
759 return CCAPmode;
760 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
761 || GET_CODE (op1) == NEG)
762 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
763 return CCLmode;
765 if (GET_CODE (op0) == AND)
767 /* Check whether we can potentially do it via TM. */
768 machine_mode ccmode;
769 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
770 if (ccmode != VOIDmode)
772 /* Relax CCTmode to CCZmode to allow fall-back to AND
773 if that turns out to be beneficial. */
774 return ccmode == CCTmode ? CCZmode : ccmode;
778 if (register_operand (op0, HImode)
779 && GET_CODE (op1) == CONST_INT
780 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
781 return CCT3mode;
782 if (register_operand (op0, QImode)
783 && GET_CODE (op1) == CONST_INT
784 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
785 return CCT3mode;
787 return CCZmode;
789 case LE:
790 case LT:
791 case GE:
792 case GT:
793 /* The only overflow condition of NEG and ABS happens when
794 -INT_MAX is used as parameter, which stays negative. So
795 we have an overflow from a positive value to a negative.
796 Using CCAP mode the resulting cc can be used for comparisons. */
797 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
798 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
799 return CCAPmode;
801 /* If constants are involved in an add instruction it is possible to use
802 the resulting cc for comparisons with zero. Knowing the sign of the
803 constant the overflow behavior gets predictable. e.g.:
804 int a, b; if ((b = a + c) > 0)
805 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
806 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
807 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
808 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
809 /* Avoid INT32_MIN on 32 bit. */
810 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
812 if (INTVAL (XEXP((op0), 1)) < 0)
813 return CCANmode;
814 else
815 return CCAPmode;
817 /* Fall through. */
818 case UNORDERED:
819 case ORDERED:
820 case UNEQ:
821 case UNLE:
822 case UNLT:
823 case UNGE:
824 case UNGT:
825 case LTGT:
826 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
827 && GET_CODE (op1) != CONST_INT)
828 return CCSRmode;
829 return CCSmode;
831 case LTU:
832 case GEU:
833 if (GET_CODE (op0) == PLUS
834 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
835 return CCL1mode;
837 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
838 && GET_CODE (op1) != CONST_INT)
839 return CCURmode;
840 return CCUmode;
842 case LEU:
843 case GTU:
844 if (GET_CODE (op0) == MINUS
845 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
846 return CCL2mode;
848 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
849 && GET_CODE (op1) != CONST_INT)
850 return CCURmode;
851 return CCUmode;
853 default:
854 gcc_unreachable ();
858 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
859 that we can implement more efficiently. */
861 static void
862 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
863 bool op0_preserve_value)
865 if (op0_preserve_value)
866 return;
868 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
869 if ((*code == EQ || *code == NE)
870 && *op1 == const0_rtx
871 && GET_CODE (*op0) == ZERO_EXTRACT
872 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
873 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
874 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
876 rtx inner = XEXP (*op0, 0);
877 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
878 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
879 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
881 if (len > 0 && len < modesize
882 && pos >= 0 && pos + len <= modesize
883 && modesize <= HOST_BITS_PER_WIDE_INT)
885 unsigned HOST_WIDE_INT block;
886 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
887 block <<= modesize - pos - len;
889 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
890 gen_int_mode (block, GET_MODE (inner)));
894 /* Narrow AND of memory against immediate to enable TM. */
895 if ((*code == EQ || *code == NE)
896 && *op1 == const0_rtx
897 && GET_CODE (*op0) == AND
898 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
899 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
901 rtx inner = XEXP (*op0, 0);
902 rtx mask = XEXP (*op0, 1);
904 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
905 if (GET_CODE (inner) == SUBREG
906 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
907 && (GET_MODE_SIZE (GET_MODE (inner))
908 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
909 && ((INTVAL (mask)
910 & GET_MODE_MASK (GET_MODE (inner))
911 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
912 == 0))
913 inner = SUBREG_REG (inner);
915 /* Do not change volatile MEMs. */
916 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
918 int part = s390_single_part (XEXP (*op0, 1),
919 GET_MODE (inner), QImode, 0);
920 if (part >= 0)
922 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
923 inner = adjust_address_nv (inner, QImode, part);
924 *op0 = gen_rtx_AND (QImode, inner, mask);
929 /* Narrow comparisons against 0xffff to HImode if possible. */
930 if ((*code == EQ || *code == NE)
931 && GET_CODE (*op1) == CONST_INT
932 && INTVAL (*op1) == 0xffff
933 && SCALAR_INT_MODE_P (GET_MODE (*op0))
934 && (nonzero_bits (*op0, GET_MODE (*op0))
935 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
937 *op0 = gen_lowpart (HImode, *op0);
938 *op1 = constm1_rtx;
941 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
942 if (GET_CODE (*op0) == UNSPEC
943 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
944 && XVECLEN (*op0, 0) == 1
945 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
946 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
947 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
948 && *op1 == const0_rtx)
950 enum rtx_code new_code = UNKNOWN;
951 switch (*code)
953 case EQ: new_code = EQ; break;
954 case NE: new_code = NE; break;
955 case LT: new_code = GTU; break;
956 case GT: new_code = LTU; break;
957 case LE: new_code = GEU; break;
958 case GE: new_code = LEU; break;
959 default: break;
962 if (new_code != UNKNOWN)
964 *op0 = XVECEXP (*op0, 0, 0);
965 *code = new_code;
969 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
970 if (GET_CODE (*op0) == UNSPEC
971 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
972 && XVECLEN (*op0, 0) == 1
973 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
974 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
975 && CONST_INT_P (*op1))
977 enum rtx_code new_code = UNKNOWN;
978 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
980 case CCZmode:
981 case CCRAWmode:
982 switch (*code)
984 case EQ: new_code = EQ; break;
985 case NE: new_code = NE; break;
986 default: break;
988 break;
989 default: break;
992 if (new_code != UNKNOWN)
994 /* For CCRAWmode put the required cc mask into the second
995 operand. */
996 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
997 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
998 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
999 *op0 = XVECEXP (*op0, 0, 0);
1000 *code = new_code;
1004 /* Simplify cascaded EQ, NE with const0_rtx. */
1005 if ((*code == NE || *code == EQ)
1006 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1007 && GET_MODE (*op0) == SImode
1008 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1009 && REG_P (XEXP (*op0, 0))
1010 && XEXP (*op0, 1) == const0_rtx
1011 && *op1 == const0_rtx)
1013 if ((*code == EQ && GET_CODE (*op0) == NE)
1014 || (*code == NE && GET_CODE (*op0) == EQ))
1015 *code = EQ;
1016 else
1017 *code = NE;
1018 *op0 = XEXP (*op0, 0);
1021 /* Prefer register over memory as first operand. */
1022 if (MEM_P (*op0) && REG_P (*op1))
1024 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1025 *code = (int)swap_condition ((enum rtx_code)*code);
1029 /* Emit a compare instruction suitable to implement the comparison
1030 OP0 CODE OP1. Return the correct condition RTL to be placed in
1031 the IF_THEN_ELSE of the conditional branch testing the result. */
1034 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1036 machine_mode mode = s390_select_ccmode (code, op0, op1);
1037 rtx cc;
1039 /* Do not output a redundant compare instruction if a compare_and_swap
1040 pattern already computed the result and the machine modes are compatible. */
1041 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1043 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1044 == GET_MODE (op0));
1045 cc = op0;
1047 else
1049 cc = gen_rtx_REG (mode, CC_REGNUM);
1050 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
1053 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
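/* A minimal, hypothetical usage sketch (names made up, not from this file):

     rtx cond = s390_emit_compare (GT, op0, op1);
     s390_emit_jump (label, cond);

   emits the compare followed by a conditional branch to LABEL that is taken
   when op0 > op1.  */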
1056 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1057 matches CMP.
1058 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1059 conditional branch testing the result. */
1061 static rtx
1062 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1063 rtx cmp, rtx new_rtx)
1065 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1066 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1067 const0_rtx);
1070 /* Emit a jump instruction to TARGET and return it. If COND is
1071 NULL_RTX, emit an unconditional jump, else a conditional jump under
1072 condition COND. */
1074 rtx_insn *
1075 s390_emit_jump (rtx target, rtx cond)
1077 rtx insn;
1079 target = gen_rtx_LABEL_REF (VOIDmode, target);
1080 if (cond)
1081 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1083 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
1084 return emit_jump_insn (insn);
1087 /* Return branch condition mask to implement a branch
1088 specified by CODE. Return -1 for invalid comparisons. */
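/* Worked example (added for clarity): for an EQ test on CCZmode the mask is
   CC0 == 1 << 3 == 8, i.e. branch only on condition code 0; the mnemonic
   table in s390_branch_condition_mnemonic maps mask 8 to "e".  */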
1091 s390_branch_condition_mask (rtx code)
1093 const int CC0 = 1 << 3;
1094 const int CC1 = 1 << 2;
1095 const int CC2 = 1 << 1;
1096 const int CC3 = 1 << 0;
1098 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1099 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1100 gcc_assert (XEXP (code, 1) == const0_rtx
1101 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1102 && CONST_INT_P (XEXP (code, 1))));
1105 switch (GET_MODE (XEXP (code, 0)))
1107 case CCZmode:
1108 case CCZ1mode:
1109 switch (GET_CODE (code))
1111 case EQ: return CC0;
1112 case NE: return CC1 | CC2 | CC3;
1113 default: return -1;
1115 break;
1117 case CCT1mode:
1118 switch (GET_CODE (code))
1120 case EQ: return CC1;
1121 case NE: return CC0 | CC2 | CC3;
1122 default: return -1;
1124 break;
1126 case CCT2mode:
1127 switch (GET_CODE (code))
1129 case EQ: return CC2;
1130 case NE: return CC0 | CC1 | CC3;
1131 default: return -1;
1133 break;
1135 case CCT3mode:
1136 switch (GET_CODE (code))
1138 case EQ: return CC3;
1139 case NE: return CC0 | CC1 | CC2;
1140 default: return -1;
1142 break;
1144 case CCLmode:
1145 switch (GET_CODE (code))
1147 case EQ: return CC0 | CC2;
1148 case NE: return CC1 | CC3;
1149 default: return -1;
1151 break;
1153 case CCL1mode:
1154 switch (GET_CODE (code))
1156 case LTU: return CC2 | CC3; /* carry */
1157 case GEU: return CC0 | CC1; /* no carry */
1158 default: return -1;
1160 break;
1162 case CCL2mode:
1163 switch (GET_CODE (code))
1165 case GTU: return CC0 | CC1; /* borrow */
1166 case LEU: return CC2 | CC3; /* no borrow */
1167 default: return -1;
1169 break;
1171 case CCL3mode:
1172 switch (GET_CODE (code))
1174 case EQ: return CC0 | CC2;
1175 case NE: return CC1 | CC3;
1176 case LTU: return CC1;
1177 case GTU: return CC3;
1178 case LEU: return CC1 | CC2;
1179 case GEU: return CC2 | CC3;
1180 default: return -1;
1183 case CCUmode:
1184 switch (GET_CODE (code))
1186 case EQ: return CC0;
1187 case NE: return CC1 | CC2 | CC3;
1188 case LTU: return CC1;
1189 case GTU: return CC2;
1190 case LEU: return CC0 | CC1;
1191 case GEU: return CC0 | CC2;
1192 default: return -1;
1194 break;
1196 case CCURmode:
1197 switch (GET_CODE (code))
1199 case EQ: return CC0;
1200 case NE: return CC2 | CC1 | CC3;
1201 case LTU: return CC2;
1202 case GTU: return CC1;
1203 case LEU: return CC0 | CC2;
1204 case GEU: return CC0 | CC1;
1205 default: return -1;
1207 break;
1209 case CCAPmode:
1210 switch (GET_CODE (code))
1212 case EQ: return CC0;
1213 case NE: return CC1 | CC2 | CC3;
1214 case LT: return CC1 | CC3;
1215 case GT: return CC2;
1216 case LE: return CC0 | CC1 | CC3;
1217 case GE: return CC0 | CC2;
1218 default: return -1;
1220 break;
1222 case CCANmode:
1223 switch (GET_CODE (code))
1225 case EQ: return CC0;
1226 case NE: return CC1 | CC2 | CC3;
1227 case LT: return CC1;
1228 case GT: return CC2 | CC3;
1229 case LE: return CC0 | CC1;
1230 case GE: return CC0 | CC2 | CC3;
1231 default: return -1;
1233 break;
1235 case CCSmode:
1236 switch (GET_CODE (code))
1238 case EQ: return CC0;
1239 case NE: return CC1 | CC2 | CC3;
1240 case LT: return CC1;
1241 case GT: return CC2;
1242 case LE: return CC0 | CC1;
1243 case GE: return CC0 | CC2;
1244 case UNORDERED: return CC3;
1245 case ORDERED: return CC0 | CC1 | CC2;
1246 case UNEQ: return CC0 | CC3;
1247 case UNLT: return CC1 | CC3;
1248 case UNGT: return CC2 | CC3;
1249 case UNLE: return CC0 | CC1 | CC3;
1250 case UNGE: return CC0 | CC2 | CC3;
1251 case LTGT: return CC1 | CC2;
1252 default: return -1;
1254 break;
1256 case CCSRmode:
1257 switch (GET_CODE (code))
1259 case EQ: return CC0;
1260 case NE: return CC2 | CC1 | CC3;
1261 case LT: return CC2;
1262 case GT: return CC1;
1263 case LE: return CC0 | CC2;
1264 case GE: return CC0 | CC1;
1265 case UNORDERED: return CC3;
1266 case ORDERED: return CC0 | CC2 | CC1;
1267 case UNEQ: return CC0 | CC3;
1268 case UNLT: return CC2 | CC3;
1269 case UNGT: return CC1 | CC3;
1270 case UNLE: return CC0 | CC2 | CC3;
1271 case UNGE: return CC0 | CC1 | CC3;
1272 case LTGT: return CC2 | CC1;
1273 default: return -1;
1275 break;
1277 case CCRAWmode:
1278 switch (GET_CODE (code))
1280 case EQ:
1281 return INTVAL (XEXP (code, 1));
1282 case NE:
1283 return (INTVAL (XEXP (code, 1))) ^ 0xf;
1284 default:
1285 gcc_unreachable ();
1288 default:
1289 return -1;
1294 /* Return branch condition mask to implement a compare and branch
1295 specified by CODE. Return -1 for invalid comparisons. */
1298 s390_compare_and_branch_condition_mask (rtx code)
1300 const int CC0 = 1 << 3;
1301 const int CC1 = 1 << 2;
1302 const int CC2 = 1 << 1;
1304 switch (GET_CODE (code))
1306 case EQ:
1307 return CC0;
1308 case NE:
1309 return CC1 | CC2;
1310 case LT:
1311 case LTU:
1312 return CC1;
1313 case GT:
1314 case GTU:
1315 return CC2;
1316 case LE:
1317 case LEU:
1318 return CC0 | CC1;
1319 case GE:
1320 case GEU:
1321 return CC0 | CC2;
1322 default:
1323 gcc_unreachable ();
1325 return -1;
1328 /* If INV is false, return assembler mnemonic string to implement
1329 a branch specified by CODE. If INV is true, return mnemonic
1330 for the corresponding inverted branch. */
1332 static const char *
1333 s390_branch_condition_mnemonic (rtx code, int inv)
1335 int mask;
1337 static const char *const mnemonic[16] =
1339 NULL, "o", "h", "nle",
1340 "l", "nhe", "lh", "ne",
1341 "e", "nlh", "he", "nl",
1342 "le", "nh", "no", NULL
1345 if (GET_CODE (XEXP (code, 0)) == REG
1346 && REGNO (XEXP (code, 0)) == CC_REGNUM
1347 && (XEXP (code, 1) == const0_rtx
1348 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1349 && CONST_INT_P (XEXP (code, 1)))))
1350 mask = s390_branch_condition_mask (code);
1351 else
1352 mask = s390_compare_and_branch_condition_mask (code);
1354 gcc_assert (mask >= 0);
1356 if (inv)
1357 mask ^= 15;
1359 gcc_assert (mask >= 1 && mask <= 14);
1361 return mnemonic[mask];
1364 /* Return the part of op which has a value different from def.
1365 The size of the part is determined by mode.
1366 Use this function only if you already know that op really
1367 contains such a part. */
1369 unsigned HOST_WIDE_INT
1370 s390_extract_part (rtx op, machine_mode mode, int def)
1372 unsigned HOST_WIDE_INT value = 0;
1373 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1374 int part_bits = GET_MODE_BITSIZE (mode);
1375 unsigned HOST_WIDE_INT part_mask
1376 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1377 int i;
1379 for (i = 0; i < max_parts; i++)
1381 if (i == 0)
1382 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1383 else
1384 value >>= part_bits;
1386 if ((value & part_mask) != (def & part_mask))
1387 return value & part_mask;
1390 gcc_unreachable ();
1393 /* If OP is an integer constant of mode MODE with exactly one
1394 part of mode PART_MODE unequal to DEF, return the number of that
1395 part. Otherwise, return -1. */
1398 s390_single_part (rtx op,
1399 machine_mode mode,
1400 machine_mode part_mode,
1401 int def)
1403 unsigned HOST_WIDE_INT value = 0;
1404 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1405 unsigned HOST_WIDE_INT part_mask
1406 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1407 int i, part = -1;
1409 if (GET_CODE (op) != CONST_INT)
1410 return -1;
1412 for (i = 0; i < n_parts; i++)
1414 if (i == 0)
1415 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1416 else
1417 value >>= GET_MODE_BITSIZE (part_mode);
1419 if ((value & part_mask) != (def & part_mask))
1421 if (part != -1)
1422 return -1;
1423 else
1424 part = i;
1427 return part == -1 ? -1 : n_parts - 1 - part;
1430 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1431 bits and no other bits are set in IN. POS and LENGTH can be used
1432 to obtain the start position and the length of the bitfield.
1434 POS gives the position of the first bit of the bitfield counting
1435 from the lowest order bit starting with zero. In order to use this
1436 value for S/390 instructions this has to be converted to "bits big
1437 endian" style. */
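/* Worked example (added for clarity): IN = 0x0ff0 with SIZE = 16 succeeds
   with *POS = 4 and *LENGTH = 8, whereas IN = 0x0f0f fails because the set
   bits form two separate ranges.  */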
1439 bool
1440 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1441 int *pos, int *length)
1443 int tmp_pos = 0;
1444 int tmp_length = 0;
1445 int i;
1446 unsigned HOST_WIDE_INT mask = 1ULL;
1447 bool contiguous = false;
1449 for (i = 0; i < size; mask <<= 1, i++)
1451 if (contiguous)
1453 if (mask & in)
1454 tmp_length++;
1455 else
1456 break;
1458 else
1460 if (mask & in)
1462 contiguous = true;
1463 tmp_length++;
1465 else
1466 tmp_pos++;
1470 if (!tmp_length)
1471 return false;
1473 /* Calculate a mask for all bits beyond the contiguous bits. */
1474 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1476 if (mask & in)
1477 return false;
1479 if (tmp_length + tmp_pos - 1 > size)
1480 return false;
1482 if (length)
1483 *length = tmp_length;
1485 if (pos)
1486 *pos = tmp_pos;
1488 return true;
1491 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
1492 equivalent to a shift followed by the AND. In particular, CONTIG
1493 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
1494 for ROTL indicate a rotate to the right. */
1496 bool
1497 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
1499 int pos, len;
1500 bool ok;
1502 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
1503 gcc_assert (ok);
1505 return ((rotl >= 0 && rotl <= pos)
1506 || (rotl < 0 && -rotl <= bitsize - len - pos));
1509 /* Check whether we can (and want to) split a double-word
1510 move in mode MODE from SRC to DST into two single-word
1511 moves, moving the subword FIRST_SUBWORD first. */
1513 bool
1514 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
1516 /* Floating point registers cannot be split. */
1517 if (FP_REG_P (src) || FP_REG_P (dst))
1518 return false;
1520 /* We don't need to split if operands are directly accessible. */
1521 if (s_operand (src, mode) || s_operand (dst, mode))
1522 return false;
1524 /* Non-offsettable memory references cannot be split. */
1525 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1526 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1527 return false;
1529 /* Moving the first subword must not clobber a register
1530 needed to move the second subword. */
1531 if (register_operand (dst, mode))
1533 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1534 if (reg_overlap_mentioned_p (subreg, src))
1535 return false;
1538 return true;
1541 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1542 and [MEM2, MEM2 + SIZE] do overlap and false
1543 otherwise. */
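/* For example, MEMs addressed by (reg %r2) and (plus (reg %r2) (const_int 4))
   overlap for SIZE 8 (delta 4 < 8) but not for SIZE 4.  */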
1545 bool
1546 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1548 rtx addr1, addr2, addr_delta;
1549 HOST_WIDE_INT delta;
1551 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1552 return true;
1554 if (size == 0)
1555 return false;
1557 addr1 = XEXP (mem1, 0);
1558 addr2 = XEXP (mem2, 0);
1560 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1562 /* This overlapping check is used by peepholes merging memory block operations.
1563 Overlapping operations would otherwise be recognized by the S/390 hardware
1564 and would fall back to a slower implementation. Allowing overlapping
1565 operations would lead to slow code but not to wrong code. Therefore we are
1566 somewhat optimistic if we cannot prove that the memory blocks are
1567 overlapping.
1568 That's why we return false here although this may accept operations on
1569 overlapping memory areas. */
1570 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1571 return false;
1573 delta = INTVAL (addr_delta);
1575 if (delta == 0
1576 || (delta > 0 && delta < size)
1577 || (delta < 0 && -delta < size))
1578 return true;
1580 return false;
1583 /* Check whether the address of memory reference MEM2 equals exactly
1584 the address of memory reference MEM1 plus DELTA. Return true if
1585 we can prove this to be the case, false otherwise. */
1587 bool
1588 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1590 rtx addr1, addr2, addr_delta;
1592 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1593 return false;
1595 addr1 = XEXP (mem1, 0);
1596 addr2 = XEXP (mem2, 0);
1598 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1599 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1600 return false;
1602 return true;
1605 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1607 void
1608 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
1609 rtx *operands)
1611 machine_mode wmode = mode;
1612 rtx dst = operands[0];
1613 rtx src1 = operands[1];
1614 rtx src2 = operands[2];
1615 rtx op, clob, tem;
1617 /* If we cannot handle the operation directly, use a temp register. */
1618 if (!s390_logical_operator_ok_p (operands))
1619 dst = gen_reg_rtx (mode);
1621 /* QImode and HImode patterns make sense only if we have a destination
1622 in memory. Otherwise perform the operation in SImode. */
1623 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1624 wmode = SImode;
1626 /* Widen operands if required. */
1627 if (mode != wmode)
1629 if (GET_CODE (dst) == SUBREG
1630 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1631 dst = tem;
1632 else if (REG_P (dst))
1633 dst = gen_rtx_SUBREG (wmode, dst, 0);
1634 else
1635 dst = gen_reg_rtx (wmode);
1637 if (GET_CODE (src1) == SUBREG
1638 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1639 src1 = tem;
1640 else if (GET_MODE (src1) != VOIDmode)
1641 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1643 if (GET_CODE (src2) == SUBREG
1644 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1645 src2 = tem;
1646 else if (GET_MODE (src2) != VOIDmode)
1647 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1650 /* Emit the instruction. */
1651 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1652 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1653 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1655 /* Fix up the destination if needed. */
1656 if (dst != operands[0])
1657 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1660 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1662 bool
1663 s390_logical_operator_ok_p (rtx *operands)
1665 /* If the destination operand is in memory, it needs to coincide
1666 with one of the source operands. After reload, it has to be
1667 the first source operand. */
1668 if (GET_CODE (operands[0]) == MEM)
1669 return rtx_equal_p (operands[0], operands[1])
1670 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1672 return true;
1675 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1676 operand IMMOP to switch from SS to SI type instructions. */
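/* Illustrative example (added, not from the original sources): an AND of an
   SImode memory operand with the immediate 0xffff00ff only changes the byte
   at offset 2, so the operation is rewritten as a QImode AND of that byte
   with 0x00 and can be emitted as a storage-and-immediate (NI) instruction.  */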
1678 void
1679 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1681 int def = code == AND ? -1 : 0;
1682 HOST_WIDE_INT mask;
1683 int part;
1685 gcc_assert (GET_CODE (*memop) == MEM);
1686 gcc_assert (!MEM_VOLATILE_P (*memop));
1688 mask = s390_extract_part (*immop, QImode, def);
1689 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1690 gcc_assert (part >= 0);
1692 *memop = adjust_address (*memop, QImode, part);
1693 *immop = gen_int_mode (mask, QImode);
1697 /* How to allocate a 'struct machine_function'. */
1699 static struct machine_function *
1700 s390_init_machine_status (void)
1702 return ggc_cleared_alloc<machine_function> ();
1705 /* Map for smallest class containing reg regno. */
1707 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1708 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1709 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1710 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1711 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1712 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1713 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1714 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1715 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1716 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1717 ACCESS_REGS, ACCESS_REGS
1720 /* Return attribute type of insn. */
1722 static enum attr_type
1723 s390_safe_attr_type (rtx_insn *insn)
1725 if (recog_memoized (insn) >= 0)
1726 return get_attr_type (insn);
1727 else
1728 return TYPE_NONE;
1731 /* Return true if DISP is a valid short displacement. */
1733 static bool
1734 s390_short_displacement (rtx disp)
1736 /* No displacement is OK. */
1737 if (!disp)
1738 return true;
1740 /* Without the long displacement facility we don't need to
1741 distinguish between long and short displacement. */
1742 if (!TARGET_LONG_DISPLACEMENT)
1743 return true;
1745 /* Integer displacement in range. */
1746 if (GET_CODE (disp) == CONST_INT)
1747 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1749 /* GOT offset is not OK, the GOT can be large. */
1750 if (GET_CODE (disp) == CONST
1751 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1752 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1753 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1754 return false;
1756 /* All other symbolic constants are literal pool references,
1757 which are OK as the literal pool must be small. */
1758 if (GET_CODE (disp) == CONST)
1759 return true;
1761 return false;
1764 /* Decompose a RTL expression ADDR for a memory address into
1765 its components, returned in OUT.
1767 Returns false if ADDR is not a valid memory address, true
1768 otherwise. If OUT is NULL, don't return the components,
1769 but check for validity only.
1771 Note: Only addresses in canonical form are recognized.
1772 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1773 canonical form so that they will be recognized. */
1775 static int
1776 s390_decompose_address (rtx addr, struct s390_address *out)
1778 HOST_WIDE_INT offset = 0;
1779 rtx base = NULL_RTX;
1780 rtx indx = NULL_RTX;
1781 rtx disp = NULL_RTX;
1782 rtx orig_disp;
1783 bool pointer = false;
1784 bool base_ptr = false;
1785 bool indx_ptr = false;
1786 bool literal_pool = false;
1788 /* We may need to substitute the literal pool base register into the address
1789 below. However, at this point we do not know which register is going to
1790 be used as base, so we substitute the arg pointer register. This is going
1791 to be treated as holding a pointer below -- it shouldn't be used for any
1792 other purpose. */
1793 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1795 /* Decompose address into base + index + displacement. */
1797 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1798 base = addr;
1800 else if (GET_CODE (addr) == PLUS)
1802 rtx op0 = XEXP (addr, 0);
1803 rtx op1 = XEXP (addr, 1);
1804 enum rtx_code code0 = GET_CODE (op0);
1805 enum rtx_code code1 = GET_CODE (op1);
1807 if (code0 == REG || code0 == UNSPEC)
1809 if (code1 == REG || code1 == UNSPEC)
1811 indx = op0; /* index + base */
1812 base = op1;
1815 else
1817 base = op0; /* base + displacement */
1818 disp = op1;
1822 else if (code0 == PLUS)
1824 indx = XEXP (op0, 0); /* index + base + disp */
1825 base = XEXP (op0, 1);
1826 disp = op1;
1829 else
1831 return false;
1835 else
1836 disp = addr; /* displacement */
1838 /* Extract integer part of displacement. */
1839 orig_disp = disp;
1840 if (disp)
1842 if (GET_CODE (disp) == CONST_INT)
1844 offset = INTVAL (disp);
1845 disp = NULL_RTX;
1847 else if (GET_CODE (disp) == CONST
1848 && GET_CODE (XEXP (disp, 0)) == PLUS
1849 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1851 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1852 disp = XEXP (XEXP (disp, 0), 0);
1856 /* Strip off CONST here to avoid special case tests later. */
1857 if (disp && GET_CODE (disp) == CONST)
1858 disp = XEXP (disp, 0);
1860 /* We can convert literal pool addresses to
1861 displacements by basing them off the base register. */
1862 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1864 /* Either base or index must be free to hold the base register. */
1865 if (!base)
1866 base = fake_pool_base, literal_pool = true;
1867 else if (!indx)
1868 indx = fake_pool_base, literal_pool = true;
1869 else
1870 return false;
1872 /* Mark up the displacement. */
1873 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1874 UNSPEC_LTREL_OFFSET);
1877 /* Validate base register. */
1878 if (base)
1880 if (GET_CODE (base) == UNSPEC)
1881 switch (XINT (base, 1))
1883 case UNSPEC_LTREF:
1884 if (!disp)
1885 disp = gen_rtx_UNSPEC (Pmode,
1886 gen_rtvec (1, XVECEXP (base, 0, 0)),
1887 UNSPEC_LTREL_OFFSET);
1888 else
1889 return false;
1891 base = XVECEXP (base, 0, 1);
1892 break;
1894 case UNSPEC_LTREL_BASE:
1895 if (XVECLEN (base, 0) == 1)
1896 base = fake_pool_base, literal_pool = true;
1897 else
1898 base = XVECEXP (base, 0, 1);
1899 break;
1901 default:
1902 return false;
1905 if (!REG_P (base)
1906 || (GET_MODE (base) != SImode
1907 && GET_MODE (base) != Pmode))
1908 return false;
1910 if (REGNO (base) == STACK_POINTER_REGNUM
1911 || REGNO (base) == FRAME_POINTER_REGNUM
1912 || ((reload_completed || reload_in_progress)
1913 && frame_pointer_needed
1914 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1915 || REGNO (base) == ARG_POINTER_REGNUM
1916 || (flag_pic
1917 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1918 pointer = base_ptr = true;
1920 if ((reload_completed || reload_in_progress)
1921 && base == cfun->machine->base_reg)
1922 pointer = base_ptr = literal_pool = true;
1925 /* Validate index register. */
1926 if (indx)
1928 if (GET_CODE (indx) == UNSPEC)
1929 switch (XINT (indx, 1))
1931 case UNSPEC_LTREF:
1932 if (!disp)
1933 disp = gen_rtx_UNSPEC (Pmode,
1934 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1935 UNSPEC_LTREL_OFFSET);
1936 else
1937 return false;
1939 indx = XVECEXP (indx, 0, 1);
1940 break;
1942 case UNSPEC_LTREL_BASE:
1943 if (XVECLEN (indx, 0) == 1)
1944 indx = fake_pool_base, literal_pool = true;
1945 else
1946 indx = XVECEXP (indx, 0, 1);
1947 break;
1949 default:
1950 return false;
1953 if (!REG_P (indx)
1954 || (GET_MODE (indx) != SImode
1955 && GET_MODE (indx) != Pmode))
1956 return false;
1958 if (REGNO (indx) == STACK_POINTER_REGNUM
1959 || REGNO (indx) == FRAME_POINTER_REGNUM
1960 || ((reload_completed || reload_in_progress)
1961 && frame_pointer_needed
1962 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1963 || REGNO (indx) == ARG_POINTER_REGNUM
1964 || (flag_pic
1965 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1966 pointer = indx_ptr = true;
1968 if ((reload_completed || reload_in_progress)
1969 && indx == cfun->machine->base_reg)
1970 pointer = indx_ptr = literal_pool = true;
1973 /* Prefer to use pointer as base, not index. */
1974 if (base && indx && !base_ptr
1975 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1977 rtx tmp = base;
1978 base = indx;
1979 indx = tmp;
1982 /* Validate displacement. */
1983 if (!disp)
1985 /* If virtual registers are involved, the displacement will change later
1986 anyway as the virtual registers get eliminated. This could make a
1987 valid displacement invalid, but it is more likely to make an invalid
1988 displacement valid, because we sometimes access the register save area
1989 via negative offsets to one of those registers.
1990 Thus we don't check the displacement for validity here. If after
1991 elimination the displacement turns out to be invalid after all,
1992 this is fixed up by reload in any case. */
1993 /* LRA maintains always displacements up to date and we need to
1994 know the displacement is right during all LRA not only at the
1995 final elimination. */
1996 if (lra_in_progress
1997 || (base != arg_pointer_rtx
1998 && indx != arg_pointer_rtx
1999 && base != return_address_pointer_rtx
2000 && indx != return_address_pointer_rtx
2001 && base != frame_pointer_rtx
2002 && indx != frame_pointer_rtx
2003 && base != virtual_stack_vars_rtx
2004 && indx != virtual_stack_vars_rtx))
2005 if (!DISP_IN_RANGE (offset))
2006 return false;
2008 else
2010 /* All the special cases are pointers. */
2011 pointer = true;
2013 /* In the small-PIC case, the linker converts @GOT
2014 and @GOTNTPOFF offsets to possible displacements. */
2015 if (GET_CODE (disp) == UNSPEC
2016 && (XINT (disp, 1) == UNSPEC_GOT
2017 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2018 && flag_pic == 1)
2023 /* Accept pool label offsets. */
2024 else if (GET_CODE (disp) == UNSPEC
2025 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2028 /* Accept literal pool references. */
2029 else if (GET_CODE (disp) == UNSPEC
2030 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2032 /* In case CSE pulled a non literal pool reference out of
2033 the pool we have to reject the address. This is
2034 especially important when loading the GOT pointer on non
2035 zarch CPUs. In this case the literal pool contains an lt
2036 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2037 will most likely exceed the displacement. */
2038 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2039 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2040 return false;
2042 orig_disp = gen_rtx_CONST (Pmode, disp);
2043 if (offset)
2045 /* If we have an offset, make sure it does not
2046 exceed the size of the constant pool entry. */
2047 rtx sym = XVECEXP (disp, 0, 0);
2048 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2049 return false;
2051 orig_disp = plus_constant (Pmode, orig_disp, offset);
2055 else
2056 return false;
2059 if (!base && !indx)
2060 pointer = true;
2062 if (out)
2064 out->base = base;
2065 out->indx = indx;
2066 out->disp = orig_disp;
2067 out->pointer = pointer;
2068 out->literal_pool = literal_pool;
2071 return true;
2074 /* Decompose an RTL expression OP for a shift count into its components,
2075 and return the base register in BASE and the offset in OFFSET.
2077 Return true if OP is a valid shift count, false if not. */
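/* To illustrate (the register numbers used here are arbitrary):
     (const_int 7)                    -> *BASE = NULL_RTX, *OFFSET = 7
     (reg 5)                          -> *BASE = (reg 5),   *OFFSET = 0
     (plus (reg 5) (const_int 15))    -> *BASE = (reg 5),   *OFFSET = 15
   Anything whose stripped-down (SUBREG-free) form is not a register,
   a constant, or such a sum is rejected.  */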
2079 bool
2080 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2082 HOST_WIDE_INT off = 0;
2084 /* We can have an integer constant, an address register,
2085 or a sum of the two. */
2086 if (GET_CODE (op) == CONST_INT)
2088 off = INTVAL (op);
2089 op = NULL_RTX;
2091 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2093 off = INTVAL (XEXP (op, 1));
2094 op = XEXP (op, 0);
2096 while (op && GET_CODE (op) == SUBREG)
2097 op = SUBREG_REG (op);
2099 if (op && GET_CODE (op) != REG)
2100 return false;
2102 if (offset)
2103 *offset = off;
2104 if (base)
2105 *base = op;
2107 return true;
2111 /* Return true if CODE is a valid address without index. */
2113 bool
2114 s390_legitimate_address_without_index_p (rtx op)
2116 struct s390_address addr;
2118 if (!s390_decompose_address (XEXP (op, 0), &addr))
2119 return false;
2120 if (addr.indx)
2121 return false;
2123 return true;
2127 /* Return TRUE if ADDR is an operand valid for a load/store relative
2128 instruction. Be aware that the alignment of the operand needs to
2129 be checked separately.
2130 Valid addresses are single references or a sum of a reference and a
2131 constant integer. Return these parts in SYMREF and ADDEND. You can
2132 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2133 values. Literal pool references are *not* considered symbol
2134 references. */
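/* Schematically, an address such as
     (const (plus (symbol_ref ("x")) (const_int 8)))
   yields true with *SYMREF = (symbol_ref ("x")) and *ADDEND = 8,
   where "x" is just an example symbol.  A reference into the literal
   pool (CONSTANT_POOL_ADDRESS_P) is rejected.  */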
2136 static bool
2137 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2139 HOST_WIDE_INT tmpaddend = 0;
2141 if (GET_CODE (addr) == CONST)
2142 addr = XEXP (addr, 0);
2144 if (GET_CODE (addr) == PLUS)
2146 if (!CONST_INT_P (XEXP (addr, 1)))
2147 return false;
2149 tmpaddend = INTVAL (XEXP (addr, 1));
2150 addr = XEXP (addr, 0);
2153 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
2154 || (GET_CODE (addr) == UNSPEC
2155 && (XINT (addr, 1) == UNSPEC_GOTENT
2156 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
2158 if (symref)
2159 *symref = addr;
2160 if (addend)
2161 *addend = tmpaddend;
2163 return true;
2165 return false;
2168 /* Return true if the address in OP is valid for constraint letter C
2169 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2170 pool MEMs should be accepted. Only the Q, R, S, T constraint
2171 letters are allowed for C. */
2173 static int
2174 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2176 struct s390_address addr;
2177 bool decomposed = false;
2179 /* This check makes sure that no symbolic addresses (except literal
2180 pool references) are accepted by the R or T constraints. */
2181 if (s390_loadrelative_operand_p (op, NULL, NULL))
2182 return 0;
2184 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2185 if (!lit_pool_ok)
2187 if (!s390_decompose_address (op, &addr))
2188 return 0;
2189 if (addr.literal_pool)
2190 return 0;
2191 decomposed = true;
2194 switch (c)
2196 case 'Q': /* no index short displacement */
2197 if (!decomposed && !s390_decompose_address (op, &addr))
2198 return 0;
2199 if (addr.indx)
2200 return 0;
2201 if (!s390_short_displacement (addr.disp))
2202 return 0;
2203 break;
2205 case 'R': /* with index short displacement */
2206 if (TARGET_LONG_DISPLACEMENT)
2208 if (!decomposed && !s390_decompose_address (op, &addr))
2209 return 0;
2210 if (!s390_short_displacement (addr.disp))
2211 return 0;
2213 /* Any invalid address here will be fixed up by reload,
2214 so accept it for the most generic constraint. */
2215 break;
2217 case 'S': /* no index long displacement */
2218 if (!TARGET_LONG_DISPLACEMENT)
2219 return 0;
2220 if (!decomposed && !s390_decompose_address (op, &addr))
2221 return 0;
2222 if (addr.indx)
2223 return 0;
2224 if (s390_short_displacement (addr.disp))
2225 return 0;
2226 break;
2228 case 'T': /* with index long displacement */
2229 if (!TARGET_LONG_DISPLACEMENT)
2230 return 0;
2231 /* Any invalid address here will be fixed up by reload,
2232 so accept it for the most generic constraint. */
2233 if ((decomposed || s390_decompose_address (op, &addr))
2234 && s390_short_displacement (addr.disp))
2235 return 0;
2236 break;
2237 default:
2238 return 0;
2240 return 1;
2244 /* Evaluates constraint strings described by the regular expression
2245 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2246 the constraint given in STR, and 0 otherwise. */
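/* A few examples of how the letters combine: "AQ" accepts an
   offsettable, non-volatile MEM whose address has no index register
   and a short displacement; "BR" accepts a MEM that does not
   reference the literal pool, with an index allowed and a short
   displacement; "U" and "W" check a bare address (not wrapped in a
   MEM) against Q/R and S/T respectively; "Y" accepts anything that
   has the basic shape of a shift count.  */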
2249 s390_mem_constraint (const char *str, rtx op)
2251 char c = str[0];
2253 switch (c)
2255 case 'A':
2256 /* Check for offsettable variants of memory constraints. */
2257 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2258 return 0;
2259 if ((reload_completed || reload_in_progress)
2260 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2261 return 0;
2262 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2263 case 'B':
2264 /* Check for non-literal-pool variants of memory constraints. */
2265 if (!MEM_P (op))
2266 return 0;
2267 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2268 case 'Q':
2269 case 'R':
2270 case 'S':
2271 case 'T':
2272 if (GET_CODE (op) != MEM)
2273 return 0;
2274 return s390_check_qrst_address (c, XEXP (op, 0), true);
2275 case 'U':
2276 return (s390_check_qrst_address ('Q', op, true)
2277 || s390_check_qrst_address ('R', op, true));
2278 case 'W':
2279 return (s390_check_qrst_address ('S', op, true)
2280 || s390_check_qrst_address ('T', op, true));
2281 case 'Y':
2282 /* Simply check for the basic form of a shift count. Reload will
2283 take care of making sure we have a proper base register. */
2284 if (!s390_decompose_shift_count (op, NULL, NULL))
2285 return 0;
2286 break;
2287 case 'Z':
2288 return s390_check_qrst_address (str[1], op, true);
2289 default:
2290 return 0;
2292 return 1;
2296 /* Evaluates constraint strings starting with letter O. Input
2297 parameter C is the letter that follows the "O" in the constraint
2298 string. Returns 1 if VALUE meets the respective constraint and 0
2299 otherwise. */
2302 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2304 if (!TARGET_EXTIMM)
2305 return 0;
2307 switch (c)
2309 case 's':
2310 return trunc_int_for_mode (value, SImode) == value;
2312 case 'p':
2313 return value == 0
2314 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2316 case 'n':
2317 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2319 default:
2320 gcc_unreachable ();
2325 /* Evaluates constraint strings starting with letter N. Parameter STR
2326 contains the letters following letter "N" in the constraint string.
2327 Returns true if VALUE matches the constraint. */
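/* Roughly speaking, the string "xHD0" accepts any DImode VALUE in
   which exactly one HImode (16-bit) part is nonzero and all other
   parts are zero; a leading digit instead of 'x' additionally pins
   down which part must be the nonzero one, and a trailing 'F'
   instead of '0' requires the remaining parts to be all ones.  */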
2330 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2332 machine_mode mode, part_mode;
2333 int def;
2334 int part, part_goal;
2337 if (str[0] == 'x')
2338 part_goal = -1;
2339 else
2340 part_goal = str[0] - '0';
2342 switch (str[1])
2344 case 'Q':
2345 part_mode = QImode;
2346 break;
2347 case 'H':
2348 part_mode = HImode;
2349 break;
2350 case 'S':
2351 part_mode = SImode;
2352 break;
2353 default:
2354 return 0;
2357 switch (str[2])
2359 case 'H':
2360 mode = HImode;
2361 break;
2362 case 'S':
2363 mode = SImode;
2364 break;
2365 case 'D':
2366 mode = DImode;
2367 break;
2368 default:
2369 return 0;
2372 switch (str[3])
2374 case '0':
2375 def = 0;
2376 break;
2377 case 'F':
2378 def = -1;
2379 break;
2380 default:
2381 return 0;
2384 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2385 return 0;
2387 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2388 if (part < 0)
2389 return 0;
2390 if (part_goal != -1 && part_goal != part)
2391 return 0;
2393 return 1;
2397 /* Returns true if the input parameter VALUE is a float zero. */
2400 s390_float_const_zero_p (rtx value)
2402 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2403 && value == CONST0_RTX (GET_MODE (value)));
2406 /* Implement TARGET_REGISTER_MOVE_COST. */
2408 static int
2409 s390_register_move_cost (machine_mode mode,
2410 reg_class_t from, reg_class_t to)
2412 /* On s390, copy between fprs and gprs is expensive. */
2414 /* It becomes somewhat faster when ldgr/lgdr are available. */
2415 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
2417 /* ldgr is single cycle. */
2418 if (reg_classes_intersect_p (from, GENERAL_REGS)
2419 && reg_classes_intersect_p (to, FP_REGS))
2420 return 1;
2421 /* lgdr needs 3 cycles. */
2422 if (reg_classes_intersect_p (to, GENERAL_REGS)
2423 && reg_classes_intersect_p (from, FP_REGS))
2424 return 3;
2427 /* Otherwise copying is done via memory. */
2428 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2429 && reg_classes_intersect_p (to, FP_REGS))
2430 || (reg_classes_intersect_p (from, FP_REGS)
2431 && reg_classes_intersect_p (to, GENERAL_REGS)))
2432 return 10;
2434 return 1;
2437 /* Implement TARGET_MEMORY_MOVE_COST. */
2439 static int
2440 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2441 reg_class_t rclass ATTRIBUTE_UNUSED,
2442 bool in ATTRIBUTE_UNUSED)
2444 return 2;
2447 /* Compute a (partial) cost for rtx X. Return true if the complete
2448 cost has been computed, and false if subexpressions should be
2449 scanned. In either case, *TOTAL contains the cost result.
2450 CODE contains GET_CODE (x), OUTER_CODE contains the code
2451 of the superexpression of x. */
2453 static bool
2454 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2455 int *total, bool speed ATTRIBUTE_UNUSED)
2457 switch (code)
2459 case CONST:
2460 case CONST_INT:
2461 case LABEL_REF:
2462 case SYMBOL_REF:
2463 case CONST_DOUBLE:
2464 case MEM:
2465 *total = 0;
2466 return true;
2468 case ASHIFT:
2469 case ASHIFTRT:
2470 case LSHIFTRT:
2471 case ROTATE:
2472 case ROTATERT:
2473 case AND:
2474 case IOR:
2475 case XOR:
2476 case NEG:
2477 case NOT:
2478 *total = COSTS_N_INSNS (1);
2479 return false;
2481 case PLUS:
2482 case MINUS:
2483 *total = COSTS_N_INSNS (1);
2484 return false;
2486 case MULT:
2487 switch (GET_MODE (x))
2489 case SImode:
2491 rtx left = XEXP (x, 0);
2492 rtx right = XEXP (x, 1);
2493 if (GET_CODE (right) == CONST_INT
2494 && CONST_OK_FOR_K (INTVAL (right)))
2495 *total = s390_cost->mhi;
2496 else if (GET_CODE (left) == SIGN_EXTEND)
2497 *total = s390_cost->mh;
2498 else
2499 *total = s390_cost->ms; /* msr, ms, msy */
2500 break;
2502 case DImode:
2504 rtx left = XEXP (x, 0);
2505 rtx right = XEXP (x, 1);
2506 if (TARGET_ZARCH)
2508 if (GET_CODE (right) == CONST_INT
2509 && CONST_OK_FOR_K (INTVAL (right)))
2510 *total = s390_cost->mghi;
2511 else if (GET_CODE (left) == SIGN_EXTEND)
2512 *total = s390_cost->msgf;
2513 else
2514 *total = s390_cost->msg; /* msgr, msg */
2516 else /* TARGET_31BIT */
2518 if (GET_CODE (left) == SIGN_EXTEND
2519 && GET_CODE (right) == SIGN_EXTEND)
2520 /* mulsidi case: mr, m */
2521 *total = s390_cost->m;
2522 else if (GET_CODE (left) == ZERO_EXTEND
2523 && GET_CODE (right) == ZERO_EXTEND
2524 && TARGET_CPU_ZARCH)
2525 /* umulsidi case: ml, mlr */
2526 *total = s390_cost->ml;
2527 else
2528 /* Complex calculation is required. */
2529 *total = COSTS_N_INSNS (40);
2531 break;
2533 case SFmode:
2534 case DFmode:
2535 *total = s390_cost->mult_df;
2536 break;
2537 case TFmode:
2538 *total = s390_cost->mxbr;
2539 break;
2540 default:
2541 return false;
2543 return false;
2545 case FMA:
2546 switch (GET_MODE (x))
2548 case DFmode:
2549 *total = s390_cost->madbr;
2550 break;
2551 case SFmode:
2552 *total = s390_cost->maebr;
2553 break;
2554 default:
2555 return false;
2557 /* Negate in the third argument is free: FMSUB. */
2558 if (GET_CODE (XEXP (x, 2)) == NEG)
2560 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2561 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2562 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2563 return true;
2565 return false;
2567 case UDIV:
2568 case UMOD:
2569 if (GET_MODE (x) == TImode) /* 128 bit division */
2570 *total = s390_cost->dlgr;
2571 else if (GET_MODE (x) == DImode)
2573 rtx right = XEXP (x, 1);
2574 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2575 *total = s390_cost->dlr;
2576 else /* 64 by 64 bit division */
2577 *total = s390_cost->dlgr;
2579 else if (GET_MODE (x) == SImode) /* 32 bit division */
2580 *total = s390_cost->dlr;
2581 return false;
2583 case DIV:
2584 case MOD:
2585 if (GET_MODE (x) == DImode)
2587 rtx right = XEXP (x, 1);
2588 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2589 if (TARGET_ZARCH)
2590 *total = s390_cost->dsgfr;
2591 else
2592 *total = s390_cost->dr;
2593 else /* 64 by 64 bit division */
2594 *total = s390_cost->dsgr;
2596 else if (GET_MODE (x) == SImode) /* 32 bit division */
2597 *total = s390_cost->dlr;
2598 else if (GET_MODE (x) == SFmode)
2600 *total = s390_cost->debr;
2602 else if (GET_MODE (x) == DFmode)
2604 *total = s390_cost->ddbr;
2606 else if (GET_MODE (x) == TFmode)
2608 *total = s390_cost->dxbr;
2610 return false;
2612 case SQRT:
2613 if (GET_MODE (x) == SFmode)
2614 *total = s390_cost->sqebr;
2615 else if (GET_MODE (x) == DFmode)
2616 *total = s390_cost->sqdbr;
2617 else /* TFmode */
2618 *total = s390_cost->sqxbr;
2619 return false;
2621 case SIGN_EXTEND:
2622 case ZERO_EXTEND:
2623 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2624 || outer_code == PLUS || outer_code == MINUS
2625 || outer_code == COMPARE)
2626 *total = 0;
2627 return false;
2629 case COMPARE:
2630 *total = COSTS_N_INSNS (1);
2631 if (GET_CODE (XEXP (x, 0)) == AND
2632 && GET_CODE (XEXP (x, 1)) == CONST_INT
2633 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2635 rtx op0 = XEXP (XEXP (x, 0), 0);
2636 rtx op1 = XEXP (XEXP (x, 0), 1);
2637 rtx op2 = XEXP (x, 1);
2639 if (memory_operand (op0, GET_MODE (op0))
2640 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2641 return true;
2642 if (register_operand (op0, GET_MODE (op0))
2643 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2644 return true;
2646 return false;
2648 default:
2649 return false;
2653 /* Return the cost of an address rtx ADDR. */
2655 static int
2656 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2657 addr_space_t as ATTRIBUTE_UNUSED,
2658 bool speed ATTRIBUTE_UNUSED)
2660 struct s390_address ad;
2661 if (!s390_decompose_address (addr, &ad))
2662 return 1000;
2664 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2667 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2668 otherwise return 0. */
2671 tls_symbolic_operand (rtx op)
2673 if (GET_CODE (op) != SYMBOL_REF)
2674 return 0;
2675 return SYMBOL_REF_TLS_MODEL (op);
2678 /* Split DImode access register reference REG (on 64-bit) into its constituent
2679 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2680 gen_highpart cannot be used as they assume all registers are word-sized,
2681 while our access registers have only half that size. */
2683 void
2684 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2686 gcc_assert (TARGET_64BIT);
2687 gcc_assert (ACCESS_REG_P (reg));
2688 gcc_assert (GET_MODE (reg) == DImode);
2689 gcc_assert (!(REGNO (reg) & 1));
2691 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2692 *hi = gen_rtx_REG (SImode, REGNO (reg));
2695 /* Return true if OP contains a symbol reference.  */
2697 bool
2698 symbolic_reference_mentioned_p (rtx op)
2700 const char *fmt;
2701 int i;
2703 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2704 return 1;
2706 fmt = GET_RTX_FORMAT (GET_CODE (op));
2707 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2709 if (fmt[i] == 'E')
2711 int j;
2713 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2714 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2715 return 1;
2718 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2719 return 1;
2722 return 0;
2725 /* Return true if OP contains a reference to a thread-local symbol. */
2727 bool
2728 tls_symbolic_reference_mentioned_p (rtx op)
2730 const char *fmt;
2731 int i;
2733 if (GET_CODE (op) == SYMBOL_REF)
2734 return tls_symbolic_operand (op);
2736 fmt = GET_RTX_FORMAT (GET_CODE (op));
2737 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2739 if (fmt[i] == 'E')
2741 int j;
2743 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2744 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2745 return true;
2748 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2749 return true;
2752 return false;
2756 /* Return true if OP is a legitimate general operand when
2757 generating PIC code. It is given that flag_pic is on
2758 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2761 legitimate_pic_operand_p (rtx op)
2763 /* Accept all non-symbolic constants. */
2764 if (!SYMBOLIC_CONST (op))
2765 return 1;
2767 /* Reject everything else; must be handled
2768 via emit_symbolic_move. */
2769 return 0;
2772 /* Returns true if the constant value OP is a legitimate general operand.
2773 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2775 static bool
2776 s390_legitimate_constant_p (machine_mode mode, rtx op)
2778 /* Accept all non-symbolic constants. */
2779 if (!SYMBOLIC_CONST (op))
2780 return 1;
2782 /* Accept immediate LARL operands. */
2783 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2784 return 1;
2786 /* Thread-local symbols are never legal constants. This is
2787 so that emit_call knows that computing such addresses
2788 might require a function call. */
2789 if (TLS_SYMBOLIC_CONST (op))
2790 return 0;
2792 /* In the PIC case, symbolic constants must *not* be
2793 forced into the literal pool. We accept them here,
2794 so that they will be handled by emit_symbolic_move. */
2795 if (flag_pic)
2796 return 1;
2798 /* All remaining non-PIC symbolic constants are
2799 forced into the literal pool. */
2800 return 0;
2803 /* Determine if it's legal to put X into the constant pool. This
2804 is not possible if X contains the address of a symbol that is
2805 not constant (TLS) or not known at final link time (PIC). */
2807 static bool
2808 s390_cannot_force_const_mem (machine_mode mode, rtx x)
2810 switch (GET_CODE (x))
2812 case CONST_INT:
2813 case CONST_DOUBLE:
2814 /* Accept all non-symbolic constants. */
2815 return false;
2817 case LABEL_REF:
2818 /* Labels are OK iff we are non-PIC. */
2819 return flag_pic != 0;
2821 case SYMBOL_REF:
2822 /* 'Naked' TLS symbol references are never OK,
2823 non-TLS symbols are OK iff we are non-PIC. */
2824 if (tls_symbolic_operand (x))
2825 return true;
2826 else
2827 return flag_pic != 0;
2829 case CONST:
2830 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2831 case PLUS:
2832 case MINUS:
2833 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2834 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2836 case UNSPEC:
2837 switch (XINT (x, 1))
2839 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2840 case UNSPEC_LTREL_OFFSET:
2841 case UNSPEC_GOT:
2842 case UNSPEC_GOTOFF:
2843 case UNSPEC_PLTOFF:
2844 case UNSPEC_TLSGD:
2845 case UNSPEC_TLSLDM:
2846 case UNSPEC_NTPOFF:
2847 case UNSPEC_DTPOFF:
2848 case UNSPEC_GOTNTPOFF:
2849 case UNSPEC_INDNTPOFF:
2850 return false;
2852 /* If the literal pool shares the code section, we put
2853 execute template placeholders into the pool as well. */
2854 case UNSPEC_INSN:
2855 return TARGET_CPU_ZARCH;
2857 default:
2858 return true;
2860 break;
2862 default:
2863 gcc_unreachable ();
2867 /* Returns true if the constant value OP is a legitimate general
2868 operand during and after reload. The difference from
2869 legitimate_constant_p is that this function will not accept
2870 a constant that would need to be forced to the literal pool
2871 before it can be used as an operand.
2872 This function accepts all constants which can be loaded directly
2873 into a GPR. */
2875 bool
2876 legitimate_reload_constant_p (rtx op)
2878 /* Accept la(y) operands. */
2879 if (GET_CODE (op) == CONST_INT
2880 && DISP_IN_RANGE (INTVAL (op)))
2881 return true;
2883 /* Accept l(g)hi/l(g)fi operands. */
2884 if (GET_CODE (op) == CONST_INT
2885 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2886 return true;
2888 /* Accept lliXX operands. */
2889 if (TARGET_ZARCH
2890 && GET_CODE (op) == CONST_INT
2891 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2892 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2893 return true;
2895 if (TARGET_EXTIMM
2896 && GET_CODE (op) == CONST_INT
2897 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2898 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2899 return true;
2901 /* Accept larl operands. */
2902 if (TARGET_CPU_ZARCH
2903 && larl_operand (op, VOIDmode))
2904 return true;
2906 /* Accept floating-point zero operands that fit into a single GPR. */
2907 if (GET_CODE (op) == CONST_DOUBLE
2908 && s390_float_const_zero_p (op)
2909 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2910 return true;
2912 /* Accept double-word operands that can be split. */
2913 if (GET_CODE (op) == CONST_INT
2914 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2916 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2917 rtx hi = operand_subword (op, 0, 0, dword_mode);
2918 rtx lo = operand_subword (op, 1, 0, dword_mode);
2919 return legitimate_reload_constant_p (hi)
2920 && legitimate_reload_constant_p (lo);
2923 /* Everything else cannot be handled without reload. */
2924 return false;
2927 /* Returns true if the constant value OP is a legitimate fp operand
2928 during and after reload.
2929 This function accepts all constants which can be loaded directly
2930 into an FPR. */
2932 static bool
2933 legitimate_reload_fp_constant_p (rtx op)
2935 /* Accept floating-point zero operands if the load zero instruction
2936 can be used. Prior to z196 the load fp zero instruction caused a
2937 performance penalty if the result was used as a BFP number. */
2938 if (TARGET_Z196
2939 && GET_CODE (op) == CONST_DOUBLE
2940 && s390_float_const_zero_p (op))
2941 return true;
2943 return false;
2946 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2947 return the class of reg to actually use. */
2949 static reg_class_t
2950 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2952 switch (GET_CODE (op))
2954 /* Constants we cannot reload into general registers
2955 must be forced into the literal pool. */
2956 case CONST_DOUBLE:
2957 case CONST_INT:
2958 if (reg_class_subset_p (GENERAL_REGS, rclass)
2959 && legitimate_reload_constant_p (op))
2960 return GENERAL_REGS;
2961 else if (reg_class_subset_p (ADDR_REGS, rclass)
2962 && legitimate_reload_constant_p (op))
2963 return ADDR_REGS;
2964 else if (reg_class_subset_p (FP_REGS, rclass)
2965 && legitimate_reload_fp_constant_p (op))
2966 return FP_REGS;
2967 return NO_REGS;
2969 /* If a symbolic constant or a PLUS is reloaded,
2970 it is most likely being used as an address, so
2971 prefer ADDR_REGS. If 'class' is not a superset
2972 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2973 case CONST:
2974 /* Symrefs cannot be pushed into the literal pool with -fPIC
2975 so we *MUST NOT* return NO_REGS for these cases
2976 (s390_cannot_force_const_mem will return true).
2978 On the other hand we MUST return NO_REGS for symrefs with
2979 invalid addend which might have been pushed to the literal
2980 pool (no -fPIC). Usually we would expect them to be
2981 handled via secondary reload but this does not happen if
2982 they are used as literal pool slot replacement in reload
2983 inheritance (see emit_input_reload_insns). */
2984 if (TARGET_CPU_ZARCH
2985 && GET_CODE (XEXP (op, 0)) == PLUS
2986 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2987 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2989 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
2990 return ADDR_REGS;
2991 else
2992 return NO_REGS;
2994 /* fallthrough */
2995 case LABEL_REF:
2996 case SYMBOL_REF:
2997 if (!legitimate_reload_constant_p (op))
2998 return NO_REGS;
2999 /* fallthrough */
3000 case PLUS:
3001 /* load address will be used. */
3002 if (reg_class_subset_p (ADDR_REGS, rclass))
3003 return ADDR_REGS;
3004 else
3005 return NO_REGS;
3007 default:
3008 break;
3011 return rclass;
3014 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3015 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3016 aligned. */
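/* E.g. with ALIGNMENT == 4 an address of the form sym + 8 passes the
   addend check while sym + 6 does not, since 6 is not a multiple of 4;
   in addition the symbol itself must be naturally aligned.  */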
3018 bool
3019 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3021 HOST_WIDE_INT addend;
3022 rtx symref;
3024 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3025 return false;
3027 if (addend & (alignment - 1))
3028 return false;
3030 if (GET_CODE (symref) == SYMBOL_REF
3031 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3032 return true;
3034 if (GET_CODE (symref) == UNSPEC
3035 && alignment <= UNITS_PER_LONG)
3036 return true;
3038 return false;
3041 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3042 operand, SCRATCH is used to load the even part of the address,
3043 and one is then added. */
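/* For instance, to load sym + 5 (an odd addend that larl cannot
   encode) the code below first loads sym + 4 into SCRATCH via larl
   and then adds 1 using la, which does not clobber the condition
   code.  */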
3045 void
3046 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3048 HOST_WIDE_INT addend;
3049 rtx symref;
3051 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3052 gcc_unreachable ();
3054 if (!(addend & 1))
3055 /* Easy case. The addend is even so larl will do fine. */
3056 emit_move_insn (reg, addr);
3057 else
3059 /* We can leave the scratch register untouched if the target
3060 register is a valid base register. */
3061 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3062 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3063 scratch = reg;
3065 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3066 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3068 if (addend != 1)
3069 emit_move_insn (scratch,
3070 gen_rtx_CONST (Pmode,
3071 gen_rtx_PLUS (Pmode, symref,
3072 GEN_INT (addend - 1))));
3073 else
3074 emit_move_insn (scratch, symref);
3076 /* Increment the address using la in order to avoid clobbering cc. */
3077 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3081 /* Generate what is necessary to move between REG and MEM using
3082 SCRATCH. The direction is given by TOMEM. */
3084 void
3085 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3087 /* Reload might have pulled a constant out of the literal pool.
3088 Force it back in. */
3089 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3090 || GET_CODE (mem) == CONST)
3091 mem = force_const_mem (GET_MODE (reg), mem);
3093 gcc_assert (MEM_P (mem));
3095 /* For a load from memory we can leave the scratch register
3096 untouched if the target register is a valid base register. */
3097 if (!tomem
3098 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3099 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3100 && GET_MODE (reg) == GET_MODE (scratch))
3101 scratch = reg;
3103 /* Load address into scratch register. Since we can't have a
3104 secondary reload for a secondary reload we have to cover the case
3105 where larl would need a secondary reload here as well. */
3106 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3108 /* Now we can use a standard load/store to do the move. */
3109 if (tomem)
3110 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3111 else
3112 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3115 /* Inform reload about cases where moving X with a mode MODE to a register in
3116 RCLASS requires an extra scratch or immediate register. Return the class
3117 needed for the immediate register. */
3119 static reg_class_t
3120 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3121 machine_mode mode, secondary_reload_info *sri)
3123 enum reg_class rclass = (enum reg_class) rclass_i;
3125 /* Intermediate register needed. */
3126 if (reg_classes_intersect_p (CC_REGS, rclass))
3127 return GENERAL_REGS;
3129 if (TARGET_Z10)
3131 HOST_WIDE_INT offset;
3132 rtx symref;
3134 /* On z10 several optimizer steps may generate larl operands with
3135 an odd addend. */
3136 if (in_p
3137 && s390_loadrelative_operand_p (x, &symref, &offset)
3138 && mode == Pmode
3139 && !SYMBOL_REF_ALIGN1_P (symref)
3140 && (offset & 1) == 1)
3141 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3142 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3144 /* On z10 we need a scratch register when moving QI, TI or floating
3145 point mode values from or to a memory location with a SYMBOL_REF
3146 or if the symref addend of a SI or DI move is not aligned to the
3147 width of the access. */
3148 if (MEM_P (x)
3149 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3150 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3151 || (!TARGET_ZARCH && mode == DImode)
3152 || ((mode == HImode || mode == SImode || mode == DImode)
3153 && (!s390_check_symref_alignment (XEXP (x, 0),
3154 GET_MODE_SIZE (mode))))))
3156 #define __SECONDARY_RELOAD_CASE(M,m) \
3157 case M##mode: \
3158 if (TARGET_64BIT) \
3159 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3160 CODE_FOR_reload##m##di_tomem_z10; \
3161 else \
3162 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3163 CODE_FOR_reload##m##si_tomem_z10; \
3164 break;
3166 switch (GET_MODE (x))
3168 __SECONDARY_RELOAD_CASE (QI, qi);
3169 __SECONDARY_RELOAD_CASE (HI, hi);
3170 __SECONDARY_RELOAD_CASE (SI, si);
3171 __SECONDARY_RELOAD_CASE (DI, di);
3172 __SECONDARY_RELOAD_CASE (TI, ti);
3173 __SECONDARY_RELOAD_CASE (SF, sf);
3174 __SECONDARY_RELOAD_CASE (DF, df);
3175 __SECONDARY_RELOAD_CASE (TF, tf);
3176 __SECONDARY_RELOAD_CASE (SD, sd);
3177 __SECONDARY_RELOAD_CASE (DD, dd);
3178 __SECONDARY_RELOAD_CASE (TD, td);
3180 default:
3181 gcc_unreachable ();
3183 #undef __SECONDARY_RELOAD_CASE
3187 /* We need a scratch register when loading a PLUS expression which
3188 is not a legitimate operand of the LOAD ADDRESS instruction. */
3189 /* LRA can deal with the transformation of a PLUS operand very well,
3190 so we don't need to prompt it in this case. */
3191 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
3192 sri->icode = (TARGET_64BIT ?
3193 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3195 /* Performing a multiword move from or to memory we have to make sure the
3196 second chunk in memory is addressable without causing a displacement
3197 overflow. If that would be the case we calculate the address in
3198 a scratch register. */
3199 if (MEM_P (x)
3200 && GET_CODE (XEXP (x, 0)) == PLUS
3201 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3202 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3203 + GET_MODE_SIZE (mode) - 1))
3205 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3206 in an s_operand address since we may fall back to lm/stm. So we only
3207 have to care about overflows in the b+i+d case. */
3208 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3209 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3211 /* For FP_REGS no lm/stm is available so this check is triggered
3212 for displacement overflows in b+i+d and b+d like addresses. */
3213 || (reg_classes_intersect_p (FP_REGS, rclass)
3214 && s390_class_max_nregs (FP_REGS, mode) > 1))
3216 if (in_p)
3217 sri->icode = (TARGET_64BIT ?
3218 CODE_FOR_reloaddi_nonoffmem_in :
3219 CODE_FOR_reloadsi_nonoffmem_in);
3220 else
3221 sri->icode = (TARGET_64BIT ?
3222 CODE_FOR_reloaddi_nonoffmem_out :
3223 CODE_FOR_reloadsi_nonoffmem_out);
3227 /* A scratch address register is needed when a symbolic constant is
3228 copied to r0 compiling with -fPIC. In other cases the target
3229 register might be used as temporary (see legitimize_pic_address). */
3230 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3231 sri->icode = (TARGET_64BIT ?
3232 CODE_FOR_reloaddi_PIC_addr :
3233 CODE_FOR_reloadsi_PIC_addr);
3235 /* Either scratch or no register needed. */
3236 return NO_REGS;
3239 /* Generate code to load SRC, which is PLUS that is not a
3240 legitimate operand for the LA instruction, into TARGET.
3241 SCRATCH may be used as scratch register. */
3243 void
3244 s390_expand_plus_operand (rtx target, rtx src,
3245 rtx scratch)
3247 rtx sum1, sum2;
3248 struct s390_address ad;
3250 /* src must be a PLUS; get its two operands. */
3251 gcc_assert (GET_CODE (src) == PLUS);
3252 gcc_assert (GET_MODE (src) == Pmode);
3254 /* Check if any of the two operands is already scheduled
3255 for replacement by reload. This can happen e.g. when
3256 float registers occur in an address. */
3257 sum1 = find_replacement (&XEXP (src, 0));
3258 sum2 = find_replacement (&XEXP (src, 1));
3259 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3261 /* If the address is already strictly valid, there's nothing to do. */
3262 if (!s390_decompose_address (src, &ad)
3263 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3264 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3266 /* Otherwise, one of the operands cannot be an address register;
3267 we reload its value into the scratch register. */
3268 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3270 emit_move_insn (scratch, sum1);
3271 sum1 = scratch;
3273 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3275 emit_move_insn (scratch, sum2);
3276 sum2 = scratch;
3279 /* According to the way these invalid addresses are generated
3280 in reload.c, it should never happen (at least on s390) that
3281 *neither* of the PLUS components, after find_replacements
3282 was applied, is an address register. */
3283 if (sum1 == scratch && sum2 == scratch)
3285 debug_rtx (src);
3286 gcc_unreachable ();
3289 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3292 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3293 is only ever performed on addresses, so we can mark the
3294 sum as legitimate for LA in any case. */
3295 s390_load_address (target, src);
3299 /* Return true if ADDR is a valid memory address.
3300 STRICT specifies whether strict register checking applies. */
3302 static bool
3303 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
3305 struct s390_address ad;
3307 if (TARGET_Z10
3308 && larl_operand (addr, VOIDmode)
3309 && (mode == VOIDmode
3310 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3311 return true;
3313 if (!s390_decompose_address (addr, &ad))
3314 return false;
3316 if (strict)
3318 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3319 return false;
3321 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3322 return false;
3324 else
3326 if (ad.base
3327 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3328 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3329 return false;
3331 if (ad.indx
3332 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3333 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3334 return false;
3336 return true;
3339 /* Return true if OP is a valid operand for the LA instruction.
3340 In 31-bit, we need to prove that the result is used as an
3341 address, as LA performs only a 31-bit addition. */
3343 bool
3344 legitimate_la_operand_p (rtx op)
3346 struct s390_address addr;
3347 if (!s390_decompose_address (op, &addr))
3348 return false;
3350 return (TARGET_64BIT || addr.pointer);
3353 /* Return true if it is valid *and* preferable to use LA to
3354 compute the sum of OP1 and OP2. */
3356 bool
3357 preferred_la_operand_p (rtx op1, rtx op2)
3359 struct s390_address addr;
3361 if (op2 != const0_rtx)
3362 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3364 if (!s390_decompose_address (op1, &addr))
3365 return false;
3366 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3367 return false;
3368 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3369 return false;
3371 /* Avoid LA instructions with index register on z196; it is
3372 preferable to use regular add instructions when possible.
3373 Starting with zEC12 the la with index register is "uncracked"
3374 again. */
3375 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3376 return false;
3378 if (!TARGET_64BIT && !addr.pointer)
3379 return false;
3381 if (addr.pointer)
3382 return true;
3384 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3385 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3386 return true;
3388 return false;
3391 /* Emit a forced load-address operation to load SRC into DST.
3392 This will use the LOAD ADDRESS instruction even in situations
3393 where legitimate_la_operand_p (SRC) returns false. */
3395 void
3396 s390_load_address (rtx dst, rtx src)
3398 if (TARGET_64BIT)
3399 emit_move_insn (dst, src);
3400 else
3401 emit_insn (gen_force_la_31 (dst, src));
3404 /* Return a legitimate reference for ORIG (an address) using the
3405 register REG. If REG is 0, a new pseudo is generated.
3407 There are two types of references that must be handled:
3409 1. Global data references must load the address from the GOT, via
3410 the PIC reg. An insn is emitted to do this load, and the reg is
3411 returned.
3413 2. Static data references, constant pool addresses, and code labels
3414 compute the address as an offset from the GOT, whose base is in
3415 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3416 differentiate them from global data objects. The returned
3417 address is the PIC reg + an unspec constant.
3419 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3420 reg also appears in the address. */
3423 legitimize_pic_address (rtx orig, rtx reg)
3425 rtx addr = orig;
3426 rtx addend = const0_rtx;
3427 rtx new_rtx = orig;
3429 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3431 if (GET_CODE (addr) == CONST)
3432 addr = XEXP (addr, 0);
3434 if (GET_CODE (addr) == PLUS)
3436 addend = XEXP (addr, 1);
3437 addr = XEXP (addr, 0);
3440 if ((GET_CODE (addr) == LABEL_REF
3441 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3442 || (GET_CODE (addr) == UNSPEC &&
3443 (XINT (addr, 1) == UNSPEC_GOTENT
3444 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3445 && GET_CODE (addend) == CONST_INT)
3447 /* This can be locally addressed. */
3449 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3450 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3451 gen_rtx_CONST (Pmode, addr) : addr);
3453 if (TARGET_CPU_ZARCH
3454 && larl_operand (const_addr, VOIDmode)
3455 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3456 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3458 if (INTVAL (addend) & 1)
3460 /* LARL can't handle odd offsets, so emit a pair of LARL
3461 and LA. */
3462 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3464 if (!DISP_IN_RANGE (INTVAL (addend)))
3466 HOST_WIDE_INT even = INTVAL (addend) - 1;
3467 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3468 addr = gen_rtx_CONST (Pmode, addr);
3469 addend = const1_rtx;
3472 emit_move_insn (temp, addr);
3473 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3475 if (reg != 0)
3477 s390_load_address (reg, new_rtx);
3478 new_rtx = reg;
3481 else
3483 /* If the offset is even, we can just use LARL. This
3484 will happen automatically. */
3487 else
3489 /* No larl - Access local symbols relative to the GOT. */
3491 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3493 if (reload_in_progress || reload_completed)
3494 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3496 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3497 if (addend != const0_rtx)
3498 addr = gen_rtx_PLUS (Pmode, addr, addend);
3499 addr = gen_rtx_CONST (Pmode, addr);
3500 addr = force_const_mem (Pmode, addr);
3501 emit_move_insn (temp, addr);
3503 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3504 if (reg != 0)
3506 s390_load_address (reg, new_rtx);
3507 new_rtx = reg;
3511 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3513 /* A non-local symbol reference without addend.
3515 The symbol ref is wrapped into an UNSPEC to make sure the
3516 proper operand modifier (@GOT or @GOTENT) will be emitted.
3517 This will tell the linker to put the symbol into the GOT.
3519 Additionally the code dereferencing the GOT slot is emitted here.
3521 An addend to the symref needs to be added afterwards.
3522 legitimize_pic_address calls itself recursively to handle
3523 that case. So no need to do it here. */
3525 if (reg == 0)
3526 reg = gen_reg_rtx (Pmode);
3528 if (TARGET_Z10)
3530 /* Use load relative if possible.
3531 lgrl <target>, sym@GOTENT */
3532 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3533 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3534 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3536 emit_move_insn (reg, new_rtx);
3537 new_rtx = reg;
3539 else if (flag_pic == 1)
3541 /* Assume GOT offset is a valid displacement operand (< 4k
3542 or < 512k with z990). This is handled the same way in
3543 both 31- and 64-bit code (@GOT).
3544 lg <target>, sym@GOT(r12) */
3546 if (reload_in_progress || reload_completed)
3547 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3549 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3550 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3551 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3552 new_rtx = gen_const_mem (Pmode, new_rtx);
3553 emit_move_insn (reg, new_rtx);
3554 new_rtx = reg;
3556 else if (TARGET_CPU_ZARCH)
3558 /* If the GOT offset might be >= 4k, we determine the position
3559 of the GOT entry via a PC-relative LARL (@GOTENT).
3560 larl temp, sym@GOTENT
3561 lg <target>, 0(temp) */
3563 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3565 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3566 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3568 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3569 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3570 emit_move_insn (temp, new_rtx);
3572 new_rtx = gen_const_mem (Pmode, temp);
3573 emit_move_insn (reg, new_rtx);
3575 new_rtx = reg;
3577 else
3579 /* If the GOT offset might be >= 4k, we have to load it
3580 from the literal pool (@GOT).
3582 lg temp, lit-litbase(r13)
3583 lg <target>, 0(temp)
3584 lit: .long sym@GOT */
3586 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3588 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3589 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3591 if (reload_in_progress || reload_completed)
3592 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3594 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3595 addr = gen_rtx_CONST (Pmode, addr);
3596 addr = force_const_mem (Pmode, addr);
3597 emit_move_insn (temp, addr);
3599 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3600 new_rtx = gen_const_mem (Pmode, new_rtx);
3601 emit_move_insn (reg, new_rtx);
3602 new_rtx = reg;
3605 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3607 gcc_assert (XVECLEN (addr, 0) == 1);
3608 switch (XINT (addr, 1))
3610 /* These UNSPECs address symbols (or PLT slots) relative to the GOT
3611 (not GOT slots!). In general this will exceed the
3612 displacement range, so these values belong in the literal
3613 pool. */
3614 case UNSPEC_GOTOFF:
3615 case UNSPEC_PLTOFF:
3616 new_rtx = force_const_mem (Pmode, orig);
3617 break;
3619 /* For -fPIC the GOT size might exceed the displacement
3620 range so make sure the value is in the literal pool. */
3621 case UNSPEC_GOT:
3622 if (flag_pic == 2)
3623 new_rtx = force_const_mem (Pmode, orig);
3624 break;
3626 /* For @GOTENT larl is used. This is handled like local
3627 symbol refs. */
3628 case UNSPEC_GOTENT:
3629 gcc_unreachable ();
3630 break;
3632 /* @PLT is OK as is on 64-bit, must be converted to
3633 GOT-relative @PLTOFF on 31-bit. */
3634 case UNSPEC_PLT:
3635 if (!TARGET_CPU_ZARCH)
3637 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3639 if (reload_in_progress || reload_completed)
3640 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3642 addr = XVECEXP (addr, 0, 0);
3643 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3644 UNSPEC_PLTOFF);
3645 if (addend != const0_rtx)
3646 addr = gen_rtx_PLUS (Pmode, addr, addend);
3647 addr = gen_rtx_CONST (Pmode, addr);
3648 addr = force_const_mem (Pmode, addr);
3649 emit_move_insn (temp, addr);
3651 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3652 if (reg != 0)
3654 s390_load_address (reg, new_rtx);
3655 new_rtx = reg;
3658 else
3659 /* On 64 bit larl can be used. This case is handled like
3660 local symbol refs. */
3661 gcc_unreachable ();
3662 break;
3664 /* Everything else cannot happen. */
3665 default:
3666 gcc_unreachable ();
3669 else if (addend != const0_rtx)
3671 /* Otherwise, compute the sum. */
3673 rtx base = legitimize_pic_address (addr, reg);
3674 new_rtx = legitimize_pic_address (addend,
3675 base == reg ? NULL_RTX : reg);
3676 if (GET_CODE (new_rtx) == CONST_INT)
3677 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3678 else
3680 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3682 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3683 new_rtx = XEXP (new_rtx, 1);
3685 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3688 if (GET_CODE (new_rtx) == CONST)
3689 new_rtx = XEXP (new_rtx, 0);
3690 new_rtx = force_operand (new_rtx, 0);
3693 return new_rtx;
3696 /* Load the thread pointer into a register. */
3699 s390_get_thread_pointer (void)
3701 rtx tp = gen_reg_rtx (Pmode);
3703 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3704 mark_reg_pointer (tp, BITS_PER_WORD);
3706 return tp;
3709 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3710 in s390_tls_symbol which always refers to __tls_get_offset.
3711 The returned offset is written to RESULT_REG and a USE rtx is
3712 generated for TLS_CALL. */
3714 static GTY(()) rtx s390_tls_symbol;
3716 static void
3717 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3719 rtx insn;
3721 if (!flag_pic)
3722 emit_insn (s390_load_got ());
3724 if (!s390_tls_symbol)
3725 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3727 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3728 gen_rtx_REG (Pmode, RETURN_REGNUM));
3730 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3731 RTL_CONST_CALL_P (insn) = 1;
3734 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3735 this (thread-local) address. REG may be used as temporary. */
3737 static rtx
3738 legitimize_tls_address (rtx addr, rtx reg)
3740 rtx new_rtx, tls_call, temp, base, r2, insn;
3742 if (GET_CODE (addr) == SYMBOL_REF)
3743 switch (tls_symbolic_operand (addr))
3745 case TLS_MODEL_GLOBAL_DYNAMIC:
3746 start_sequence ();
3747 r2 = gen_rtx_REG (Pmode, 2);
3748 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3749 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3750 new_rtx = force_const_mem (Pmode, new_rtx);
3751 emit_move_insn (r2, new_rtx);
3752 s390_emit_tls_call_insn (r2, tls_call);
3753 insn = get_insns ();
3754 end_sequence ();
3756 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3757 temp = gen_reg_rtx (Pmode);
3758 emit_libcall_block (insn, temp, r2, new_rtx);
3760 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3761 if (reg != 0)
3763 s390_load_address (reg, new_rtx);
3764 new_rtx = reg;
3766 break;
3768 case TLS_MODEL_LOCAL_DYNAMIC:
3769 start_sequence ();
3770 r2 = gen_rtx_REG (Pmode, 2);
3771 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3772 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3773 new_rtx = force_const_mem (Pmode, new_rtx);
3774 emit_move_insn (r2, new_rtx);
3775 s390_emit_tls_call_insn (r2, tls_call);
3776 insn = get_insns ();
3777 end_sequence ();
3779 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3780 temp = gen_reg_rtx (Pmode);
3781 emit_libcall_block (insn, temp, r2, new_rtx);
3783 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3784 base = gen_reg_rtx (Pmode);
3785 s390_load_address (base, new_rtx);
3787 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3788 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3789 new_rtx = force_const_mem (Pmode, new_rtx);
3790 temp = gen_reg_rtx (Pmode);
3791 emit_move_insn (temp, new_rtx);
3793 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3794 if (reg != 0)
3796 s390_load_address (reg, new_rtx);
3797 new_rtx = reg;
3799 break;
3801 case TLS_MODEL_INITIAL_EXEC:
3802 if (flag_pic == 1)
3804 /* Assume GOT offset < 4k. This is handled the same way
3805 in both 31- and 64-bit code. */
3807 if (reload_in_progress || reload_completed)
3808 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3810 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3811 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3812 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3813 new_rtx = gen_const_mem (Pmode, new_rtx);
3814 temp = gen_reg_rtx (Pmode);
3815 emit_move_insn (temp, new_rtx);
3817 else if (TARGET_CPU_ZARCH)
3819 /* If the GOT offset might be >= 4k, we determine the position
3820 of the GOT entry via a PC-relative LARL. */
3822 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3824 temp = gen_reg_rtx (Pmode);
3825 emit_move_insn (temp, new_rtx);
3827 new_rtx = gen_const_mem (Pmode, temp);
3828 temp = gen_reg_rtx (Pmode);
3829 emit_move_insn (temp, new_rtx);
3831 else if (flag_pic)
3833 /* If the GOT offset might be >= 4k, we have to load it
3834 from the literal pool. */
3836 if (reload_in_progress || reload_completed)
3837 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3839 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3840 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3841 new_rtx = force_const_mem (Pmode, new_rtx);
3842 temp = gen_reg_rtx (Pmode);
3843 emit_move_insn (temp, new_rtx);
3845 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3846 new_rtx = gen_const_mem (Pmode, new_rtx);
3848 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3849 temp = gen_reg_rtx (Pmode);
3850 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3852 else
3854 /* In position-dependent code, load the absolute address of
3855 the GOT entry from the literal pool. */
3857 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3858 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3859 new_rtx = force_const_mem (Pmode, new_rtx);
3860 temp = gen_reg_rtx (Pmode);
3861 emit_move_insn (temp, new_rtx);
3863 new_rtx = temp;
3864 new_rtx = gen_const_mem (Pmode, new_rtx);
3865 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3866 temp = gen_reg_rtx (Pmode);
3867 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3870 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3871 if (reg != 0)
3873 s390_load_address (reg, new_rtx);
3874 new_rtx = reg;
3876 break;
3878 case TLS_MODEL_LOCAL_EXEC:
3879 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3880 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3881 new_rtx = force_const_mem (Pmode, new_rtx);
3882 temp = gen_reg_rtx (Pmode);
3883 emit_move_insn (temp, new_rtx);
3885 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3886 if (reg != 0)
3888 s390_load_address (reg, new_rtx);
3889 new_rtx = reg;
3891 break;
3893 default:
3894 gcc_unreachable ();
3897 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3899 switch (XINT (XEXP (addr, 0), 1))
3901 case UNSPEC_INDNTPOFF:
3902 gcc_assert (TARGET_CPU_ZARCH);
3903 new_rtx = addr;
3904 break;
3906 default:
3907 gcc_unreachable ();
3911 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3912 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3914 new_rtx = XEXP (XEXP (addr, 0), 0);
3915 if (GET_CODE (new_rtx) != SYMBOL_REF)
3916 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3918 new_rtx = legitimize_tls_address (new_rtx, reg);
3919 new_rtx = plus_constant (Pmode, new_rtx,
3920 INTVAL (XEXP (XEXP (addr, 0), 1)));
3921 new_rtx = force_operand (new_rtx, 0);
3924 else
3925 gcc_unreachable (); /* for now ... */
3927 return new_rtx;
3930 /* Emit insns making the address in operands[1] valid for a standard
3931 move to operands[0]. operands[1] is replaced by an address which
3932 should be used instead of the former RTX to emit the move
3933 pattern. */
3935 void
3936 emit_symbolic_move (rtx *operands)
3938 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3940 if (GET_CODE (operands[0]) == MEM)
3941 operands[1] = force_reg (Pmode, operands[1]);
3942 else if (TLS_SYMBOLIC_CONST (operands[1]))
3943 operands[1] = legitimize_tls_address (operands[1], temp);
3944 else if (flag_pic)
3945 operands[1] = legitimize_pic_address (operands[1], temp);
3948 /* Try machine-dependent ways of modifying an illegitimate address X
3949 to be legitimate. If we find one, return the new, valid address.
3951 OLDX is the address as it was before break_out_memory_refs was called.
3952 In some cases it is useful to look at this to decide what needs to be done.
3954 MODE is the mode of the operand pointed to by X.
3956 When -fpic is used, special handling is needed for symbolic references.
3957 See comments by legitimize_pic_address for details. */
3959 static rtx
3960 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3961 machine_mode mode ATTRIBUTE_UNUSED)
3963 rtx constant_term = const0_rtx;
3965 if (TLS_SYMBOLIC_CONST (x))
3967 x = legitimize_tls_address (x, 0);
3969 if (s390_legitimate_address_p (mode, x, FALSE))
3970 return x;
3972 else if (GET_CODE (x) == PLUS
3973 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3974 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3976 return x;
3978 else if (flag_pic)
3980 if (SYMBOLIC_CONST (x)
3981 || (GET_CODE (x) == PLUS
3982 && (SYMBOLIC_CONST (XEXP (x, 0))
3983 || SYMBOLIC_CONST (XEXP (x, 1)))))
3984 x = legitimize_pic_address (x, 0);
3986 if (s390_legitimate_address_p (mode, x, FALSE))
3987 return x;
3990 x = eliminate_constant_term (x, &constant_term);
3992 /* Optimize loading of large displacements by splitting them
3993 into the multiple of 4K and the rest; this allows the
3994 former to be CSE'd if possible.
3996 Don't do this if the displacement is added to a register
3997 pointing into the stack frame, as the offsets will
3998 change later anyway. */
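/* E.g. a displacement of 0x12345 is split into 0x12000, which is
   loaded into a register (and can thus be CSE'd with other uses of
   the same base), and a remaining displacement of 0x345 that fits
   the 12-bit short-displacement field.  */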
4000 if (GET_CODE (constant_term) == CONST_INT
4001 && !TARGET_LONG_DISPLACEMENT
4002 && !DISP_IN_RANGE (INTVAL (constant_term))
4003 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4005 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4006 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4008 rtx temp = gen_reg_rtx (Pmode);
4009 rtx val = force_operand (GEN_INT (upper), temp);
4010 if (val != temp)
4011 emit_move_insn (temp, val);
4013 x = gen_rtx_PLUS (Pmode, x, temp);
4014 constant_term = GEN_INT (lower);
4017 if (GET_CODE (x) == PLUS)
4019 if (GET_CODE (XEXP (x, 0)) == REG)
4021 rtx temp = gen_reg_rtx (Pmode);
4022 rtx val = force_operand (XEXP (x, 1), temp);
4023 if (val != temp)
4024 emit_move_insn (temp, val);
4026 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4029 else if (GET_CODE (XEXP (x, 1)) == REG)
4031 rtx temp = gen_reg_rtx (Pmode);
4032 rtx val = force_operand (XEXP (x, 0), temp);
4033 if (val != temp)
4034 emit_move_insn (temp, val);
4036 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4040 if (constant_term != const0_rtx)
4041 x = gen_rtx_PLUS (Pmode, x, constant_term);
4043 return x;
4046 /* Try a machine-dependent way of reloading an illegitimate address AD
4047 operand. If we find one, push the reload and return the new address.
4049 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4050 and TYPE is the reload type of the current reload. */
4053 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
4054 int opnum, int type)
4056 if (!optimize || TARGET_LONG_DISPLACEMENT)
4057 return NULL_RTX;
4059 if (GET_CODE (ad) == PLUS)
4061 rtx tem = simplify_binary_operation (PLUS, Pmode,
4062 XEXP (ad, 0), XEXP (ad, 1));
4063 if (tem)
4064 ad = tem;
4067 if (GET_CODE (ad) == PLUS
4068 && GET_CODE (XEXP (ad, 0)) == REG
4069 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4070 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4072 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4073 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4074 rtx cst, tem, new_rtx;
4076 cst = GEN_INT (upper);
4077 if (!legitimate_reload_constant_p (cst))
4078 cst = force_const_mem (Pmode, cst);
4080 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4081 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4083 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4084 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4085 opnum, (enum reload_type) type);
4086 return new_rtx;
4089 return NULL_RTX;
4092 /* Emit code to move LEN bytes from SRC to DST. */
4094 bool
4095 s390_expand_movmem (rtx dst, rtx src, rtx len)
4097 /* When tuning for z10 or higher we rely on the Glibc functions to
4098 do the right thing. Only for constant lengths below 64k do we
4099 generate inline code. */
4100 if (s390_tune >= PROCESSOR_2097_Z10
4101 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4102 return false;
4104 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4106 if (INTVAL (len) > 0)
4107 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4110 else if (TARGET_MVCLE)
4112 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4115 else
4117 rtx dst_addr, src_addr, count, blocks, temp;
4118 rtx_code_label *loop_start_label = gen_label_rtx ();
4119 rtx_code_label *loop_end_label = gen_label_rtx ();
4120 rtx_code_label *end_label = gen_label_rtx ();
4121 machine_mode mode;
4123 mode = GET_MODE (len);
4124 if (mode == VOIDmode)
4125 mode = Pmode;
4127 dst_addr = gen_reg_rtx (Pmode);
4128 src_addr = gen_reg_rtx (Pmode);
4129 count = gen_reg_rtx (mode);
4130 blocks = gen_reg_rtx (mode);
4132 convert_move (count, len, 1);
4133 emit_cmp_and_jump_insns (count, const0_rtx,
4134 EQ, NULL_RTX, mode, 1, end_label);
4136 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4137 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4138 dst = change_address (dst, VOIDmode, dst_addr);
4139 src = change_address (src, VOIDmode, src_addr);
4141 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4142 OPTAB_DIRECT);
4143 if (temp != count)
4144 emit_move_insn (count, temp);
4146 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4147 OPTAB_DIRECT);
4148 if (temp != blocks)
4149 emit_move_insn (blocks, temp);
4151 emit_cmp_and_jump_insns (blocks, const0_rtx,
4152 EQ, NULL_RTX, mode, 1, loop_end_label);
4154 emit_label (loop_start_label);
4156 if (TARGET_Z10
4157 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4159 rtx prefetch;
4161 /* Issue a read prefetch for the +3 cache line. */
4162 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4163 const0_rtx, const0_rtx);
4164 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4165 emit_insn (prefetch);
4167 /* Issue a write prefetch for the +3 cache line. */
4168 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4169 const1_rtx, const0_rtx);
4170 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4171 emit_insn (prefetch);
4174 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4175 s390_load_address (dst_addr,
4176 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4177 s390_load_address (src_addr,
4178 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4180 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4181 OPTAB_DIRECT);
4182 if (temp != blocks)
4183 emit_move_insn (blocks, temp);
4185 emit_cmp_and_jump_insns (blocks, const0_rtx,
4186 EQ, NULL_RTX, mode, 1, loop_end_label);
4188 emit_jump (loop_start_label);
4189 emit_label (loop_end_label);
4191 emit_insn (gen_movmem_short (dst, src,
4192 convert_to_mode (Pmode, count, 1)));
4193 emit_label (end_label);
4195 return true;
4198 /* Emit code to set LEN bytes at DST to VAL.
4199 Make use of clrmem if VAL is zero. */
4201 void
4202 s390_expand_setmem (rtx dst, rtx len, rtx val)
4204 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4205 return;
4207 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4209 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4211 if (val == const0_rtx && INTVAL (len) <= 256)
4212 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4213 else
4215 /* Initialize memory by storing the first byte. */
4216 emit_move_insn (adjust_address (dst, QImode, 0), val);
4218 if (INTVAL (len) > 1)
4220 /* Initiate 1 byte overlap move.
4221 The first byte of DST is propagated through DSTP1.
4222 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4223 DST is set to size 1 so the rest of the memory location
4224 does not count as source operand. */
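/* Worked example (illustrative): for LEN = 5 and VAL = 0x42 the byte 0x42
   is first stored at DST[0]; the MVC emitted below copies LEN - 1 = 4
   bytes from DST to DST + 1 left to right, so the value propagates into
   DST[1] .. DST[4].  The length operand is LEN - 2 because the hardware
   encodes an MVC length of N as N - 1.  */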
4225 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4226 set_mem_size (dst, 1);
4228 emit_insn (gen_movmem_short (dstp1, dst,
4229 GEN_INT (INTVAL (len) - 2)));
4234 else if (TARGET_MVCLE)
4236 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4237 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4240 else
4242 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4243 rtx_code_label *loop_start_label = gen_label_rtx ();
4244 rtx_code_label *loop_end_label = gen_label_rtx ();
4245 rtx_code_label *end_label = gen_label_rtx ();
4246 machine_mode mode;
4248 mode = GET_MODE (len);
4249 if (mode == VOIDmode)
4250 mode = Pmode;
4252 dst_addr = gen_reg_rtx (Pmode);
4253 count = gen_reg_rtx (mode);
4254 blocks = gen_reg_rtx (mode);
4256 convert_move (count, len, 1);
4257 emit_cmp_and_jump_insns (count, const0_rtx,
4258 EQ, NULL_RTX, mode, 1, end_label);
4260 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4261 dst = change_address (dst, VOIDmode, dst_addr);
4263 if (val == const0_rtx)
4264 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4265 OPTAB_DIRECT);
4266 else
4268 dstp1 = adjust_address (dst, VOIDmode, 1);
4269 set_mem_size (dst, 1);
4271 /* Initialize memory by storing the first byte. */
4272 emit_move_insn (adjust_address (dst, QImode, 0), val);
4274 /* If count is 1 we are done. */
4275 emit_cmp_and_jump_insns (count, const1_rtx,
4276 EQ, NULL_RTX, mode, 1, end_label);
4278 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4279 OPTAB_DIRECT);
4281 if (temp != count)
4282 emit_move_insn (count, temp);
4284 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4285 OPTAB_DIRECT);
4286 if (temp != blocks)
4287 emit_move_insn (blocks, temp);
4289 emit_cmp_and_jump_insns (blocks, const0_rtx,
4290 EQ, NULL_RTX, mode, 1, loop_end_label);
4292 emit_label (loop_start_label);
4294 if (TARGET_Z10
4295 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4297 /* Issue a write prefetch for the +4 cache line. */
4298 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4299 GEN_INT (1024)),
4300 const1_rtx, const0_rtx);
4301 emit_insn (prefetch);
4302 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4305 if (val == const0_rtx)
4306 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4307 else
4308 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4309 s390_load_address (dst_addr,
4310 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4312 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4313 OPTAB_DIRECT);
4314 if (temp != blocks)
4315 emit_move_insn (blocks, temp);
4317 emit_cmp_and_jump_insns (blocks, const0_rtx,
4318 EQ, NULL_RTX, mode, 1, loop_end_label);
4320 emit_jump (loop_start_label);
4321 emit_label (loop_end_label);
4323 if (val == const0_rtx)
4324 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4325 else
4326 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4327 emit_label (end_label);
4331 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4332 and return the result in TARGET. */
4334 bool
4335 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4337 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4338 rtx tmp;
4340 /* When tuning for z10 or higher we rely on the Glibc functions to
4341 do the right thing. Only for constant lengths below 64k do we
4342 generate inline code. */
4343 if (s390_tune >= PROCESSOR_2097_Z10
4344 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4345 return false;
4347 /* As the result of CMPINT is inverted compared to what we need,
4348 we have to swap the operands. */
4349 tmp = op0; op0 = op1; op1 = tmp;
4351 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4353 if (INTVAL (len) > 0)
4355 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4356 emit_insn (gen_cmpint (target, ccreg));
4358 else
4359 emit_move_insn (target, const0_rtx);
4361 else if (TARGET_MVCLE)
4363 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4364 emit_insn (gen_cmpint (target, ccreg));
4366 else
4368 rtx addr0, addr1, count, blocks, temp;
4369 rtx_code_label *loop_start_label = gen_label_rtx ();
4370 rtx_code_label *loop_end_label = gen_label_rtx ();
4371 rtx_code_label *end_label = gen_label_rtx ();
4372 machine_mode mode;
4374 mode = GET_MODE (len);
4375 if (mode == VOIDmode)
4376 mode = Pmode;
4378 addr0 = gen_reg_rtx (Pmode);
4379 addr1 = gen_reg_rtx (Pmode);
4380 count = gen_reg_rtx (mode);
4381 blocks = gen_reg_rtx (mode);
4383 convert_move (count, len, 1);
4384 emit_cmp_and_jump_insns (count, const0_rtx,
4385 EQ, NULL_RTX, mode, 1, end_label);
4387 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4388 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4389 op0 = change_address (op0, VOIDmode, addr0);
4390 op1 = change_address (op1, VOIDmode, addr1);
4392 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4393 OPTAB_DIRECT);
4394 if (temp != count)
4395 emit_move_insn (count, temp);
4397 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4398 OPTAB_DIRECT);
4399 if (temp != blocks)
4400 emit_move_insn (blocks, temp);
4402 emit_cmp_and_jump_insns (blocks, const0_rtx,
4403 EQ, NULL_RTX, mode, 1, loop_end_label);
4405 emit_label (loop_start_label);
4407 if (TARGET_Z10
4408 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4410 rtx prefetch;
4412 /* Issue a read prefetch for the +2 cache line of operand 1. */
4413 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4414 const0_rtx, const0_rtx);
4415 emit_insn (prefetch);
4416 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4418 /* Issue a read prefetch for the +2 cache line of operand 2. */
4419 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4420 const0_rtx, const0_rtx);
4421 emit_insn (prefetch);
4422 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4425 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4426 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4427 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4428 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4429 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4430 emit_jump_insn (temp);
4432 s390_load_address (addr0,
4433 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4434 s390_load_address (addr1,
4435 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4437 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4438 OPTAB_DIRECT);
4439 if (temp != blocks)
4440 emit_move_insn (blocks, temp);
4442 emit_cmp_and_jump_insns (blocks, const0_rtx,
4443 EQ, NULL_RTX, mode, 1, loop_end_label);
4445 emit_jump (loop_start_label);
4446 emit_label (loop_end_label);
4448 emit_insn (gen_cmpmem_short (op0, op1,
4449 convert_to_mode (Pmode, count, 1)));
4450 emit_label (end_label);
4452 emit_insn (gen_cmpint (target, ccreg));
4454 return true;
4458 /* Expand conditional increment or decrement using alc/slb instructions.
4459 Should generate code setting DST to either SRC or SRC + INCREMENT,
4460 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4461 Returns true if successful, false otherwise.
4463 That makes it possible to implement some if-constructs without jumps e.g.:
4464 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4465 unsigned int a, b, c;
4466 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4467 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4468 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4469 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4471 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4472 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4473 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4474 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4475 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4477 bool
4478 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4479 rtx dst, rtx src, rtx increment)
4481 machine_mode cmp_mode;
4482 machine_mode cc_mode;
4483 rtx op_res;
4484 rtx insn;
4485 rtvec p;
4486 int ret;
4488 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4489 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4490 cmp_mode = SImode;
4491 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4492 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4493 cmp_mode = DImode;
4494 else
4495 return false;
4497 /* Try ADD LOGICAL WITH CARRY. */
4498 if (increment == const1_rtx)
4500 /* Determine CC mode to use. */
4501 if (cmp_code == EQ || cmp_code == NE)
4503 if (cmp_op1 != const0_rtx)
4505 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4506 NULL_RTX, 0, OPTAB_WIDEN);
4507 cmp_op1 = const0_rtx;
4510 cmp_code = cmp_code == EQ ? LEU : GTU;
4513 if (cmp_code == LTU || cmp_code == LEU)
4515 rtx tem = cmp_op0;
4516 cmp_op0 = cmp_op1;
4517 cmp_op1 = tem;
4518 cmp_code = swap_condition (cmp_code);
4521 switch (cmp_code)
4523 case GTU:
4524 cc_mode = CCUmode;
4525 break;
4527 case GEU:
4528 cc_mode = CCL3mode;
4529 break;
4531 default:
4532 return false;
4535 /* Emit comparison instruction pattern. */
4536 if (!register_operand (cmp_op0, cmp_mode))
4537 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4539 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4540 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4541 /* We use insn_invalid_p here to add clobbers if required. */
4542 ret = insn_invalid_p (emit_insn (insn), false);
4543 gcc_assert (!ret);
4545 /* Emit ALC instruction pattern. */
4546 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4547 gen_rtx_REG (cc_mode, CC_REGNUM),
4548 const0_rtx);
4550 if (src != const0_rtx)
4552 if (!register_operand (src, GET_MODE (dst)))
4553 src = force_reg (GET_MODE (dst), src);
4555 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4556 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4559 p = rtvec_alloc (2);
4560 RTVEC_ELT (p, 0) =
4561 gen_rtx_SET (VOIDmode, dst, op_res);
4562 RTVEC_ELT (p, 1) =
4563 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4564 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4566 return true;
4569 /* Try SUBTRACT LOGICAL WITH BORROW. */
4570 if (increment == constm1_rtx)
4572 /* Determine CC mode to use. */
4573 if (cmp_code == EQ || cmp_code == NE)
4575 if (cmp_op1 != const0_rtx)
4577 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4578 NULL_RTX, 0, OPTAB_WIDEN);
4579 cmp_op1 = const0_rtx;
4582 cmp_code = cmp_code == EQ ? LEU : GTU;
4585 if (cmp_code == GTU || cmp_code == GEU)
4587 rtx tem = cmp_op0;
4588 cmp_op0 = cmp_op1;
4589 cmp_op1 = tem;
4590 cmp_code = swap_condition (cmp_code);
4593 switch (cmp_code)
4595 case LEU:
4596 cc_mode = CCUmode;
4597 break;
4599 case LTU:
4600 cc_mode = CCL3mode;
4601 break;
4603 default:
4604 return false;
4607 /* Emit comparison instruction pattern. */
4608 if (!register_operand (cmp_op0, cmp_mode))
4609 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4611 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4612 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4613 /* We use insn_invalid_p here to add clobbers if required. */
4614 ret = insn_invalid_p (emit_insn (insn), false);
4615 gcc_assert (!ret);
4617 /* Emit SLB instruction pattern. */
4618 if (!register_operand (src, GET_MODE (dst)))
4619 src = force_reg (GET_MODE (dst), src);
4621 op_res = gen_rtx_MINUS (GET_MODE (dst),
4622 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4623 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4624 gen_rtx_REG (cc_mode, CC_REGNUM),
4625 const0_rtx));
4626 p = rtvec_alloc (2);
4627 RTVEC_ELT (p, 0) =
4628 gen_rtx_SET (VOIDmode, dst, op_res);
4629 RTVEC_ELT (p, 1) =
4630 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4631 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4633 return true;
4636 return false;
4639 /* Expand code for the insv template. Return true if successful. */
4641 bool
4642 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4644 int bitsize = INTVAL (op1);
4645 int bitpos = INTVAL (op2);
4646 machine_mode mode = GET_MODE (dest);
4647 machine_mode smode;
4648 int smode_bsize, mode_bsize;
4649 rtx op, clobber;
4651 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
4652 return false;
4654 /* Generate INSERT IMMEDIATE (IILL et al). */
4655 /* (set (ze (reg)) (const_int)). */
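/* For illustration (a sketch of the loop below, not from the original
   source): with TARGET_EXTIMM, BITPOS = 0 and BITSIZE = 64 the loop emits
   two 32-bit ZERO_EXTRACT moves, consuming VAL 32 bits at a time (the
   IIHF/IILF forms); without TARGET_EXTIMM it falls back to four 16-bit
   inserts (IIHH, IIHL, IILH, IILL).  */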
4656 if (TARGET_ZARCH
4657 && register_operand (dest, word_mode)
4658 && (bitpos % 16) == 0
4659 && (bitsize % 16) == 0
4660 && const_int_operand (src, VOIDmode))
4662 HOST_WIDE_INT val = INTVAL (src);
4663 int regpos = bitpos + bitsize;
4665 while (regpos > bitpos)
4667 machine_mode putmode;
4668 int putsize;
4670 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4671 putmode = SImode;
4672 else
4673 putmode = HImode;
4675 putsize = GET_MODE_BITSIZE (putmode);
4676 regpos -= putsize;
4677 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4678 GEN_INT (putsize),
4679 GEN_INT (regpos)),
4680 gen_int_mode (val, putmode));
4681 val >>= putsize;
4683 gcc_assert (regpos == bitpos);
4684 return true;
4687 smode = smallest_mode_for_size (bitsize, MODE_INT);
4688 smode_bsize = GET_MODE_BITSIZE (smode);
4689 mode_bsize = GET_MODE_BITSIZE (mode);
4691 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4692 if (bitpos == 0
4693 && (bitsize % BITS_PER_UNIT) == 0
4694 && MEM_P (dest)
4695 && (register_operand (src, word_mode)
4696 || const_int_operand (src, VOIDmode)))
4698 /* Emit standard pattern if possible. */
4699 if (smode_bsize == bitsize)
4701 emit_move_insn (adjust_address (dest, smode, 0),
4702 gen_lowpart (smode, src));
4703 return true;
4706 /* (set (ze (mem)) (const_int)). */
4707 else if (const_int_operand (src, VOIDmode))
4709 int size = bitsize / BITS_PER_UNIT;
4710 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4711 BLKmode,
4712 UNITS_PER_WORD - size);
4714 dest = adjust_address (dest, BLKmode, 0);
4715 set_mem_size (dest, size);
4716 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4717 return true;
4720 /* (set (ze (mem)) (reg)). */
4721 else if (register_operand (src, word_mode))
4723 if (bitsize <= 32)
4724 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4725 const0_rtx), src);
4726 else
4728 /* Emit st,stcmh sequence. */
4729 int stcmh_width = bitsize - 32;
4730 int size = stcmh_width / BITS_PER_UNIT;
4732 emit_move_insn (adjust_address (dest, SImode, size),
4733 gen_lowpart (SImode, src));
4734 set_mem_size (dest, size);
4735 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4736 GEN_INT (stcmh_width),
4737 const0_rtx),
4738 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4740 return true;
4744 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4745 if ((bitpos % BITS_PER_UNIT) == 0
4746 && (bitsize % BITS_PER_UNIT) == 0
4747 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4748 && MEM_P (src)
4749 && (mode == DImode || mode == SImode)
4750 && register_operand (dest, mode))
4752 /* Emit a strict_low_part pattern if possible. */
4753 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4755 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4756 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4757 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4758 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4759 return true;
4762 /* ??? There are more powerful versions of ICM that are not
4763 completely represented in the md file. */
4766 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4767 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4769 machine_mode mode_s = GET_MODE (src);
4771 if (mode_s == VOIDmode)
4773 /* Assume const_int etc already in the proper mode. */
4774 src = force_reg (mode, src);
4776 else if (mode_s != mode)
4778 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4779 src = force_reg (mode_s, src);
4780 src = gen_lowpart (mode, src);
4783 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
4784 op = gen_rtx_SET (VOIDmode, op, src);
4786 if (!TARGET_ZEC12)
4788 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4789 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
4791 emit_insn (op);
4793 return true;
4796 return false;
4799 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4800 register that holds VAL of mode MODE shifted by COUNT bits. */
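/* Worked example (illustrative): for a QImode VAL of 0xab and a COUNT of
   16, VAL is first masked with GET_MODE_MASK (QImode) = 0xff and then
   shifted left, yielding 0x00ab0000, i.e. the byte placed at the bit
   position it occupies within its containing SImode word.  */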
4802 static inline rtx
4803 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
4805 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4806 NULL_RTX, 1, OPTAB_DIRECT);
4807 return expand_simple_binop (SImode, ASHIFT, val, count,
4808 NULL_RTX, 1, OPTAB_DIRECT);
4811 /* Structure to hold the initial parameters for a compare_and_swap operation
4812 in HImode and QImode. */
4814 struct alignment_context
4816 rtx memsi; /* SI aligned memory location. */
4817 rtx shift; /* Bit offset with regard to lsb. */
4818 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4819 rtx modemaski; /* ~modemask */
4820 bool aligned; /* True if memory is aligned, false otherwise. */
4823 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4824 structure AC for transparent simplification, if the memory alignment is known
4825 to be at least 32 bit. MEM is the memory location for the actual operation
4826 and MODE its mode. */
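/* Worked example (illustrative): for a QImode MEM whose address ADDR
   satisfies ADDR & 3 == 1 and whose alignment is unknown, memsi covers
   ADDR & -4, byteoffset is 1, and shift becomes (4 - 1 - 1) * 8 = 16 bits,
   so modemask is 0xff << 16: the byte lives in bits 16..23 (counted from
   the least significant bit) of the big-endian SImode word.  */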
4828 static void
4829 init_alignment_context (struct alignment_context *ac, rtx mem,
4830 machine_mode mode)
4832 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4833 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4835 if (ac->aligned)
4836 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4837 else
4839 /* Alignment is unknown. */
4840 rtx byteoffset, addr, align;
4842 /* Force the address into a register. */
4843 addr = force_reg (Pmode, XEXP (mem, 0));
4845 /* Align it to SImode. */
4846 align = expand_simple_binop (Pmode, AND, addr,
4847 GEN_INT (-GET_MODE_SIZE (SImode)),
4848 NULL_RTX, 1, OPTAB_DIRECT);
4849 /* Generate MEM. */
4850 ac->memsi = gen_rtx_MEM (SImode, align);
4851 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4852 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4853 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4855 /* Calculate shiftcount. */
4856 byteoffset = expand_simple_binop (Pmode, AND, addr,
4857 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4858 NULL_RTX, 1, OPTAB_DIRECT);
4859 /* As we already have some offset, evaluate the remaining distance. */
4860 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4861 NULL_RTX, 1, OPTAB_DIRECT);
4864 /* Shift is the byte count, but we need the bitcount. */
4865 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4866 NULL_RTX, 1, OPTAB_DIRECT);
4868 /* Calculate masks. */
4869 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4870 GEN_INT (GET_MODE_MASK (mode)),
4871 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4872 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4873 NULL_RTX, 1);
4876 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4877 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4878 perform the merge in SEQ2. */
4880 static rtx
4881 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4882 machine_mode mode, rtx val, rtx ins)
4884 rtx tmp;
4886 if (ac->aligned)
4888 start_sequence ();
4889 tmp = copy_to_mode_reg (SImode, val);
4890 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4891 const0_rtx, ins))
4893 *seq1 = NULL;
4894 *seq2 = get_insns ();
4895 end_sequence ();
4896 return tmp;
4898 end_sequence ();
4901 /* Failed to use insv. Generate a two part shift and mask. */
4902 start_sequence ();
4903 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4904 *seq1 = get_insns ();
4905 end_sequence ();
4907 start_sequence ();
4908 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4909 *seq2 = get_insns ();
4910 end_sequence ();
4912 return tmp;
4915 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4916 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4917 value to set if CMP == MEM. */
4919 void
4920 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4921 rtx cmp, rtx new_rtx, bool is_weak)
4923 struct alignment_context ac;
4924 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4925 rtx res = gen_reg_rtx (SImode);
4926 rtx_code_label *csloop = NULL, *csend = NULL;
4928 gcc_assert (MEM_P (mem));
4930 init_alignment_context (&ac, mem, mode);
4932 /* Load full word. Subsequent loads are performed by CS. */
4933 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4934 NULL_RTX, 1, OPTAB_DIRECT);
4936 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4937 possible, we try to use insv to make this happen efficiently. If
4938 that fails we'll generate code both inside and outside the loop. */
4939 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4940 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4942 if (seq0)
4943 emit_insn (seq0);
4944 if (seq1)
4945 emit_insn (seq1);
4947 /* Start CS loop. */
4948 if (!is_weak)
4950 /* Begin assuming success. */
4951 emit_move_insn (btarget, const1_rtx);
4953 csloop = gen_label_rtx ();
4954 csend = gen_label_rtx ();
4955 emit_label (csloop);
4958 /* val = "<mem>00..0<mem>"
4959 * cmp = "00..0<cmp>00..0"
4960 * new = "00..0<new>00..0"
4963 emit_insn (seq2);
4964 emit_insn (seq3);
4966 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4967 if (is_weak)
4968 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4969 else
4971 rtx tmp;
4973 /* Jump to end if we're done (likely?). */
4974 s390_emit_jump (csend, cc);
4976 /* Check for changes outside mode, and loop internal if so.
4977 Arrange the moves so that the compare is adjacent to the
4978 branch so that we can generate CRJ. */
4979 tmp = copy_to_reg (val);
4980 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4981 1, OPTAB_DIRECT);
4982 cc = s390_emit_compare (NE, val, tmp);
4983 s390_emit_jump (csloop, cc);
4985 /* Failed. */
4986 emit_move_insn (btarget, const0_rtx);
4987 emit_label (csend);
4990 /* Return the correct part of the bitfield. */
4991 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4992 NULL_RTX, 1, OPTAB_DIRECT), 1);
4995 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4996 and VAL the value to play with. If AFTER is true then store the value
4997 MEM holds after the operation, if AFTER is false then store the value MEM
4998 holds before the operation. If TARGET is zero then discard that value, else
4999 store it to TARGET. */
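/* Illustrative example (not from the original source): a
   __atomic_fetch_or on a QImode object expands to a compare-and-swap loop
   on the containing aligned SImode word: the shifted byte is OR'ed into a
   copy of the word, and CS retries until no other CPU has changed the
   word in the meantime.  TARGET then receives the old (AFTER == false) or
   the new (AFTER == true) byte, shifted back down.  */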
5001 void
5002 s390_expand_atomic (machine_mode mode, enum rtx_code code,
5003 rtx target, rtx mem, rtx val, bool after)
5005 struct alignment_context ac;
5006 rtx cmp;
5007 rtx new_rtx = gen_reg_rtx (SImode);
5008 rtx orig = gen_reg_rtx (SImode);
5009 rtx_code_label *csloop = gen_label_rtx ();
5011 gcc_assert (!target || register_operand (target, VOIDmode));
5012 gcc_assert (MEM_P (mem));
5014 init_alignment_context (&ac, mem, mode);
5016 /* Shift val to the correct bit positions.
5017 Preserve "icm", but prevent "ex icm". */
5018 if (!(ac.aligned && code == SET && MEM_P (val)))
5019 val = s390_expand_mask_and_shift (val, mode, ac.shift);
5021 /* Further preparation insns. */
5022 if (code == PLUS || code == MINUS)
5023 emit_move_insn (orig, val);
5024 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
5025 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
5026 NULL_RTX, 1, OPTAB_DIRECT);
5028 /* Load full word. Subsequent loads are performed by CS. */
5029 cmp = force_reg (SImode, ac.memsi);
5031 /* Start CS loop. */
5032 emit_label (csloop);
5033 emit_move_insn (new_rtx, cmp);
5035 /* Patch new with val at correct position. */
5036 switch (code)
5038 case PLUS:
5039 case MINUS:
5040 val = expand_simple_binop (SImode, code, new_rtx, orig,
5041 NULL_RTX, 1, OPTAB_DIRECT);
5042 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5043 NULL_RTX, 1, OPTAB_DIRECT);
5044 /* FALLTHRU */
5045 case SET:
5046 if (ac.aligned && MEM_P (val))
5047 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5048 0, 0, SImode, val);
5049 else
5051 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5052 NULL_RTX, 1, OPTAB_DIRECT);
5053 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5054 NULL_RTX, 1, OPTAB_DIRECT);
5056 break;
5057 case AND:
5058 case IOR:
5059 case XOR:
5060 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5061 NULL_RTX, 1, OPTAB_DIRECT);
5062 break;
5063 case MULT: /* NAND */
5064 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5065 NULL_RTX, 1, OPTAB_DIRECT);
5066 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5067 NULL_RTX, 1, OPTAB_DIRECT);
5068 break;
5069 default:
5070 gcc_unreachable ();
5073 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5074 ac.memsi, cmp, new_rtx));
5076 /* Return the correct part of the bitfield. */
5077 if (target)
5078 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5079 after ? new_rtx : cmp, ac.shift,
5080 NULL_RTX, 1, OPTAB_DIRECT), 1);
5083 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5084 We need to emit DTP-relative relocations. */
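/* For instance (illustrative), for SIZE == 8 and X a symbol_ref to foo
   this emits "\t.quad\tfoo@DTPOFF".  */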
5086 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5088 static void
5089 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5091 switch (size)
5093 case 4:
5094 fputs ("\t.long\t", file);
5095 break;
5096 case 8:
5097 fputs ("\t.quad\t", file);
5098 break;
5099 default:
5100 gcc_unreachable ();
5102 output_addr_const (file, x);
5103 fputs ("@DTPOFF", file);
5106 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5107 /* Implement TARGET_MANGLE_TYPE. */
5109 static const char *
5110 s390_mangle_type (const_tree type)
5112 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5113 && TARGET_LONG_DOUBLE_128)
5114 return "g";
5116 /* For all other types, use normal C++ mangling. */
5117 return NULL;
5119 #endif
5121 /* In the name of slightly smaller debug output, and to cater to
5122 general assembler lossage, recognize various UNSPEC sequences
5123 and turn them back into a direct symbol reference. */
5125 static rtx
5126 s390_delegitimize_address (rtx orig_x)
5128 rtx x, y;
5130 orig_x = delegitimize_mem_from_attrs (orig_x);
5131 x = orig_x;
5133 /* Extract the symbol ref from:
5134 (plus:SI (reg:SI 12 %r12)
5135 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5136 UNSPEC_GOTOFF/PLTOFF)))
5138 (plus:SI (reg:SI 12 %r12)
5139 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5140 UNSPEC_GOTOFF/PLTOFF)
5141 (const_int 4 [0x4])))) */
5142 if (GET_CODE (x) == PLUS
5143 && REG_P (XEXP (x, 0))
5144 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5145 && GET_CODE (XEXP (x, 1)) == CONST)
5147 HOST_WIDE_INT offset = 0;
5149 /* The const operand. */
5150 y = XEXP (XEXP (x, 1), 0);
5152 if (GET_CODE (y) == PLUS
5153 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5155 offset = INTVAL (XEXP (y, 1));
5156 y = XEXP (y, 0);
5159 if (GET_CODE (y) == UNSPEC
5160 && (XINT (y, 1) == UNSPEC_GOTOFF
5161 || XINT (y, 1) == UNSPEC_PLTOFF))
5162 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5165 if (GET_CODE (x) != MEM)
5166 return orig_x;
5168 x = XEXP (x, 0);
5169 if (GET_CODE (x) == PLUS
5170 && GET_CODE (XEXP (x, 1)) == CONST
5171 && GET_CODE (XEXP (x, 0)) == REG
5172 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5174 y = XEXP (XEXP (x, 1), 0);
5175 if (GET_CODE (y) == UNSPEC
5176 && XINT (y, 1) == UNSPEC_GOT)
5177 y = XVECEXP (y, 0, 0);
5178 else
5179 return orig_x;
5181 else if (GET_CODE (x) == CONST)
5183 /* Extract the symbol ref from:
5184 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5185 UNSPEC_PLT/GOTENT))) */
5187 y = XEXP (x, 0);
5188 if (GET_CODE (y) == UNSPEC
5189 && (XINT (y, 1) == UNSPEC_GOTENT
5190 || XINT (y, 1) == UNSPEC_PLT))
5191 y = XVECEXP (y, 0, 0);
5192 else
5193 return orig_x;
5195 else
5196 return orig_x;
5198 if (GET_MODE (orig_x) != Pmode)
5200 if (GET_MODE (orig_x) == BLKmode)
5201 return orig_x;
5202 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5203 if (y == NULL_RTX)
5204 return orig_x;
5206 return y;
5209 /* Output operand OP to stdio stream FILE.
5210 OP is an address (register + offset) which is not used to address data;
5211 instead the rightmost bits are interpreted as the value. */
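/* For example (illustrative): a shift count operand of
   (plus (reg %r3) (const_int 4100)) is printed as "4(%r3)", since only
   the low 12 bits of the offset are significant (4100 & 0xfff == 4).  */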
5213 static void
5214 print_shift_count_operand (FILE *file, rtx op)
5216 HOST_WIDE_INT offset;
5217 rtx base;
5219 /* Extract base register and offset. */
5220 if (!s390_decompose_shift_count (op, &base, &offset))
5221 gcc_unreachable ();
5223 /* Sanity check. */
5224 if (base)
5226 gcc_assert (GET_CODE (base) == REG);
5227 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5228 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5231 /* Offsets are restricted to twelve bits. */
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5233 if (base)
5234 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5237 /* Assigns the number of NOP halfwords to be emitted before and after the
5238 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
5239 If hotpatching is disabled for the function, the values are set to zero.
5242 static void
5243 s390_function_num_hotpatch_hw (tree decl,
5244 int *hw_before,
5245 int *hw_after)
5247 tree attr;
5249 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
5251 /* Handle the arguments of the hotpatch attribute. The values
5252 specified via attribute might override the cmdline argument
5253 values. */
5254 if (attr)
5256 tree args = TREE_VALUE (attr);
5258 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
5259 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
5261 else
5263 /* Use the values specified by the cmdline arguments. */
5264 *hw_before = s390_hotpatch_hw_before_label;
5265 *hw_after = s390_hotpatch_hw_after_label;
5269 /* Write the extra assembler code needed to declare a function properly. */
5271 void
5272 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
5273 tree decl)
5275 int hw_before, hw_after;
5277 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
5278 if (hw_before > 0)
5280 unsigned int function_alignment;
5281 int i;
5283 /* Add a trampoline code area before the function label and initialize it
5284 with two-byte nop instructions. This area can be overwritten with code
5285 that jumps to a patched version of the function. */
5286 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
5287 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
5288 hw_before);
5289 for (i = 1; i < hw_before; i++)
5290 fputs ("\tnopr\t%r7\n", asm_out_file);
5292 /* Note: The function label must be aligned so that (a) the bytes of the
5293 following nop do not cross a cacheline boundary, and (b) a jump address
5294 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
5295 stored directly before the label without crossing a cacheline
5296 boundary. All this is necessary to make sure the trampoline code can
5297 be changed atomically.
5298 This alignment is done automatically using the FUNCTION_BOUNDARY, but
5299 if there are NOPs before the function label, the alignment is placed
5300 before them. So it is necessary to duplicate the alignment after the
5301 NOPs. */
5302 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
5303 if (! DECL_USER_ALIGN (decl))
5304 function_alignment = MAX (function_alignment,
5305 (unsigned int) align_functions);
5306 fputs ("\t# alignment for hotpatch\n", asm_out_file);
5307 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
5310 ASM_OUTPUT_LABEL (asm_out_file, fname);
5311 if (hw_after > 0)
5312 asm_fprintf (asm_out_file,
5313 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
5314 hw_after);
5317 /* Output machine-dependent UNSPECs occurring in address constant X
5318 in assembler syntax to stdio stream FILE. Returns true if the
5319 constant X could be recognized, false otherwise. */
5321 static bool
5322 s390_output_addr_const_extra (FILE *file, rtx x)
5324 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5325 switch (XINT (x, 1))
5327 case UNSPEC_GOTENT:
5328 output_addr_const (file, XVECEXP (x, 0, 0));
5329 fprintf (file, "@GOTENT");
5330 return true;
5331 case UNSPEC_GOT:
5332 output_addr_const (file, XVECEXP (x, 0, 0));
5333 fprintf (file, "@GOT");
5334 return true;
5335 case UNSPEC_GOTOFF:
5336 output_addr_const (file, XVECEXP (x, 0, 0));
5337 fprintf (file, "@GOTOFF");
5338 return true;
5339 case UNSPEC_PLT:
5340 output_addr_const (file, XVECEXP (x, 0, 0));
5341 fprintf (file, "@PLT");
5342 return true;
5343 case UNSPEC_PLTOFF:
5344 output_addr_const (file, XVECEXP (x, 0, 0));
5345 fprintf (file, "@PLTOFF");
5346 return true;
5347 case UNSPEC_TLSGD:
5348 output_addr_const (file, XVECEXP (x, 0, 0));
5349 fprintf (file, "@TLSGD");
5350 return true;
5351 case UNSPEC_TLSLDM:
5352 assemble_name (file, get_some_local_dynamic_name ());
5353 fprintf (file, "@TLSLDM");
5354 return true;
5355 case UNSPEC_DTPOFF:
5356 output_addr_const (file, XVECEXP (x, 0, 0));
5357 fprintf (file, "@DTPOFF");
5358 return true;
5359 case UNSPEC_NTPOFF:
5360 output_addr_const (file, XVECEXP (x, 0, 0));
5361 fprintf (file, "@NTPOFF");
5362 return true;
5363 case UNSPEC_GOTNTPOFF:
5364 output_addr_const (file, XVECEXP (x, 0, 0));
5365 fprintf (file, "@GOTNTPOFF");
5366 return true;
5367 case UNSPEC_INDNTPOFF:
5368 output_addr_const (file, XVECEXP (x, 0, 0));
5369 fprintf (file, "@INDNTPOFF");
5370 return true;
5373 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5374 switch (XINT (x, 1))
5376 case UNSPEC_POOL_OFFSET:
5377 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5378 output_addr_const (file, x);
5379 return true;
5381 return false;
5384 /* Output address operand ADDR in assembler syntax to
5385 stdio stream FILE. */
5387 void
5388 print_operand_address (FILE *file, rtx addr)
5390 struct s390_address ad;
5392 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5394 if (!TARGET_Z10)
5396 output_operand_lossage ("symbolic memory references are "
5397 "only supported on z10 or later");
5398 return;
5400 output_addr_const (file, addr);
5401 return;
5404 if (!s390_decompose_address (addr, &ad)
5405 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5406 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5407 output_operand_lossage ("cannot decompose address");
5409 if (ad.disp)
5410 output_addr_const (file, ad.disp);
5411 else
5412 fprintf (file, "0");
5414 if (ad.base && ad.indx)
5415 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5416 reg_names[REGNO (ad.base)]);
5417 else if (ad.base)
5418 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5421 /* Output operand X in assembler syntax to stdio stream FILE.
5422 CODE specified the format flag. The following format flags
5423 are recognized:
5425 'C': print opcode suffix for branch condition.
5426 'D': print opcode suffix for inverse branch condition.
5427 'E': print opcode suffix for branch on index instruction.
5428 'G': print the size of the operand in bytes.
5429 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5430 'M': print the second word of a TImode operand.
5431 'N': print the second word of a DImode operand.
5432 'O': print only the displacement of a memory reference.
5433 'R': print only the base register of a memory reference.
5434 'S': print S-type memory reference (base+displacement).
5435 'Y': print shift count operand.
5437 'b': print integer X as if it's an unsigned byte.
5438 'c': print integer X as if it's a signed byte.
5439 'e': "end" of DImode contiguous bitmask X.
5440 'f': "end" of SImode contiguous bitmask X.
5441 'h': print integer X as if it's a signed halfword.
5442 'i': print the first nonzero HImode part of X.
5443 'j': print the first HImode part unequal to -1 of X.
5444 'k': print the first nonzero SImode part of X.
5445 'm': print the first SImode part unequal to -1 of X.
5446 'o': print integer X as if it's an unsigned 32bit word.
5447 's': "start" of DImode contiguous bitmask X.
5448 't': "start" of SImode contiguous bitmask X.
5449 'x': print integer X as if it's an unsigned halfword.
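/* Worked examples for a few of the modifiers above (illustrative, not
   from the original source): with X = (const_int 0xfffe), '%h' prints -2
   and '%x' prints 65534; for a DImode mask whose twelve contiguous one
   bits start at bit 12 (LSB numbering), '%s' prints 40 and '%e' prints
   51, the start and end positions in the left-to-right bit numbering
   used by instructions such as RISBG.  */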
5452 void
5453 print_operand (FILE *file, rtx x, int code)
5455 HOST_WIDE_INT ival;
5457 switch (code)
5459 case 'C':
5460 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5461 return;
5463 case 'D':
5464 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5465 return;
5467 case 'E':
5468 if (GET_CODE (x) == LE)
5469 fprintf (file, "l");
5470 else if (GET_CODE (x) == GT)
5471 fprintf (file, "h");
5472 else
5473 output_operand_lossage ("invalid comparison operator "
5474 "for 'E' output modifier");
5475 return;
5477 case 'J':
5478 if (GET_CODE (x) == SYMBOL_REF)
5480 fprintf (file, "%s", ":tls_load:");
5481 output_addr_const (file, x);
5483 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5485 fprintf (file, "%s", ":tls_gdcall:");
5486 output_addr_const (file, XVECEXP (x, 0, 0));
5488 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5490 fprintf (file, "%s", ":tls_ldcall:");
5491 const char *name = get_some_local_dynamic_name ();
5492 gcc_assert (name);
5493 assemble_name (file, name);
5495 else
5496 output_operand_lossage ("invalid reference for 'J' output modifier");
5497 return;
5499 case 'G':
5500 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5501 return;
5503 case 'O':
5505 struct s390_address ad;
5506 int ret;
5508 if (!MEM_P (x))
5510 output_operand_lossage ("memory reference expected for "
5511 "'O' output modifier");
5512 return;
5515 ret = s390_decompose_address (XEXP (x, 0), &ad);
5517 if (!ret
5518 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5519 || ad.indx)
5521 output_operand_lossage ("invalid address for 'O' output modifier");
5522 return;
5525 if (ad.disp)
5526 output_addr_const (file, ad.disp);
5527 else
5528 fprintf (file, "0");
5530 return;
5532 case 'R':
5534 struct s390_address ad;
5535 int ret;
5537 if (!MEM_P (x))
5539 output_operand_lossage ("memory reference expected for "
5540 "'R' output modifier");
5541 return;
5544 ret = s390_decompose_address (XEXP (x, 0), &ad);
5546 if (!ret
5547 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5548 || ad.indx)
5550 output_operand_lossage ("invalid address for 'R' output modifier");
5551 return;
5554 if (ad.base)
5555 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5556 else
5557 fprintf (file, "0");
5559 return;
5561 case 'S':
5563 struct s390_address ad;
5564 int ret;
5566 if (!MEM_P (x))
5568 output_operand_lossage ("memory reference expected for "
5569 "'S' output modifier");
5570 return;
5572 ret = s390_decompose_address (XEXP (x, 0), &ad);
5574 if (!ret
5575 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5576 || ad.indx)
5578 output_operand_lossage ("invalid address for 'S' output modifier");
5579 return;
5582 if (ad.disp)
5583 output_addr_const (file, ad.disp);
5584 else
5585 fprintf (file, "0");
5587 if (ad.base)
5588 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5590 return;
5592 case 'N':
5593 if (GET_CODE (x) == REG)
5594 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5595 else if (GET_CODE (x) == MEM)
5596 x = change_address (x, VOIDmode,
5597 plus_constant (Pmode, XEXP (x, 0), 4));
5598 else
5599 output_operand_lossage ("register or memory expression expected "
5600 "for 'N' output modifier");
5601 break;
5603 case 'M':
5604 if (GET_CODE (x) == REG)
5605 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5606 else if (GET_CODE (x) == MEM)
5607 x = change_address (x, VOIDmode,
5608 plus_constant (Pmode, XEXP (x, 0), 8));
5609 else
5610 output_operand_lossage ("register or memory expression expected "
5611 "for 'M' output modifier");
5612 break;
5614 case 'Y':
5615 print_shift_count_operand (file, x);
5616 return;
5619 switch (GET_CODE (x))
5621 case REG:
5622 fprintf (file, "%s", reg_names[REGNO (x)]);
5623 break;
5625 case MEM:
5626 output_address (XEXP (x, 0));
5627 break;
5629 case CONST:
5630 case CODE_LABEL:
5631 case LABEL_REF:
5632 case SYMBOL_REF:
5633 output_addr_const (file, x);
5634 break;
5636 case CONST_INT:
5637 ival = INTVAL (x);
5638 switch (code)
5640 case 0:
5641 break;
5642 case 'b':
5643 ival &= 0xff;
5644 break;
5645 case 'c':
5646 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5647 break;
5648 case 'x':
5649 ival &= 0xffff;
5650 break;
5651 case 'h':
5652 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5653 break;
5654 case 'i':
5655 ival = s390_extract_part (x, HImode, 0);
5656 break;
5657 case 'j':
5658 ival = s390_extract_part (x, HImode, -1);
5659 break;
5660 case 'k':
5661 ival = s390_extract_part (x, SImode, 0);
5662 break;
5663 case 'm':
5664 ival = s390_extract_part (x, SImode, -1);
5665 break;
5666 case 'o':
5667 ival &= 0xffffffff;
5668 break;
5669 case 'e': case 'f':
5670 case 's': case 't':
5672 int pos, len;
5673 bool ok;
5675 len = (code == 's' || code == 'e' ? 64 : 32);
5676 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5677 gcc_assert (ok);
5678 if (code == 's' || code == 't')
5679 ival = 64 - pos - len;
5680 else
5681 ival = 64 - 1 - pos;
5683 break;
5684 default:
5685 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5687 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5688 break;
5690 case CONST_DOUBLE:
5691 gcc_assert (GET_MODE (x) == VOIDmode);
5692 if (code == 'b')
5693 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5694 else if (code == 'x')
5695 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5696 else if (code == 'h')
5697 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5698 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5699 else
5701 if (code == 0)
5702 output_operand_lossage ("invalid constant - try using "
5703 "an output modifier");
5704 else
5705 output_operand_lossage ("invalid constant for output modifier '%c'",
5706 code);
5708 break;
5710 default:
5711 if (code == 0)
5712 output_operand_lossage ("invalid expression - try using "
5713 "an output modifier");
5714 else
5715 output_operand_lossage ("invalid expression for output "
5716 "modifier '%c'", code);
5717 break;
5721 /* Target hook for assembling integer objects. We need to define it
5722 here to work around a bug in some versions of GAS, which couldn't
5723 handle values smaller than INT_MIN when printed in decimal. */
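/* For example (illustrative): a 64-bit value of -2147483649, which is
   smaller than INT_MIN, is emitted as "\t.quad\t0xffffffff7fffffff"
   instead of in decimal.  */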
5725 static bool
5726 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5728 if (size == 8 && aligned_p
5729 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5731 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5732 INTVAL (x));
5733 return true;
5735 return default_assemble_integer (x, size, aligned_p);
5738 /* Returns true if register REGNO is used for forming
5739 a memory address in expression X. */
5741 static bool
5742 reg_used_in_mem_p (int regno, rtx x)
5744 enum rtx_code code = GET_CODE (x);
5745 int i, j;
5746 const char *fmt;
5748 if (code == MEM)
5750 if (refers_to_regno_p (regno, XEXP (x, 0)))
5751 return true;
5753 else if (code == SET
5754 && GET_CODE (SET_DEST (x)) == PC)
5756 if (refers_to_regno_p (regno, SET_SRC (x)))
5757 return true;
5760 fmt = GET_RTX_FORMAT (code);
5761 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5763 if (fmt[i] == 'e'
5764 && reg_used_in_mem_p (regno, XEXP (x, i)))
5765 return true;
5767 else if (fmt[i] == 'E')
5768 for (j = 0; j < XVECLEN (x, i); j++)
5769 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5770 return true;
5772 return false;
5775 /* Returns true if expression DEP_RTX sets an address register
5776 used by instruction INSN to address memory. */
5778 static bool
5779 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
5781 rtx target, pat;
5783 if (NONJUMP_INSN_P (dep_rtx))
5784 dep_rtx = PATTERN (dep_rtx);
5786 if (GET_CODE (dep_rtx) == SET)
5788 target = SET_DEST (dep_rtx);
5789 if (GET_CODE (target) == STRICT_LOW_PART)
5790 target = XEXP (target, 0);
5791 while (GET_CODE (target) == SUBREG)
5792 target = SUBREG_REG (target);
5794 if (GET_CODE (target) == REG)
5796 int regno = REGNO (target);
5798 if (s390_safe_attr_type (insn) == TYPE_LA)
5800 pat = PATTERN (insn);
5801 if (GET_CODE (pat) == PARALLEL)
5803 gcc_assert (XVECLEN (pat, 0) == 2);
5804 pat = XVECEXP (pat, 0, 0);
5806 gcc_assert (GET_CODE (pat) == SET);
5807 return refers_to_regno_p (regno, SET_SRC (pat));
5809 else if (get_attr_atype (insn) == ATYPE_AGEN)
5810 return reg_used_in_mem_p (regno, PATTERN (insn));
5813 return false;
5816 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
5819 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
5821 rtx dep_rtx = PATTERN (dep_insn);
5822 int i;
5824 if (GET_CODE (dep_rtx) == SET
5825 && addr_generation_dependency_p (dep_rtx, insn))
5826 return 1;
5827 else if (GET_CODE (dep_rtx) == PARALLEL)
5829 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5831 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5832 return 1;
5835 return 0;
5839 /* A C statement (sans semicolon) to update the integer scheduling priority
5840 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5841 reduce the priority to execute INSN later. Do not define this macro if
5842 you do not need to adjust the scheduling priorities of insns.
5844 A STD instruction should be scheduled earlier,
5845 in order to use the bypass. */
5846 static int
5847 s390_adjust_priority (rtx_insn *insn, int priority)
5849 if (! INSN_P (insn))
5850 return priority;
5852 if (s390_tune != PROCESSOR_2084_Z990
5853 && s390_tune != PROCESSOR_2094_Z9_109
5854 && s390_tune != PROCESSOR_2097_Z10
5855 && s390_tune != PROCESSOR_2817_Z196
5856 && s390_tune != PROCESSOR_2827_ZEC12)
5857 return priority;
5859 switch (s390_safe_attr_type (insn))
5861 case TYPE_FSTOREDF:
5862 case TYPE_FSTORESF:
5863 priority = priority << 3;
5864 break;
5865 case TYPE_STORE:
5866 case TYPE_STM:
5867 priority = priority << 1;
5868 break;
5869 default:
5870 break;
5872 return priority;
5876 /* The number of instructions that can be issued per cycle. */
5878 static int
5879 s390_issue_rate (void)
5881 switch (s390_tune)
5883 case PROCESSOR_2084_Z990:
5884 case PROCESSOR_2094_Z9_109:
5885 case PROCESSOR_2817_Z196:
5886 return 3;
5887 case PROCESSOR_2097_Z10:
5888 case PROCESSOR_2827_ZEC12:
5889 return 2;
5890 default:
5891 return 1;
5895 static int
5896 s390_first_cycle_multipass_dfa_lookahead (void)
5898 return 4;
5901 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5902 Fix up MEMs as required. */
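/* For illustration (not from the original source): a reference such as
   (mem (symbol_ref ".LC0")) with CONSTANT_POOL_ADDRESS_P set is rewritten
   below into
   (mem (unspec [(symbol_ref ".LC0") (reg base)] UNSPEC_LTREF))
   so that the literal-pool base register becomes explicit.  */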
5904 static void
5905 annotate_constant_pool_refs (rtx *x)
5907 int i, j;
5908 const char *fmt;
5910 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5911 || !CONSTANT_POOL_ADDRESS_P (*x));
5913 /* Literal pool references can only occur inside a MEM ... */
5914 if (GET_CODE (*x) == MEM)
5916 rtx memref = XEXP (*x, 0);
5918 if (GET_CODE (memref) == SYMBOL_REF
5919 && CONSTANT_POOL_ADDRESS_P (memref))
5921 rtx base = cfun->machine->base_reg;
5922 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5923 UNSPEC_LTREF);
5925 *x = replace_equiv_address (*x, addr);
5926 return;
5929 if (GET_CODE (memref) == CONST
5930 && GET_CODE (XEXP (memref, 0)) == PLUS
5931 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5932 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5933 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5935 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5936 rtx sym = XEXP (XEXP (memref, 0), 0);
5937 rtx base = cfun->machine->base_reg;
5938 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5939 UNSPEC_LTREF);
5941 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5942 return;
5946 /* ... or a load-address type pattern. */
5947 if (GET_CODE (*x) == SET)
5949 rtx addrref = SET_SRC (*x);
5951 if (GET_CODE (addrref) == SYMBOL_REF
5952 && CONSTANT_POOL_ADDRESS_P (addrref))
5954 rtx base = cfun->machine->base_reg;
5955 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5956 UNSPEC_LTREF);
5958 SET_SRC (*x) = addr;
5959 return;
5962 if (GET_CODE (addrref) == CONST
5963 && GET_CODE (XEXP (addrref, 0)) == PLUS
5964 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5965 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5966 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5968 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5969 rtx sym = XEXP (XEXP (addrref, 0), 0);
5970 rtx base = cfun->machine->base_reg;
5971 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5972 UNSPEC_LTREF);
5974 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5975 return;
5979 /* Annotate LTREL_BASE as well. */
5980 if (GET_CODE (*x) == UNSPEC
5981 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5983 rtx base = cfun->machine->base_reg;
5984 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5985 UNSPEC_LTREL_BASE);
5986 return;
5989 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5990 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5992 if (fmt[i] == 'e')
5994 annotate_constant_pool_refs (&XEXP (*x, i));
5996 else if (fmt[i] == 'E')
5998 for (j = 0; j < XVECLEN (*x, i); j++)
5999 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
6004 /* Split all branches that exceed the maximum distance.
6005 Returns true if this created a new literal pool entry. */
6007 static int
6008 s390_split_branches (void)
6010 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
6011 int new_literal = 0, ret;
6012 rtx_insn *insn;
6013 rtx pat, target;
6014 rtx *label;
6016 /* We need correct insn addresses. */
6018 shorten_branches (get_insns ());
6020 /* Find all branches that exceed 64KB, and split them. */
6022 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6024 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
6025 continue;
6027 pat = PATTERN (insn);
6028 if (GET_CODE (pat) == PARALLEL)
6029 pat = XVECEXP (pat, 0, 0);
6030 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
6031 continue;
6033 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
6035 label = &SET_SRC (pat);
6037 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
6039 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
6040 label = &XEXP (SET_SRC (pat), 1);
6041 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
6042 label = &XEXP (SET_SRC (pat), 2);
6043 else
6044 continue;
6046 else
6047 continue;
6049 if (get_attr_length (insn) <= 4)
6050 continue;
6052 /* We are going to use the return register as scratch register,
6053 make sure it will be saved/restored by the prologue/epilogue. */
6054 cfun_frame_layout.save_return_addr_p = 1;
6056 if (!flag_pic)
6058 new_literal = 1;
6059 rtx mem = force_const_mem (Pmode, *label);
6060 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, mem), insn);
6061 INSN_ADDRESSES_NEW (set_insn, -1);
6062 annotate_constant_pool_refs (&PATTERN (set_insn));
6064 target = temp_reg;
6066 else
6068 new_literal = 1;
6069 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6070 UNSPEC_LTREL_OFFSET);
6071 target = gen_rtx_CONST (Pmode, target);
6072 target = force_const_mem (Pmode, target);
6073 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6074 INSN_ADDRESSES_NEW (set_insn, -1);
6075 annotate_constant_pool_refs (&PATTERN (set_insn));
6077 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6078 cfun->machine->base_reg),
6079 UNSPEC_LTREL_BASE);
6080 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6083 ret = validate_change (insn, label, target, 0);
6084 gcc_assert (ret);
6087 return new_literal;
6091 /* Find an annotated literal pool symbol referenced in RTX X,
6092 and store it at REF. Will abort if X contains references to
6093 more than one such pool symbol; multiple references to the same
6094 symbol are allowed, however.
6096 The rtx pointed to by REF must be initialized to NULL_RTX
6097 by the caller before calling this routine. */
6099 static void
6100 find_constant_pool_ref (rtx x, rtx *ref)
6102 int i, j;
6103 const char *fmt;
6105 /* Ignore LTREL_BASE references. */
6106 if (GET_CODE (x) == UNSPEC
6107 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6108 return;
6109 /* Likewise POOL_ENTRY insns. */
6110 if (GET_CODE (x) == UNSPEC_VOLATILE
6111 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6112 return;
6114 gcc_assert (GET_CODE (x) != SYMBOL_REF
6115 || !CONSTANT_POOL_ADDRESS_P (x));
6117 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6119 rtx sym = XVECEXP (x, 0, 0);
6120 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6121 && CONSTANT_POOL_ADDRESS_P (sym));
6123 if (*ref == NULL_RTX)
6124 *ref = sym;
6125 else
6126 gcc_assert (*ref == sym);
6128 return;
6131 fmt = GET_RTX_FORMAT (GET_CODE (x));
6132 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6134 if (fmt[i] == 'e')
6136 find_constant_pool_ref (XEXP (x, i), ref);
6138 else if (fmt[i] == 'E')
6140 for (j = 0; j < XVECLEN (x, i); j++)
6141 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6146 /* Replace every reference to the annotated literal pool
6147 symbol REF in X by its base plus OFFSET. */
6149 static void
6150 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6152 int i, j;
6153 const char *fmt;
6155 gcc_assert (*x != ref);
6157 if (GET_CODE (*x) == UNSPEC
6158 && XINT (*x, 1) == UNSPEC_LTREF
6159 && XVECEXP (*x, 0, 0) == ref)
6161 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6162 return;
6165 if (GET_CODE (*x) == PLUS
6166 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6167 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6168 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6169 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6171 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6172 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6173 return;
6176 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6177 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6179 if (fmt[i] == 'e')
6181 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6183 else if (fmt[i] == 'E')
6185 for (j = 0; j < XVECLEN (*x, i); j++)
6186 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
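/* For example, an annotated reference of the form

     (mem (unspec [(symbol_ref ...) (reg base)] UNSPEC_LTREF))

   is rewritten by the routine above into

     (mem (plus (reg base) OFFSET))

   where OFFSET is the pool-relative distance later computed by
   s390_find_constant or s390_find_execute.  */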
6191 /* Check whether X contains an UNSPEC_LTREL_BASE.
6192 Return its constant pool symbol if found, NULL_RTX otherwise. */
6194 static rtx
6195 find_ltrel_base (rtx x)
6197 int i, j;
6198 const char *fmt;
6200 if (GET_CODE (x) == UNSPEC
6201 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6202 return XVECEXP (x, 0, 0);
6204 fmt = GET_RTX_FORMAT (GET_CODE (x));
6205 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6207 if (fmt[i] == 'e')
6209 rtx fnd = find_ltrel_base (XEXP (x, i));
6210 if (fnd)
6211 return fnd;
6213 else if (fmt[i] == 'E')
6215 for (j = 0; j < XVECLEN (x, i); j++)
6217 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6218 if (fnd)
6219 return fnd;
6224 return NULL_RTX;
6227 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6229 static void
6230 replace_ltrel_base (rtx *x)
6232 int i, j;
6233 const char *fmt;
6235 if (GET_CODE (*x) == UNSPEC
6236 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6238 *x = XVECEXP (*x, 0, 1);
6239 return;
6242 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6243 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6245 if (fmt[i] == 'e')
6247 replace_ltrel_base (&XEXP (*x, i));
6249 else if (fmt[i] == 'E')
6251 for (j = 0; j < XVECLEN (*x, i); j++)
6252 replace_ltrel_base (&XVECEXP (*x, i, j));
6258 /* We keep a list of constants which we have to add to internal
6259 constant tables in the middle of large functions. */
6261 #define NR_C_MODES 11
6262 machine_mode constant_modes[NR_C_MODES] =
6264 TFmode, TImode, TDmode,
6265 DFmode, DImode, DDmode,
6266 SFmode, SImode, SDmode,
6267 HImode,
6268 QImode
6271 struct constant
6273 struct constant *next;
6274 rtx value;
6275 rtx_code_label *label;
6278 struct constant_pool
6280 struct constant_pool *next;
6281 rtx_insn *first_insn;
6282 rtx_insn *pool_insn;
6283 bitmap insns;
6284 rtx_insn *emit_pool_after;
6286 struct constant *constants[NR_C_MODES];
6287 struct constant *execute;
6288 rtx_code_label *label;
6289 int size;
6292 /* Allocate new constant_pool structure. */
6294 static struct constant_pool *
6295 s390_alloc_pool (void)
6297 struct constant_pool *pool;
6298 int i;
6300 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6301 pool->next = NULL;
6302 for (i = 0; i < NR_C_MODES; i++)
6303 pool->constants[i] = NULL;
6305 pool->execute = NULL;
6306 pool->label = gen_label_rtx ();
6307 pool->first_insn = NULL;
6308 pool->pool_insn = NULL;
6309 pool->insns = BITMAP_ALLOC (NULL);
6310 pool->size = 0;
6311 pool->emit_pool_after = NULL;
6313 return pool;
6316 /* Create new constant pool covering instructions starting at INSN
6317 and chain it to the end of POOL_LIST. */
6319 static struct constant_pool *
6320 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
6322 struct constant_pool *pool, **prev;
6324 pool = s390_alloc_pool ();
6325 pool->first_insn = insn;
6327 for (prev = pool_list; *prev; prev = &(*prev)->next)
6329 *prev = pool;
6331 return pool;
6334 /* End range of instructions covered by POOL at INSN and emit
6335 placeholder insn representing the pool. */
6337 static void
6338 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
6340 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6342 if (!insn)
6343 insn = get_last_insn ();
6345 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6346 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6349 /* Add INSN to the list of insns covered by POOL. */
6351 static void
6352 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6354 bitmap_set_bit (pool->insns, INSN_UID (insn));
6357 /* Return pool out of POOL_LIST that covers INSN. */
6359 static struct constant_pool *
6360 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6362 struct constant_pool *pool;
6364 for (pool = pool_list; pool; pool = pool->next)
6365 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6366 break;
6368 return pool;
6371 /* Add constant VAL of mode MODE to the constant pool POOL. */
6373 static void
6374 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
6376 struct constant *c;
6377 int i;
6379 for (i = 0; i < NR_C_MODES; i++)
6380 if (constant_modes[i] == mode)
6381 break;
6382 gcc_assert (i != NR_C_MODES);
6384 for (c = pool->constants[i]; c != NULL; c = c->next)
6385 if (rtx_equal_p (val, c->value))
6386 break;
6388 if (c == NULL)
6390 c = (struct constant *) xmalloc (sizeof *c);
6391 c->value = val;
6392 c->label = gen_label_rtx ();
6393 c->next = pool->constants[i];
6394 pool->constants[i] = c;
6395 pool->size += GET_MODE_SIZE (mode);
6399 /* Return an rtx that represents the offset of X from the start of
6400 pool POOL. */
6402 static rtx
6403 s390_pool_offset (struct constant_pool *pool, rtx x)
6405 rtx label;
6407 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6408 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6409 UNSPEC_POOL_OFFSET);
6410 return gen_rtx_CONST (GET_MODE (x), x);
6413 /* Find constant VAL of mode MODE in the constant pool POOL.
6414 Return an RTX describing the distance from the start of
6415 the pool to the location of the new constant. */
6417 static rtx
6418 s390_find_constant (struct constant_pool *pool, rtx val,
6419 machine_mode mode)
6421 struct constant *c;
6422 int i;
6424 for (i = 0; i < NR_C_MODES; i++)
6425 if (constant_modes[i] == mode)
6426 break;
6427 gcc_assert (i != NR_C_MODES);
6429 for (c = pool->constants[i]; c != NULL; c = c->next)
6430 if (rtx_equal_p (val, c->value))
6431 break;
6433 gcc_assert (c);
6435 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6438 /* Check whether INSN is an execute. Return the label_ref to its
6439 execute target template if so, NULL_RTX otherwise. */
6441 static rtx
6442 s390_execute_label (rtx insn)
6444 if (NONJUMP_INSN_P (insn)
6445 && GET_CODE (PATTERN (insn)) == PARALLEL
6446 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6447 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6448 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6450 return NULL_RTX;
6453 /* Add execute target for INSN to the constant pool POOL. */
6455 static void
6456 s390_add_execute (struct constant_pool *pool, rtx insn)
6458 struct constant *c;
6460 for (c = pool->execute; c != NULL; c = c->next)
6461 if (INSN_UID (insn) == INSN_UID (c->value))
6462 break;
6464 if (c == NULL)
6466 c = (struct constant *) xmalloc (sizeof *c);
6467 c->value = insn;
6468 c->label = gen_label_rtx ();
6469 c->next = pool->execute;
6470 pool->execute = c;
6471 pool->size += 6;
6475 /* Find execute target for INSN in the constant pool POOL.
6476 Return an RTX describing the distance from the start of
6477 the pool to the location of the execute target. */
6479 static rtx
6480 s390_find_execute (struct constant_pool *pool, rtx insn)
6482 struct constant *c;
6484 for (c = pool->execute; c != NULL; c = c->next)
6485 if (INSN_UID (insn) == INSN_UID (c->value))
6486 break;
6488 gcc_assert (c);
6490 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6493 /* For an execute INSN, extract the execute target template. */
6495 static rtx
6496 s390_execute_target (rtx insn)
6498 rtx pattern = PATTERN (insn);
6499 gcc_assert (s390_execute_label (insn));
6501 if (XVECLEN (pattern, 0) == 2)
6503 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6505 else
6507 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6508 int i;
6510 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6511 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6513 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6516 return pattern;
6519 /* Indicate that INSN cannot be duplicated. This is the case for
6520 execute insns that carry a unique label. */
6522 static bool
6523 s390_cannot_copy_insn_p (rtx_insn *insn)
6525 rtx label = s390_execute_label (insn);
6526 return label && label != const0_rtx;
6529 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6530 do not emit the pool base label. */
6532 static void
6533 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6535 struct constant *c;
6536 rtx_insn *insn = pool->pool_insn;
6537 int i;
6539 /* Switch to rodata section. */
6540 if (TARGET_CPU_ZARCH)
6542 insn = emit_insn_after (gen_pool_section_start (), insn);
6543 INSN_ADDRESSES_NEW (insn, -1);
6546 /* Ensure minimum pool alignment. */
6547 if (TARGET_CPU_ZARCH)
6548 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6549 else
6550 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6551 INSN_ADDRESSES_NEW (insn, -1);
6553 /* Emit pool base label. */
6554 if (!remote_label)
6556 insn = emit_label_after (pool->label, insn);
6557 INSN_ADDRESSES_NEW (insn, -1);
6560 /* Dump constants in descending alignment requirement order,
6561 ensuring proper alignment for every constant. */
6562 for (i = 0; i < NR_C_MODES; i++)
6563 for (c = pool->constants[i]; c; c = c->next)
6565 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6566 rtx value = copy_rtx (c->value);
6567 if (GET_CODE (value) == CONST
6568 && GET_CODE (XEXP (value, 0)) == UNSPEC
6569 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6570 && XVECLEN (XEXP (value, 0), 0) == 1)
6571 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6573 insn = emit_label_after (c->label, insn);
6574 INSN_ADDRESSES_NEW (insn, -1);
6576 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6577 gen_rtvec (1, value),
6578 UNSPECV_POOL_ENTRY);
6579 insn = emit_insn_after (value, insn);
6580 INSN_ADDRESSES_NEW (insn, -1);
6583 /* Ensure minimum alignment for instructions. */
6584 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6585 INSN_ADDRESSES_NEW (insn, -1);
6587 /* Output in-pool execute template insns. */
6588 for (c = pool->execute; c; c = c->next)
6590 insn = emit_label_after (c->label, insn);
6591 INSN_ADDRESSES_NEW (insn, -1);
6593 insn = emit_insn_after (s390_execute_target (c->value), insn);
6594 INSN_ADDRESSES_NEW (insn, -1);
6597 /* Switch back to previous section. */
6598 if (TARGET_CPU_ZARCH)
6600 insn = emit_insn_after (gen_pool_section_end (), insn);
6601 INSN_ADDRESSES_NEW (insn, -1);
6604 insn = emit_barrier_after (insn);
6605 INSN_ADDRESSES_NEW (insn, -1);
6607 /* Remove placeholder insn. */
6608 remove_insn (pool->pool_insn);
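/* The emitted pool therefore looks roughly as follows (labels are
   illustrative only):

       .align  8              # 4 without zarch
     .LT0:                    # pool base label, unless remote
     .LC0:  <UNSPECV_POOL_ENTRY constant>
     .LC1:  <...>             # largest alignment requirements first
       .align  2
     .LE0:  <execute target template>
       <barrier>                                                     */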
6611 /* Free all memory used by POOL. */
6613 static void
6614 s390_free_pool (struct constant_pool *pool)
6616 struct constant *c, *next;
6617 int i;
6619 for (i = 0; i < NR_C_MODES; i++)
6620 for (c = pool->constants[i]; c; c = next)
6622 next = c->next;
6623 free (c);
6626 for (c = pool->execute; c; c = next)
6628 next = c->next;
6629 free (c);
6632 BITMAP_FREE (pool->insns);
6633 free (pool);
6637 /* Collect main literal pool. Return NULL on overflow. */
6639 static struct constant_pool *
6640 s390_mainpool_start (void)
6642 struct constant_pool *pool;
6643 rtx_insn *insn;
6645 pool = s390_alloc_pool ();
6647 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6649 if (NONJUMP_INSN_P (insn)
6650 && GET_CODE (PATTERN (insn)) == SET
6651 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6652 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6654 /* There might be two main_pool instructions if base_reg
6655 is call-clobbered; one for shrink-wrapped code and one
6656 for the rest. We want to keep the first. */
6657 if (pool->pool_insn)
6659 insn = PREV_INSN (insn);
6660 delete_insn (NEXT_INSN (insn));
6661 continue;
6663 pool->pool_insn = insn;
6666 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6668 s390_add_execute (pool, insn);
6670 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6672 rtx pool_ref = NULL_RTX;
6673 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6674 if (pool_ref)
6676 rtx constant = get_pool_constant (pool_ref);
6677 machine_mode mode = get_pool_mode (pool_ref);
6678 s390_add_constant (pool, constant, mode);
6682 /* If hot/cold partitioning is enabled we have to make sure that
6683 the literal pool is emitted in the same section where the
6684 initialization of the literal pool base pointer takes place.
6685 emit_pool_after is only used in the non-overflow case on
6686 non-Z CPUs where we can emit the literal pool at the end of the
6687 function body within the text section. */
6688 if (NOTE_P (insn)
6689 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6690 && !pool->emit_pool_after)
6691 pool->emit_pool_after = PREV_INSN (insn);
6694 gcc_assert (pool->pool_insn || pool->size == 0);
6696 if (pool->size >= 4096)
6698 /* We're going to chunkify the pool, so remove the main
6699 pool placeholder insn. */
6700 remove_insn (pool->pool_insn);
6702 s390_free_pool (pool);
6703 pool = NULL;
6706 /* If the function ends with the section where the literal pool
6707 should be emitted, set the marker to its end. */
6708 if (pool && !pool->emit_pool_after)
6709 pool->emit_pool_after = get_last_insn ();
6711 return pool;
6714 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6715 Modify the current function to output the pool constants as well as
6716 the pool register setup instruction. */
6718 static void
6719 s390_mainpool_finish (struct constant_pool *pool)
6721 rtx base_reg = cfun->machine->base_reg;
6723 /* If the pool is empty, we're done. */
6724 if (pool->size == 0)
6726 /* We don't actually need a base register after all. */
6727 cfun->machine->base_reg = NULL_RTX;
6729 if (pool->pool_insn)
6730 remove_insn (pool->pool_insn);
6731 s390_free_pool (pool);
6732 return;
6735 /* We need correct insn addresses. */
6736 shorten_branches (get_insns ());
6738 /* On zSeries, we use a LARL to load the pool register. The pool is
6739 located in the .rodata section, so we emit it after the function. */
6740 if (TARGET_CPU_ZARCH)
6742 rtx set = gen_main_base_64 (base_reg, pool->label);
6743 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
6744 INSN_ADDRESSES_NEW (insn, -1);
6745 remove_insn (pool->pool_insn);
6747 insn = get_last_insn ();
6748 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6749 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6751 s390_dump_pool (pool, 0);
6754 /* On S/390, if the total size of the function's code plus literal pool
6755 does not exceed 4096 bytes, we use BASR to set up a function base
6756 pointer, and emit the literal pool at the end of the function. */
6757 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6758 + pool->size + 8 /* alignment slop */ < 4096)
6760 rtx set = gen_main_base_31_small (base_reg, pool->label);
6761 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
6762 INSN_ADDRESSES_NEW (insn, -1);
6763 remove_insn (pool->pool_insn);
6765 insn = emit_label_after (pool->label, insn);
6766 INSN_ADDRESSES_NEW (insn, -1);
6768 /* emit_pool_after will be set by s390_mainpool_start to the
6769 last insn of the section where the literal pool should be
6770 emitted. */
6771 insn = pool->emit_pool_after;
6773 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6774 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6776 s390_dump_pool (pool, 1);
6779 /* Otherwise, we emit an inline literal pool and use BASR to branch
6780 over it, setting up the pool register at the same time. */
6781 else
6783 rtx_code_label *pool_end = gen_label_rtx ();
6785 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
6786 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
6787 JUMP_LABEL (insn) = pool_end;
6788 INSN_ADDRESSES_NEW (insn, -1);
6789 remove_insn (pool->pool_insn);
6791 insn = emit_label_after (pool->label, insn);
6792 INSN_ADDRESSES_NEW (insn, -1);
6794 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6795 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6797 insn = emit_label_after (pool_end, pool->pool_insn);
6798 INSN_ADDRESSES_NEW (insn, -1);
6800 s390_dump_pool (pool, 1);
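/* As a rough sketch of the three cases above (mnemonics and register
   numbers are illustrative, %r13 standing for the base register):

     zarch:           larl  %r13,.LT0       # pool emitted in .rodata

     small 31 bit:    basr  %r13,0          # .LT0 is the next address,
                    .LT0:                   # pool emitted at function end

     large 31 bit:    bras  %r13,.LTend     # branch over inline pool
                    .LT0:  <literal pool>
                    .LTend:                                          */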
6804 /* Replace all literal pool references. */
6806 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
6808 if (INSN_P (insn))
6809 replace_ltrel_base (&PATTERN (insn));
6811 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6813 rtx addr, pool_ref = NULL_RTX;
6814 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6815 if (pool_ref)
6817 if (s390_execute_label (insn))
6818 addr = s390_find_execute (pool, insn);
6819 else
6820 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6821 get_pool_mode (pool_ref));
6823 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6824 INSN_CODE (insn) = -1;
6830 /* Free the pool. */
6831 s390_free_pool (pool);
6834 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6835 We have decided we cannot use this pool, so revert all changes
6836 to the current function that were done by s390_mainpool_start. */
6837 static void
6838 s390_mainpool_cancel (struct constant_pool *pool)
6840 /* We didn't actually change the instruction stream, so simply
6841 free the pool memory. */
6842 s390_free_pool (pool);
6846 /* Chunkify the literal pool. */
6848 #define S390_POOL_CHUNK_MIN 0xc00
6849 #define S390_POOL_CHUNK_MAX 0xe00
6851 static struct constant_pool *
6852 s390_chunkify_start (void)
6854 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6855 int extra_size = 0;
6856 bitmap far_labels;
6857 rtx pending_ltrel = NULL_RTX;
6858 rtx_insn *insn;
6860 rtx (*gen_reload_base) (rtx, rtx) =
6861 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6864 /* We need correct insn addresses. */
6866 shorten_branches (get_insns ());
6868 /* Scan all insns and move literals to pool chunks. */
6870 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6872 bool section_switch_p = false;
6874 /* Check for pending LTREL_BASE. */
6875 if (INSN_P (insn))
6877 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6878 if (ltrel_base)
6880 gcc_assert (ltrel_base == pending_ltrel);
6881 pending_ltrel = NULL_RTX;
6885 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6887 if (!curr_pool)
6888 curr_pool = s390_start_pool (&pool_list, insn);
6890 s390_add_execute (curr_pool, insn);
6891 s390_add_pool_insn (curr_pool, insn);
6893 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6895 rtx pool_ref = NULL_RTX;
6896 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6897 if (pool_ref)
6899 rtx constant = get_pool_constant (pool_ref);
6900 machine_mode mode = get_pool_mode (pool_ref);
6902 if (!curr_pool)
6903 curr_pool = s390_start_pool (&pool_list, insn);
6905 s390_add_constant (curr_pool, constant, mode);
6906 s390_add_pool_insn (curr_pool, insn);
6908 /* Don't split the pool chunk between a LTREL_OFFSET load
6909 and the corresponding LTREL_BASE. */
6910 if (GET_CODE (constant) == CONST
6911 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6912 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6914 gcc_assert (!pending_ltrel);
6915 pending_ltrel = pool_ref;
6920 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6922 if (curr_pool)
6923 s390_add_pool_insn (curr_pool, insn);
6924 /* An LTREL_BASE must follow within the same basic block. */
6925 gcc_assert (!pending_ltrel);
6928 if (NOTE_P (insn))
6929 switch (NOTE_KIND (insn))
6931 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6932 section_switch_p = true;
6933 break;
6934 case NOTE_INSN_VAR_LOCATION:
6935 case NOTE_INSN_CALL_ARG_LOCATION:
6936 continue;
6937 default:
6938 break;
6941 if (!curr_pool
6942 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6943 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6944 continue;
6946 if (TARGET_CPU_ZARCH)
6948 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6949 continue;
6951 s390_end_pool (curr_pool, NULL);
6952 curr_pool = NULL;
6954 else
6956 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6957 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6958 + extra_size;
6960 /* We will later have to insert base register reload insns.
6961 Those will have an effect on code size, which we need to
6962 consider here. This calculation makes rather pessimistic
6963 worst-case assumptions. */
6964 if (LABEL_P (insn))
6965 extra_size += 6;
6967 if (chunk_size < S390_POOL_CHUNK_MIN
6968 && curr_pool->size < S390_POOL_CHUNK_MIN
6969 && !section_switch_p)
6970 continue;
6972 /* Pool chunks can only be inserted after BARRIERs ... */
6973 if (BARRIER_P (insn))
6975 s390_end_pool (curr_pool, insn);
6976 curr_pool = NULL;
6977 extra_size = 0;
6980 /* ... so if we don't find one in time, create one. */
6981 else if (chunk_size > S390_POOL_CHUNK_MAX
6982 || curr_pool->size > S390_POOL_CHUNK_MAX
6983 || section_switch_p)
6985 rtx_insn *label, *jump, *barrier, *next, *prev;
6987 if (!section_switch_p)
6989 /* We can insert the barrier only after a 'real' insn. */
6990 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
6991 continue;
6992 if (get_attr_length (insn) == 0)
6993 continue;
6994 /* Don't separate LTREL_BASE from the corresponding
6995 LTREL_OFFSET load. */
6996 if (pending_ltrel)
6997 continue;
6998 next = insn;
7001 insn = next;
7002 next = NEXT_INSN (insn);
7004 while (next
7005 && NOTE_P (next)
7006 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
7007 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
7009 else
7011 gcc_assert (!pending_ltrel);
7013 /* The old pool has to end before the section switch
7014 note in order to make it part of the current
7015 section. */
7016 insn = PREV_INSN (insn);
7019 label = gen_label_rtx ();
7020 prev = insn;
7021 if (prev && NOTE_P (prev))
7022 prev = prev_nonnote_insn (prev);
7023 if (prev)
7024 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
7025 INSN_LOCATION (prev));
7026 else
7027 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
7028 barrier = emit_barrier_after (jump);
7029 insn = emit_label_after (label, barrier);
7030 JUMP_LABEL (jump) = label;
7031 LABEL_NUSES (label) = 1;
7033 INSN_ADDRESSES_NEW (jump, -1);
7034 INSN_ADDRESSES_NEW (barrier, -1);
7035 INSN_ADDRESSES_NEW (insn, -1);
7037 s390_end_pool (curr_pool, barrier);
7038 curr_pool = NULL;
7039 extra_size = 0;
7044 if (curr_pool)
7045 s390_end_pool (curr_pool, NULL);
7046 gcc_assert (!pending_ltrel);
7048 /* Find all labels that are branched into
7049 from an insn belonging to a different chunk. */
7051 far_labels = BITMAP_ALLOC (NULL);
7053 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7055 rtx_jump_table_data *table;
7057 /* Labels marked with LABEL_PRESERVE_P can be target
7058 of non-local jumps, so we have to mark them.
7059 The same holds for named labels.
7061 Don't do that, however, if it is the label before
7062 a jump table. */
7064 if (LABEL_P (insn)
7065 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7067 rtx_insn *vec_insn = NEXT_INSN (insn);
7068 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7069 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7071 /* Check potential targets in a table jump (casesi_jump). */
7072 else if (tablejump_p (insn, NULL, &table))
7074 rtx vec_pat = PATTERN (table);
7075 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7077 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7079 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7081 if (s390_find_pool (pool_list, label)
7082 != s390_find_pool (pool_list, insn))
7083 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7086 /* If we have a direct jump (conditional or unconditional),
7087 check all potential targets. */
7088 else if (JUMP_P (insn))
7090 rtx pat = PATTERN (insn);
7092 if (GET_CODE (pat) == PARALLEL)
7093 pat = XVECEXP (pat, 0, 0);
7095 if (GET_CODE (pat) == SET)
7097 rtx label = JUMP_LABEL (insn);
7098 if (label && !ANY_RETURN_P (label))
7100 if (s390_find_pool (pool_list, label)
7101 != s390_find_pool (pool_list, insn))
7102 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7108 /* Insert base register reload insns before every pool. */
7110 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7112 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7113 curr_pool->label);
7114 rtx_insn *insn = curr_pool->first_insn;
7115 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7118 /* Insert base register reload insns at every far label. */
7120 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7121 if (LABEL_P (insn)
7122 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7124 struct constant_pool *pool = s390_find_pool (pool_list, insn);
7125 if (pool)
7127 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
7128 pool->label);
7129 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7134 BITMAP_FREE (far_labels);
7137 /* Recompute insn addresses. */
7139 init_insn_lengths ();
7140 shorten_branches (get_insns ());
7142 return pool_list;
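/* A rough example of the effect: a function referencing some 10 KB of
   literals ends up with three or four chunks, since a chunk is closed
   once it grows beyond S390_POOL_CHUNK_MAX (0xe00 == 3584 bytes), and
   each chunk as well as each far label gets a base register reload as
   emitted above.  */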
7145 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7146 After we have decided to use this list, finish implementing
7147 all changes to the current function as required. */
7149 static void
7150 s390_chunkify_finish (struct constant_pool *pool_list)
7152 struct constant_pool *curr_pool = NULL;
7153 rtx_insn *insn;
7156 /* Replace all literal pool references. */
7158 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7160 if (INSN_P (insn))
7161 replace_ltrel_base (&PATTERN (insn));
7163 curr_pool = s390_find_pool (pool_list, insn);
7164 if (!curr_pool)
7165 continue;
7167 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7169 rtx addr, pool_ref = NULL_RTX;
7170 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7171 if (pool_ref)
7173 if (s390_execute_label (insn))
7174 addr = s390_find_execute (curr_pool, insn);
7175 else
7176 addr = s390_find_constant (curr_pool,
7177 get_pool_constant (pool_ref),
7178 get_pool_mode (pool_ref));
7180 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7181 INSN_CODE (insn) = -1;
7186 /* Dump out all literal pools. */
7188 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7189 s390_dump_pool (curr_pool, 0);
7191 /* Free pool list. */
7193 while (pool_list)
7195 struct constant_pool *next = pool_list->next;
7196 s390_free_pool (pool_list);
7197 pool_list = next;
7201 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7202 We have decided we cannot use this list, so revert all changes
7203 to the current function that were done by s390_chunkify_start. */
7205 static void
7206 s390_chunkify_cancel (struct constant_pool *pool_list)
7208 struct constant_pool *curr_pool = NULL;
7209 rtx_insn *insn;
7211 /* Remove all pool placeholder insns. */
7213 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7215 /* Did we insert an extra barrier? Remove it. */
7216 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
7217 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
7218 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
7220 if (jump && JUMP_P (jump)
7221 && barrier && BARRIER_P (barrier)
7222 && label && LABEL_P (label)
7223 && GET_CODE (PATTERN (jump)) == SET
7224 && SET_DEST (PATTERN (jump)) == pc_rtx
7225 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7226 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7228 remove_insn (jump);
7229 remove_insn (barrier);
7230 remove_insn (label);
7233 remove_insn (curr_pool->pool_insn);
7236 /* Remove all base register reload insns. */
7238 for (insn = get_insns (); insn; )
7240 rtx_insn *next_insn = NEXT_INSN (insn);
7242 if (NONJUMP_INSN_P (insn)
7243 && GET_CODE (PATTERN (insn)) == SET
7244 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7245 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7246 remove_insn (insn);
7248 insn = next_insn;
7251 /* Free pool list. */
7253 while (pool_list)
7255 struct constant_pool *next = pool_list->next;
7256 s390_free_pool (pool_list);
7257 pool_list = next;
7261 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7263 void
7264 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
7266 REAL_VALUE_TYPE r;
7268 switch (GET_MODE_CLASS (mode))
7270 case MODE_FLOAT:
7271 case MODE_DECIMAL_FLOAT:
7272 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7274 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7275 assemble_real (r, mode, align, false);
7276 break;
7278 case MODE_INT:
7279 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7280 mark_symbol_refs_as_used (exp);
7281 break;
7283 default:
7284 gcc_unreachable ();
7289 /* Return an RTL expression representing the value of the return address
7290 for the frame COUNT steps up from the current frame. FRAME is the
7291 frame pointer of that frame. */
7294 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7296 int offset;
7297 rtx addr;
7299 /* Without backchain, we fail for all but the current frame. */
7301 if (!TARGET_BACKCHAIN && count > 0)
7302 return NULL_RTX;
7304 /* For the current frame, we need to make sure the initial
7305 value of RETURN_REGNUM is actually saved. */
7307 if (count == 0)
7309 /* On non-z architectures branch splitting could overwrite r14. */
7310 if (TARGET_CPU_ZARCH)
7311 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7312 else
7314 cfun_frame_layout.save_return_addr_p = true;
7315 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7319 if (TARGET_PACKED_STACK)
7320 offset = -2 * UNITS_PER_LONG;
7321 else
7322 offset = RETURN_REGNUM * UNITS_PER_LONG;
7324 addr = plus_constant (Pmode, frame, offset);
7325 addr = memory_address (Pmode, addr);
7326 return gen_rtx_MEM (Pmode, addr);
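/* As a worked example, assuming a 64-bit target with UNITS_PER_LONG == 8
   and RETURN_REGNUM == 14: the non-packed-stack case reads the return
   address from FRAME + 14 * 8 == FRAME + 112, while the packed stack
   layout reads it from FRAME - 16.  */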
7329 /* Return an RTL expression representing the back chain stored in
7330 the current stack frame. */
7333 s390_back_chain_rtx (void)
7335 rtx chain;
7337 gcc_assert (TARGET_BACKCHAIN);
7339 if (TARGET_PACKED_STACK)
7340 chain = plus_constant (Pmode, stack_pointer_rtx,
7341 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7342 else
7343 chain = stack_pointer_rtx;
7345 chain = gen_rtx_MEM (Pmode, chain);
7346 return chain;
7349 /* Find first call clobbered register unused in a function.
7350 This could be used as base register in a leaf function
7351 or for holding the return address before epilogue. */
7353 static int
7354 find_unused_clobbered_reg (void)
7356 int i;
7357 for (i = 0; i < 6; i++)
7358 if (!df_regs_ever_live_p (i))
7359 return i;
7360 return 0;
7364 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7365 clobbered hard regs in SETREG. */
7367 static void
7368 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7370 char *regs_ever_clobbered = (char *)data;
7371 unsigned int i, regno;
7372 machine_mode mode = GET_MODE (setreg);
7374 if (GET_CODE (setreg) == SUBREG)
7376 rtx inner = SUBREG_REG (setreg);
7377 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
7378 return;
7379 regno = subreg_regno (setreg);
7381 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
7382 regno = REGNO (setreg);
7383 else
7384 return;
7386 for (i = regno;
7387 i < regno + HARD_REGNO_NREGS (regno, mode);
7388 i++)
7389 regs_ever_clobbered[i] = 1;
7392 /* Walks through all basic blocks of the current function looking
7393 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7394 of the passed char array REGS_EVER_CLOBBERED are set to one for
7395 each of those regs. */
7397 static void
7398 s390_regs_ever_clobbered (char regs_ever_clobbered[])
7400 basic_block cur_bb;
7401 rtx_insn *cur_insn;
7402 unsigned int i;
7404 memset (regs_ever_clobbered, 0, 32);
7406 /* For non-leaf functions we have to consider all call clobbered regs to be
7407 clobbered. */
7408 if (!crtl->is_leaf)
7410 for (i = 0; i < 32; i++)
7411 regs_ever_clobbered[i] = call_really_used_regs[i];
7414 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7415 this work is done by liveness analysis (mark_regs_live_at_end).
7416 Special care is needed for functions containing landing pads. Landing pads
7417 may use the eh registers, but the code which sets these registers is not
7418 contained in that function. Hence s390_regs_ever_clobbered is not able to
7419 deal with this automatically. */
7420 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7421 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7422 if (crtl->calls_eh_return
7423 || (cfun->machine->has_landing_pad_p
7424 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7425 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7427 /* For nonlocal gotos all call-saved registers have to be saved.
7428 This flag is also set for the unwinding code in libgcc.
7429 See expand_builtin_unwind_init. For regs_ever_live this is done by
7430 reload. */
7431 if (crtl->saves_all_registers)
7432 for (i = 0; i < 32; i++)
7433 if (!call_really_used_regs[i])
7434 regs_ever_clobbered[i] = 1;
7436 FOR_EACH_BB_FN (cur_bb, cfun)
7438 FOR_BB_INSNS (cur_bb, cur_insn)
7440 rtx pat;
7442 if (!INSN_P (cur_insn))
7443 continue;
7445 pat = PATTERN (cur_insn);
7447 /* Ignore GPR restore insns. */
7448 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
7450 if (GET_CODE (pat) == SET
7451 && GENERAL_REG_P (SET_DEST (pat)))
7453 /* lgdr */
7454 if (GET_MODE (SET_SRC (pat)) == DImode
7455 && FP_REG_P (SET_SRC (pat)))
7456 continue;
7458 /* l / lg */
7459 if (GET_CODE (SET_SRC (pat)) == MEM)
7460 continue;
7463 /* lm / lmg */
7464 if (GET_CODE (pat) == PARALLEL
7465 && load_multiple_operation (pat, VOIDmode))
7466 continue;
7469 note_stores (pat,
7470 s390_reg_clobbered_rtx,
7471 regs_ever_clobbered);
7476 /* Determine the frame area which actually has to be accessed
7477 in the function epilogue. The values are stored at the
7478 given pointers AREA_BOTTOM (address of the lowest used stack
7479 address) and AREA_TOP (address of the first item which does
7480 not belong to the stack frame). */
7482 static void
7483 s390_frame_area (int *area_bottom, int *area_top)
7485 int b, t;
7487 b = INT_MAX;
7488 t = INT_MIN;
7490 if (cfun_frame_layout.first_restore_gpr != -1)
7492 b = (cfun_frame_layout.gprs_offset
7493 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7494 t = b + (cfun_frame_layout.last_restore_gpr
7495 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7498 if (TARGET_64BIT && cfun_save_high_fprs_p)
7500 b = MIN (b, cfun_frame_layout.f8_offset);
7501 t = MAX (t, (cfun_frame_layout.f8_offset
7502 + cfun_frame_layout.high_fprs * 8));
7505 if (!TARGET_64BIT)
7507 if (cfun_fpr_save_p (FPR4_REGNUM))
7509 b = MIN (b, cfun_frame_layout.f4_offset);
7510 t = MAX (t, cfun_frame_layout.f4_offset + 8);
7512 if (cfun_fpr_save_p (FPR6_REGNUM))
7514 b = MIN (b, cfun_frame_layout.f4_offset + 8);
7515 t = MAX (t, cfun_frame_layout.f4_offset + 16);
7518 *area_bottom = b;
7519 *area_top = t;
7521 /* Update gpr_save_slots in the frame layout trying to make use of
7522 FPRs as GPR save slots.
7523 This is a helper routine of s390_register_info. */
7525 static void
7526 s390_register_info_gprtofpr ()
7528 int save_reg_slot = FPR0_REGNUM;
7529 int i, j;
7531 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
7532 return;
7534 for (i = 15; i >= 6; i--)
7536 if (cfun_gpr_save_slot (i) == 0)
7537 continue;
7539 /* Advance to the next FP register which can be used as a
7540 GPR save slot. */
7541 while ((!call_really_used_regs[save_reg_slot]
7542 || df_regs_ever_live_p (save_reg_slot)
7543 || cfun_fpr_save_p (save_reg_slot))
7544 && FP_REGNO_P (save_reg_slot))
7545 save_reg_slot++;
7546 if (!FP_REGNO_P (save_reg_slot))
7548 /* We only want to use ldgr/lgdr if we can get rid of
7549 stm/lm entirely. So undo the gpr slot allocation in
7550 case we ran out of FPR save slots. */
7551 for (j = 6; j <= 15; j++)
7552 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
7553 cfun_gpr_save_slot (j) = -1;
7554 break;
7556 cfun_gpr_save_slot (i) = save_reg_slot++;
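/* For instance, a leaf function on z10 that clobbers only r12 and r13
   can typically have both assigned unused call-clobbered FPRs as save
   slots (handled via ldgr/lgdr later on), so no stack slots are needed
   for them; if the FPRs run out, all such assignments are undone again
   as described above.  */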
7560 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
7561 stdarg.
7562 This is a helper routine for s390_register_info. */
7564 static void
7565 s390_register_info_stdarg_fpr ()
7567 int i;
7568 int min_fpr;
7569 int max_fpr;
7571 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
7572 f0-f4 for 64 bit. */
7573 if (!cfun->stdarg
7574 || !TARGET_HARD_FLOAT
7575 || !cfun->va_list_fpr_size
7576 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
7577 return;
7579 min_fpr = crtl->args.info.fprs;
7580 max_fpr = min_fpr + cfun->va_list_fpr_size;
7581 if (max_fpr > FP_ARG_NUM_REG)
7582 max_fpr = FP_ARG_NUM_REG;
7584 for (i = min_fpr; i < max_fpr; i++)
7585 cfun_set_fpr_save (i + FPR0_REGNUM);
7588 /* Reserve the GPR save slots for GPRs which need to be saved due to
7589 stdarg.
7590 This is a helper routine for s390_register_info. */
7592 static void
7593 s390_register_info_stdarg_gpr ()
7595 int i;
7596 int min_gpr;
7597 int max_gpr;
7599 if (!cfun->stdarg
7600 || !cfun->va_list_gpr_size
7601 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
7602 return;
7604 min_gpr = crtl->args.info.gprs;
7605 max_gpr = min_gpr + cfun->va_list_gpr_size;
7606 if (max_gpr > GP_ARG_NUM_REG)
7607 max_gpr = GP_ARG_NUM_REG;
7609 for (i = min_gpr; i < max_gpr; i++)
7610 cfun_gpr_save_slot (2 + i) = -1;
7613 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
7614 for registers which need to be saved in function prologue.
7615 This function can be used until the insns emitted for save/restore
7616 of the regs are visible in the RTL stream. */
7618 static void
7619 s390_register_info ()
7621 int i, j;
7622 char clobbered_regs[32];
7624 gcc_assert (!epilogue_completed);
7626 if (reload_completed)
7627 /* After reload we rely on our own routine to determine which
7628 registers need saving. */
7629 s390_regs_ever_clobbered (clobbered_regs);
7630 else
7631 /* During reload we use regs_ever_live as a base since reload
7632 makes changes there which we would otherwise not be aware
7633 of. */
7634 for (i = 0; i < 32; i++)
7635 clobbered_regs[i] = df_regs_ever_live_p (i);
7637 for (i = 0; i < 32; i++)
7638 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
7640 /* Mark the call-saved FPRs which need to be saved.
7641 This needs to be done before checking the special GPRs since the
7642 stack pointer usage depends on whether high FPRs have to be saved
7643 or not. */
7644 cfun_frame_layout.fpr_bitmap = 0;
7645 cfun_frame_layout.high_fprs = 0;
7646 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
7647 if (clobbered_regs[i] && !call_really_used_regs[i])
7649 cfun_set_fpr_save (i);
7650 if (i >= FPR8_REGNUM)
7651 cfun_frame_layout.high_fprs++;
7654 if (flag_pic)
7655 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7656 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7658 clobbered_regs[BASE_REGNUM]
7659 |= (cfun->machine->base_reg
7660 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7662 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
7663 |= !!frame_pointer_needed;
7665 /* On pre-z900 machines this decision might not be made until
7666 machine-dependent reorg.
7667 save_return_addr_p will only be set on non-zarch machines so
7668 there is no risk that r14 goes into an FPR instead of a stack
7669 slot. */
7670 clobbered_regs[RETURN_REGNUM]
7671 |= (!crtl->is_leaf
7672 || TARGET_TPF_PROFILING
7673 || cfun->machine->split_branches_pending_p
7674 || cfun_frame_layout.save_return_addr_p
7675 || crtl->calls_eh_return);
7677 clobbered_regs[STACK_POINTER_REGNUM]
7678 |= (!crtl->is_leaf
7679 || TARGET_TPF_PROFILING
7680 || cfun_save_high_fprs_p
7681 || get_frame_size () > 0
7682 || (reload_completed && cfun_frame_layout.frame_size > 0)
7683 || cfun->calls_alloca);
7685 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
7687 for (i = 6; i < 16; i++)
7688 if (clobbered_regs[i])
7689 cfun_gpr_save_slot (i) = -1;
7691 s390_register_info_stdarg_fpr ();
7692 s390_register_info_gprtofpr ();
7694 /* First find the range of GPRs to be restored. Vararg regs don't
7695 need to be restored so we do it before assigning slots to the
7696 vararg GPRs. */
7697 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7698 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7699 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
7700 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
7702 /* stdarg functions might need to save GPRs 2 to 6. This might
7703 override the GPR->FPR save decision made above for r6 since
7704 vararg regs must go to the stack. */
7705 s390_register_info_stdarg_gpr ();
7707 /* Now the range of GPRs which need saving. */
7708 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7709 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7710 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
7711 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
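/* To illustrate the range computation: if only r12 and r14 received
   stack save slots (-1), first_save_gpr/last_save_gpr become 12 and 14,
   so a single stm/stmg covering r12-r14 is used even though r13 itself
   would not strictly need saving.  */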
7714 /* This function is called by s390_optimize_prologue in order to get
7715 rid of unnecessary GPR save/restore instructions. The register info
7716 for the GPRs is re-computed and the ranges are re-calculated. */
7718 static void
7719 s390_optimize_register_info ()
7721 char clobbered_regs[32];
7722 int i, j;
7724 gcc_assert (epilogue_completed);
7725 gcc_assert (!cfun->machine->split_branches_pending_p);
7727 s390_regs_ever_clobbered (clobbered_regs);
7729 for (i = 0; i < 32; i++)
7730 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
7732 /* There is still special treatment needed for cases invisible to
7733 s390_regs_ever_clobbered. */
7734 clobbered_regs[RETURN_REGNUM]
7735 |= (TARGET_TPF_PROFILING
7736 /* When expanding builtin_return_addr in ESA mode we do not
7737 know whether r14 will later be needed as scratch reg when
7738 doing branch splitting. So the builtin always accesses the
7739 r14 save slot and we need to stick to the save/restore
7740 decision for r14 even if it turns out that it didn't get
7741 clobbered. */
7742 || cfun_frame_layout.save_return_addr_p
7743 || crtl->calls_eh_return);
7745 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
7747 for (i = 6; i < 16; i++)
7748 if (!clobbered_regs[i])
7749 cfun_gpr_save_slot (i) = 0;
7751 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7752 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7753 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
7754 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
7756 s390_register_info_stdarg_gpr ();
7758 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7759 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7760 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
7761 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
7764 /* Fill cfun->machine with info about frame of current function. */
7766 static void
7767 s390_frame_info (void)
7769 HOST_WIDE_INT lowest_offset;
7771 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
7772 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
7774 /* The va_arg builtin uses a constant distance of 16 *
7775 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
7776 pointer. So even if we are going to save the stack pointer in an
7777 FPR we need the stack space in order to keep the offsets
7778 correct. */
7779 if (cfun->stdarg && cfun_save_arg_fprs_p)
7781 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
7783 if (cfun_frame_layout.first_save_gpr_slot == -1)
7784 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
7787 cfun_frame_layout.frame_size = get_frame_size ();
7788 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7789 fatal_error (input_location,
7790 "total size of local variables exceeds architecture limit");
7792 if (!TARGET_PACKED_STACK)
7794 /* Fixed stack layout. */
7795 cfun_frame_layout.backchain_offset = 0;
7796 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7797 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7798 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7799 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7800 * UNITS_PER_LONG);
7802 else if (TARGET_BACKCHAIN)
7804 /* Kernel stack layout - packed stack, backchain, no float */
7805 gcc_assert (TARGET_SOFT_FLOAT);
7806 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7807 - UNITS_PER_LONG);
7809 /* The distance between the backchain and the return address
7810 save slot must not change. So we always need a slot for the
7811 stack pointer which resides in between. */
7812 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
7814 cfun_frame_layout.gprs_offset
7815 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
7817 /* FPRs will not be saved. Nevertheless pick sane values to
7818 keep area calculations valid. */
7819 cfun_frame_layout.f0_offset =
7820 cfun_frame_layout.f4_offset =
7821 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
7823 else
7825 int num_fprs;
7827 /* Packed stack layout without backchain. */
7829 /* With stdarg FPRs need their dedicated slots. */
7830 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
7831 : (cfun_fpr_save_p (FPR4_REGNUM) +
7832 cfun_fpr_save_p (FPR6_REGNUM)));
7833 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
7835 num_fprs = (cfun->stdarg ? 2
7836 : (cfun_fpr_save_p (FPR0_REGNUM)
7837 + cfun_fpr_save_p (FPR2_REGNUM)));
7838 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
7840 cfun_frame_layout.gprs_offset
7841 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7843 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
7844 - cfun_frame_layout.high_fprs * 8);
7847 if (cfun_save_high_fprs_p)
7848 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7850 if (!crtl->is_leaf)
7851 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7853 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
7854 sized area at the bottom of the stack. This is required also for
7855 leaf functions. When GCC generates a local stack reference it
7856 will always add STACK_POINTER_OFFSET to all these references. */
7857 if (crtl->is_leaf
7858 && !TARGET_TPF_PROFILING
7859 && cfun_frame_layout.frame_size == 0
7860 && !cfun->calls_alloca)
7861 return;
7863 /* Calculate the number of bytes we have used in our own register
7864 save area. With the packed stack layout we can re-use the
7865 remaining bytes for normal stack elements. */
7867 if (TARGET_PACKED_STACK)
7868 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
7869 cfun_frame_layout.f4_offset),
7870 cfun_frame_layout.gprs_offset);
7871 else
7872 lowest_offset = 0;
7874 if (TARGET_BACKCHAIN)
7875 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
7877 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
7879 /* If, under 31 bit, an odd number of GPRs has to be saved, we have
7880 to adjust the frame size to maintain 8-byte alignment of stack
7881 frames. */
7882 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7883 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7884 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
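/* The rounding above keeps the frame size STACK_BOUNDARY aligned; e.g.
   with STACK_BOUNDARY / BITS_PER_UNIT == 8 a raw size of 52 bytes is
   rounded to (52 + 7) & ~7 == 56.  */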
7887 /* Generate frame layout. Fills in register and frame data for the current
7888 function in cfun->machine. This routine can be called multiple times;
7889 it will re-do the complete frame layout every time. */
7891 static void
7892 s390_init_frame_layout (void)
7894 HOST_WIDE_INT frame_size;
7895 int base_used;
7897 gcc_assert (!reload_completed);
7899 /* On S/390 machines, we may need to perform branch splitting, which
7900 will require both the base and the return address register. We have no
7901 choice but to assume we're going to need them until right at the
7902 end of the machine dependent reorg phase. */
7903 if (!TARGET_CPU_ZARCH)
7904 cfun->machine->split_branches_pending_p = true;
7908 frame_size = cfun_frame_layout.frame_size;
7910 /* Try to predict whether we'll need the base register. */
7911 base_used = cfun->machine->split_branches_pending_p
7912 || crtl->uses_const_pool
7913 || (!DISP_IN_RANGE (frame_size)
7914 && !CONST_OK_FOR_K (frame_size));
7916 /* Decide which register to use as literal pool base. In small
7917 leaf functions, try to use an unused call-clobbered register
7918 as base register to avoid save/restore overhead. */
7919 if (!base_used)
7920 cfun->machine->base_reg = NULL_RTX;
7921 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7922 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7923 else
7924 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7926 s390_register_info ();
7927 s390_frame_info ();
7929 while (frame_size != cfun_frame_layout.frame_size);
7932 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
7933 the TX is nonescaping. A transaction is considered escaping if
7934 there is at least one path from tbegin returning CC0 to the
7935 function exit block without a tend.
7937 The check so far has some limitations:
7938 - only single tbegin/tend BBs are supported
7939 - the first cond jump after tbegin must separate the CC0 path from ~CC0
7940 - when CC is copied to a GPR and the CC0 check is done with the GPR
7941 this is not supported
7944 static void
7945 s390_optimize_nonescaping_tx (void)
7947 const unsigned int CC0 = 1 << 3;
7948 basic_block tbegin_bb = NULL;
7949 basic_block tend_bb = NULL;
7950 basic_block bb;
7951 rtx_insn *insn;
7952 bool result = true;
7953 int bb_index;
7954 rtx_insn *tbegin_insn = NULL;
7956 if (!cfun->machine->tbegin_p)
7957 return;
7959 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
7961 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
7963 if (!bb)
7964 continue;
7966 FOR_BB_INSNS (bb, insn)
7968 rtx ite, cc, pat, target;
7969 unsigned HOST_WIDE_INT mask;
7971 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
7972 continue;
7974 pat = PATTERN (insn);
7976 if (GET_CODE (pat) == PARALLEL)
7977 pat = XVECEXP (pat, 0, 0);
7979 if (GET_CODE (pat) != SET
7980 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
7981 continue;
7983 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
7985 rtx_insn *tmp;
7987 tbegin_insn = insn;
7989 /* Just return if the tbegin doesn't have clobbers. */
7990 if (GET_CODE (PATTERN (insn)) != PARALLEL)
7991 return;
7993 if (tbegin_bb != NULL)
7994 return;
7996 /* Find the next conditional jump. */
7997 for (tmp = NEXT_INSN (insn);
7998 tmp != NULL_RTX;
7999 tmp = NEXT_INSN (tmp))
8001 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
8002 return;
8003 if (!JUMP_P (tmp))
8004 continue;
8006 ite = SET_SRC (PATTERN (tmp));
8007 if (GET_CODE (ite) != IF_THEN_ELSE)
8008 continue;
8010 cc = XEXP (XEXP (ite, 0), 0);
8011 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
8012 || GET_MODE (cc) != CCRAWmode
8013 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
8014 return;
8016 if (bb->succs->length () != 2)
8017 return;
8019 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
8020 if (GET_CODE (XEXP (ite, 0)) == NE)
8021 mask ^= 0xf;
8023 if (mask == CC0)
8024 target = XEXP (ite, 1);
8025 else if (mask == (CC0 ^ 0xf))
8026 target = XEXP (ite, 2);
8027 else
8028 return;
8031 edge_iterator ei;
8032 edge e1, e2;
8034 ei = ei_start (bb->succs);
8035 e1 = ei_safe_edge (ei);
8036 ei_next (&ei);
8037 e2 = ei_safe_edge (ei);
8039 if (e2->flags & EDGE_FALLTHRU)
8041 e2 = e1;
8042 e1 = ei_safe_edge (ei);
8045 if (!(e1->flags & EDGE_FALLTHRU))
8046 return;
8048 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
8050 if (tmp == BB_END (bb))
8051 break;
8055 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
8057 if (tend_bb != NULL)
8058 return;
8059 tend_bb = bb;
8064 /* Either we successfully remove the FPR clobbers here or we are not
8065 able to do anything for this TX. Both cases don't qualify for
8066 another look. */
8067 cfun->machine->tbegin_p = false;
8069 if (tbegin_bb == NULL || tend_bb == NULL)
8070 return;
8072 calculate_dominance_info (CDI_POST_DOMINATORS);
8073 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
8074 free_dominance_info (CDI_POST_DOMINATORS);
8076 if (!result)
8077 return;
8079 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
8080 gen_rtvec (2,
8081 XVECEXP (PATTERN (tbegin_insn), 0, 0),
8082 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
8083 INSN_CODE (tbegin_insn) = -1;
8084 df_insn_rescan (tbegin_insn);
8086 return;
8089 /* Return true if it is legal to put a value with MODE into REGNO. */
8091 bool
8092 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
8094 switch (REGNO_REG_CLASS (regno))
8096 case FP_REGS:
8097 if (REGNO_PAIR_OK (regno, mode))
8099 if (mode == SImode || mode == DImode)
8100 return true;
8102 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
8103 return true;
8105 break;
8106 case ADDR_REGS:
8107 if (FRAME_REGNO_P (regno) && mode == Pmode)
8108 return true;
8110 /* fallthrough */
8111 case GENERAL_REGS:
8112 if (REGNO_PAIR_OK (regno, mode))
8114 if (TARGET_ZARCH
8115 || (mode != TFmode && mode != TCmode && mode != TDmode))
8116 return true;
8118 break;
8119 case CC_REGS:
8120 if (GET_MODE_CLASS (mode) == MODE_CC)
8121 return true;
8122 break;
8123 case ACCESS_REGS:
8124 if (REGNO_PAIR_OK (regno, mode))
8126 if (mode == SImode || mode == Pmode)
8127 return true;
8129 break;
8130 default:
8131 return false;
8134 return false;
8137 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
8139 bool
8140 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
8142 /* Once we've decided upon a register to use as base register, it must
8143 no longer be used for any other purpose. */
8144 if (cfun->machine->base_reg)
8145 if (REGNO (cfun->machine->base_reg) == old_reg
8146 || REGNO (cfun->machine->base_reg) == new_reg)
8147 return false;
8149 /* Prevent regrename from using call-saved regs which haven't
8150 actually been saved. This is necessary since regrename assumes
8151 the backend save/restore decisions are based on
8152 df_regs_ever_live. Since we have our own routine we have to tell
8153 regrename manually about it. */
8154 if (GENERAL_REGNO_P (new_reg)
8155 && !call_really_used_regs[new_reg]
8156 && cfun_gpr_save_slot (new_reg) == 0)
8157 return false;
8159 return true;
8162 /* Return nonzero if register REGNO can be used as a scratch register
8163 in peephole2. */
8165 static bool
8166 s390_hard_regno_scratch_ok (unsigned int regno)
8168 /* See s390_hard_regno_rename_ok. */
8169 if (GENERAL_REGNO_P (regno)
8170 && !call_really_used_regs[regno]
8171 && cfun_gpr_save_slot (regno) == 0)
8172 return false;
8174 return true;
8177 /* Maximum number of registers to represent a value of mode MODE
8178 in a register of class RCLASS. */
8181 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
8183 switch (rclass)
8185 case FP_REGS:
8186 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8187 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
8188 else
8189 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
8190 case ACCESS_REGS:
8191 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
8192 default:
8193 break;
8195 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
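/* For example, a TFmode value (16 bytes) needs (16 + 8 - 1) / 8 == 2
   registers in FP_REGS; in the default case a TImode value on a
   64-bit target likewise needs two GPRs via the UNITS_PER_WORD
   fallback.  */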
8198 /* Return true if we use LRA instead of reload pass. */
8199 static bool
8200 s390_lra_p (void)
8202 return s390_lra_flag;
8205 /* Return true if register FROM can be eliminated via register TO. */
8207 static bool
8208 s390_can_eliminate (const int from, const int to)
8210 /* On zSeries machines, we have not marked the base register as fixed.
8211 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
8212 If a function requires the base register, we say here that this
8213 elimination cannot be performed. This will cause reload to free
8214 up the base register (as if it were fixed). On the other hand,
8215 if the current function does *not* require the base register, we
8216 say here the elimination succeeds, which in turn allows reload
8217 to allocate the base register for any other purpose. */
8218 if (from == BASE_REGNUM && to == BASE_REGNUM)
8220 if (TARGET_CPU_ZARCH)
8222 s390_init_frame_layout ();
8223 return cfun->machine->base_reg == NULL_RTX;
8226 return false;
8229 /* Everything else must point into the stack frame. */
8230 gcc_assert (to == STACK_POINTER_REGNUM
8231 || to == HARD_FRAME_POINTER_REGNUM);
8233 gcc_assert (from == FRAME_POINTER_REGNUM
8234 || from == ARG_POINTER_REGNUM
8235 || from == RETURN_ADDRESS_POINTER_REGNUM);
8237 /* Make sure we actually saved the return address. */
8238 if (from == RETURN_ADDRESS_POINTER_REGNUM)
8239 if (!crtl->calls_eh_return
8240 && !cfun->stdarg
8241 && !cfun_frame_layout.save_return_addr_p)
8242 return false;
8244 return true;
8247 /* Return offset between register FROM and TO initially after prolog. */
8249 HOST_WIDE_INT
8250 s390_initial_elimination_offset (int from, int to)
8252 HOST_WIDE_INT offset;
8254 /* ??? Why are we called for non-eliminable pairs? */
8255 if (!s390_can_eliminate (from, to))
8256 return 0;
8258 switch (from)
8260 case FRAME_POINTER_REGNUM:
8261 offset = (get_frame_size()
8262 + STACK_POINTER_OFFSET
8263 + crtl->outgoing_args_size);
8264 break;
8266 case ARG_POINTER_REGNUM:
8267 s390_init_frame_layout ();
8268 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
8269 break;
8271 case RETURN_ADDRESS_POINTER_REGNUM:
8272 s390_init_frame_layout ();
8274 if (cfun_frame_layout.first_save_gpr_slot == -1)
8276 /* If it turns out that for stdarg nothing went into the reg
8277 save area we also do not need the return address
8278 pointer. */
8279 if (cfun->stdarg && !cfun_save_arg_fprs_p)
8280 return 0;
8282 gcc_unreachable ();
8285 /* In order to make the following work it is not necessary for
8286 r14 to have a save slot. It is sufficient if one other GPR
8287 got one. Since the GPRs are always stored without gaps we
8288 are able to calculate where the r14 save slot would
8289 reside. */
8290 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
8291 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
8292 UNITS_PER_LONG);
8293 break;
8295 case BASE_REGNUM:
8296 offset = 0;
8297 break;
8299 default:
8300 gcc_unreachable ();
8303 return offset;
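/* A worked example for the RETURN_ADDRESS_POINTER_REGNUM case above
(hypothetical numbers): with first_save_gpr_slot == 6 and -m64, the r14
(RETURN_REGNUM) save slot lies (14 - 6) * UNITS_PER_LONG = 64 bytes into
the GPR save area, so the offset returned is
frame_size + gprs_offset + 64.  */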
8306 /* Emit insn to save fpr REGNUM at offset OFFSET relative
8307 to register BASE. Return generated insn. */
8309 static rtx
8310 save_fpr (rtx base, int offset, int regnum)
8312 rtx addr;
8313 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8315 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
8316 set_mem_alias_set (addr, get_varargs_alias_set ());
8317 else
8318 set_mem_alias_set (addr, get_frame_alias_set ());
8320 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
8323 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
8324 to register BASE. Return generated insn. */
8326 static rtx
8327 restore_fpr (rtx base, int offset, int regnum)
8329 rtx addr;
8330 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8331 set_mem_alias_set (addr, get_frame_alias_set ());
8333 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
8336 /* Return true if REGNO is a global register, but not one
8337 of the special ones that need to be saved/restored anyway. */
8339 static inline bool
8340 global_not_special_regno_p (int regno)
8342 return (global_regs[regno]
8343 /* These registers are special and need to be
8344 restored in any case. */
8345 && !(regno == STACK_POINTER_REGNUM
8346 || regno == RETURN_REGNUM
8347 || regno == BASE_REGNUM
8348 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8351 /* Generate insn to save registers FIRST to LAST into
8352 the register save area located at offset OFFSET
8353 relative to register BASE. */
8355 static rtx
8356 save_gprs (rtx base, int offset, int first, int last)
8358 rtx addr, insn, note;
8359 int i;
8361 addr = plus_constant (Pmode, base, offset);
8362 addr = gen_rtx_MEM (Pmode, addr);
8364 set_mem_alias_set (addr, get_frame_alias_set ());
8366 /* Special-case single register. */
8367 if (first == last)
8369 if (TARGET_64BIT)
8370 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8371 else
8372 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8374 if (!global_not_special_regno_p (first))
8375 RTX_FRAME_RELATED_P (insn) = 1;
8376 return insn;
8380 insn = gen_store_multiple (addr,
8381 gen_rtx_REG (Pmode, first),
8382 GEN_INT (last - first + 1));
8384 if (first <= 6 && cfun->stdarg)
8385 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8387 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
8389 if (first + i <= 6)
8390 set_mem_alias_set (mem, get_varargs_alias_set ());
8393 /* We need to set the FRAME_RELATED flag on all SETs
8394 inside the store-multiple pattern.
8396 However, we must not emit DWARF records for registers 2..5
8397 if they are stored for use by variable arguments ...
8399 ??? Unfortunately, it is not enough to simply not set the
8400 FRAME_RELATED flags for those SETs, because the first SET
8401 of the PARALLEL is always treated as if it had the flag
8402 set, even if it does not. Therefore we emit a new pattern
8403 without those registers as REG_FRAME_RELATED_EXPR note. */
8405 if (first >= 6 && !global_not_special_regno_p (first))
8407 rtx pat = PATTERN (insn);
8409 for (i = 0; i < XVECLEN (pat, 0); i++)
8410 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8411 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8412 0, i)))))
8413 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8415 RTX_FRAME_RELATED_P (insn) = 1;
8417 else if (last >= 6)
8419 int start;
8421 for (start = first >= 6 ? first : 6; start <= last; start++)
8422 if (!global_not_special_regno_p (start))
8423 break;
8425 if (start > last)
8426 return insn;
8428 addr = plus_constant (Pmode, base,
8429 offset + (start - first) * UNITS_PER_LONG);
8431 if (start == last)
8433 if (TARGET_64BIT)
8434 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
8435 gen_rtx_REG (Pmode, start));
8436 else
8437 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
8438 gen_rtx_REG (Pmode, start));
8439 note = PATTERN (note);
8441 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8442 RTX_FRAME_RELATED_P (insn) = 1;
8444 return insn;
8447 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8448 gen_rtx_REG (Pmode, start),
8449 GEN_INT (last - start + 1));
8450 note = PATTERN (note);
8452 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8454 for (i = 0; i < XVECLEN (note, 0); i++)
8455 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8456 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8457 0, i)))))
8458 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8460 RTX_FRAME_RELATED_P (insn) = 1;
8463 return insn;
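/* Example of the stdarg handling above: a stdarg function saving
r2..r15 with a single store-multiple gets a REG_FRAME_RELATED_EXPR note
that describes only r6..r15, so no CFI is emitted for the argument
registers r2..r5 stored for va_arg.  */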
8466 /* Generate insn to restore registers FIRST to LAST from
8467 the register save area located at offset OFFSET
8468 relative to register BASE. */
8470 static rtx
8471 restore_gprs (rtx base, int offset, int first, int last)
8473 rtx addr, insn;
8475 addr = plus_constant (Pmode, base, offset);
8476 addr = gen_rtx_MEM (Pmode, addr);
8477 set_mem_alias_set (addr, get_frame_alias_set ());
8479 /* Special-case single register. */
8480 if (first == last)
8482 if (TARGET_64BIT)
8483 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8484 else
8485 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8487 RTX_FRAME_RELATED_P (insn) = 1;
8488 return insn;
8491 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8492 addr,
8493 GEN_INT (last - first + 1));
8494 RTX_FRAME_RELATED_P (insn) = 1;
8495 return insn;
8498 /* Return insn sequence to load the GOT register. */
8500 static GTY(()) rtx got_symbol;
8501 rtx_insn *
8502 s390_load_got (void)
8504 rtx_insn *insns;
8506 /* We cannot use pic_offset_table_rtx here since we use this
8507 function also for non-pic code when __tls_get_offset is called;
8508 in that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
8509 aren't usable. */
8510 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8512 if (!got_symbol)
8514 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8515 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8518 start_sequence ();
8520 if (TARGET_CPU_ZARCH)
8522 emit_move_insn (got_rtx, got_symbol);
8524 else
8526 rtx offset;
8528 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8529 UNSPEC_LTREL_OFFSET);
8530 offset = gen_rtx_CONST (Pmode, offset);
8531 offset = force_const_mem (Pmode, offset);
8533 emit_move_insn (got_rtx, offset);
8535 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8536 UNSPEC_LTREL_BASE);
8537 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8539 emit_move_insn (got_rtx, offset);
8542 insns = get_insns ();
8543 end_sequence ();
8544 return insns;
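/* In other words: with TARGET_CPU_ZARCH the GOT address is loaded with
a single move of _GLOBAL_OFFSET_TABLE_ (typically a larl); otherwise it
is computed from a literal-pool entry (UNSPEC_LTREL_OFFSET) plus the
literal pool base (UNSPEC_LTREL_BASE).  */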
8547 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8548 and the change to the stack pointer. */
8550 static void
8551 s390_emit_stack_tie (void)
8553 rtx mem = gen_frame_mem (BLKmode,
8554 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8556 emit_insn (gen_stack_tie (mem));
8559 /* Copy GPRS into FPR save slots. */
8561 static void
8562 s390_save_gprs_to_fprs (void)
8564 int i;
8566 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8567 return;
8569 for (i = 6; i < 16; i++)
8571 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
8573 rtx_insn *insn =
8574 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
8575 gen_rtx_REG (DImode, i));
8576 RTX_FRAME_RELATED_P (insn) = 1;
8581 /* Restore GPRs from FPR save slots. */
8583 static void
8584 s390_restore_gprs_from_fprs (void)
8586 int i;
8588 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8589 return;
8591 for (i = 6; i < 16; i++)
8593 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
8595 rtx_insn *insn =
8596 emit_move_insn (gen_rtx_REG (DImode, i),
8597 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
8598 df_set_regs_ever_live (i, true);
8599 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
8600 if (i == STACK_POINTER_REGNUM)
8601 add_reg_note (insn, REG_CFA_DEF_CFA,
8602 plus_constant (Pmode, stack_pointer_rtx,
8603 STACK_POINTER_OFFSET));
8604 RTX_FRAME_RELATED_P (insn) = 1;
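/* The two helpers above implement the leaf-function trick of keeping
call-saved GPRs in call-clobbered FPRs instead of stack slots; it is
only used with TARGET_Z10, hardware float and a leaf function, in which
case cfun_gpr_save_slot names an FPR rather than a stack slot.  */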
8610 /* A pass run immediately before shrink-wrapping and prologue and epilogue
8611 generation. */
8613 namespace {
8615 const pass_data pass_data_s390_early_mach =
8617 RTL_PASS, /* type */
8618 "early_mach", /* name */
8619 OPTGROUP_NONE, /* optinfo_flags */
8620 TV_MACH_DEP, /* tv_id */
8621 0, /* properties_required */
8622 0, /* properties_provided */
8623 0, /* properties_destroyed */
8624 0, /* todo_flags_start */
8625 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
8628 class pass_s390_early_mach : public rtl_opt_pass
8630 public:
8631 pass_s390_early_mach (gcc::context *ctxt)
8632 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
8635 /* opt_pass methods: */
8636 virtual unsigned int execute (function *);
8638 }; // class pass_s390_early_mach
8640 unsigned int
8641 pass_s390_early_mach::execute (function *fun)
8643 rtx_insn *insn;
8645 /* Try to get rid of the FPR clobbers. */
8646 s390_optimize_nonescaping_tx ();
8648 /* Re-compute register info. */
8649 s390_register_info ();
8651 /* If we're using a base register, ensure that it is always valid for
8652 the first non-prologue instruction. */
8653 if (fun->machine->base_reg)
8654 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
8656 /* Annotate all constant pool references to let the scheduler know
8657 they implicitly use the base register. */
8658 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8659 if (INSN_P (insn))
8661 annotate_constant_pool_refs (&PATTERN (insn));
8662 df_insn_rescan (insn);
8664 return 0;
8667 } // anon namespace
8669 /* Expand the prologue into a bunch of separate insns. */
8671 void
8672 s390_emit_prologue (void)
8674 rtx insn, addr;
8675 rtx temp_reg;
8676 int i;
8677 int offset;
8678 int next_fpr = 0;
8680 /* Choose the best register to use as a temporary within the prologue.
8681 See below for why TPF must use register 1. */
8683 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8684 && !crtl->is_leaf
8685 && !TARGET_TPF_PROFILING)
8686 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8687 else
8688 temp_reg = gen_rtx_REG (Pmode, 1);
8690 s390_save_gprs_to_fprs ();
8692 /* Save call saved gprs. */
8693 if (cfun_frame_layout.first_save_gpr != -1)
8695 insn = save_gprs (stack_pointer_rtx,
8696 cfun_frame_layout.gprs_offset +
8697 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8698 - cfun_frame_layout.first_save_gpr_slot),
8699 cfun_frame_layout.first_save_gpr,
8700 cfun_frame_layout.last_save_gpr);
8701 emit_insn (insn);
8704 /* Dummy insn to mark literal pool slot. */
8706 if (cfun->machine->base_reg)
8707 emit_insn (gen_main_pool (cfun->machine->base_reg));
8709 offset = cfun_frame_layout.f0_offset;
8711 /* Save f0 and f2. */
8712 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
8714 if (cfun_fpr_save_p (i))
8716 save_fpr (stack_pointer_rtx, offset, i);
8717 offset += 8;
8719 else if (!TARGET_PACKED_STACK || cfun->stdarg)
8720 offset += 8;
8723 /* Save f4 and f6. */
8724 offset = cfun_frame_layout.f4_offset;
8725 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
8727 if (cfun_fpr_save_p (i))
8729 insn = save_fpr (stack_pointer_rtx, offset, i);
8730 offset += 8;
8732 /* If f4 and f6 are call clobbered they are saved due to
8733 stdarg and therefore are not frame related. */
8734 if (!call_really_used_regs[i])
8735 RTX_FRAME_RELATED_P (insn) = 1;
8737 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
8738 offset += 8;
8741 if (TARGET_PACKED_STACK
8742 && cfun_save_high_fprs_p
8743 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8745 offset = (cfun_frame_layout.f8_offset
8746 + (cfun_frame_layout.high_fprs - 1) * 8);
8748 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
8749 if (cfun_fpr_save_p (i))
8751 insn = save_fpr (stack_pointer_rtx, offset, i);
8753 RTX_FRAME_RELATED_P (insn) = 1;
8754 offset -= 8;
8756 if (offset >= cfun_frame_layout.f8_offset)
8757 next_fpr = i;
8760 if (!TARGET_PACKED_STACK)
8761 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
8763 if (flag_stack_usage_info)
8764 current_function_static_stack_size = cfun_frame_layout.frame_size;
8766 /* Decrement stack pointer. */
8768 if (cfun_frame_layout.frame_size > 0)
8770 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8771 rtx real_frame_off;
8773 if (s390_stack_size)
8775 HOST_WIDE_INT stack_guard;
8777 if (s390_stack_guard)
8778 stack_guard = s390_stack_guard;
8779 else
8781 /* If no value for the stack guard is provided the smallest power of 2
8782 larger than or equal to the current frame size is chosen. */
8783 stack_guard = 1;
8784 while (stack_guard < cfun_frame_layout.frame_size)
8785 stack_guard <<= 1;
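/* E.g. a frame size of 0x9000 bytes yields a stack guard of 0x10000.  */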
8788 if (cfun_frame_layout.frame_size >= s390_stack_size)
8790 warning (0, "frame size of function %qs is %wd"
8791 " bytes exceeding user provided stack limit of "
8792 "%d bytes. "
8793 "An unconditional trap is added.",
8794 current_function_name(), cfun_frame_layout.frame_size,
8795 s390_stack_size);
8796 emit_insn (gen_trap ());
8798 else
8800 /* stack_guard has to be smaller than s390_stack_size.
8801 Otherwise we would emit an AND with zero which would
8802 not match the test under mask pattern. */
8803 if (stack_guard >= s390_stack_size)
8805 warning (0, "frame size of function %qs is %wd"
8806 " bytes which is more than half the stack size. "
8807 "The dynamic check would not be reliable. "
8808 "No check emitted for this function.",
8809 current_function_name(),
8810 cfun_frame_layout.frame_size);
8812 else
8814 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8815 & ~(stack_guard - 1));
8817 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8818 GEN_INT (stack_check_mask));
8819 if (TARGET_64BIT)
8820 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8821 t, const0_rtx),
8822 t, const0_rtx, const0_rtx));
8823 else
8824 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8825 t, const0_rtx),
8826 t, const0_rtx, const0_rtx));
8831 if (s390_warn_framesize > 0
8832 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8833 warning (0, "frame size of %qs is %wd bytes",
8834 current_function_name (), cfun_frame_layout.frame_size);
8836 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8837 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8839 /* Save incoming stack pointer into temp reg. */
8840 if (TARGET_BACKCHAIN || next_fpr)
8841 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8843 /* Subtract frame size from stack pointer. */
8845 if (DISP_IN_RANGE (INTVAL (frame_off)))
8847 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8848 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8849 frame_off));
8850 insn = emit_insn (insn);
8852 else
8854 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8855 frame_off = force_const_mem (Pmode, frame_off);
8857 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8858 annotate_constant_pool_refs (&PATTERN (insn));
8861 RTX_FRAME_RELATED_P (insn) = 1;
8862 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8863 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8864 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8865 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8866 real_frame_off)));
8868 /* Set backchain. */
8870 if (TARGET_BACKCHAIN)
8872 if (cfun_frame_layout.backchain_offset)
8873 addr = gen_rtx_MEM (Pmode,
8874 plus_constant (Pmode, stack_pointer_rtx,
8875 cfun_frame_layout.backchain_offset));
8876 else
8877 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8878 set_mem_alias_set (addr, get_frame_alias_set ());
8879 insn = emit_insn (gen_move_insn (addr, temp_reg));
8882 /* If we support non-call exceptions (e.g. for Java),
8883 we need to make sure the backchain pointer is set up
8884 before any possibly trapping memory access. */
8885 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8887 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8888 emit_clobber (addr);
8892 /* Save fprs 8 - 15 (64 bit ABI). */
8894 if (cfun_save_high_fprs_p && next_fpr)
8896 /* If the stack might be accessed through a different register
8897 we have to make sure that the stack pointer decrement is not
8898 moved below the use of the stack slots. */
8899 s390_emit_stack_tie ();
8901 insn = emit_insn (gen_add2_insn (temp_reg,
8902 GEN_INT (cfun_frame_layout.f8_offset)));
8904 offset = 0;
8906 for (i = FPR8_REGNUM; i <= next_fpr; i++)
8907 if (cfun_fpr_save_p (i))
8909 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8910 cfun_frame_layout.frame_size
8911 + cfun_frame_layout.f8_offset
8912 + offset);
8914 insn = save_fpr (temp_reg, offset, i);
8915 offset += 8;
8916 RTX_FRAME_RELATED_P (insn) = 1;
8917 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8918 gen_rtx_SET (VOIDmode,
8919 gen_rtx_MEM (DFmode, addr),
8920 gen_rtx_REG (DFmode, i)));
8924 /* Set frame pointer, if needed. */
8926 if (frame_pointer_needed)
8928 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8929 RTX_FRAME_RELATED_P (insn) = 1;
8932 /* Set up got pointer, if needed. */
8934 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8936 rtx_insn *insns = s390_load_got ();
8938 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
8939 annotate_constant_pool_refs (&PATTERN (insn));
8941 emit_insn (insns);
8944 if (TARGET_TPF_PROFILING)
8946 /* Generate a BAS instruction to serve as a function
8947 entry intercept to facilitate the use of tracing
8948 algorithms located at the branch target. */
8949 emit_insn (gen_prologue_tpf ());
8951 /* Emit a blockage here so that all code
8952 lies between the profiling mechanisms. */
8953 emit_insn (gen_blockage ());
8957 /* Expand the epilogue into a bunch of separate insns. */
8959 void
8960 s390_emit_epilogue (bool sibcall)
8962 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8963 int area_bottom, area_top, offset = 0;
8964 int next_offset;
8965 rtvec p;
8966 int i;
8968 if (TARGET_TPF_PROFILING)
8971 /* Generate a BAS instruction to serve as a function
8972 entry intercept to facilitate the use of tracing
8973 algorithms located at the branch target. */
8975 /* Emit a blockage here so that all code
8976 lies between the profiling mechanisms. */
8977 emit_insn (gen_blockage ());
8979 emit_insn (gen_epilogue_tpf ());
8982 /* Check whether to use frame or stack pointer for restore. */
8984 frame_pointer = (frame_pointer_needed
8985 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8987 s390_frame_area (&area_bottom, &area_top);
8989 /* Check whether we can access the register save area.
8990 If not, increment the frame pointer as required. */
8992 if (area_top <= area_bottom)
8994 /* Nothing to restore. */
8996 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8997 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8999 /* Area is in range. */
9000 offset = cfun_frame_layout.frame_size;
9002 else
9004 rtx insn, frame_off, cfa;
9006 offset = area_bottom < 0 ? -area_bottom : 0;
9007 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
9009 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
9010 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
9011 if (DISP_IN_RANGE (INTVAL (frame_off)))
9013 insn = gen_rtx_SET (VOIDmode, frame_pointer,
9014 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
9015 insn = emit_insn (insn);
9017 else
9019 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
9020 frame_off = force_const_mem (Pmode, frame_off);
9022 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
9023 annotate_constant_pool_refs (&PATTERN (insn));
9025 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
9026 RTX_FRAME_RELATED_P (insn) = 1;
9029 /* Restore call saved fprs. */
9031 if (TARGET_64BIT)
9033 if (cfun_save_high_fprs_p)
9035 next_offset = cfun_frame_layout.f8_offset;
9036 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
9038 if (cfun_fpr_save_p (i))
9040 restore_fpr (frame_pointer,
9041 offset + next_offset, i);
9042 cfa_restores
9043 = alloc_reg_note (REG_CFA_RESTORE,
9044 gen_rtx_REG (DFmode, i), cfa_restores);
9045 next_offset += 8;
9051 else
9053 next_offset = cfun_frame_layout.f4_offset;
9054 /* f4, f6 */
9055 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
9057 if (cfun_fpr_save_p (i))
9059 restore_fpr (frame_pointer,
9060 offset + next_offset, i);
9061 cfa_restores
9062 = alloc_reg_note (REG_CFA_RESTORE,
9063 gen_rtx_REG (DFmode, i), cfa_restores);
9064 next_offset += 8;
9066 else if (!TARGET_PACKED_STACK)
9067 next_offset += 8;
9072 /* Return register. */
9074 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
9076 /* Restore call saved gprs. */
9078 if (cfun_frame_layout.first_restore_gpr != -1)
9080 rtx insn, addr;
9081 int i;
9083 /* Check for global register and save them
9084 to stack location from where they get restored. */
9086 for (i = cfun_frame_layout.first_restore_gpr;
9087 i <= cfun_frame_layout.last_restore_gpr;
9088 i++)
9090 if (global_not_special_regno_p (i))
9092 addr = plus_constant (Pmode, frame_pointer,
9093 offset + cfun_frame_layout.gprs_offset
9094 + (i - cfun_frame_layout.first_save_gpr_slot)
9095 * UNITS_PER_LONG);
9096 addr = gen_rtx_MEM (Pmode, addr);
9097 set_mem_alias_set (addr, get_frame_alias_set ());
9098 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
9100 else
9101 cfa_restores
9102 = alloc_reg_note (REG_CFA_RESTORE,
9103 gen_rtx_REG (Pmode, i), cfa_restores);
9106 if (! sibcall)
9108 /* Fetch return address from stack before load multiple,
9109 this is beneficial for scheduling.
9111 Only do this if we already decided that r14 needs to be
9112 saved to a stack slot. (And not just because r14 happens to
9113 be in between two GPRs which need saving.) Otherwise it
9114 would be difficult to take that decision back in
9115 s390_optimize_prologue. */
9116 if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
9118 int return_regnum = find_unused_clobbered_reg();
9119 if (!return_regnum)
9120 return_regnum = 4;
9121 return_reg = gen_rtx_REG (Pmode, return_regnum);
9123 addr = plus_constant (Pmode, frame_pointer,
9124 offset + cfun_frame_layout.gprs_offset
9125 + (RETURN_REGNUM
9126 - cfun_frame_layout.first_save_gpr_slot)
9127 * UNITS_PER_LONG);
9128 addr = gen_rtx_MEM (Pmode, addr);
9129 set_mem_alias_set (addr, get_frame_alias_set ());
9130 emit_move_insn (return_reg, addr);
9132 /* Once we did that optimization we have to make sure
9133 s390_optimize_prologue does not try to remove the
9134 store of r14 since we will not be able to find the
9135 load issued here. */
9136 cfun_frame_layout.save_return_addr_p = true;
9140 insn = restore_gprs (frame_pointer,
9141 offset + cfun_frame_layout.gprs_offset
9142 + (cfun_frame_layout.first_restore_gpr
9143 - cfun_frame_layout.first_save_gpr_slot)
9144 * UNITS_PER_LONG,
9145 cfun_frame_layout.first_restore_gpr,
9146 cfun_frame_layout.last_restore_gpr);
9147 insn = emit_insn (insn);
9148 REG_NOTES (insn) = cfa_restores;
9149 add_reg_note (insn, REG_CFA_DEF_CFA,
9150 plus_constant (Pmode, stack_pointer_rtx,
9151 STACK_POINTER_OFFSET));
9152 RTX_FRAME_RELATED_P (insn) = 1;
9155 s390_restore_gprs_from_fprs ();
9157 if (! sibcall)
9160 /* Return to caller. */
9162 p = rtvec_alloc (2);
9164 RTVEC_ELT (p, 0) = ret_rtx;
9165 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
9166 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
9170 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
9172 static void
9173 s300_set_up_by_prologue (hard_reg_set_container *regs)
9175 if (cfun->machine->base_reg
9176 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
9177 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
9180 /* Return true if the function can use simple_return to return outside
9181 of a shrink-wrapped region. At present shrink-wrapping is supported
9182 in all cases. */
9184 bool
9185 s390_can_use_simple_return_insn (void)
9187 return true;
9190 /* Return true if the epilogue is guaranteed to contain only a return
9191 instruction and if a direct return can therefore be used instead.
9192 One of the main advantages of using direct return instructions
9193 is that we can then use conditional returns. */
9195 bool
9196 s390_can_use_return_insn (void)
9198 int i;
9200 if (!reload_completed)
9201 return false;
9203 if (crtl->profile)
9204 return false;
9206 if (TARGET_TPF_PROFILING)
9207 return false;
9209 for (i = 0; i < 16; i++)
9210 if (cfun_gpr_save_slot (i))
9211 return false;
9213 /* For 31 bit this is not covered by the frame_size check below
9214 since f4, f6 are saved in the register save area without needing
9215 additional stack space. */
9216 if (!TARGET_64BIT
9217 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
9218 return false;
9220 if (cfun->machine->base_reg
9221 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
9222 return false;
9224 return cfun_frame_layout.frame_size == 0;
9227 /* Return the size in bytes of a function argument of
9228 type TYPE and/or mode MODE. At least one of TYPE or
9229 MODE must be specified. */
9231 static int
9232 s390_function_arg_size (machine_mode mode, const_tree type)
9234 if (type)
9235 return int_size_in_bytes (type);
9237 /* No type info available for some library calls ... */
9238 if (mode != BLKmode)
9239 return GET_MODE_SIZE (mode);
9241 /* If we have neither type nor mode, abort. */
9242 gcc_unreachable ();
9245 /* Return true if a function argument of type TYPE and mode MODE
9246 is to be passed in a floating-point register, if available. */
9248 static bool
9249 s390_function_arg_float (machine_mode mode, const_tree type)
9251 int size = s390_function_arg_size (mode, type);
9252 if (size > 8)
9253 return false;
9255 /* Soft-float changes the ABI: no floating-point registers are used. */
9256 if (TARGET_SOFT_FLOAT)
9257 return false;
9259 /* No type info available for some library calls ... */
9260 if (!type)
9261 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
9263 /* The ABI says that record types with a single member are treated
9264 just like that member would be. */
9265 while (TREE_CODE (type) == RECORD_TYPE)
9267 tree field, single = NULL_TREE;
9269 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
9271 if (TREE_CODE (field) != FIELD_DECL)
9272 continue;
9274 if (single == NULL_TREE)
9275 single = TREE_TYPE (field);
9276 else
9277 return false;
9280 if (single == NULL_TREE)
9281 return false;
9282 else
9283 type = single;
9286 return TREE_CODE (type) == REAL_TYPE;
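/* So, for example, struct { double d; } is passed exactly like a plain
double (in an FPR if one is available), whereas a struct with two
members never reaches the REAL_TYPE check and is not passed in an
FPR.  */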
9289 /* Return true if a function argument of type TYPE and mode MODE
9290 is to be passed in an integer register, or a pair of integer
9291 registers, if available. */
9293 static bool
9294 s390_function_arg_integer (machine_mode mode, const_tree type)
9296 int size = s390_function_arg_size (mode, type);
9297 if (size > 8)
9298 return false;
9300 /* No type info available for some library calls ... */
9301 if (!type)
9302 return GET_MODE_CLASS (mode) == MODE_INT
9303 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
9305 /* We accept small integral (and similar) types. */
9306 if (INTEGRAL_TYPE_P (type)
9307 || POINTER_TYPE_P (type)
9308 || TREE_CODE (type) == NULLPTR_TYPE
9309 || TREE_CODE (type) == OFFSET_TYPE
9310 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
9311 return true;
9313 /* We also accept structs of size 1, 2, 4, 8 that are not
9314 passed in floating-point registers. */
9315 if (AGGREGATE_TYPE_P (type)
9316 && exact_log2 (size) >= 0
9317 && !s390_function_arg_float (mode, type))
9318 return true;
9320 return false;
9323 /* Return 1 if a function argument of type TYPE and mode MODE
9324 is to be passed by reference. The ABI specifies that only
9325 structures of size 1, 2, 4, or 8 bytes are passed by value,
9326 all other structures (and complex numbers) are passed by
9327 reference. */
9329 static bool
9330 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
9331 machine_mode mode, const_tree type,
9332 bool named ATTRIBUTE_UNUSED)
9334 int size = s390_function_arg_size (mode, type);
9335 if (size > 8)
9336 return true;
9338 if (type)
9340 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
9341 return 1;
9343 if (TREE_CODE (type) == COMPLEX_TYPE
9344 || TREE_CODE (type) == VECTOR_TYPE)
9345 return 1;
9348 return 0;
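/* For instance a 3-byte struct (size not a power of two) and any
complex or vector value are passed by reference via a hidden pointer,
while structs of size 1, 2, 4 or 8 are passed by value.  */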
9351 /* Update the data in CUM to advance over an argument of mode MODE and
9352 data type TYPE. (TYPE is null for libcalls where that information
9353 may not be available.). The boolean NAMED specifies whether the
9354 argument is a named argument (as opposed to an unnamed argument
9355 matching an ellipsis). */
9357 static void
9358 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9359 const_tree type, bool named ATTRIBUTE_UNUSED)
9361 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9363 if (s390_function_arg_float (mode, type))
9365 cum->fprs += 1;
9367 else if (s390_function_arg_integer (mode, type))
9369 int size = s390_function_arg_size (mode, type);
9370 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
9372 else
9373 gcc_unreachable ();
9376 /* Define where to put the arguments to a function.
9377 Value is zero to push the argument on the stack,
9378 or a hard register in which to store the argument.
9380 MODE is the argument's machine mode.
9381 TYPE is the data type of the argument (as a tree).
9382 This is null for libcalls where that information may
9383 not be available.
9384 CUM is a variable of type CUMULATIVE_ARGS which gives info about
9385 the preceding args and about the function being called.
9386 NAMED is nonzero if this argument is a named parameter
9387 (otherwise it is an extra parameter matching an ellipsis).
9389 On S/390, we use general purpose registers 2 through 6 to
9390 pass integer, pointer, and certain structure arguments, and
9391 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
9392 to pass floating point arguments. All remaining arguments
9393 are pushed to the stack. */
9395 static rtx
9396 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
9397 const_tree type, bool named ATTRIBUTE_UNUSED)
9399 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9401 if (s390_function_arg_float (mode, type))
9403 if (cum->fprs + 1 > FP_ARG_NUM_REG)
9404 return 0;
9405 else
9406 return gen_rtx_REG (mode, cum->fprs + 16);
9408 else if (s390_function_arg_integer (mode, type))
9410 int size = s390_function_arg_size (mode, type);
9411 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9413 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
9414 return 0;
9415 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
9416 return gen_rtx_REG (mode, cum->gprs + 2);
9417 else if (n_gprs == 2)
9419 rtvec p = rtvec_alloc (2);
9421 RTVEC_ELT (p, 0)
9422 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
9423 const0_rtx);
9424 RTVEC_ELT (p, 1)
9425 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
9426 GEN_INT (4));
9428 return gen_rtx_PARALLEL (mode, p);
9432 /* After the real arguments, expand_call calls us once again
9433 with a void_type_node type. Whatever we return here is
9434 passed as operand 2 to the call expanders.
9436 We don't need this feature ... */
9437 else if (type == void_type_node)
9438 return const0_rtx;
9440 gcc_unreachable ();
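/* The n_gprs == 2 PARALLEL case above mirrors the return value handling
below: with -m31 -mzarch a 64-bit integer argument is described as a
pair of SImode registers at byte offsets 0 and 4 in order to match the
31-bit ABI.  */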
9443 /* Return true if return values of type TYPE should be returned
9444 in a memory buffer whose address is passed by the caller as
9445 hidden first argument. */
9447 static bool
9448 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
9450 /* We accept small integral (and similar) types. */
9451 if (INTEGRAL_TYPE_P (type)
9452 || POINTER_TYPE_P (type)
9453 || TREE_CODE (type) == OFFSET_TYPE
9454 || TREE_CODE (type) == REAL_TYPE)
9455 return int_size_in_bytes (type) > 8;
9457 /* Aggregates and similar constructs are always returned
9458 in memory. */
9459 if (AGGREGATE_TYPE_P (type)
9460 || TREE_CODE (type) == COMPLEX_TYPE
9461 || TREE_CODE (type) == VECTOR_TYPE)
9462 return true;
9464 /* ??? We get called on all sorts of random stuff from
9465 aggregate_value_p. We can't abort, but it's not clear
9466 what's safe to return. Pretend it's a struct I guess. */
9467 return true;
9470 /* Function arguments and return values are promoted to word size. */
9472 static machine_mode
9473 s390_promote_function_mode (const_tree type, machine_mode mode,
9474 int *punsignedp,
9475 const_tree fntype ATTRIBUTE_UNUSED,
9476 int for_return ATTRIBUTE_UNUSED)
9478 if (INTEGRAL_MODE_P (mode)
9479 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
9481 if (type != NULL_TREE && POINTER_TYPE_P (type))
9482 *punsignedp = POINTERS_EXTEND_UNSIGNED;
9483 return Pmode;
9486 return mode;
9489 /* Define where to return a (scalar) value of type RET_TYPE.
9490 If RET_TYPE is null, define where to return a (scalar)
9491 value of mode MODE from a libcall. */
9493 static rtx
9494 s390_function_and_libcall_value (machine_mode mode,
9495 const_tree ret_type,
9496 const_tree fntype_or_decl,
9497 bool outgoing ATTRIBUTE_UNUSED)
9499 /* For normal functions perform the promotion as
9500 promote_function_mode would do. */
9501 if (ret_type)
9503 int unsignedp = TYPE_UNSIGNED (ret_type);
9504 mode = promote_function_mode (ret_type, mode, &unsignedp,
9505 fntype_or_decl, 1);
9508 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
9509 gcc_assert (GET_MODE_SIZE (mode) <= 8);
9511 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
9512 return gen_rtx_REG (mode, 16);
9513 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
9514 || UNITS_PER_LONG == UNITS_PER_WORD)
9515 return gen_rtx_REG (mode, 2);
9516 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
9518 /* This case is triggered when returning a 64 bit value with
9519 -m31 -mzarch. Although the value would fit into a single
9520 register it has to be forced into a 32 bit register pair in
9521 order to match the ABI. */
9522 rtvec p = rtvec_alloc (2);
9524 RTVEC_ELT (p, 0)
9525 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
9526 RTVEC_ELT (p, 1)
9527 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9529 return gen_rtx_PARALLEL (mode, p);
9532 gcc_unreachable ();
9535 /* Define where to return a scalar return value of type RET_TYPE. */
9537 static rtx
9538 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9539 bool outgoing)
9541 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9542 fn_decl_or_type, outgoing);
9545 /* Define where to return a scalar libcall return value of mode
9546 MODE. */
9548 static rtx
9549 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9551 return s390_function_and_libcall_value (mode, NULL_TREE,
9552 NULL_TREE, true);
9556 /* Create and return the va_list datatype.
9558 On S/390, va_list is an array type equivalent to
9560 typedef struct __va_list_tag
9562 long __gpr;
9563 long __fpr;
9564 void *__overflow_arg_area;
9565 void *__reg_save_area;
9566 } va_list[1];
9568 where __gpr and __fpr hold the number of general purpose
9569 or floating point arguments used up to now, respectively,
9570 __overflow_arg_area points to the stack location of the
9571 next argument passed on the stack, and __reg_save_area
9572 always points to the start of the register area in the
9573 call frame of the current function. The function prologue
9574 saves all registers used for argument passing into this
9575 area if the function uses variable arguments. */
9577 static tree
9578 s390_build_builtin_va_list (void)
9580 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9582 record = lang_hooks.types.make_type (RECORD_TYPE);
9584 type_decl =
9585 build_decl (BUILTINS_LOCATION,
9586 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9588 f_gpr = build_decl (BUILTINS_LOCATION,
9589 FIELD_DECL, get_identifier ("__gpr"),
9590 long_integer_type_node);
9591 f_fpr = build_decl (BUILTINS_LOCATION,
9592 FIELD_DECL, get_identifier ("__fpr"),
9593 long_integer_type_node);
9594 f_ovf = build_decl (BUILTINS_LOCATION,
9595 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9596 ptr_type_node);
9597 f_sav = build_decl (BUILTINS_LOCATION,
9598 FIELD_DECL, get_identifier ("__reg_save_area"),
9599 ptr_type_node);
9601 va_list_gpr_counter_field = f_gpr;
9602 va_list_fpr_counter_field = f_fpr;
9604 DECL_FIELD_CONTEXT (f_gpr) = record;
9605 DECL_FIELD_CONTEXT (f_fpr) = record;
9606 DECL_FIELD_CONTEXT (f_ovf) = record;
9607 DECL_FIELD_CONTEXT (f_sav) = record;
9609 TYPE_STUB_DECL (record) = type_decl;
9610 TYPE_NAME (record) = type_decl;
9611 TYPE_FIELDS (record) = f_gpr;
9612 DECL_CHAIN (f_gpr) = f_fpr;
9613 DECL_CHAIN (f_fpr) = f_ovf;
9614 DECL_CHAIN (f_ovf) = f_sav;
9616 layout_type (record);
9618 /* The correct type is an array type of one element. */
9619 return build_array_type (record, build_index_type (size_zero_node));
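/* A rough sketch of the resulting layout with -m64: the record is four
8-byte fields, 32 bytes in total.  __reg_save_area is set up in
s390_va_start below to point at the start of the register save area;
within it the GPR arguments are fetched starting at offset
2 * UNITS_PER_LONG and the FPR arguments at offset 16 * UNITS_PER_LONG
(see sav_ofs in s390_gimplify_va_arg).  */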
9622 /* Implement va_start by filling the va_list structure VALIST.
9623 STDARG_P is always true, and ignored.
9624 NEXTARG points to the first anonymous stack argument.
9626 The following global variables are used to initialize
9627 the va_list structure:
9629 crtl->args.info:
9630 holds number of gprs and fprs used for named arguments.
9631 crtl->args.arg_offset_rtx:
9632 holds the offset of the first anonymous stack argument
9633 (relative to the virtual arg pointer). */
9635 static void
9636 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9638 HOST_WIDE_INT n_gpr, n_fpr;
9639 int off;
9640 tree f_gpr, f_fpr, f_ovf, f_sav;
9641 tree gpr, fpr, ovf, sav, t;
9643 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9644 f_fpr = DECL_CHAIN (f_gpr);
9645 f_ovf = DECL_CHAIN (f_fpr);
9646 f_sav = DECL_CHAIN (f_ovf);
9648 valist = build_simple_mem_ref (valist);
9649 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9650 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9651 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9652 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9654 /* Count number of gp and fp argument registers used. */
9656 n_gpr = crtl->args.info.gprs;
9657 n_fpr = crtl->args.info.fprs;
9659 if (cfun->va_list_gpr_size)
9661 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9662 build_int_cst (NULL_TREE, n_gpr));
9663 TREE_SIDE_EFFECTS (t) = 1;
9664 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9667 if (cfun->va_list_fpr_size)
9669 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9670 build_int_cst (NULL_TREE, n_fpr));
9671 TREE_SIDE_EFFECTS (t) = 1;
9672 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9675 /* Find the overflow area. */
9676 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9677 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9679 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9681 off = INTVAL (crtl->args.arg_offset_rtx);
9682 off = off < 0 ? 0 : off;
9683 if (TARGET_DEBUG_ARG)
9684 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9685 (int)n_gpr, (int)n_fpr, off);
9687 t = fold_build_pointer_plus_hwi (t, off);
9689 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9690 TREE_SIDE_EFFECTS (t) = 1;
9691 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9694 /* Find the register save area. */
9695 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9696 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9698 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9699 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9701 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9702 TREE_SIDE_EFFECTS (t) = 1;
9703 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9707 /* Implement va_arg by updating the va_list structure
9708 VALIST as required to retrieve an argument of type
9709 TYPE, and returning that argument.
9711 Generates code equivalent to:
9713 if (integral value) {
9714 if (size <= 4 && args.gpr < 5 ||
9715 size > 4 && args.gpr < 4 )
9716 ret = args.reg_save_area[args.gpr+8]
9717 else
9718 ret = *args.overflow_arg_area++;
9719 } else if (float value) {
9720 if (args.fpr < 2)
9721 ret = args.reg_save_area[args.fpr+64]
9722 else
9723 ret = *args.overflow_arg_area++;
9724 } else if (aggregate value) {
9725 if (args.gpr < 5)
9726 ret = *args.reg_save_area[args.gpr]
9727 else
9728 ret = **args.overflow_arg_area++;
9729 } */
9731 static tree
9732 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9733 gimple_seq *post_p ATTRIBUTE_UNUSED)
9735 tree f_gpr, f_fpr, f_ovf, f_sav;
9736 tree gpr, fpr, ovf, sav, reg, t, u;
9737 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9738 tree lab_false, lab_over, addr;
9740 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9741 f_fpr = DECL_CHAIN (f_gpr);
9742 f_ovf = DECL_CHAIN (f_fpr);
9743 f_sav = DECL_CHAIN (f_ovf);
9745 valist = build_va_arg_indirect_ref (valist);
9746 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9747 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9748 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9750 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9751 both appear on a lhs. */
9752 valist = unshare_expr (valist);
9753 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9755 size = int_size_in_bytes (type);
9757 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9759 if (TARGET_DEBUG_ARG)
9761 fprintf (stderr, "va_arg: aggregate type");
9762 debug_tree (type);
9765 /* Aggregates are passed by reference. */
9766 indirect_p = 1;
9767 reg = gpr;
9768 n_reg = 1;
9770 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9771 will be added by s390_frame_info because for va_args an even
9772 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9773 sav_ofs = 2 * UNITS_PER_LONG;
9774 sav_scale = UNITS_PER_LONG;
9775 size = UNITS_PER_LONG;
9776 max_reg = GP_ARG_NUM_REG - n_reg;
9778 else if (s390_function_arg_float (TYPE_MODE (type), type))
9780 if (TARGET_DEBUG_ARG)
9782 fprintf (stderr, "va_arg: float type");
9783 debug_tree (type);
9786 /* FP args go in FP registers, if present. */
9787 indirect_p = 0;
9788 reg = fpr;
9789 n_reg = 1;
9790 sav_ofs = 16 * UNITS_PER_LONG;
9791 sav_scale = 8;
9792 max_reg = FP_ARG_NUM_REG - n_reg;
9794 else
9796 if (TARGET_DEBUG_ARG)
9798 fprintf (stderr, "va_arg: other type");
9799 debug_tree (type);
9802 /* Otherwise into GP registers. */
9803 indirect_p = 0;
9804 reg = gpr;
9805 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9807 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9808 will be added by s390_frame_info because for va_args an even
9809 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9810 sav_ofs = 2 * UNITS_PER_LONG;
9812 if (size < UNITS_PER_LONG)
9813 sav_ofs += UNITS_PER_LONG - size;
9815 sav_scale = UNITS_PER_LONG;
9816 max_reg = GP_ARG_NUM_REG - n_reg;
9819 /* Pull the value out of the saved registers ... */
9821 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9822 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9823 addr = create_tmp_var (ptr_type_node, "addr");
9825 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9826 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9827 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9828 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9829 gimplify_and_add (t, pre_p);
9831 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9832 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9833 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9834 t = fold_build_pointer_plus (t, u);
9836 gimplify_assign (addr, t, pre_p);
9838 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9840 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9843 /* ... Otherwise out of the overflow area. */
9845 t = ovf;
9846 if (size < UNITS_PER_LONG)
9847 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9849 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9851 gimplify_assign (addr, t, pre_p);
9853 t = fold_build_pointer_plus_hwi (t, size);
9854 gimplify_assign (ovf, t, pre_p);
9856 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9859 /* Increment register save count. */
9861 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9862 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9863 gimplify_and_add (u, pre_p);
9865 if (indirect_p)
9867 t = build_pointer_type_for_mode (build_pointer_type (type),
9868 ptr_mode, true);
9869 addr = fold_convert (t, addr);
9870 addr = build_va_arg_indirect_ref (addr);
9872 else
9874 t = build_pointer_type_for_mode (type, ptr_mode, true);
9875 addr = fold_convert (t, addr);
9878 return build_va_arg_indirect_ref (addr);
9881 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
9882 expanders.
9883 DEST - Register location where CC will be stored.
9884 TDB - Pointer to a 256 byte area where to store the transaction
9885 diagnostic block. NULL if TDB is not needed.
9886 RETRY - Retry count value. If non-NULL a retry loop for CC2
9887 is emitted.
9888 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
9889 of the tbegin instruction pattern. */
9891 void
9892 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
9894 rtx retry_plus_two = gen_reg_rtx (SImode);
9895 rtx retry_reg = gen_reg_rtx (SImode);
9896 rtx_code_label *retry_label = NULL;
9898 if (retry != NULL_RTX)
9900 emit_move_insn (retry_reg, retry);
9901 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
9902 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
9903 retry_label = gen_label_rtx ();
9904 emit_label (retry_label);
9907 if (clobber_fprs_p)
9908 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK), tdb));
9909 else
9910 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
9911 tdb));
9913 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
9914 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
9915 CC_REGNUM)),
9916 UNSPEC_CC_TO_INT));
9917 if (retry != NULL_RTX)
9919 const int CC0 = 1 << 3;
9920 const int CC1 = 1 << 2;
9921 const int CC3 = 1 << 0;
9922 rtx jump;
9923 rtx count = gen_reg_rtx (SImode);
9924 rtx_code_label *leave_label = gen_label_rtx ();
9926 /* Exit for success and permanent failures. */
9927 jump = s390_emit_jump (leave_label,
9928 gen_rtx_EQ (VOIDmode,
9929 gen_rtx_REG (CCRAWmode, CC_REGNUM),
9930 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
9931 LABEL_NUSES (leave_label) = 1;
9933 /* CC2 - transient failure. Perform retry with ppa. */
9934 emit_move_insn (count, retry_plus_two);
9935 emit_insn (gen_subsi3 (count, count, retry_reg));
9936 emit_insn (gen_tx_assist (count));
9937 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
9938 retry_reg,
9939 retry_reg));
9940 JUMP_LABEL (jump) = retry_label;
9941 LABEL_NUSES (retry_label) = 1;
9942 emit_label (leave_label);
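/* A pseudo-code sketch of the retry expansion above:

     retry_reg = RETRY + 1;  retry_plus_two = RETRY + 2;
   retry:
     CC = tbegin ();
     if (CC is 0, 1 or 3)                      // success or permanent failure
       goto leave;
     tx_assist (retry_plus_two - retry_reg);   // attempt number for PPA
     if (--retry_reg != 0)
       goto retry;
   leave:
     ;
 */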
9946 /* Builtins. */
9948 enum s390_builtin
9950 S390_BUILTIN_TBEGIN,
9951 S390_BUILTIN_TBEGIN_NOFLOAT,
9952 S390_BUILTIN_TBEGIN_RETRY,
9953 S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
9954 S390_BUILTIN_TBEGINC,
9955 S390_BUILTIN_TEND,
9956 S390_BUILTIN_TABORT,
9957 S390_BUILTIN_NON_TX_STORE,
9958 S390_BUILTIN_TX_NESTING_DEPTH,
9959 S390_BUILTIN_TX_ASSIST,
9961 S390_BUILTIN_S390_SFPC,
9962 S390_BUILTIN_S390_EFPC,
9964 S390_BUILTIN_MAX
9967 tree s390_builtin_decls[S390_BUILTIN_MAX];
9969 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX] = {
9970 CODE_FOR_tbegin,
9971 CODE_FOR_tbegin_nofloat,
9972 CODE_FOR_tbegin_retry,
9973 CODE_FOR_tbegin_retry_nofloat,
9974 CODE_FOR_tbeginc,
9975 CODE_FOR_tend,
9976 CODE_FOR_tabort,
9977 CODE_FOR_ntstg,
9978 CODE_FOR_etnd,
9979 CODE_FOR_tx_assist,
9981 CODE_FOR_s390_sfpc,
9982 CODE_FOR_s390_efpc
9985 static void
9986 s390_init_builtins (void)
9988 tree ftype, uint64_type;
9989 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
9990 NULL, NULL);
9991 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
9993 /* void foo (void) */
9994 ftype = build_function_type_list (void_type_node, NULL_TREE);
9995 s390_builtin_decls[S390_BUILTIN_TBEGINC] =
9996 add_builtin_function ("__builtin_tbeginc", ftype, S390_BUILTIN_TBEGINC,
9997 BUILT_IN_MD, NULL, NULL_TREE);
9999 /* void foo (int) */
10000 ftype = build_function_type_list (void_type_node, integer_type_node,
10001 NULL_TREE);
10002 s390_builtin_decls[S390_BUILTIN_TABORT] =
10003 add_builtin_function ("__builtin_tabort", ftype,
10004 S390_BUILTIN_TABORT, BUILT_IN_MD, NULL,
10005 noreturn_attr);
10006 s390_builtin_decls[S390_BUILTIN_TX_ASSIST] =
10007 add_builtin_function ("__builtin_tx_assist", ftype,
10008 S390_BUILTIN_TX_ASSIST, BUILT_IN_MD, NULL, NULL_TREE);
10010 /* void foo (unsigned) */
10011 ftype = build_function_type_list (void_type_node, unsigned_type_node,
10012 NULL_TREE);
10013 s390_builtin_decls[S390_BUILTIN_S390_SFPC] =
10014 add_builtin_function ("__builtin_s390_sfpc", ftype,
10015 S390_BUILTIN_S390_SFPC, BUILT_IN_MD, NULL, NULL_TREE);
10017 /* int foo (void *) */
10018 ftype = build_function_type_list (integer_type_node, ptr_type_node,
10019 NULL_TREE);
10020 s390_builtin_decls[S390_BUILTIN_TBEGIN] =
10021 add_builtin_function ("__builtin_tbegin", ftype, S390_BUILTIN_TBEGIN,
10022 BUILT_IN_MD, NULL, returns_twice_attr);
10023 s390_builtin_decls[S390_BUILTIN_TBEGIN_NOFLOAT] =
10024 add_builtin_function ("__builtin_tbegin_nofloat", ftype,
10025 S390_BUILTIN_TBEGIN_NOFLOAT,
10026 BUILT_IN_MD, NULL, returns_twice_attr);
10028 /* int foo (void *, int) */
10029 ftype = build_function_type_list (integer_type_node, ptr_type_node,
10030 integer_type_node, NULL_TREE);
10031 s390_builtin_decls[S390_BUILTIN_TBEGIN_RETRY] =
10032 add_builtin_function ("__builtin_tbegin_retry", ftype,
10033 S390_BUILTIN_TBEGIN_RETRY,
10034 BUILT_IN_MD,
10035 NULL, returns_twice_attr);
10036 s390_builtin_decls[S390_BUILTIN_TBEGIN_RETRY_NOFLOAT] =
10037 add_builtin_function ("__builtin_tbegin_retry_nofloat", ftype,
10038 S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
10039 BUILT_IN_MD,
10040 NULL, returns_twice_attr);
10042 /* int foo (void) */
10043 ftype = build_function_type_list (integer_type_node, NULL_TREE);
10044 s390_builtin_decls[S390_BUILTIN_TX_NESTING_DEPTH] =
10045 add_builtin_function ("__builtin_tx_nesting_depth", ftype,
10046 S390_BUILTIN_TX_NESTING_DEPTH,
10047 BUILT_IN_MD, NULL, NULL_TREE);
10048 s390_builtin_decls[S390_BUILTIN_TEND] =
10049 add_builtin_function ("__builtin_tend", ftype,
10050 S390_BUILTIN_TEND, BUILT_IN_MD, NULL, NULL_TREE);
10052 /* unsigned foo (void) */
10053 ftype = build_function_type_list (unsigned_type_node, NULL_TREE);
10054 s390_builtin_decls[S390_BUILTIN_S390_EFPC] =
10055 add_builtin_function ("__builtin_s390_efpc", ftype,
10056 S390_BUILTIN_S390_EFPC, BUILT_IN_MD, NULL, NULL_TREE);
10058 /* void foo (uint64_t *, uint64_t) */
10059 if (TARGET_64BIT)
10060 uint64_type = long_unsigned_type_node;
10061 else
10062 uint64_type = long_long_unsigned_type_node;
10064 ftype = build_function_type_list (void_type_node,
10065 build_pointer_type (uint64_type),
10066 uint64_type, NULL_TREE);
10067 s390_builtin_decls[S390_BUILTIN_NON_TX_STORE] =
10068 add_builtin_function ("__builtin_non_tx_store", ftype,
10069 S390_BUILTIN_NON_TX_STORE,
10070 BUILT_IN_MD, NULL, NULL_TREE);
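/* A minimal usage sketch for the transactional-execution builtins
defined above (user code, shown only for illustration):

     if (__builtin_tbegin ((void *) 0) == 0)   // 0: transaction started
       {
         ... transactional work ...
         __builtin_tend ();
       }
     else
       ... fallback path, e.g. take a lock ...

   The returned value is the condition code set by TBEGIN, which
   s390_expand_tbegin extracts via UNSPEC_CC_TO_INT.  */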
10073 /* Expand an expression EXP that calls a built-in function,
10074 with result going to TARGET if that's convenient
10075 (and in mode MODE if that's convenient).
10076 SUBTARGET may be used as the target for computing one of EXP's operands.
10077 IGNORE is nonzero if the value is to be ignored. */
10079 static rtx
10080 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10081 machine_mode mode ATTRIBUTE_UNUSED,
10082 int ignore ATTRIBUTE_UNUSED)
10084 #define MAX_ARGS 2
10086 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10087 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10088 enum insn_code icode;
10089 rtx op[MAX_ARGS], pat;
10090 int arity;
10091 bool nonvoid;
10092 tree arg;
10093 call_expr_arg_iterator iter;
10095 if (fcode >= S390_BUILTIN_MAX)
10096 internal_error ("bad builtin fcode");
10097 icode = code_for_builtin[fcode];
10098 if (icode == 0)
10099 internal_error ("bad builtin fcode");
10101 if (!TARGET_HTM && fcode <= S390_BUILTIN_TX_ASSIST)
10102 error ("Transactional execution builtins not enabled (-mhtm)\n");
10104 /* Set a flag in the machine specific cfun part in order to support
10105 saving/restoring of FPRs. */
10106 if (fcode == S390_BUILTIN_TBEGIN || fcode == S390_BUILTIN_TBEGIN_RETRY)
10107 cfun->machine->tbegin_p = true;
10109 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
10111 arity = 0;
10112 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
10114 const struct insn_operand_data *insn_op;
10116 if (arg == error_mark_node)
10117 return NULL_RTX;
10118 if (arity >= MAX_ARGS)
10119 return NULL_RTX;
10121 insn_op = &insn_data[icode].operand[arity + nonvoid];
10123 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
10125 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
10127 if (insn_op->predicate == memory_operand)
10129 /* Don't move a NULL pointer into a register. Otherwise
10130 we have to rely on combine being able to move it back
10131 in order to get an immediate 0 in the instruction. */
10132 if (op[arity] != const0_rtx)
10133 op[arity] = copy_to_mode_reg (Pmode, op[arity]);
10134 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
10136 else
10137 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
10140 arity++;
10143 if (nonvoid)
10145 machine_mode tmode = insn_data[icode].operand[0].mode;
10146 if (!target
10147 || GET_MODE (target) != tmode
10148 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
10149 target = gen_reg_rtx (tmode);
10152 switch (arity)
10154 case 0:
10155 pat = GEN_FCN (icode) (target);
10156 break;
10157 case 1:
10158 if (nonvoid)
10159 pat = GEN_FCN (icode) (target, op[0]);
10160 else
10161 pat = GEN_FCN (icode) (op[0]);
10162 break;
10163 case 2:
10164 if (nonvoid)
10165 pat = GEN_FCN (icode) (target, op[0], op[1]);
10166 else
10167 pat = GEN_FCN (icode) (op[0], op[1]);
10168 break;
10169 default:
10170 gcc_unreachable ();
10172 if (!pat)
10173 return NULL_RTX;
10174 emit_insn (pat);
10176 if (nonvoid)
10177 return target;
10178 else
10179 return const0_rtx;
10182 /* Return the decl for the target specific builtin with the function
10183 code FCODE. */
10185 static tree
10186 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
10188 if (fcode >= S390_BUILTIN_MAX)
10189 return error_mark_node;
10191 return s390_builtin_decls[fcode];
10194 /* We call mcount before the function prologue. So a profiled leaf
10195 function should stay a leaf function. */
10197 static bool
10198 s390_keep_leaf_when_profiled ()
10200 return true;
10203 /* Output assembly code for the trampoline template to
10204 stdio stream FILE.
10206 On S/390, we use gpr 1 internally in the trampoline code;
10207 gpr 0 is used to hold the static chain. */
10209 static void
10210 s390_asm_trampoline_template (FILE *file)
10212 rtx op[2];
10213 op[0] = gen_rtx_REG (Pmode, 0);
10214 op[1] = gen_rtx_REG (Pmode, 1);
10216 if (TARGET_64BIT)
10218 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
10219 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
10220 output_asm_insn ("br\t%1", op); /* 2 byte */
10221 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
10223 else
10225 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
10226 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
10227 output_asm_insn ("br\t%1", op); /* 2 byte */
10228 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
10232 /* Emit RTL insns to initialize the variable parts of a trampoline.
10233 FNADDR is an RTX for the address of the function's pure code.
10234 CXT is an RTX for the static chain value for the function. */
10236 static void
10237 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10239 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10240 rtx mem;
10242 emit_block_move (m_tramp, assemble_trampoline_template (),
10243 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
10245 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
10246 emit_move_insn (mem, cxt);
10247 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
10248 emit_move_insn (mem, fnaddr);
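
/* Resulting trampoline layout for TARGET_64BIT (a sketch; offsets in
   bytes, UNITS_PER_LONG == 8):

     0:  basr  %r1,0            r1 <- address of the next insn (tramp+2)
     2:  lmg   %r0,%r1,14(%r1)  r0 <- [tramp+16], r1 <- [tramp+24]
     8:  br    %r1
     10: padding up to offset 16
     16: static chain value     stored by s390_trampoline_init
     24: target function addr   stored by s390_trampoline_init

   so the trampoline jumps to FNADDR with the static chain in gpr 0, as
   described in the template comment above.  */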
10251 /* Output assembler code to FILE to increment profiler label # LABELNO
10252 for profiling a function entry. */
10254 void
10255 s390_function_profiler (FILE *file, int labelno)
10257 rtx op[7];
10259 char label[128];
10260 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
10262 fprintf (file, "# function profiler \n");
10264 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
10265 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
10266 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
10268 op[2] = gen_rtx_REG (Pmode, 1);
10269 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
10270 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
10272 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
10273 if (flag_pic)
10275 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
10276 op[4] = gen_rtx_CONST (Pmode, op[4]);
10279 if (TARGET_64BIT)
10281 output_asm_insn ("stg\t%0,%1", op);
10282 output_asm_insn ("larl\t%2,%3", op);
10283 output_asm_insn ("brasl\t%0,%4", op);
10284 output_asm_insn ("lg\t%0,%1", op);
10286 else if (!flag_pic)
10288 op[6] = gen_label_rtx ();
10290 output_asm_insn ("st\t%0,%1", op);
10291 output_asm_insn ("bras\t%2,%l6", op);
10292 output_asm_insn (".long\t%4", op);
10293 output_asm_insn (".long\t%3", op);
10294 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
10295 output_asm_insn ("l\t%0,0(%2)", op);
10296 output_asm_insn ("l\t%2,4(%2)", op);
10297 output_asm_insn ("basr\t%0,%0", op);
10298 output_asm_insn ("l\t%0,%1", op);
10300 else
10302 op[5] = gen_label_rtx ();
10303 op[6] = gen_label_rtx ();
10305 output_asm_insn ("st\t%0,%1", op);
10306 output_asm_insn ("bras\t%2,%l6", op);
10307 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
10308 output_asm_insn (".long\t%4-%l5", op);
10309 output_asm_insn (".long\t%3-%l5", op);
10310 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
10311 output_asm_insn ("lr\t%0,%2", op);
10312 output_asm_insn ("a\t%0,0(%2)", op);
10313 output_asm_insn ("a\t%2,4(%2)", op);
10314 output_asm_insn ("basr\t%0,%0", op);
10315 output_asm_insn ("l\t%0,%1", op);
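
/* For TARGET_64BIT and labelno 0 the emitted sequence is roughly
   (a sketch, non-PIC case):

     stg    %r14,8(%r15)       save the return address register
     larl   %r1,.LP0           address of the profiler label
     brasl  %r14,_mcount       call the profiling routine
     lg     %r14,8(%r15)       restore the return address register

   With -fpic the _mcount reference becomes a PLT reference instead.  */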
10319 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
10320 into its SYMBOL_REF_FLAGS. */
10322 static void
10323 s390_encode_section_info (tree decl, rtx rtl, int first)
10325 default_encode_section_info (decl, rtl, first);
10327 if (TREE_CODE (decl) == VAR_DECL)
10329 /* If a variable has a forced alignment to < 2 bytes, mark it
10330 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
10331 operand. */
10332 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
10333 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
10334 if (!DECL_SIZE (decl)
10335 || !DECL_ALIGN (decl)
10336 || !tree_fits_shwi_p (DECL_SIZE (decl))
10337 || (DECL_ALIGN (decl) <= 64
10338 && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
10339 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
10342 /* Literal pool references don't have a decl so they are handled
10343 differently here. We rely on the information in the MEM_ALIGN
10344 entry to decide upon natural alignment. */
10345 if (MEM_P (rtl)
10346 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
10347 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
10348 && (MEM_ALIGN (rtl) == 0
10349 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
10350 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
10351 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
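
/* Example of the first flag (a sketch): a definition such as

     char c __attribute__ ((aligned (1))) = 0;

   has DECL_USER_ALIGN set with DECL_ALIGN == 8 bits, so its symbol gets
   SYMBOL_FLAG_ALIGN1 and the address is not materialized with LARL,
   which can only produce even (2-byte aligned) addresses.  */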
10354 /* Output thunk to FILE that implements a C++ virtual function call (with
10355 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
10356 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
10357 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
10358 relative to the resulting this pointer. */
10360 static void
10361 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10362 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10363 tree function)
10365 rtx op[10];
10366 int nonlocal = 0;
10368 /* Make sure unwind info is emitted for the thunk if needed. */
10369 final_start_function (emit_barrier (), file, 1);
10371 /* Operand 0 is the target function. */
10372 op[0] = XEXP (DECL_RTL (function), 0);
10373 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
10375 nonlocal = 1;
10376 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
10377 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
10378 op[0] = gen_rtx_CONST (Pmode, op[0]);
10381 /* Operand 1 is the 'this' pointer. */
10382 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10383 op[1] = gen_rtx_REG (Pmode, 3);
10384 else
10385 op[1] = gen_rtx_REG (Pmode, 2);
10387 /* Operand 2 is the delta. */
10388 op[2] = GEN_INT (delta);
10390 /* Operand 3 is the vcall_offset. */
10391 op[3] = GEN_INT (vcall_offset);
10393 /* Operand 4 is the temporary register. */
10394 op[4] = gen_rtx_REG (Pmode, 1);
10396 /* Operands 5 to 8 can be used as labels. */
10397 op[5] = NULL_RTX;
10398 op[6] = NULL_RTX;
10399 op[7] = NULL_RTX;
10400 op[8] = NULL_RTX;
10402 /* Operand 9 can be used as a temporary register. */
10403 op[9] = NULL_RTX;
10405 /* Generate code. */
10406 if (TARGET_64BIT)
10408 /* Setup literal pool pointer if required. */
10409 if ((!DISP_IN_RANGE (delta)
10410 && !CONST_OK_FOR_K (delta)
10411 && !CONST_OK_FOR_Os (delta))
10412 || (!DISP_IN_RANGE (vcall_offset)
10413 && !CONST_OK_FOR_K (vcall_offset)
10414 && !CONST_OK_FOR_Os (vcall_offset)))
10416 op[5] = gen_label_rtx ();
10417 output_asm_insn ("larl\t%4,%5", op);
10420 /* Add DELTA to this pointer. */
10421 if (delta)
10423 if (CONST_OK_FOR_J (delta))
10424 output_asm_insn ("la\t%1,%2(%1)", op);
10425 else if (DISP_IN_RANGE (delta))
10426 output_asm_insn ("lay\t%1,%2(%1)", op);
10427 else if (CONST_OK_FOR_K (delta))
10428 output_asm_insn ("aghi\t%1,%2", op);
10429 else if (CONST_OK_FOR_Os (delta))
10430 output_asm_insn ("agfi\t%1,%2", op);
10431 else
10433 op[6] = gen_label_rtx ();
10434 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
10438 /* Perform vcall adjustment. */
10439 if (vcall_offset)
10441 if (DISP_IN_RANGE (vcall_offset))
10443 output_asm_insn ("lg\t%4,0(%1)", op);
10444 output_asm_insn ("ag\t%1,%3(%4)", op);
10446 else if (CONST_OK_FOR_K (vcall_offset))
10448 output_asm_insn ("lghi\t%4,%3", op);
10449 output_asm_insn ("ag\t%4,0(%1)", op);
10450 output_asm_insn ("ag\t%1,0(%4)", op);
10452 else if (CONST_OK_FOR_Os (vcall_offset))
10454 output_asm_insn ("lgfi\t%4,%3", op);
10455 output_asm_insn ("ag\t%4,0(%1)", op);
10456 output_asm_insn ("ag\t%1,0(%4)", op);
10458 else
10460 op[7] = gen_label_rtx ();
10461 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
10462 output_asm_insn ("ag\t%4,0(%1)", op);
10463 output_asm_insn ("ag\t%1,0(%4)", op);
10467 /* Jump to target. */
10468 output_asm_insn ("jg\t%0", op);
10470 /* Output literal pool if required. */
10471 if (op[5])
10473 output_asm_insn (".align\t4", op);
10474 targetm.asm_out.internal_label (file, "L",
10475 CODE_LABEL_NUMBER (op[5]));
10477 if (op[6])
10479 targetm.asm_out.internal_label (file, "L",
10480 CODE_LABEL_NUMBER (op[6]));
10481 output_asm_insn (".long\t%2", op);
10483 if (op[7])
10485 targetm.asm_out.internal_label (file, "L",
10486 CODE_LABEL_NUMBER (op[7]));
10487 output_asm_insn (".long\t%3", op);
10490 else
10492 /* Setup base pointer if required. */
10493 if (!vcall_offset
10494 || (!DISP_IN_RANGE (delta)
10495 && !CONST_OK_FOR_K (delta)
10496 && !CONST_OK_FOR_Os (delta))
10497 || (!DISP_IN_RANGE (delta)
10498 && !CONST_OK_FOR_K (vcall_offset)
10499 && !CONST_OK_FOR_Os (vcall_offset)))
10501 op[5] = gen_label_rtx ();
10502 output_asm_insn ("basr\t%4,0", op);
10503 targetm.asm_out.internal_label (file, "L",
10504 CODE_LABEL_NUMBER (op[5]));
10507 /* Add DELTA to this pointer. */
10508 if (delta)
10510 if (CONST_OK_FOR_J (delta))
10511 output_asm_insn ("la\t%1,%2(%1)", op);
10512 else if (DISP_IN_RANGE (delta))
10513 output_asm_insn ("lay\t%1,%2(%1)", op);
10514 else if (CONST_OK_FOR_K (delta))
10515 output_asm_insn ("ahi\t%1,%2", op);
10516 else if (CONST_OK_FOR_Os (delta))
10517 output_asm_insn ("afi\t%1,%2", op);
10518 else
10520 op[6] = gen_label_rtx ();
10521 output_asm_insn ("a\t%1,%6-%5(%4)", op);
10525 /* Perform vcall adjustment. */
10526 if (vcall_offset)
10528 if (CONST_OK_FOR_J (vcall_offset))
10530 output_asm_insn ("l\t%4,0(%1)", op);
10531 output_asm_insn ("a\t%1,%3(%4)", op);
10533 else if (DISP_IN_RANGE (vcall_offset))
10535 output_asm_insn ("l\t%4,0(%1)", op);
10536 output_asm_insn ("ay\t%1,%3(%4)", op);
10538 else if (CONST_OK_FOR_K (vcall_offset))
10540 output_asm_insn ("lhi\t%4,%3", op);
10541 output_asm_insn ("a\t%4,0(%1)", op);
10542 output_asm_insn ("a\t%1,0(%4)", op);
10544 else if (CONST_OK_FOR_Os (vcall_offset))
10546 output_asm_insn ("iilf\t%4,%3", op);
10547 output_asm_insn ("a\t%4,0(%1)", op);
10548 output_asm_insn ("a\t%1,0(%4)", op);
10550 else
10552 op[7] = gen_label_rtx ();
10553 output_asm_insn ("l\t%4,%7-%5(%4)", op);
10554 output_asm_insn ("a\t%4,0(%1)", op);
10555 output_asm_insn ("a\t%1,0(%4)", op);
10558 /* We had to clobber the base pointer register.
10559 Set up the base pointer again (with a different base). */
10560 op[5] = gen_label_rtx ();
10561 output_asm_insn ("basr\t%4,0", op);
10562 targetm.asm_out.internal_label (file, "L",
10563 CODE_LABEL_NUMBER (op[5]));
10566 /* Jump to target. */
10567 op[8] = gen_label_rtx ();
10569 if (!flag_pic)
10570 output_asm_insn ("l\t%4,%8-%5(%4)", op);
10571 else if (!nonlocal)
10572 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10573 /* We cannot call through .plt, since .plt requires %r12 loaded. */
10574 else if (flag_pic == 1)
10576 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10577 output_asm_insn ("l\t%4,%0(%4)", op);
10579 else if (flag_pic == 2)
10581 op[9] = gen_rtx_REG (Pmode, 0);
10582 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
10583 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10584 output_asm_insn ("ar\t%4,%9", op);
10585 output_asm_insn ("l\t%4,0(%4)", op);
10588 output_asm_insn ("br\t%4", op);
10590 /* Output literal pool. */
10591 output_asm_insn (".align\t4", op);
10593 if (nonlocal && flag_pic == 2)
10594 output_asm_insn (".long\t%0", op);
10595 if (nonlocal)
10597 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10598 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
10601 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
10602 if (!flag_pic)
10603 output_asm_insn (".long\t%0", op);
10604 else
10605 output_asm_insn (".long\t%0-%5", op);
10607 if (op[6])
10609 targetm.asm_out.internal_label (file, "L",
10610 CODE_LABEL_NUMBER (op[6]));
10611 output_asm_insn (".long\t%2", op);
10613 if (op[7])
10615 targetm.asm_out.internal_label (file, "L",
10616 CODE_LABEL_NUMBER (op[7]));
10617 output_asm_insn (".long\t%3", op);
10620 final_end_function ();
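
/* For a simple case -- TARGET_64BIT, a small DELTA (say 16), a zero
   VCALL_OFFSET and a locally bound target -- the emitted thunk is
   roughly just

     la   %r2,16(%r2)        adjust the this pointer by DELTA
     jg   <function>         tail-jump to the real method

   (a sketch; the this pointer sits in %r3 instead when the function
   returns its value in memory, as handled above).  */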
10623 static bool
10624 s390_valid_pointer_mode (machine_mode mode)
10626 return (mode == SImode || (TARGET_64BIT && mode == DImode));
10629 /* Checks whether the given CALL_EXPR would use a call-saved
10630 register. This is used to decide whether sibling call
10631 optimization could be performed on the respective function
10632 call. */
10634 static bool
10635 s390_call_saved_register_used (tree call_expr)
10637 CUMULATIVE_ARGS cum_v;
10638 cumulative_args_t cum;
10639 tree parameter;
10640 machine_mode mode;
10641 tree type;
10642 rtx parm_rtx;
10643 int reg, i;
10645 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
10646 cum = pack_cumulative_args (&cum_v);
10648 for (i = 0; i < call_expr_nargs (call_expr); i++)
10650 parameter = CALL_EXPR_ARG (call_expr, i);
10651 gcc_assert (parameter);
10653 /* For an undeclared variable passed as parameter we will get
10654 an ERROR_MARK node here. */
10655 if (TREE_CODE (parameter) == ERROR_MARK)
10656 return true;
10658 type = TREE_TYPE (parameter);
10659 gcc_assert (type);
10661 mode = TYPE_MODE (type);
10662 gcc_assert (mode);
10664 if (pass_by_reference (&cum_v, mode, type, true))
10666 mode = Pmode;
10667 type = build_pointer_type (type);
10670 parm_rtx = s390_function_arg (cum, mode, type, 0);
10672 s390_function_arg_advance (cum, mode, type, 0);
10674 if (!parm_rtx)
10675 continue;
10677 if (REG_P (parm_rtx))
10679 for (reg = 0;
10680 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
10681 reg++)
10682 if (!call_used_regs[reg + REGNO (parm_rtx)])
10683 return true;
10686 if (GET_CODE (parm_rtx) == PARALLEL)
10688 int i;
10690 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
10692 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
10694 gcc_assert (REG_P (r));
10696 for (reg = 0;
10697 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
10698 reg++)
10699 if (!call_used_regs[reg + REGNO (r)])
10700 return true;
10705 return false;
10708 /* Return true if the given call expression can be
10709 turned into a sibling call.
10710 DECL holds the declaration of the function to be called whereas
10711 EXP is the call expression itself. */
10713 static bool
10714 s390_function_ok_for_sibcall (tree decl, tree exp)
10716 /* The TPF epilogue uses register 1. */
10717 if (TARGET_TPF_PROFILING)
10718 return false;
10720 /* The 31 bit PLT code uses register 12 (GOT pointer - call-saved)
10721 which would have to be restored before the sibcall. */
10722 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
10723 return false;
10725 /* Register 6 on s390 is available as an argument register but is
10726 call-saved. This makes functions needing this register for arguments
10727 not suitable for sibcalls. */
10728 return !s390_call_saved_register_used (exp);
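
/* Sketch of the register 6 case: for

     extern int callee (int, int, int, int, int);
     int caller (int a, int b, int c, int d, int e)
     {
       return callee (a, b, c, d, e);
     }

   the fifth argument is passed in %r6, which is call-saved, so
   s390_call_saved_register_used returns true and the call is not
   turned into a sibcall.  */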
10731 /* Return the fixed registers used for condition codes. */
10733 static bool
10734 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
10736 *p1 = CC_REGNUM;
10737 *p2 = INVALID_REGNUM;
10739 return true;
10742 /* This function is used by the call expanders of the machine description.
10743 It emits the call insn itself together with the necessary operations
10744 to adjust the target address and returns the emitted insn.
10745 ADDR_LOCATION is the target address rtx
10746 TLS_CALL the location of the thread-local symbol
10747 RESULT_REG the register where the result of the call should be stored
10748 RETADDR_REG the register where the return address should be stored
10749 If this parameter is NULL_RTX the call is considered
10750 to be a sibling call. */
10752 rtx_insn *
10753 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
10754 rtx retaddr_reg)
10756 bool plt_call = false;
10757 rtx_insn *insn;
10758 rtx call;
10759 rtx clobber;
10760 rtvec vec;
10762 /* Direct function calls need special treatment. */
10763 if (GET_CODE (addr_location) == SYMBOL_REF)
10765 /* When calling a global routine in PIC mode, we must
10766 replace the symbol itself with the PLT stub. */
10767 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
10769 if (retaddr_reg != NULL_RTX)
10771 addr_location = gen_rtx_UNSPEC (Pmode,
10772 gen_rtvec (1, addr_location),
10773 UNSPEC_PLT);
10774 addr_location = gen_rtx_CONST (Pmode, addr_location);
10775 plt_call = true;
10777 else
10778 /* For -fpic code the PLT entries might use r12 which is
10779 call-saved. Therefore we cannot do a sibcall when
10780 calling directly using a symbol ref. When reaching
10781 this point we decided (in s390_function_ok_for_sibcall)
10782 to do a sibcall for a function pointer but one of the
10783 optimizers was able to get rid of the function pointer
10784 by propagating the symbol ref into the call. This
10785 optimization is illegal for S/390 so we turn the direct
10786 call into an indirect call again. */
10787 addr_location = force_reg (Pmode, addr_location);
10790 /* Unless we can use the bras(l) insn, force the
10791 routine address into a register. */
10792 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
10794 if (flag_pic)
10795 addr_location = legitimize_pic_address (addr_location, 0);
10796 else
10797 addr_location = force_reg (Pmode, addr_location);
10801 /* If it is already an indirect call or the code above moved the
10802 SYMBOL_REF somewhere else, make sure the address can be found in
10803 register 1. */
10804 if (retaddr_reg == NULL_RTX
10805 && GET_CODE (addr_location) != SYMBOL_REF
10806 && !plt_call)
10808 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
10809 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
10812 addr_location = gen_rtx_MEM (QImode, addr_location);
10813 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
10815 if (result_reg != NULL_RTX)
10816 call = gen_rtx_SET (VOIDmode, result_reg, call);
10818 if (retaddr_reg != NULL_RTX)
10820 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
10822 if (tls_call != NULL_RTX)
10823 vec = gen_rtvec (3, call, clobber,
10824 gen_rtx_USE (VOIDmode, tls_call));
10825 else
10826 vec = gen_rtvec (2, call, clobber);
10828 call = gen_rtx_PARALLEL (VOIDmode, vec);
10831 insn = emit_call_insn (call);
10833 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
10834 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
10836 /* s390_function_ok_for_sibcall should
10837 have denied sibcalls in this case. */
10838 gcc_assert (retaddr_reg != NULL_RTX);
10839 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10841 return insn;
10844 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10846 static void
10847 s390_conditional_register_usage (void)
10849 int i;
10851 if (flag_pic)
10853 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10854 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10856 if (TARGET_CPU_ZARCH)
10858 fixed_regs[BASE_REGNUM] = 0;
10859 call_used_regs[BASE_REGNUM] = 0;
10860 fixed_regs[RETURN_REGNUM] = 0;
10861 call_used_regs[RETURN_REGNUM] = 0;
10863 if (TARGET_64BIT)
10865 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10866 call_used_regs[i] = call_really_used_regs[i] = 0;
10868 else
10870 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
10871 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
10874 if (TARGET_SOFT_FLOAT)
10876 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10877 call_used_regs[i] = fixed_regs[i] = 1;
10881 /* Corresponding function to eh_return expander. */
10883 static GTY(()) rtx s390_tpf_eh_return_symbol;
10884 void
10885 s390_emit_tpf_eh_return (rtx target)
10887 rtx_insn *insn;
10888 rtx reg, orig_ra;
10890 if (!s390_tpf_eh_return_symbol)
10891 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10893 reg = gen_rtx_REG (Pmode, 2);
10894 orig_ra = gen_rtx_REG (Pmode, 3);
10896 emit_move_insn (reg, target);
10897 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
10898 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10899 gen_rtx_REG (Pmode, RETURN_REGNUM));
10900 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10901 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
10903 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10906 /* Rework the prologue/epilogue to avoid saving/restoring
10907 registers unnecessarily. */
10909 static void
10910 s390_optimize_prologue (void)
10912 rtx_insn *insn, *new_insn, *next_insn;
10914 /* Do a final recompute of the frame-related data. */
10915 s390_optimize_register_info ();
10917 /* If all special registers are in fact used, there's nothing we
10918 can do, so no point in walking the insn list. */
10920 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10921 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10922 && (TARGET_CPU_ZARCH
10923 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10924 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10925 return;
10927 /* Search for prologue/epilogue insns and replace them. */
10929 for (insn = get_insns (); insn; insn = next_insn)
10931 int first, last, off;
10932 rtx set, base, offset;
10933 rtx pat;
10935 next_insn = NEXT_INSN (insn);
10937 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10938 continue;
10940 pat = PATTERN (insn);
10942 /* Remove ldgr/lgdr instructions used for saving and restoring
10943 GPRs if possible. */
10944 if (TARGET_Z10
10945 && GET_CODE (pat) == SET
10946 && GET_MODE (SET_SRC (pat)) == DImode
10947 && REG_P (SET_SRC (pat))
10948 && REG_P (SET_DEST (pat)))
10950 int src_regno = REGNO (SET_SRC (pat));
10951 int dest_regno = REGNO (SET_DEST (pat));
10952 int gpr_regno;
10953 int fpr_regno;
10955 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
10956 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
10957 continue;
10959 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
10960 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
10962 /* GPR must be call-saved, FPR must be call-clobbered. */
10963 if (!call_really_used_regs[fpr_regno]
10964 || call_really_used_regs[gpr_regno])
10965 continue;
10967 /* It must not happen that what we once saved in an FPR now
10968 needs a stack slot. */
10969 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
10971 if (cfun_gpr_save_slot (gpr_regno) == 0)
10973 remove_insn (insn);
10974 continue;
10978 if (GET_CODE (pat) == PARALLEL
10979 && store_multiple_operation (pat, VOIDmode))
10981 set = XVECEXP (pat, 0, 0);
10982 first = REGNO (SET_SRC (set));
10983 last = first + XVECLEN (pat, 0) - 1;
10984 offset = const0_rtx;
10985 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10986 off = INTVAL (offset);
10988 if (GET_CODE (base) != REG || off < 0)
10989 continue;
10990 if (cfun_frame_layout.first_save_gpr != -1
10991 && (cfun_frame_layout.first_save_gpr < first
10992 || cfun_frame_layout.last_save_gpr > last))
10993 continue;
10994 if (REGNO (base) != STACK_POINTER_REGNUM
10995 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10996 continue;
10997 if (first > BASE_REGNUM || last < BASE_REGNUM)
10998 continue;
11000 if (cfun_frame_layout.first_save_gpr != -1)
11002 rtx s_pat = save_gprs (base,
11003 off + (cfun_frame_layout.first_save_gpr
11004 - first) * UNITS_PER_LONG,
11005 cfun_frame_layout.first_save_gpr,
11006 cfun_frame_layout.last_save_gpr);
11007 new_insn = emit_insn_before (s_pat, insn);
11008 INSN_ADDRESSES_NEW (new_insn, -1);
11011 remove_insn (insn);
11012 continue;
11015 if (cfun_frame_layout.first_save_gpr == -1
11016 && GET_CODE (pat) == SET
11017 && GENERAL_REG_P (SET_SRC (pat))
11018 && GET_CODE (SET_DEST (pat)) == MEM)
11020 set = pat;
11021 first = REGNO (SET_SRC (set));
11022 offset = const0_rtx;
11023 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
11024 off = INTVAL (offset);
11026 if (GET_CODE (base) != REG || off < 0)
11027 continue;
11028 if (REGNO (base) != STACK_POINTER_REGNUM
11029 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11030 continue;
11032 remove_insn (insn);
11033 continue;
11036 if (GET_CODE (pat) == PARALLEL
11037 && load_multiple_operation (pat, VOIDmode))
11039 set = XVECEXP (pat, 0, 0);
11040 first = REGNO (SET_DEST (set));
11041 last = first + XVECLEN (pat, 0) - 1;
11042 offset = const0_rtx;
11043 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
11044 off = INTVAL (offset);
11046 if (GET_CODE (base) != REG || off < 0)
11047 continue;
11049 if (cfun_frame_layout.first_restore_gpr != -1
11050 && (cfun_frame_layout.first_restore_gpr < first
11051 || cfun_frame_layout.last_restore_gpr > last))
11052 continue;
11053 if (REGNO (base) != STACK_POINTER_REGNUM
11054 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11055 continue;
11056 if (first > BASE_REGNUM || last < BASE_REGNUM)
11057 continue;
11059 if (cfun_frame_layout.first_restore_gpr != -1)
11061 rtx rpat = restore_gprs (base,
11062 off + (cfun_frame_layout.first_restore_gpr
11063 - first) * UNITS_PER_LONG,
11064 cfun_frame_layout.first_restore_gpr,
11065 cfun_frame_layout.last_restore_gpr);
11067 /* Remove REG_CFA_RESTOREs for registers that we no
11068 longer need to save. */
11069 REG_NOTES (rpat) = REG_NOTES (insn);
11070 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
11071 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
11072 && ((int) REGNO (XEXP (*ptr, 0))
11073 < cfun_frame_layout.first_restore_gpr))
11074 *ptr = XEXP (*ptr, 1);
11075 else
11076 ptr = &XEXP (*ptr, 1);
11077 new_insn = emit_insn_before (rpat, insn);
11078 RTX_FRAME_RELATED_P (new_insn) = 1;
11079 INSN_ADDRESSES_NEW (new_insn, -1);
11082 remove_insn (insn);
11083 continue;
11086 if (cfun_frame_layout.first_restore_gpr == -1
11087 && GET_CODE (pat) == SET
11088 && GENERAL_REG_P (SET_DEST (pat))
11089 && GET_CODE (SET_SRC (pat)) == MEM)
11091 set = pat;
11092 first = REGNO (SET_DEST (set));
11093 offset = const0_rtx;
11094 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
11095 off = INTVAL (offset);
11097 if (GET_CODE (base) != REG || off < 0)
11098 continue;
11100 if (REGNO (base) != STACK_POINTER_REGNUM
11101 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11102 continue;
11104 remove_insn (insn);
11105 continue;
11110 /* On z10 and later the dynamic branch prediction must see the
11111 backward jump within a certain window. If not, it falls back to
11112 the static prediction. This function rearranges the loop backward
11113 branch in a way which makes the static prediction always correct.
11114 The function returns true if it added an instruction. */
11115 static bool
11116 s390_fix_long_loop_prediction (rtx_insn *insn)
11118 rtx set = single_set (insn);
11119 rtx code_label, label_ref, new_label;
11120 rtx_insn *uncond_jump;
11121 rtx_insn *cur_insn;
11122 rtx tmp;
11123 int distance;
11125 /* This will exclude branch on count and branch on index patterns
11126 since these are correctly statically predicted. */
11127 if (!set
11128 || SET_DEST (set) != pc_rtx
11129 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
11130 return false;
11132 /* Skip conditional returns. */
11133 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
11134 && XEXP (SET_SRC (set), 2) == pc_rtx)
11135 return false;
11137 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
11138 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
11140 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
11142 code_label = XEXP (label_ref, 0);
11144 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
11145 || INSN_ADDRESSES (INSN_UID (insn)) == -1
11146 || (INSN_ADDRESSES (INSN_UID (insn))
11147 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
11148 return false;
11150 for (distance = 0, cur_insn = PREV_INSN (insn);
11151 distance < PREDICT_DISTANCE - 6;
11152 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
11153 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
11154 return false;
11156 new_label = gen_label_rtx ();
11157 uncond_jump = emit_jump_insn_after (
11158 gen_rtx_SET (VOIDmode, pc_rtx,
11159 gen_rtx_LABEL_REF (VOIDmode, code_label)),
11160 insn);
11161 emit_label_after (new_label, uncond_jump);
11163 tmp = XEXP (SET_SRC (set), 1);
11164 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
11165 XEXP (SET_SRC (set), 2) = tmp;
11166 INSN_CODE (insn) = -1;
11168 XEXP (label_ref, 0) = new_label;
11169 JUMP_LABEL (insn) = new_label;
11170 JUMP_LABEL (uncond_jump) = code_label;
11172 return true;
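
/* Sketch of the rewrite for a conditional loop branch whose backward
   target is more than PREDICT_DISTANCE away:

     before:                      after:
       jne   .Lloop_head            je    .Lnew       (inverted, short forward)
       ...                          j     .Lloop_head
                                  .Lnew:
                                    ...

   The forward conditional branch is statically predicted not taken and
   the unconditional backward jump needs no prediction, so the static
   predictor handles the loop correctly.  */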
11175 /* Returns 1 if INSN reads the value of REG for purposes not related
11176 to addressing of memory, and 0 otherwise. */
11177 static int
11178 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
11180 return reg_referenced_p (reg, PATTERN (insn))
11181 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
11184 /* Starting from INSN find_cond_jump looks downwards in the insn
11185 stream for a single jump insn which is the last user of the
11186 condition code set in INSN. */
11187 static rtx_insn *
11188 find_cond_jump (rtx_insn *insn)
11190 for (; insn; insn = NEXT_INSN (insn))
11192 rtx ite, cc;
11194 if (LABEL_P (insn))
11195 break;
11197 if (!JUMP_P (insn))
11199 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
11200 break;
11201 continue;
11204 /* This will be triggered by a return. */
11205 if (GET_CODE (PATTERN (insn)) != SET)
11206 break;
11208 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
11209 ite = SET_SRC (PATTERN (insn));
11211 if (GET_CODE (ite) != IF_THEN_ELSE)
11212 break;
11214 cc = XEXP (XEXP (ite, 0), 0);
11215 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
11216 break;
11218 if (find_reg_note (insn, REG_DEAD, cc))
11219 return insn;
11220 break;
11223 return NULL;
11226 /* Swap the condition in COND and the operands in OP0 and OP1 so that
11227 the semantics does not change. If NULL_RTX is passed as COND the
11228 function tries to find the conditional jump starting with INSN. */
11229 static void
11230 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
11232 rtx tmp = *op0;
11234 if (cond == NULL_RTX)
11236 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
11237 rtx set = jump ? single_set (jump) : NULL_RTX;
11239 if (set == NULL_RTX)
11240 return;
11242 cond = XEXP (SET_SRC (set), 0);
11245 *op0 = *op1;
11246 *op1 = tmp;
11247 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
11250 /* On z10, instructions of the compare-and-branch family have the
11251 property of accessing the register occurring as the second operand with
11252 its bits complemented. If such a compare is grouped with a second
11253 instruction that accesses the same register non-complemented, and
11254 if that register's value is delivered via a bypass, then the
11255 pipeline recycles, thereby causing significant performance decline.
11256 This function locates such situations and exchanges the two
11257 operands of the compare. The function returns true whenever it
11258 added an insn. */
11259 static bool
11260 s390_z10_optimize_cmp (rtx_insn *insn)
11262 rtx_insn *prev_insn, *next_insn;
11263 bool insn_added_p = false;
11264 rtx cond, *op0, *op1;
11266 if (GET_CODE (PATTERN (insn)) == PARALLEL)
11268 /* Handle compare and branch and branch on count
11269 instructions. */
11270 rtx pattern = single_set (insn);
11272 if (!pattern
11273 || SET_DEST (pattern) != pc_rtx
11274 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
11275 return false;
11277 cond = XEXP (SET_SRC (pattern), 0);
11278 op0 = &XEXP (cond, 0);
11279 op1 = &XEXP (cond, 1);
11281 else if (GET_CODE (PATTERN (insn)) == SET)
11283 rtx src, dest;
11285 /* Handle normal compare instructions. */
11286 src = SET_SRC (PATTERN (insn));
11287 dest = SET_DEST (PATTERN (insn));
11289 if (!REG_P (dest)
11290 || !CC_REGNO_P (REGNO (dest))
11291 || GET_CODE (src) != COMPARE)
11292 return false;
11294 /* s390_swap_cmp will try to find the conditional
11295 jump when passing NULL_RTX as condition. */
11296 cond = NULL_RTX;
11297 op0 = &XEXP (src, 0);
11298 op1 = &XEXP (src, 1);
11300 else
11301 return false;
11303 if (!REG_P (*op0) || !REG_P (*op1))
11304 return false;
11306 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
11307 return false;
11309 /* Swap the COMPARE arguments and its mask if there is a
11310 conflicting access in the previous insn. */
11311 prev_insn = prev_active_insn (insn);
11312 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11313 && reg_referenced_p (*op1, PATTERN (prev_insn)))
11314 s390_swap_cmp (cond, op0, op1, insn);
11316 /* Check if there is a conflict with the next insn. If there
11317 was no conflict with the previous insn, then swap the
11318 COMPARE arguments and its mask. If we already swapped
11319 the operands, or if swapping them would cause a conflict
11320 with the previous insn, issue a NOP after the COMPARE in
11321 order to separate the two instructions. */
11322 next_insn = next_active_insn (insn);
11323 if (next_insn != NULL_RTX && INSN_P (next_insn)
11324 && s390_non_addr_reg_read_p (*op1, next_insn))
11326 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11327 && s390_non_addr_reg_read_p (*op0, prev_insn))
11329 if (REGNO (*op1) == 0)
11330 emit_insn_after (gen_nop1 (), insn);
11331 else
11332 emit_insn_after (gen_nop (), insn);
11333 insn_added_p = true;
11335 else
11336 s390_swap_cmp (cond, op0, op1, insn);
11338 return insn_added_p;
11341 /* Perform machine-dependent processing. */
11343 static void
11344 s390_reorg (void)
11346 bool pool_overflow = false;
11347 int hw_before, hw_after;
11349 /* Make sure all splits have been performed; splits after
11350 machine_dependent_reorg might confuse insn length counts. */
11351 split_all_insns_noflow ();
11353 /* Install the main literal pool and the associated base
11354 register load insns.
11356 In addition, there are two problematic situations we need
11357 to correct:
11359 - the literal pool might be > 4096 bytes in size, so that
11360 some of its elements cannot be directly accessed
11362 - a branch target might be > 64K away from the branch, so that
11363 it is not possible to use a PC-relative instruction.
11365 To fix those, we split the single literal pool into multiple
11366 pool chunks, reloading the pool base register at various
11367 points throughout the function to ensure it always points to
11368 the pool chunk the following code expects, and / or replace
11369 PC-relative branches by absolute branches.
11371 However, the two problems are interdependent: splitting the
11372 literal pool can move a branch further away from its target,
11373 causing the 64K limit to overflow, and on the other hand,
11374 replacing a PC-relative branch by an absolute branch means
11375 we need to put the branch target address into the literal
11376 pool, possibly causing it to overflow.
11378 So, we loop trying to fix up both problems until we manage
11379 to satisfy both conditions at the same time. Note that the
11380 loop is guaranteed to terminate as every pass of the loop
11381 strictly decreases the total number of PC-relative branches
11382 in the function. (This is not completely true as there
11383 might be branch-over-pool insns introduced by chunkify_start.
11384 Those never need to be split however.) */
11386 for (;;)
11388 struct constant_pool *pool = NULL;
11390 /* Collect the literal pool. */
11391 if (!pool_overflow)
11393 pool = s390_mainpool_start ();
11394 if (!pool)
11395 pool_overflow = true;
11398 /* If literal pool overflowed, start to chunkify it. */
11399 if (pool_overflow)
11400 pool = s390_chunkify_start ();
11402 /* Split out-of-range branches. If this has created new
11403 literal pool entries, cancel current chunk list and
11404 recompute it. zSeries machines have large branch
11405 instructions, so we never need to split a branch. */
11406 if (!TARGET_CPU_ZARCH && s390_split_branches ())
11408 if (pool_overflow)
11409 s390_chunkify_cancel (pool);
11410 else
11411 s390_mainpool_cancel (pool);
11413 continue;
11416 /* If we made it up to here, both conditions are satisfied.
11417 Finish up literal pool related changes. */
11418 if (pool_overflow)
11419 s390_chunkify_finish (pool);
11420 else
11421 s390_mainpool_finish (pool);
11423 /* We're done splitting branches. */
11424 cfun->machine->split_branches_pending_p = false;
11425 break;
11428 /* Generate out-of-pool execute target insns. */
11429 if (TARGET_CPU_ZARCH)
11431 rtx_insn *insn, *target;
11432 rtx label;
11434 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11436 label = s390_execute_label (insn);
11437 if (!label)
11438 continue;
11440 gcc_assert (label != const0_rtx);
11442 target = emit_label (XEXP (label, 0));
11443 INSN_ADDRESSES_NEW (target, -1);
11445 target = emit_insn (s390_execute_target (insn));
11446 INSN_ADDRESSES_NEW (target, -1);
11450 /* Try to optimize prologue and epilogue further. */
11451 s390_optimize_prologue ();
11453 /* Walk over the insns and do some >=z10 specific changes. */
11454 if (s390_tune == PROCESSOR_2097_Z10
11455 || s390_tune == PROCESSOR_2817_Z196
11456 || s390_tune == PROCESSOR_2827_ZEC12)
11458 rtx_insn *insn;
11459 bool insn_added_p = false;
11461 /* The insn lengths and addresses have to be up to date for the
11462 following manipulations. */
11463 shorten_branches (get_insns ());
11465 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11467 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
11468 continue;
11470 if (JUMP_P (insn))
11471 insn_added_p |= s390_fix_long_loop_prediction (insn);
11473 if ((GET_CODE (PATTERN (insn)) == PARALLEL
11474 || GET_CODE (PATTERN (insn)) == SET)
11475 && s390_tune == PROCESSOR_2097_Z10)
11476 insn_added_p |= s390_z10_optimize_cmp (insn);
11479 /* Adjust branches if we added new instructions. */
11480 if (insn_added_p)
11481 shorten_branches (get_insns ());
11484 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
11485 if (hw_after > 0)
11487 rtx_insn *insn;
11489 /* Insert NOPs for hotpatching. */
11490 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11492 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
11493 break;
11495 gcc_assert (insn);
11496 /* Output a series of NOPs after the NOTE_INSN_FUNCTION_BEG. */
11497 while (hw_after > 0)
11499 if (hw_after >= 3 && TARGET_CPU_ZARCH)
11501 insn = emit_insn_after (gen_nop_6_byte (), insn);
11502 hw_after -= 3;
11504 else if (hw_after >= 2)
11506 insn = emit_insn_after (gen_nop_4_byte (), insn);
11507 hw_after -= 2;
11509 else
11511 insn = emit_insn_after (gen_nop_2_byte (), insn);
11512 hw_after -= 1;
11515 gcc_assert (hw_after == 0);
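
/* Example (a sketch): with hw_after == 5, i.e. -mhotpatch=n,5, the loop
   above emits one 6-byte NOP (3 halfwords) followed by one 4-byte NOP
   (2 halfwords) right after NOTE_INSN_FUNCTION_BEG, giving the requested
   10 bytes of patchable space inside the function.  */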
11519 /* Return true if INSN is an fp load insn writing register REGNO. */
11520 static inline bool
11521 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
11523 rtx set;
11524 enum attr_type flag = s390_safe_attr_type (insn);
11526 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
11527 return false;
11529 set = single_set (insn);
11531 if (set == NULL_RTX)
11532 return false;
11534 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
11535 return false;
11537 if (REGNO (SET_DEST (set)) != regno)
11538 return false;
11540 return true;
11543 /* This value describes the distance to be avoided between an
11544 arithmetic fp instruction and an fp load writing the same register.
11545 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
11546 fine, but the exact value has to be avoided. Otherwise the FP
11547 pipeline will throw an exception causing a major penalty. */
11548 #define Z10_EARLYLOAD_DISTANCE 7
11550 /* Rearrange the ready list in order to avoid the situation described
11551 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
11552 moved to the very end of the ready list. */
11553 static void
11554 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
11556 unsigned int regno;
11557 int nready = *nready_p;
11558 rtx_insn *tmp;
11559 int i;
11560 rtx_insn *insn;
11561 rtx set;
11562 enum attr_type flag;
11563 int distance;
11565 /* Skip DISTANCE - 1 active insns. */
11566 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
11567 distance > 0 && insn != NULL_RTX;
11568 distance--, insn = prev_active_insn (insn))
11569 if (CALL_P (insn) || JUMP_P (insn))
11570 return;
11572 if (insn == NULL_RTX)
11573 return;
11575 set = single_set (insn);
11577 if (set == NULL_RTX || !REG_P (SET_DEST (set))
11578 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
11579 return;
11581 flag = s390_safe_attr_type (insn);
11583 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
11584 return;
11586 regno = REGNO (SET_DEST (set));
11587 i = nready - 1;
11589 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
11590 i--;
11592 if (!i)
11593 return;
11595 tmp = ready[i];
11596 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
11597 ready[0] = tmp;
11601 /* The s390_sched_state variable tracks the state of the current or
11602 the last instruction group.
11604 0,1,2 number of instructions scheduled in the current group
11605 3 the last group is complete - normal insns
11606 4 the last group was a cracked/expanded insn */
11608 static int s390_sched_state;
11610 #define S390_OOO_SCHED_STATE_NORMAL 3
11611 #define S390_OOO_SCHED_STATE_CRACKED 4
11613 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
11614 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
11615 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
11616 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
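
/* Walk-through of the state machine (a sketch): starting from state 0,
   three "normal" insns advance the state 0 -> 1 -> 2 -> 3
   (S390_OOO_SCHED_STATE_NORMAL, group complete).  Issuing a cracked or
   expanded insn instead moves the state to S390_OOO_SCHED_STATE_CRACKED.
   The transitions are performed in s390_sched_variable_issue below, and
   s390_sched_score uses the current state to prefer keeping further
   cracked insns together before returning to normal grouping.  */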
11618 static unsigned int
11619 s390_get_sched_attrmask (rtx_insn *insn)
11621 unsigned int mask = 0;
11623 if (get_attr_ooo_cracked (insn))
11624 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
11625 if (get_attr_ooo_expanded (insn))
11626 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
11627 if (get_attr_ooo_endgroup (insn))
11628 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
11629 if (get_attr_ooo_groupalone (insn))
11630 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
11631 return mask;
11634 /* Return the scheduling score for INSN. The higher the score the
11635 better. The score is calculated from the OOO scheduling attributes
11636 of INSN and the scheduling state s390_sched_state. */
11637 static int
11638 s390_sched_score (rtx_insn *insn)
11640 unsigned int mask = s390_get_sched_attrmask (insn);
11641 int score = 0;
11643 switch (s390_sched_state)
11645 case 0:
11646 /* Try to put insns into the first slot which would otherwise
11647 break a group. */
11648 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
11649 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
11650 score += 5;
11651 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
11652 score += 10;
11653 case 1:
11654 /* Prefer not cracked insns while trying to put together a
11655 group. */
11656 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
11657 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
11658 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
11659 score += 10;
11660 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
11661 score += 5;
11662 break;
11663 case 2:
11664 /* Prefer not cracked insns while trying to put together a
11665 group. */
11666 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
11667 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
11668 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
11669 score += 10;
11670 /* Prefer endgroup insns in the last slot. */
11671 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
11672 score += 10;
11673 break;
11674 case S390_OOO_SCHED_STATE_NORMAL:
11675 /* Prefer not cracked insns if the last was not cracked. */
11676 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
11677 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
11678 score += 5;
11679 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
11680 score += 10;
11681 break;
11682 case S390_OOO_SCHED_STATE_CRACKED:
11683 /* Try to keep cracked insns together to prevent them from
11684 interrupting groups. */
11685 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
11686 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
11687 score += 5;
11688 break;
11690 return score;
11693 /* This function is called via hook TARGET_SCHED_REORDER before
11694 issuing one insn from list READY which contains *NREADYP entries.
11695 For target z10 it reorders load instructions to avoid early load
11696 conflicts in the floating point pipeline. */
11697 static int
11698 s390_sched_reorder (FILE *file, int verbose,
11699 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
11701 if (s390_tune == PROCESSOR_2097_Z10)
11702 if (reload_completed && *nreadyp > 1)
11703 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
11705 if (s390_tune == PROCESSOR_2827_ZEC12
11706 && reload_completed
11707 && *nreadyp > 1)
11709 int i;
11710 int last_index = *nreadyp - 1;
11711 int max_index = -1;
11712 int max_score = -1;
11713 rtx_insn *tmp;
11715 /* Just move the insn with the highest score to the top (the
11716 end) of the list. A full sort is not needed since a conflict
11717 in the hazard recognition cannot happen. So the top insn in
11718 the ready list will always be taken. */
11719 for (i = last_index; i >= 0; i--)
11721 int score;
11723 if (recog_memoized (ready[i]) < 0)
11724 continue;
11726 score = s390_sched_score (ready[i]);
11727 if (score > max_score)
11729 max_score = score;
11730 max_index = i;
11734 if (max_index != -1)
11736 if (max_index != last_index)
11738 tmp = ready[max_index];
11739 ready[max_index] = ready[last_index];
11740 ready[last_index] = tmp;
11742 if (verbose > 5)
11743 fprintf (file,
11744 "move insn %d to the top of list\n",
11745 INSN_UID (ready[last_index]));
11747 else if (verbose > 5)
11748 fprintf (file,
11749 "best insn %d already on top\n",
11750 INSN_UID (ready[last_index]));
11753 if (verbose > 5)
11755 fprintf (file, "ready list ooo attributes - sched state: %d\n",
11756 s390_sched_state);
11758 for (i = last_index; i >= 0; i--)
11760 if (recog_memoized (ready[i]) < 0)
11761 continue;
11762 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
11763 s390_sched_score (ready[i]));
11764 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
11765 PRINT_OOO_ATTR (ooo_cracked);
11766 PRINT_OOO_ATTR (ooo_expanded);
11767 PRINT_OOO_ATTR (ooo_endgroup);
11768 PRINT_OOO_ATTR (ooo_groupalone);
11769 #undef PRINT_OOO_ATTR
11770 fprintf (file, "\n");
11775 return s390_issue_rate ();
11779 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
11780 the scheduler has issued INSN. It stores the last issued insn into
11781 last_scheduled_insn in order to make it available for
11782 s390_sched_reorder. */
11783 static int
11784 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
11786 last_scheduled_insn = insn;
11788 if (s390_tune == PROCESSOR_2827_ZEC12
11789 && reload_completed
11790 && recog_memoized (insn) >= 0)
11792 unsigned int mask = s390_get_sched_attrmask (insn);
11794 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
11795 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
11796 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
11797 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
11798 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
11799 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
11800 else
11802 /* Only normal insns are left (mask == 0). */
11803 switch (s390_sched_state)
11805 case 0:
11806 case 1:
11807 case 2:
11808 case S390_OOO_SCHED_STATE_NORMAL:
11809 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
11810 s390_sched_state = 1;
11811 else
11812 s390_sched_state++;
11814 break;
11815 case S390_OOO_SCHED_STATE_CRACKED:
11816 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
11817 break;
11820 if (verbose > 5)
11822 fprintf (file, "insn %d: ", INSN_UID (insn));
11823 #define PRINT_OOO_ATTR(ATTR) \
11824 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
11825 PRINT_OOO_ATTR (ooo_cracked);
11826 PRINT_OOO_ATTR (ooo_expanded);
11827 PRINT_OOO_ATTR (ooo_endgroup);
11828 PRINT_OOO_ATTR (ooo_groupalone);
11829 #undef PRINT_OOO_ATTR
11830 fprintf (file, "\n");
11831 fprintf (file, "sched state: %d\n", s390_sched_state);
11835 if (GET_CODE (PATTERN (insn)) != USE
11836 && GET_CODE (PATTERN (insn)) != CLOBBER)
11837 return more - 1;
11838 else
11839 return more;
11842 static void
11843 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
11844 int verbose ATTRIBUTE_UNUSED,
11845 int max_ready ATTRIBUTE_UNUSED)
11847 last_scheduled_insn = NULL;
11848 s390_sched_state = 0;
11851 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
11852 a new unroll count for struct loop *loop when tuning for cpus with
11853 a built-in stride prefetcher.
11854 The loop is analyzed for memory accesses by calling check_dpu for
11855 each rtx of the loop. Depending on the loop_depth and the number of
11856 memory accesses, a new count <= nunroll is returned to improve the
11857 behaviour of the hardware prefetch unit. */
11858 static unsigned
11859 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
11861 basic_block *bbs;
11862 rtx_insn *insn;
11863 unsigned i;
11864 unsigned mem_count = 0;
11866 if (s390_tune != PROCESSOR_2097_Z10
11867 && s390_tune != PROCESSOR_2817_Z196
11868 && s390_tune != PROCESSOR_2827_ZEC12)
11869 return nunroll;
11871 /* Count the number of memory references within the loop body. */
11872 bbs = get_loop_body (loop);
11873 subrtx_iterator::array_type array;
11874 for (i = 0; i < loop->num_nodes; i++)
11875 FOR_BB_INSNS (bbs[i], insn)
11876 if (INSN_P (insn) && INSN_CODE (insn) != -1)
11877 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
11878 if (MEM_P (*iter))
11879 mem_count += 1;
11880 free (bbs);
11882 /* Prevent division by zero; nunroll does not need adjusting in this case. */
11883 if (mem_count == 0)
11884 return nunroll;
11886 switch (loop_depth(loop))
11888 case 1:
11889 return MIN (nunroll, 28 / mem_count);
11890 case 2:
11891 return MIN (nunroll, 22 / mem_count);
11892 default:
11893 return MIN (nunroll, 16 / mem_count);
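
/* Worked example (a sketch): a depth-1 loop containing 4 memory
   references with nunroll == 8 yields MIN (8, 28 / 4) == 7, so the
   unroll factor is capped at 7 to keep the stride prefetcher
   effective.  */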
11897 static void
11898 s390_option_override (void)
11900 unsigned int i;
11901 cl_deferred_option *opt;
11902 vec<cl_deferred_option> *v =
11903 (vec<cl_deferred_option> *) s390_deferred_options;
11905 if (v)
11906 FOR_EACH_VEC_ELT (*v, i, opt)
11908 switch (opt->opt_index)
11910 case OPT_mhotpatch_:
11912 int val1;
11913 int val2;
11914 char s[256];
11915 char *t;
11917 strncpy (s, opt->arg, 256);
11918 s[255] = 0;
11919 t = strchr (s, ',');
11920 if (t != NULL)
11922 *t = 0;
11923 t++;
11924 val1 = integral_argument (s);
11925 val2 = integral_argument (t);
11927 else
11929 val1 = -1;
11930 val2 = -1;
11932 if (val1 == -1 || val2 == -1)
11934 /* Argument is not a plain number. */
11935 error ("arguments to %qs should be non-negative integers",
11936 "-mhotpatch=n,m");
11937 break;
11939 else if (val1 > s390_hotpatch_hw_max
11940 || val2 > s390_hotpatch_hw_max)
11942 error ("argument to %qs is too large (max. %d)",
11943 "-mhotpatch=n,m", s390_hotpatch_hw_max);
11944 break;
11946 s390_hotpatch_hw_before_label = val1;
11947 s390_hotpatch_hw_after_label = val2;
11948 break;
11950 default:
11951 gcc_unreachable ();
11955 /* Set up function hooks. */
11956 init_machine_status = s390_init_machine_status;
11958 /* Architecture mode defaults according to ABI. */
11959 if (!(target_flags_explicit & MASK_ZARCH))
11961 if (TARGET_64BIT)
11962 target_flags |= MASK_ZARCH;
11963 else
11964 target_flags &= ~MASK_ZARCH;
11967 /* Set the march default in case it hasn't been specified on
11968 cmdline. */
11969 if (s390_arch == PROCESSOR_max)
11971 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
11972 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
11973 s390_arch_flags = processor_flags_table[(int)s390_arch];
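
/* In other words (a sketch): plain -m64 defaults to -mzarch with
   -march=z900, while plain -m31 defaults to ESA mode with -march=g5,
   unless the user specified -march/-mzarch/-mesa explicitly.  */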
11976 /* Determine processor to tune for. */
11977 if (s390_tune == PROCESSOR_max)
11979 s390_tune = s390_arch;
11980 s390_tune_flags = s390_arch_flags;
11983 /* Sanity checks. */
11984 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
11985 error ("z/Architecture mode not supported on %s", s390_arch_string);
11986 if (TARGET_64BIT && !TARGET_ZARCH)
11987 error ("64-bit ABI not supported in ESA/390 mode");
11989 /* Use hardware DFP if available and not explicitly disabled by
11990 user. E.g. with -m31 -march=z10 -mzarch */
11991 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
11992 target_flags |= MASK_HARD_DFP;
11994 /* Enable hardware transactions if available and not explicitly
11995 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
11996 if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
11997 target_flags |= MASK_OPT_HTM;
11999 if (TARGET_HARD_DFP && !TARGET_DFP)
12001 if (target_flags_explicit & MASK_HARD_DFP)
12003 if (!TARGET_CPU_DFP)
12004 error ("hardware decimal floating point instructions"
12005 " not available on %s", s390_arch_string);
12006 if (!TARGET_ZARCH)
12007 error ("hardware decimal floating point instructions"
12008 " not available in ESA/390 mode");
12010 else
12011 target_flags &= ~MASK_HARD_DFP;
12014 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
12016 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
12017 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
12019 target_flags &= ~MASK_HARD_DFP;
12022 /* Set processor cost function. */
12023 switch (s390_tune)
12025 case PROCESSOR_2084_Z990:
12026 s390_cost = &z990_cost;
12027 break;
12028 case PROCESSOR_2094_Z9_109:
12029 s390_cost = &z9_109_cost;
12030 break;
12031 case PROCESSOR_2097_Z10:
12032 s390_cost = &z10_cost;
12033 break;
12034 case PROCESSOR_2817_Z196:
12035 s390_cost = &z196_cost;
12036 break;
12037 case PROCESSOR_2827_ZEC12:
12038 s390_cost = &zEC12_cost;
12039 break;
12040 default:
12041 s390_cost = &z900_cost;
12044 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
12045 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
12046 "in combination");
12048 if (s390_stack_size)
12050 if (s390_stack_guard >= s390_stack_size)
12051 error ("stack size must be greater than the stack guard value");
12052 else if (s390_stack_size > 1 << 16)
12053 error ("stack size must not be greater than 64k");
12055 else if (s390_stack_guard)
12056 error ("-mstack-guard implies use of -mstack-size");
12058 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
12059 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
12060 target_flags |= MASK_LONG_DOUBLE_128;
12061 #endif
12063 if (s390_tune == PROCESSOR_2097_Z10
12064 || s390_tune == PROCESSOR_2817_Z196
12065 || s390_tune == PROCESSOR_2827_ZEC12)
12067 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
12068 global_options.x_param_values,
12069 global_options_set.x_param_values);
12070 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
12071 global_options.x_param_values,
12072 global_options_set.x_param_values);
12073 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
12074 global_options.x_param_values,
12075 global_options_set.x_param_values);
12076 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
12077 global_options.x_param_values,
12078 global_options_set.x_param_values);
12081 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
12082 global_options.x_param_values,
12083 global_options_set.x_param_values);
12084 /* Values for loop prefetching. */
12085 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
12086 global_options.x_param_values,
12087 global_options_set.x_param_values);
12088 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
12089 global_options.x_param_values,
12090 global_options_set.x_param_values);
12091 /* s390 has more than 2 cache levels and the sizes are much larger. Since
12092 we are always running virtualized, assume that we only get a small
12093 part of the caches above L1. */
12094 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
12095 global_options.x_param_values,
12096 global_options_set.x_param_values);
12097 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
12098 global_options.x_param_values,
12099 global_options_set.x_param_values);
12100 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
12101 global_options.x_param_values,
12102 global_options_set.x_param_values);
12104 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
12105 requires the arch flags to be evaluated already. Since prefetching
12106 is beneficial on s390, we enable it if available. */
12107 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
12108 flag_prefetch_loop_arrays = 1;
12110 /* Use the alternative scheduling-pressure algorithm by default. */
12111 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
12112 global_options.x_param_values,
12113 global_options_set.x_param_values);
  if (TARGET_TPF)
    {
      /* Don't emit DWARF3/4 unless specifically selected.  The TPF
	 debuggers do not yet support DWARF 3/4.  */
      if (!global_options_set.x_dwarf_strict)
	dwarf_strict = 1;
      if (!global_options_set.x_dwarf_version)
	dwarf_version = 2;
    }

  /* Register a target-specific optimization-and-lowering pass
     to run immediately before prologue and epilogue generation.

     Registering the pass must be done at start up.  It's
     convenient to do it here.  */
  opt_pass *new_pass = new pass_s390_early_mach (g);
  struct register_pass_info insert_pass_s390_early_mach =
    {
      new_pass,			/* pass */
      "pro_and_epilogue",	/* reference_pass_name */
      1,			/* ref_pass_instance_number */
      PASS_POS_INSERT_BEFORE	/* po_op */
    };
  register_pass (&insert_pass_s390_early_mach);
}
/* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.  */

static bool
s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
				     unsigned int align ATTRIBUTE_UNUSED,
				     enum by_pieces_operation op ATTRIBUTE_UNUSED,
				     bool speed_p ATTRIBUTE_UNUSED)
{
  return (size == 1 || size == 2
	  || size == 4 || (TARGET_ZARCH && size == 8));
}
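/* Illustrative sketch, not part of the original source: the middle end
   consults this hook when deciding whether a small, constant-size block
   copy or clear should be expanded inline ("by pieces") rather than via a
   library call.  Assuming TARGET_ZARCH, a copy such as

     struct s { char buf[8]; };
     void copy (struct s *dst, const struct s *src) { *dst = *src; }

   involves a size-8 block move, for which the hook above returns true, so
   the copy may be expanded as a single 8-byte access instead of a call to
   memcpy.  */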
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  tree sfpc = s390_builtin_decls[S390_BUILTIN_S390_SFPC];
  tree efpc = s390_builtin_decls[S390_BUILTIN_S390_EFPC];
  tree call_efpc = build_call_expr (efpc, 0);
  tree fenv_var = create_tmp_var (unsigned_type_node);

#define FPC_EXCEPTION_MASK	 HOST_WIDE_INT_UC (0xf8000000)
#define FPC_FLAGS_MASK		 HOST_WIDE_INT_UC (0x00f80000)
#define FPC_DXC_MASK		 HOST_WIDE_INT_UC (0x0000ff00)
#define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
#define FPC_FLAGS_SHIFT		 HOST_WIDE_INT_UC (16)
#define FPC_DXC_SHIFT		 HOST_WIDE_INT_UC (8)

  /* Generates the equivalent of feholdexcept (&fenv_var)

     fenv_var = __builtin_s390_efpc ();
     __builtin_s390_sfpc (fenv_var & mask) */
  tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
  tree new_fpc =
    build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
	    build_int_cst (unsigned_type_node,
			   ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
			     FPC_EXCEPTION_MASK)));
  tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
  *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)

     __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
  new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
		    build_int_cst (unsigned_type_node,
				   ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
  *clear = build_call_expr (sfpc, 1, new_fpc);

  /* Generates the equivalent of feupdateenv (fenv_var)

     old_fpc = __builtin_s390_efpc ();
     __builtin_s390_sfpc (fenv_var);
     __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT);  */
  old_fpc = create_tmp_var (unsigned_type_node);
  tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
			       old_fpc, call_efpc);

  set_new_fpc = build_call_expr (sfpc, 1, fenv_var);

  tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
				  build_int_cst (unsigned_type_node,
						 FPC_FLAGS_MASK));
  raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
			     build_int_cst (unsigned_type_node,
					    FPC_FLAGS_SHIFT));
  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  raise_old_except = build_call_expr (atomic_feraiseexcept,
				      1, raise_old_except);

  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    store_old_fpc, set_new_fpc),
		    raise_old_except);

#undef FPC_EXCEPTION_MASK
#undef FPC_FLAGS_MASK
#undef FPC_DXC_MASK
#undef FPC_EXCEPTION_MASK_SHIFT
#undef FPC_FLAGS_SHIFT
#undef FPC_DXC_SHIFT
}
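/* Illustrative sketch, not from the original source: taken together, the
   hold/clear/update trees built above correspond roughly to the following
   C fragment wrapped around an atomic floating-point update:

     unsigned fenv_var = __builtin_s390_efpc ();            // *hold
     __builtin_s390_sfpc (fenv_var & ~(FPC_EXCEPTION_MASK
                                       | FPC_FLAGS_MASK | FPC_DXC_MASK));
     __builtin_s390_sfpc (__builtin_s390_efpc ()            // *clear
                          & ~(FPC_FLAGS_MASK | FPC_DXC_MASK));
     ... the atomic operation itself ...
     unsigned old_fpc = __builtin_s390_efpc ();             // *update
     __builtin_s390_sfpc (fenv_var);
     __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT);

   where the FPC_* masks are the values #defined (and #undefined) above.  */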
/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL s390_builtin_decl

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"